From 81fca31408d3ecf4f7de4c193e34c293e721446c Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Mon, 26 Sep 2022 23:20:40 +0200 Subject: [PATCH 01/40] Init v2 exports massa-graph. --- Cargo.lock | 11 ++++++++++- Cargo.toml | 1 + 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index ac544fb7b24..ebbdb9b9228 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1612,7 +1612,7 @@ dependencies = [ [[package]] name = "massa-sc-runtime" version = "0.6.9" -source = "git+https://github.com/massalabs/massa-sc-runtime?branch=datastore_abi_1#ee2e97d7408811607b90ad1c2c1178e617a3cc5d" +source = "git+https://github.com/massalabs/massa-sc-runtime?tag=v0.8.0#6209418a8038bd07332c8e539dcc64699343359f" dependencies = [ "anyhow", "as-ffi-bindings", @@ -1917,6 +1917,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "massa_graph_exports" +version = "0.1.0" +dependencies = [ + "displaydoc", + "serde 1.0.144", + "serde_json", +] + [[package]] name = "massa_hash" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 0f74f97e3dd..7ab6d5140bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "massa-execution-worker", "massa-factory-exports", "massa-factory-worker", + "massa-graph-2-exports", "massa-graph", "massa-hash", "massa-logging", From d4cd93ff1db76b5d391b8f8a81afc737704172ec Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 27 Sep 2022 12:37:20 +0200 Subject: [PATCH 02/40] Add skeleton of new graph. --- Cargo.lock | 16 +++- Cargo.toml | 1 + massa-api/src/public.rs | 13 +-- massa-graph-2-exports/Cargo.toml | 16 ++++ massa-graph-2-exports/src/controller_trait.rs | 61 ++++++++++++++ massa-graph-2-exports/src/lib.rs | 10 +++ massa-graph-2-exports/src/settings.rs | 4 + massa-graph-2-exports/src/types.rs | 4 + massa-graph-2-worker/Cargo.toml | 15 ++++ massa-graph-2-worker/src/commands.rs | 1 + massa-graph-2-worker/src/controller.rs | 82 +++++++++++++++++++ massa-graph-2-worker/src/lib.rs | 4 + massa-graph-2-worker/src/manager.rs | 13 +++ massa-graph-2-worker/src/worker.rs | 38 +++++++++ 14 files changed, 267 insertions(+), 11 deletions(-) create mode 100644 massa-graph-2-exports/Cargo.toml create mode 100644 massa-graph-2-exports/src/controller_trait.rs create mode 100644 massa-graph-2-exports/src/lib.rs create mode 100644 massa-graph-2-exports/src/settings.rs create mode 100644 massa-graph-2-exports/src/types.rs create mode 100644 massa-graph-2-worker/Cargo.toml create mode 100644 massa-graph-2-worker/src/commands.rs create mode 100644 massa-graph-2-worker/src/controller.rs create mode 100644 massa-graph-2-worker/src/lib.rs create mode 100644 massa-graph-2-worker/src/manager.rs create mode 100644 massa-graph-2-worker/src/worker.rs diff --git a/Cargo.lock b/Cargo.lock index ebbdb9b9228..8c0711c577f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1918,14 +1918,28 @@ dependencies = [ ] [[package]] -name = "massa_graph_exports" +name = "massa_graph_2_exports" version = "0.1.0" dependencies = [ "displaydoc", + "massa_graph", + "massa_models", + "massa_storage", "serde 1.0.144", "serde_json", ] +[[package]] +name = "massa_graph_2_worker" +version = "0.1.0" +dependencies = [ + "displaydoc", + "massa_graph", + "massa_graph_2_exports", + "massa_models", + "massa_storage", +] + [[package]] name = "massa_hash" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 7ab6d5140bb..ce5c152f2ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "massa-factory-exports", "massa-factory-worker", "massa-graph-2-exports", + "massa-graph-2-worker", 
"massa-graph", "massa-hash", "massa-logging", diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 0fb13567845..ff05d720681 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -26,6 +26,7 @@ use massa_protocol_exports::ProtocolCommandSender; use massa_serialization::{DeserializeError, Deserializer}; use itertools::{izip, Itertools}; +use massa_models::datastore::DatastoreDeserializer; use massa_models::{ address::Address, api::{ @@ -52,7 +53,6 @@ use massa_signature::KeyPair; use massa_storage::Storage; use massa_time::MassaTime; use std::net::{IpAddr, SocketAddr}; -use massa_models::datastore::DatastoreDeserializer; impl API { /// generate a new public API @@ -113,7 +113,6 @@ impl Endpoints for API { &self, reqs: Vec, ) -> BoxFuture, ApiError>> { - if reqs.len() as u64 > self.0.api_settings.max_arguments { let closure = async move || Err(ApiError::TooManyArguments("too many arguments".into())); @@ -141,16 +140,11 @@ impl Endpoints for API { Ok((_, deserialized)) => Some(deserialized), Err(e) => { let err_str = format!("Operation datastore error: {}", e); - let closure = - async move || { - Err(ApiError::InconsistencyError( - err_str - )) - }; + let closure = async move || Err(ApiError::InconsistencyError(err_str)); return Box::pin(closure()); } } - }, + } None => None, }; @@ -294,7 +288,6 @@ impl Endpoints for API { } fn get_status(&self) -> BoxFuture> { - let execution_controller = self.0.execution_controller.clone(); let consensus_command_sender = self.0.consensus_command_sender.clone(); let network_command_sender = self.0.network_command_sender.clone(); diff --git a/massa-graph-2-exports/Cargo.toml b/massa-graph-2-exports/Cargo.toml new file mode 100644 index 00000000000..758a8eab860 --- /dev/null +++ b/massa-graph-2-exports/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "massa_graph_2_exports" +version = "0.1.0" +authors = ["Massa Labs "] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +displaydoc = "0.2" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +#custom modules +massa_graph = { path = "../massa-graph" } +massa_models = { path = "../massa-models" } +massa_storage = { path = "../massa-storage" } \ No newline at end of file diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs new file mode 100644 index 00000000000..4dbd9155562 --- /dev/null +++ b/massa-graph-2-exports/src/controller_trait.rs @@ -0,0 +1,61 @@ +use massa_graph::{error::GraphResult, BlockGraphExport, BootstrapableGraph}; +use massa_models::{ + api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + slot::Slot, + stats::ConsensusStats, + wrapped::Wrapped, +}; +use massa_storage::Storage; + +/// interface that communicates with the graph worker thread +pub trait GraphController: Send + Sync { + fn get_block_graph_status( + &self, + start_slot: Option, + end_slot: Option, + ) -> GraphResult; + + fn get_block_statuses(&self, ids: Vec) -> BlockGraphStatus; + + fn get_cliques(&self) -> Vec; + + fn get_bootstrap_graph(&self) -> GraphResult; + + fn get_stats(&self) -> GraphResult; + + fn get_best_parents(&self) -> &Vec<(BlockId, u64)>; + + fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option; + + fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId; + + fn register_block( + &self, + block_id: BlockId, + slot: Slot, + block_storage: Storage, + ) -> GraphResult<()>; + + fn 
register_block_header(
+        &self,
+        block_id: BlockId,
+        header: Wrapped<BlockHeader, BlockId>,
+    ) -> GraphResult<()>;
+
+    fn mark_invalid_block(
+        &self,
+        block_id: BlockId,
+        header: Wrapped<BlockHeader, BlockId>,
+    ) -> GraphResult<()>;
+}
+
+/// Graph manager used to stop the graph thread
+pub trait GraphManager {
+    /// Stop the graph thread
+    /// Note that we do not take self by value to consume it
+    /// because it is not allowed to move out of Box<dyn GraphManager>
+    /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable.
+    fn stop(&mut self);
+}
diff --git a/massa-graph-2-exports/src/lib.rs b/massa-graph-2-exports/src/lib.rs
new file mode 100644
index 00000000000..4ee9bb4857a
--- /dev/null
+++ b/massa-graph-2-exports/src/lib.rs
@@ -0,0 +1,10 @@
+// Copyright (c) 2022 MASSA LABS <info@massa.net>
+//! Definition and exports of the graph types and errors.
+
+mod controller_trait;
+mod settings;
+mod types;
+
+pub use controller_trait::{GraphController, GraphManager};
+pub use settings::GraphConfig;
+pub use types::GraphChannels;
diff --git a/massa-graph-2-exports/src/settings.rs b/massa-graph-2-exports/src/settings.rs
new file mode 100644
index 00000000000..a0a906f4931
--- /dev/null
+++ b/massa-graph-2-exports/src/settings.rs
@@ -0,0 +1,4 @@
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct GraphConfig {}
diff --git a/massa-graph-2-exports/src/types.rs b/massa-graph-2-exports/src/types.rs
new file mode 100644
index 00000000000..8daa4a43da0
--- /dev/null
+++ b/massa-graph-2-exports/src/types.rs
@@ -0,0 +1,4 @@
+/// Contains graph channels associated protocol
+/// Contains a reference to the pool, selector and execution controller
+#[derive(Clone)]
+pub struct GraphChannels {}
diff --git a/massa-graph-2-worker/Cargo.toml b/massa-graph-2-worker/Cargo.toml
new file mode 100644
index 00000000000..559fd7f0251
--- /dev/null
+++ b/massa-graph-2-worker/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "massa_graph_2_worker"
+version = "0.1.0"
+authors = ["Massa Labs <info@massa.net>"]
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+displaydoc = "0.2"
+#custom modules
+massa_graph_2_exports = { path = "../massa-graph-2-exports" }
+massa_graph = { path = "../massa-graph" }
+massa_models = { path = "../massa-models" }
+massa_storage = { path = "../massa-storage" }
\ No newline at end of file
diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs
new file mode 100644
index 00000000000..cd99ff059b8
--- /dev/null
+++ b/massa-graph-2-worker/src/commands.rs
@@ -0,0 +1 @@
+pub enum GraphCommand {}
diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs
new file mode 100644
index 00000000000..97abc23f60c
--- /dev/null
+++ b/massa-graph-2-worker/src/controller.rs
@@ -0,0 +1,82 @@
+use massa_graph::{error::GraphResult, BlockGraphExport, BootstrapableGraph};
+use massa_graph_2_exports::GraphController;
+use massa_models::{
+    api::BlockGraphStatus,
+    block::{BlockHeader, BlockId},
+    clique::Clique,
+    slot::Slot,
+    stats::ConsensusStats,
+    wrapped::Wrapped,
+};
+use massa_storage::Storage;
+use std::sync::mpsc::SyncSender;
+
+use crate::commands::GraphCommand;
+
+#[derive(Clone)]
+pub struct GraphControllerImpl {
+    pub command_sender: SyncSender<GraphCommand>,
+}
+
+impl GraphController for GraphControllerImpl {
+    fn get_block_graph_status(
+        &self,
+        start_slot: Option<Slot>,
+        end_slot: Option<Slot>,
+    ) -> GraphResult<BlockGraphExport> {
+        todo!()
+    }
+
+    fn get_block_statuses(&self, ids: Vec<BlockId>) -> BlockGraphStatus {
+        todo!()
+    }
+
+    fn get_cliques(&self) -> Vec<Clique> {
+        todo!()
+    }
+
+    fn get_bootstrap_graph(&self) -> GraphResult<BootstrapableGraph> {
+        todo!()
+    }
+
+    fn get_stats(&self) -> GraphResult<ConsensusStats> {
+        todo!()
+    }
+
+    fn get_best_parents(&self) -> &Vec<(BlockId, u64)> {
+        todo!()
+    }
+
+    fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option<BlockId> {
+        todo!()
+    }
+
+    fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId {
+        todo!()
+    }
+
+    fn register_block(
+        &self,
+        block_id: BlockId,
+        slot: Slot,
+        block_storage: Storage,
+    ) -> GraphResult<()> {
+        todo!()
+    }
+
+    fn register_block_header(
+        &self,
+        block_id: BlockId,
+        header: Wrapped<BlockHeader, BlockId>,
+    ) -> GraphResult<()> {
+        todo!()
+    }
+
+    fn mark_invalid_block(
+        &self,
+        block_id: BlockId,
+        header: Wrapped<BlockHeader, BlockId>,
+    ) -> GraphResult<()> {
+        todo!()
+    }
+}
diff --git a/massa-graph-2-worker/src/lib.rs b/massa-graph-2-worker/src/lib.rs
new file mode 100644
index 00000000000..e8428bd900b
--- /dev/null
+++ b/massa-graph-2-worker/src/lib.rs
@@ -0,0 +1,4 @@
+mod commands;
+mod controller;
+mod manager;
+mod worker;
diff --git a/massa-graph-2-worker/src/manager.rs b/massa-graph-2-worker/src/manager.rs
new file mode 100644
index 00000000000..31a891c682a
--- /dev/null
+++ b/massa-graph-2-worker/src/manager.rs
@@ -0,0 +1,13 @@
+use massa_graph::error::GraphResult;
+use massa_graph_2_exports::GraphManager;
+use std::thread::JoinHandle;
+
+pub struct GraphManagerImpl {
+    pub thread_graph: JoinHandle<GraphResult<()>>,
+}
+
+impl GraphManager for GraphManagerImpl {
+    fn stop(&mut self) {
+        todo!()
+    }
+}
diff --git a/massa-graph-2-worker/src/worker.rs b/massa-graph-2-worker/src/worker.rs
new file mode 100644
index 00000000000..3833b113a49
--- /dev/null
+++ b/massa-graph-2-worker/src/worker.rs
@@ -0,0 +1,38 @@
+use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager};
+use std::sync::mpsc;
+use std::thread::spawn;
+
+use crate::commands::GraphCommand;
+use crate::controller::GraphControllerImpl;
+use crate::manager::GraphManagerImpl;
+
+pub struct GraphWorker {
+    pub command_receiver: mpsc::Receiver<GraphCommand>,
+}
+
+impl GraphWorker {
+    pub fn new(command_receiver: mpsc::Receiver<GraphCommand>) -> Self {
+        Self { command_receiver }
+    }
+}
+
+pub fn start_graph_worker(
+    config: GraphConfig,
+    channels: GraphChannels,
+    clock_compensation: i64,
+) -> (Box<dyn GraphController>, Box<dyn GraphManager>) {
+    let (tx, rx) = mpsc::sync_channel(10);
+
+    let graph_thread = spawn(move || {
+        let graph_worker = GraphWorker::new(rx);
+        todo!();
+    });
+
+    let manager = GraphManagerImpl {
+        thread_graph: graph_thread,
+    };
+
+    let controller = GraphControllerImpl { command_sender: tx };
+
+    (Box::new(controller), Box::new(manager))
+}

From 14fac1c0e22d3f7698f59358e00d54ede111c29e Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Tue, 27 Sep 2022 14:33:58 +0200
Subject: [PATCH 03/40] Add main loop of graph worker.
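The worker drives consensus off a slot clock: it computes the wall-clock instant
of the next slot from `thread_count`, `t0` and `genesis_timestamp`, then blocks
on its command channel until either a command arrives or that instant is
reached. A minimal, self-contained sketch of the waiting pattern (the `Cmd` enum
and the timings below are illustrative only, not part of this patch; the worker
itself uses the unstable `recv_deadline` behind `#![feature(deadline_api)]`,
approximated here with `recv_timeout` on stable):

    use std::sync::mpsc;
    use std::time::{Duration, Instant};

    enum Cmd {
        Stop,
    }

    fn main() {
        let (tx, rx) = mpsc::sync_channel::<Cmd>(10);
        // pretend the next slot fires 100 ms from now
        let deadline = Instant::now() + Duration::from_millis(100);
        std::thread::spawn(move || {
            let _ = tx.send(Cmd::Stop);
        });
        loop {
            // recv_timeout over the remaining time emulates recv_deadline
            let timeout = deadline.saturating_duration_since(Instant::now());
            match rx.recv_timeout(timeout) {
                // a command interrupts the wait before the slot fires
                Ok(Cmd::Stop) => break,
                // deadline reached: the per-slot work would run here
                Err(mpsc::RecvTimeoutError::Timeout) => break,
                // all senders dropped: shut the worker down
                Err(mpsc::RecvTimeoutError::Disconnected) => break,
            }
        }
    }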
--- Cargo.lock | 3 + massa-graph-2-exports/Cargo.toml | 3 +- massa-graph-2-exports/src/settings.rs | 8 +- massa-graph-2-exports/src/types.rs | 4 +- massa-graph-2-worker/Cargo.toml | 4 +- massa-graph-2-worker/src/lib.rs | 2 + massa-graph-2-worker/src/manager.rs | 2 +- massa-graph-2-worker/src/worker.rs | 128 +++++++++++++++++++++++--- 8 files changed, 137 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8c0711c577f..0f2805a343e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1925,6 +1925,7 @@ dependencies = [ "massa_graph", "massa_models", "massa_storage", + "massa_time", "serde 1.0.144", "serde_json", ] @@ -1938,6 +1939,8 @@ dependencies = [ "massa_graph_2_exports", "massa_models", "massa_storage", + "massa_time", + "tracing", ] [[package]] diff --git a/massa-graph-2-exports/Cargo.toml b/massa-graph-2-exports/Cargo.toml index 758a8eab860..8b5a9c051f6 100644 --- a/massa-graph-2-exports/Cargo.toml +++ b/massa-graph-2-exports/Cargo.toml @@ -13,4 +13,5 @@ serde_json = "1.0" #custom modules massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } -massa_storage = { path = "../massa-storage" } \ No newline at end of file +massa_storage = { path = "../massa-storage" } +massa_time = { path = "../massa-time" } diff --git a/massa-graph-2-exports/src/settings.rs b/massa-graph-2-exports/src/settings.rs index a0a906f4931..44fbd9c69e0 100644 --- a/massa-graph-2-exports/src/settings.rs +++ b/massa-graph-2-exports/src/settings.rs @@ -1,4 +1,10 @@ +use massa_time::MassaTime; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Deserialize, Serialize)] -pub struct GraphConfig {} +pub struct GraphConfig { + pub clock_compensation_millis: i64, + pub thread_count: u8, + pub genesis_timestamp: MassaTime, + pub t0: MassaTime, +} diff --git a/massa-graph-2-exports/src/types.rs b/massa-graph-2-exports/src/types.rs index 8daa4a43da0..b5a693f9481 100644 --- a/massa-graph-2-exports/src/types.rs +++ b/massa-graph-2-exports/src/types.rs @@ -1,4 +1,6 @@ -/// Contains graph channels associated protocol +use std::sync::mpsc::Receiver; + /// Contains a reference to the pool, selector and execution controller +/// Contains a channel to send info to protocol #[derive(Clone)] pub struct GraphChannels {} diff --git a/massa-graph-2-worker/Cargo.toml b/massa-graph-2-worker/Cargo.toml index 559fd7f0251..874ca688630 100644 --- a/massa-graph-2-worker/Cargo.toml +++ b/massa-graph-2-worker/Cargo.toml @@ -8,8 +8,10 @@ edition = "2021" [dependencies] displaydoc = "0.2" +tracing = "0.1" #custom modules massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } -massa_storage = { path = "../massa-storage" } \ No newline at end of file +massa_storage = { path = "../massa-storage" } +massa_time = { path = "../massa-time" } \ No newline at end of file diff --git a/massa-graph-2-worker/src/lib.rs b/massa-graph-2-worker/src/lib.rs index e8428bd900b..cf6d24f3457 100644 --- a/massa-graph-2-worker/src/lib.rs +++ b/massa-graph-2-worker/src/lib.rs @@ -1,3 +1,5 @@ +#![feature(deadline_api)] + mod commands; mod controller; mod manager; diff --git a/massa-graph-2-worker/src/manager.rs b/massa-graph-2-worker/src/manager.rs index 31a891c682a..58ff4ce3493 100644 --- a/massa-graph-2-worker/src/manager.rs +++ b/massa-graph-2-worker/src/manager.rs @@ -3,7 +3,7 @@ use massa_graph_2_exports::GraphManager; use std::thread::JoinHandle; pub struct GraphManagerImpl { - pub thread_graph: JoinHandle>, + pub 
thread_graph: JoinHandle<()>,
 }
 
 impl GraphManager for GraphManagerImpl {
     fn stop(&mut self) {
         todo!()
     }
 }
diff --git a/massa-graph-2-worker/src/worker.rs b/massa-graph-2-worker/src/worker.rs
index 3833b113a49..a022c7478b8 100644
--- a/massa-graph-2-worker/src/worker.rs
+++ b/massa-graph-2-worker/src/worker.rs
@@ -1,36 +1,140 @@
+use massa_graph::error::GraphResult;
 use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager};
+use massa_models::slot::Slot;
+use massa_models::timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp};
+use massa_time::MassaTime;
 use std::sync::mpsc;
-use std::thread::spawn;
+use std::thread;
+use std::time::Instant;
+use tracing::log::warn;
 
 use crate::commands::GraphCommand;
 use crate::controller::GraphControllerImpl;
 use crate::manager::GraphManagerImpl;
 
 pub struct GraphWorker {
-    pub command_receiver: mpsc::Receiver<GraphCommand>,
+    command_receiver: mpsc::Receiver<GraphCommand>,
+    config: GraphConfig,
+    channels: GraphChannels,
+}
+
+enum WaitingStatus {
+    Ended,
+    Interrupted,
+    Disconnected,
 }
 
 impl GraphWorker {
-    pub fn new(command_receiver: mpsc::Receiver<GraphCommand>) -> Self {
-        Self { command_receiver }
+    /// Wait until an instant is reached, or until a command or stop signal arrives.
+    ///
+    /// # Return value
+    /// Returns `WaitingStatus::Ended` if the deadline was reached,
+    /// `WaitingStatus::Interrupted` if a command was received before the deadline,
+    /// and `WaitingStatus::Disconnected` if the command channel was closed.
+    fn wait_slot_or_command(&self, deadline: Instant) -> WaitingStatus {
+        match self.command_receiver.recv_deadline(deadline) {
+            // message received => manage it
+            Ok(command) => {
+                // TODO: Manage it
+                WaitingStatus::Interrupted
+            }
+            // timeout => continue main loop
+            Err(mpsc::RecvTimeoutError::Timeout) => WaitingStatus::Ended,
+            // channel disconnected (sender dropped) => quit main loop
+            Err(mpsc::RecvTimeoutError::Disconnected) => WaitingStatus::Disconnected,
+        }
+    }
+
+    /// Gets the next slot and the instant when it will happen.
+    /// Slots can be skipped if we waited too much in-between.
+    /// Extra safety against double-production caused by clock adjustments (this is the role of the `previous_slot` parameter).
+    fn get_next_slot(&self, previous_slot: Option<Slot>) -> (Slot, Instant) {
+        // get current absolute time
+        let now = MassaTime::now(self.config.clock_compensation_millis)
+            .expect("could not get current time");
+
+        // get closest slot according to the current absolute time
+        let mut next_slot = get_closest_slot_to_timestamp(
+            self.config.thread_count,
+            self.config.t0,
+            self.config.genesis_timestamp,
+            now,
+        );
+
+        // ignore genesis
+        if next_slot.period == 0 {
+            next_slot.period = 1;
+        }
+
+        // protection against double-production on unexpected system clock adjustment
+        if let Some(prev_slot) = previous_slot {
+            if next_slot <= prev_slot {
+                next_slot = prev_slot
+                    .get_next_slot(self.config.thread_count)
+                    .expect("could not compute next slot");
+            }
+        }
+
+        // get the timestamp of the target slot
+        let next_instant = get_block_slot_timestamp(
+            self.config.thread_count,
+            self.config.t0,
+            self.config.genesis_timestamp,
+            next_slot,
+        )
+        .expect("could not get block slot timestamp")
+        .estimate_instant(self.config.clock_compensation_millis)
+        .expect("could not estimate block slot instant");
+
+        (next_slot, next_instant)
+    }
+
+    fn new(
+        command_receiver: mpsc::Receiver<GraphCommand>,
+        config: GraphConfig,
+        channels: GraphChannels,
+    ) -> Self {
+        Self {
+            command_receiver,
+            config,
+            channels,
+        }
+    }
+
+    fn run(&mut self) {
+        // TODO: Should we start from slot of final state after bootstrap ?
+        let prev_slot: Option<Slot> = None;
+        loop {
+            let (next_slot, next_instant) = self.get_next_slot(prev_slot);
+            match self.wait_slot_or_command(next_instant) {
+                WaitingStatus::Ended => {
+                    // TODO: Desync, stats, block_db_changed
+                }
+                WaitingStatus::Disconnected => {
+                    break;
+                }
+                WaitingStatus::Interrupted => {}
+            };
+        }
+    }
+}
 
 pub fn start_graph_worker(
     config: GraphConfig,
     channels: GraphChannels,
-    clock_compensation: i64,
 ) -> (Box<dyn GraphController>, Box<dyn GraphManager>) {
     let (tx, rx) = mpsc::sync_channel(10);
 
-    let graph_thread = spawn(move || {
-        let graph_worker = GraphWorker::new(rx);
-        todo!();
-    });
+    let thread_graph = thread::Builder::new()
+        .name("graph worker".into())
+        .spawn(move || {
+            let mut graph_worker = GraphWorker::new(rx, config, channels);
+            graph_worker.run()
+        })
+        .expect("Can't spawn thread graph.");
 
-    let manager = GraphManagerImpl {
-        thread_graph: graph_thread,
-    };
+    let manager = GraphManagerImpl { thread_graph };
 
     let controller = GraphControllerImpl { command_sender: tx };
 

From 171349e3065541f3c0ea64fff2660db9ad487763 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Tue, 27 Sep 2022 14:55:12 +0200
Subject: [PATCH 04/40] Fix main loop graph to avoid skipping slot when receiving command from protocol.

---
 massa-graph-2-worker/src/commands.rs | 21 ++++++++++++++++++++-
 massa-graph-2-worker/src/manager.rs  | 18 +++++++++++++++---
 massa-graph-2-worker/src/worker.rs   | 25 +++++++++++++++++------
 3 files changed, 54 insertions(+), 10 deletions(-)

diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs
index cd99ff059b8..60b3e269536 100644
--- a/massa-graph-2-worker/src/commands.rs
+++ b/massa-graph-2-worker/src/commands.rs
@@ -1 +1,20 @@
-pub enum GraphCommand {}
+use massa_models::{
+    block::{BlockHeader, BlockId},
+    slot::Slot,
+    wrapped::Wrapped,
+};
+use massa_storage::Storage;
+
+pub enum GraphCommand {
+    GetBlockGraphStatus(Option<Slot>, Option<Slot>),
+    GetBlockStatuses(Vec<BlockId>),
+    GetCliques,
+    GetBootstrapGraph,
+    GetStats,
+    GetBestParents,
+    GetBlockCliqueBlockAtSlot(Slot),
+    GetLatestBlockCliqueBlockAtSlot(Slot),
+    RegisterBlock(BlockId, Slot, Storage),
+    RegisterBlockHeader(BlockId, Wrapped<BlockHeader, BlockId>),
+    Stop,
+}
diff --git a/massa-graph-2-worker/src/manager.rs b/massa-graph-2-worker/src/manager.rs
index 58ff4ce3493..bbd2709fe90 100644
--- a/massa-graph-2-worker/src/manager.rs
+++ b/massa-graph-2-worker/src/manager.rs
@@ -1,13 +1,25 @@
 use massa_graph::error::GraphResult;
 use massa_graph_2_exports::GraphManager;
-use std::thread::JoinHandle;
+use std::{sync::mpsc::SyncSender, thread::JoinHandle};
+use tracing::log::info;
+
+use crate::commands::GraphCommand;
 
 pub struct GraphManagerImpl {
-    pub thread_graph: JoinHandle<()>,
+    pub thread_graph: Option<JoinHandle<()>>,
+    pub graph_command_sender: SyncSender<GraphCommand>,
 }
 
 impl GraphManager for GraphManagerImpl {
     fn stop(&mut self) {
-        todo!()
+        info!("stopping graph worker...");
+        let _ = self.graph_command_sender.send(GraphCommand::Stop);
+        // join the graph thread
+        if let Some(join_handle) = self.thread_graph.take() {
+            join_handle
+                .join()
+                .expect("graph thread panicked on try to join");
+        }
+        info!("graph worker stopped");
     }
 }
diff --git a/massa-graph-2-worker/src/worker.rs b/massa-graph-2-worker/src/worker.rs
index a022c7478b8..551131b01e8 100644
--- a/massa-graph-2-worker/src/worker.rs
+++ b/massa-graph-2-worker/src/worker.rs
@@ -25,6 +25,11 @@ enum WaitingStatus {
 }
 
 impl GraphWorker {
+    fn manage_command(&self, command: GraphCommand) -> GraphResult<()> {
+        //TODO: Manage
+        Ok(())
+    }
+
     /// Wait until an instant is reached, or until a command or stop signal arrives.
     ///
     /// # Return value
     /// Returns `WaitingStatus::Ended` if the deadline was reached,
     /// `WaitingStatus::Interrupted` if a command was received before the deadline,
     /// and `WaitingStatus::Disconnected` if the command channel was closed.
     fn wait_slot_or_command(&self, deadline: Instant) -> WaitingStatus {
         match self.command_receiver.recv_deadline(deadline) {
             // message received => manage it
             Ok(command) => {
-                // TODO: Manage it
+                match self.manage_command(command) {
+                    Err(err) => warn!("Error in graph: {}", err),
+                    Ok(()) => {}
+                };
                 WaitingStatus::Interrupted
             }
             // timeout => continue main loop
@@ -104,17 +112,19 @@ impl GraphWorker {
 
     fn run(&mut self) {
         // TODO: Should we start from slot of final state after bootstrap ?
         let prev_slot: Option<Slot> = None;
+        let (mut next_slot, mut next_instant) = self.get_next_slot(prev_slot);
         loop {
-            let (next_slot, next_instant) = self.get_next_slot(prev_slot);
             match self.wait_slot_or_command(next_instant) {
                 WaitingStatus::Ended => {
-                    // TODO: Desync, stats, block_db_changed
+                    //TODO: Desync, stats, block_db changed
+                    (next_slot, next_instant) = self.get_next_slot(prev_slot);
                 }
                 WaitingStatus::Disconnected => {
                     break;
                 }
-                WaitingStatus::Interrupted => {}
+                WaitingStatus::Interrupted => {
+                    continue;
+                }
             };
         }
     }
 }

From c5a64fc657fd420275b48cecae278ba866567429 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Tue, 27 Sep 2022 16:17:13 +0200
Subject: [PATCH 05/40] Add shared state in graph thread and graph controller.

---
 Cargo.lock                                    |  1 +
 .../src/{types.rs => channels.rs}             |  0
 massa-graph-2-exports/src/controller_trait.rs | 19 ++-------
 massa-graph-2-exports/src/lib.rs              |  6 ++-
 massa-graph-2-exports/src/state.rs            |  1 +
 massa-graph-2-worker/Cargo.toml               |  1 +
 massa-graph-2-worker/src/commands.rs          |  9 +----
 massa-graph-2-worker/src/controller.rs        | 39 ++++++++++---------
 massa-graph-2-worker/src/worker.rs            | 25 +++++++++---
 9 files changed, 51 insertions(+), 50 deletions(-)
 rename massa-graph-2-exports/src/{types.rs => channels.rs} (100%)
 create mode 100644 massa-graph-2-exports/src/state.rs

diff --git a/Cargo.lock b/Cargo.lock
index 0f2805a343e..05af5c5d3a6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1940,6 +1940,7 @@ dependencies = [
  "massa_models",
  "massa_storage",
  "massa_time",
+ "parking_lot",
  "tracing",
 ]
diff --git a/massa-graph-2-exports/src/types.rs b/massa-graph-2-exports/src/channels.rs
similarity index 100%
rename from massa-graph-2-exports/src/types.rs
rename to massa-graph-2-exports/src/channels.rs
diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs
index 4dbd9155562..1e9d306a14f 100644
--- a/massa-graph-2-exports/src/controller_trait.rs
+++ b/massa-graph-2-exports/src/controller_trait.rs
@@ -31,24 +31,11 @@ pub trait GraphController: Send + Sync {
 
     fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId;
 
-    fn register_block(
-        &self,
-        block_id: BlockId,
-        slot: Slot,
-        block_storage: Storage,
-    ) -> GraphResult<()>;
+    fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage);
 
-    fn register_block_header(
-        &self,
-        block_id: BlockId,
-        header: Wrapped<BlockHeader, BlockId>,
-    ) -> GraphResult<()>;
+    fn register_block_header(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>);
 
-    fn mark_invalid_block(
-        &self,
-        block_id: BlockId,
-        header: Wrapped<BlockHeader, BlockId>,
-    ) -> GraphResult<()>;
+    fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>);
 }
 
 /// Graph manager used to stop the graph thread
diff --git
a/massa-graph-2-exports/src/lib.rs b/massa-graph-2-exports/src/lib.rs index 4ee9bb4857a..35bd9e70a02 100644 --- a/massa-graph-2-exports/src/lib.rs +++ b/massa-graph-2-exports/src/lib.rs @@ -1,10 +1,12 @@ // Copyright (c) 2022 MASSA LABS //! Definition and exports of the graph types and errors. +mod channels; mod controller_trait; mod settings; -mod types; +mod state; +pub use channels::GraphChannels; pub use controller_trait::{GraphController, GraphManager}; pub use settings::GraphConfig; -pub use types::GraphChannels; +pub use state::GraphState; diff --git a/massa-graph-2-exports/src/state.rs b/massa-graph-2-exports/src/state.rs new file mode 100644 index 00000000000..3572e418dd8 --- /dev/null +++ b/massa-graph-2-exports/src/state.rs @@ -0,0 +1 @@ +pub struct GraphState {} diff --git a/massa-graph-2-worker/Cargo.toml b/massa-graph-2-worker/Cargo.toml index 874ca688630..eca3d7e2f34 100644 --- a/massa-graph-2-worker/Cargo.toml +++ b/massa-graph-2-worker/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" [dependencies] displaydoc = "0.2" tracing = "0.1" +parking_lot = { version = "0.12", features = ["deadlock_detection"] } #custom modules massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_graph = { path = "../massa-graph" } diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs index 60b3e269536..f59bad59893 100644 --- a/massa-graph-2-worker/src/commands.rs +++ b/massa-graph-2-worker/src/commands.rs @@ -5,15 +5,8 @@ use massa_models::{ }; use massa_storage::Storage; +#[allow(clippy::large_enum_variant)] pub enum GraphCommand { - GetBlockGraphStatus(Option, Option), - GetBlockStatuses(Vec), - GetCliques, - GetBootstrapGraph, - GetStats, - GetBestParents, - GetBlockCliqueBlockAtSlot(Slot), - GetLatestBlockCliqueBlockAtSlot(Slot), RegisterBlock(BlockId, Slot, Storage), RegisterBlockHeader(BlockId, Wrapped), Stop, diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index 97abc23f60c..a8c6a729f32 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -1,5 +1,5 @@ use massa_graph::{error::GraphResult, BlockGraphExport, BootstrapableGraph}; -use massa_graph_2_exports::GraphController; +use massa_graph_2_exports::{GraphController, GraphState}; use massa_models::{ api::BlockGraphStatus, block::{BlockHeader, BlockId}, @@ -9,13 +9,27 @@ use massa_models::{ wrapped::Wrapped, }; use massa_storage::Storage; -use std::sync::mpsc::SyncSender; +use parking_lot::RwLock; +use std::sync::{mpsc::SyncSender, Arc}; use crate::commands::GraphCommand; #[derive(Clone)] pub struct GraphControllerImpl { - pub command_sender: SyncSender, + command_sender: SyncSender, + shared_state: Arc>, +} + +impl GraphControllerImpl { + pub fn new( + command_sender: SyncSender, + shared_state: Arc>, + ) -> Self { + Self { + command_sender, + shared_state, + } + } } impl GraphController for GraphControllerImpl { @@ -55,28 +69,15 @@ impl GraphController for GraphControllerImpl { todo!() } - fn register_block( - &self, - block_id: BlockId, - slot: Slot, - block_storage: Storage, - ) -> GraphResult<()> { + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage) { todo!() } - fn register_block_header( - &self, - block_id: BlockId, - header: Wrapped, - ) -> GraphResult<()> { + fn register_block_header(&self, block_id: BlockId, header: Wrapped) { todo!() } - fn mark_invalid_block( - &self, - block_id: BlockId, - header: Wrapped, - ) -> GraphResult<()> { + fn mark_invalid_block(&self, 
block_id: BlockId, header: Wrapped) { todo!() } } diff --git a/massa-graph-2-worker/src/worker.rs b/massa-graph-2-worker/src/worker.rs index 551131b01e8..8b1531aba32 100644 --- a/massa-graph-2-worker/src/worker.rs +++ b/massa-graph-2-worker/src/worker.rs @@ -1,9 +1,12 @@ use massa_graph::error::GraphResult; -use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager}; +use massa_graph_2_exports::{ + GraphChannels, GraphConfig, GraphController, GraphManager, GraphState, +}; use massa_models::slot::Slot; use massa_models::timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}; use massa_time::MassaTime; -use std::sync::mpsc; +use parking_lot::RwLock; +use std::sync::{mpsc, Arc}; use std::thread; use std::time::Instant; use tracing::log::warn; @@ -16,6 +19,7 @@ pub struct GraphWorker { command_receiver: mpsc::Receiver, config: GraphConfig, channels: GraphChannels, + shared_state: Arc>, } enum WaitingStatus { @@ -26,7 +30,14 @@ enum WaitingStatus { impl GraphWorker { fn manage_command(&self, command: GraphCommand) -> GraphResult<()> { - //TODO: Manage + match command { + GraphCommand::RegisterBlock(_, _, _) => { + // TODO + } + _ => { + // TODO + } + } Ok(()) } @@ -101,11 +112,13 @@ impl GraphWorker { command_receiver: mpsc::Receiver, config: GraphConfig, channels: GraphChannels, + shared_state: Arc>, ) -> Self { Self { command_receiver, config, channels, + shared_state, } } @@ -135,11 +148,13 @@ pub fn start_graph_worker( channels: GraphChannels, ) -> (Box, Box) { let (tx, rx) = mpsc::sync_channel(10); + let shared_state = Arc::new(RwLock::new(GraphState {})); + let shared_state_cloned = shared_state.clone(); let thread_graph = thread::Builder::new() .name("graph worker".into()) .spawn(move || { - let mut graph_worker = GraphWorker::new(rx, config, channels); + let mut graph_worker = GraphWorker::new(rx, config, channels, shared_state_cloned); graph_worker.run() }) .expect("Can't spawn thread graph."); @@ -149,7 +164,7 @@ pub fn start_graph_worker( graph_command_sender: tx.clone(), }; - let controller = GraphControllerImpl { command_sender: tx }; + let controller = GraphControllerImpl::new(tx, shared_state); (Box::new(controller), Box::new(manager)) } From 6d33ce6e7a26931dcec106e315189d1ca6215cfe Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 28 Sep 2022 12:44:28 +0200 Subject: [PATCH 06/40] Add first version of init of new block graph. 
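Graph bootstrapping starts by creating one signed genesis block per thread;
`latest_final_blocks_periods` and `best_parents` are then seeded with one
`(genesis block id, period 0)` entry per thread before any bootstrapped final
blocks are folded in. A minimal sketch of that per-thread seeding shape (a plain
string stands in for the real `BlockId` type and the 32-thread count is only an
example; both are assumptions for illustration):

    fn main() {
        const THREAD_COUNT: u8 = 32; // example value, carried by GraphConfig
        // stand-in ids for the per-thread genesis blocks
        let genesis_ids: Vec<String> = (0..THREAD_COUNT)
            .map(|t| format!("genesis_thread_{}", t))
            .collect();
        // one (block id, period) pair per thread, all final at period 0
        let latest_final: Vec<(String, u64)> =
            genesis_ids.iter().map(|id| (id.clone(), 0u64)).collect();
        // before any other block is known, the genesis blocks are the best parents
        let best_parents = latest_final.clone();
        assert_eq!(best_parents.len(), THREAD_COUNT as usize);
    }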
--- Cargo.lock | 5 + massa-graph-2-exports/Cargo.toml | 1 + massa-graph-2-exports/src/settings.rs | 41 ++- massa-graph-2-exports/src/state.rs | 1 + massa-graph-2-worker/Cargo.toml | 5 +- massa-graph-2-worker/src/block_graph/init.rs | 1 + massa-graph-2-worker/src/block_graph/mod.rs | 1 + massa-graph-2-worker/src/block_status.rs | 120 ++++++ massa-graph-2-worker/src/lib.rs | 2 + massa-graph-2-worker/src/manager.rs | 1 - massa-graph-2-worker/src/worker/init.rs | 342 ++++++++++++++++++ .../src/{worker.rs => worker/main_loop.rs} | 76 +--- massa-graph-2-worker/src/worker/mod.rs | 118 ++++++ massa-graph/Cargo.toml | 1 + massa-graph/src/error.rs | 3 + 15 files changed, 648 insertions(+), 70 deletions(-) create mode 100644 massa-graph-2-worker/src/block_graph/init.rs create mode 100644 massa-graph-2-worker/src/block_graph/mod.rs create mode 100644 massa-graph-2-worker/src/block_status.rs create mode 100644 massa-graph-2-worker/src/worker/init.rs rename massa-graph-2-worker/src/{worker.rs => worker/main_loop.rs} (61%) create mode 100644 massa-graph-2-worker/src/worker/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 05af5c5d3a6..cdb1563533b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1909,6 +1909,7 @@ dependencies = [ "massa_serialization", "massa_signature", "massa_storage", + "massa_time", "nom 7.1.1", "num", "serde 1.0.144", @@ -1924,6 +1925,7 @@ dependencies = [ "displaydoc", "massa_graph", "massa_models", + "massa_signature", "massa_storage", "massa_time", "serde 1.0.144", @@ -1937,10 +1939,13 @@ dependencies = [ "displaydoc", "massa_graph", "massa_graph_2_exports", + "massa_hash", "massa_models", "massa_storage", "massa_time", "parking_lot", + "serde 1.0.144", + "serde_json", "tracing", ] diff --git a/massa-graph-2-exports/Cargo.toml b/massa-graph-2-exports/Cargo.toml index 8b5a9c051f6..3222e553946 100644 --- a/massa-graph-2-exports/Cargo.toml +++ b/massa-graph-2-exports/Cargo.toml @@ -15,3 +15,4 @@ massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_time = { path = "../massa-time" } +massa_signature = { path = "../massa-signature" } \ No newline at end of file diff --git a/massa-graph-2-exports/src/settings.rs b/massa-graph-2-exports/src/settings.rs index 44fbd9c69e0..83bea96dc8d 100644 --- a/massa-graph-2-exports/src/settings.rs +++ b/massa-graph-2-exports/src/settings.rs @@ -1,10 +1,49 @@ +use massa_signature::KeyPair; use massa_time::MassaTime; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Deserialize, Serialize)] pub struct GraphConfig { + /// Clock compensation pub clock_compensation_millis: i64, - pub thread_count: u8, + /// Genesis timestamp pub genesis_timestamp: MassaTime, + /// Delta time between two period pub t0: MassaTime, + /// Number of threads + pub thread_count: u8, + /// Keypair to sign genesis blocks. + pub genesis_key: KeyPair, + /// Maximum number of blocks allowed in discarded blocks. + pub max_discarded_blocks: usize, + /// If a block `is future_block_processing_max_periods` periods in the future, it is just discarded. + pub future_block_processing_max_periods: u64, + /// Maximum number of blocks allowed in `FutureIncomingBlocks`. + pub max_future_processing_blocks: usize, + /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. 
+ pub max_dependency_blocks: usize, + /// max event send wait + pub max_send_wait: MassaTime, + /// old blocks are pruned every `block_db_prune_interval` + pub block_db_prune_interval: MassaTime, + /// max number of items returned while querying + pub max_item_return_count: usize, + /// Max gas per block for the execution configuration + pub max_gas_per_block: u64, + /// Threshold for fitness. + pub delta_f0: u64, + /// Maximum operation validity period count + pub operation_validity_periods: u64, + /// cycle duration in periods + pub periods_per_cycle: u64, + /// force keep at least this number of final periods in RAM for each thread + pub force_keep_final_periods: u64, + /// target number of endorsement per block + pub endorsement_count: u32, + /// TESTNET: time when the blockclique is ended. + pub end_timestamp: Option, + /// stats time span + pub stats_timespan: MassaTime, + /// channel size + pub channel_size: usize, } diff --git a/massa-graph-2-exports/src/state.rs b/massa-graph-2-exports/src/state.rs index 3572e418dd8..23781ce8458 100644 --- a/massa-graph-2-exports/src/state.rs +++ b/massa-graph-2-exports/src/state.rs @@ -1 +1,2 @@ +#[derive(Default, Clone, Copy, Debug)] pub struct GraphState {} diff --git a/massa-graph-2-worker/Cargo.toml b/massa-graph-2-worker/Cargo.toml index eca3d7e2f34..5beda1a6ab8 100644 --- a/massa-graph-2-worker/Cargo.toml +++ b/massa-graph-2-worker/Cargo.toml @@ -9,10 +9,13 @@ edition = "2021" [dependencies] displaydoc = "0.2" tracing = "0.1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" parking_lot = { version = "0.12", features = ["deadlock_detection"] } #custom modules massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } -massa_time = { path = "../massa-time" } \ No newline at end of file +massa_time = { path = "../massa-time" } +massa_hash = { path = "../massa-hash" } \ No newline at end of file diff --git a/massa-graph-2-worker/src/block_graph/init.rs b/massa-graph-2-worker/src/block_graph/init.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/massa-graph-2-worker/src/block_graph/init.rs @@ -0,0 +1 @@ + diff --git a/massa-graph-2-worker/src/block_graph/mod.rs b/massa-graph-2-worker/src/block_graph/mod.rs new file mode 100644 index 00000000000..6adc4f6e6b1 --- /dev/null +++ b/massa-graph-2-worker/src/block_graph/mod.rs @@ -0,0 +1 @@ +mod init; diff --git a/massa-graph-2-worker/src/block_status.rs b/massa-graph-2-worker/src/block_status.rs new file mode 100644 index 00000000000..bd3b9deda94 --- /dev/null +++ b/massa-graph-2-worker/src/block_status.rs @@ -0,0 +1,120 @@ +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::{Block, BlockId, WrappedHeader}, + prehash::PreHashSet, + slot::Slot, +}; +use massa_storage::Storage; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone)] +#[allow(clippy::large_enum_variant)] +pub(crate) enum HeaderOrBlock { + Header(WrappedHeader), + Block { + id: BlockId, + slot: Slot, + storage: Storage, + }, +} + +impl HeaderOrBlock { + /// Gets slot for that header or block + pub fn get_slot(&self) -> Slot { + match self { + HeaderOrBlock::Header(header) => header.content.slot, + HeaderOrBlock::Block { slot, .. 
} => *slot,
+        }
+    }
+}
+
+/// Something can be discarded
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub enum DiscardReason {
+    /// Block is invalid, either structurally, or because of some incompatibility. The String contains the reason for info or debugging.
+    Invalid(String),
+    /// Block is incompatible with a final block.
+    Stale,
+    /// Block has enough fitness.
+    Final,
+}
+
+/// Enum used in `BlockGraph`'s state machine
+#[derive(Debug, Clone)]
+pub(crate) enum BlockStatus {
+    /// The block/header has reached consensus but no consensus-level check has been performed.
+    /// It will be processed during the next iteration
+    Incoming(HeaderOrBlock),
+    /// The block's or header's slot is too much in the future.
+    /// It will be processed at the block/header slot
+    WaitingForSlot(HeaderOrBlock),
+    /// The block references an unknown Block id
+    WaitingForDependencies {
+        /// Given header/block
+        header_or_block: HeaderOrBlock,
+        /// includes self if it's only a header
+        unsatisfied_dependencies: PreHashSet<BlockId>,
+        /// Used to limit and sort the number of blocks/headers waiting for dependencies
+        sequence_number: u64,
+    },
+    /// The block was checked and included in the blockgraph
+    Active {
+        a_block: Box<ActiveBlock>,
+        storage: Storage,
+    },
+    /// The block was discarded and is kept to avoid reprocessing it
+    Discarded {
+        /// Just the slot of that block
+        slot: Slot,
+        /// Address of the creator of the block
+        creator: Address,
+        /// Ids of parents blocks
+        parents: Vec<BlockId>,
+        /// why it was discarded
+        reason: DiscardReason,
+        /// Used to limit and sort the number of blocks/headers waiting for dependencies
+        sequence_number: u64,
+    },
+}
+
+/// Block status in the graph that can be exported.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum ExportBlockStatus {
+    /// received but not yet graph processed
+    Incoming,
+    /// waiting for its slot
+    WaitingForSlot,
+    /// waiting for a missing dependency
+    WaitingForDependencies,
+    /// valid and not yet final
+    Active(Block),
+    /// immutable
+    Final(Block),
+    /// not part of the graph
+    Discarded(DiscardReason),
+}
+
+/// The block version that can be exported.
+/// Note that the detailed list of operation is not exported
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ExportCompiledBlock {
+    /// Header of the corresponding block.
+    pub header: WrappedHeader,
+    /// For (i, set) in children,
+    /// set contains the headers' hashes
+    /// of blocks referencing exported block as a parent,
+    /// in thread i.
+    pub children: Vec<PreHashSet<BlockId>>,
+    /// Active or final
+    pub is_final: bool,
+}
+
+/// Status
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub enum Status {
+    /// without enough fitness to be part of immutable history
+    Active,
+    /// with enough fitness to be part of immutable history
+    Final,
+}
diff --git a/massa-graph-2-worker/src/lib.rs b/massa-graph-2-worker/src/lib.rs
index cf6d24f3457..308f42a074f 100644
--- a/massa-graph-2-worker/src/lib.rs
+++ b/massa-graph-2-worker/src/lib.rs
@@ -1,5 +1,7 @@
 #![feature(deadline_api)]
 
+mod block_graph;
+mod block_status;
 mod commands;
 mod controller;
 mod manager;
diff --git a/massa-graph-2-worker/src/manager.rs b/massa-graph-2-worker/src/manager.rs
index bbd2709fe90..03e7ca12dae 100644
--- a/massa-graph-2-worker/src/manager.rs
+++ b/massa-graph-2-worker/src/manager.rs
@@ -1,4 +1,3 @@
-use massa_graph::error::GraphResult;
 use massa_graph_2_exports::GraphManager;
 use std::{sync::mpsc::SyncSender, thread::JoinHandle};
 use tracing::log::info;
diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs
new file mode 100644
index 00000000000..081cfe1d1e4
--- /dev/null
+++ b/massa-graph-2-worker/src/worker/init.rs
@@ -0,0 +1,342 @@
+use std::{
+    collections::VecDeque,
+    sync::{mpsc, Arc},
+};
+
+use massa_graph::{
+    error::{GraphError, GraphResult},
+    BootstrapableGraph,
+};
+use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphState};
+use massa_hash::Hash;
+use massa_models::{
+    active_block::ActiveBlock,
+    address::Address,
+    block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock},
+    clique::Clique,
+    prehash::{PreHashMap, PreHashSet},
+    slot::Slot,
+    timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp},
+    wrapped::WrappedContent,
+};
+use massa_storage::Storage;
+use massa_time::MassaTime;
+use parking_lot::RwLock;
+use tracing::log::info;
+
+use crate::{block_status::BlockStatus, commands::GraphCommand};
+
+use super::GraphWorker;
+
+/// Creates genesis block in given thread.
+/// +/// # Arguments +/// * `cfg`: consensus configuration +/// * `thread_number`: thread in which we want a genesis block +pub fn create_genesis_block( + cfg: &GraphConfig, + thread_number: u8, +) -> GraphResult<(BlockId, WrappedBlock)> { + let keypair = &cfg.genesis_key; + let header = BlockHeader::new_wrapped( + BlockHeader { + slot: Slot::new(0, thread_number), + parents: Vec::new(), + operation_merkle_root: Hash::compute_from(&Vec::new()), + endorsements: Vec::new(), + }, + BlockHeaderSerializer::new(), + keypair, + )?; + + Ok(( + header.id, + Block::new_wrapped( + Block { + header, + operations: Default::default(), + }, + BlockSerializer::new(), + keypair, + )?, + )) +} + +impl GraphWorker { + pub fn new( + command_receiver: mpsc::Receiver, + config: GraphConfig, + channels: GraphChannels, + shared_state: Arc>, + init_graph: Option, + storage: Storage, + ) -> GraphResult { + let now = MassaTime::now(config.clock_compensation_millis) + .expect("Couldn't init timer consensus"); + let previous_slot = get_latest_block_slot_at_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + now, + ) + .expect("Couldn't get the init slot consensus."); + // load genesis blocks + + let mut block_statuses = PreHashMap::default(); + let mut genesis_block_ids = Vec::with_capacity(config.thread_count as usize); + for thread in 0u8..config.thread_count { + let (block_id, block) = create_genesis_block(&config, thread).map_err(|err| { + GraphError::GenesisCreationError(format!("genesis error {}", err)) + })?; + let mut storage = storage.clone_without_refs(); + storage.store_block(block.clone()); + genesis_block_ids.push(block_id); + block_statuses.insert( + block_id, + BlockStatus::Active { + a_block: Box::new(ActiveBlock { + creator_address: block.creator_address, + parents: Vec::new(), + children: vec![PreHashMap::default(); config.thread_count as usize], + descendants: Default::default(), + is_final: true, + block_id, + slot: block.content.header.content.slot, + fitness: block.get_fitness(), + }), + storage, + }, + ); + } + + let next_slot = previous_slot.map_or(Ok(Slot::new(0u64, 0u8)), |s| { + s.get_next_slot(config.thread_count) + })?; + let next_instant = get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + next_slot, + )? + .estimate_instant(config.clock_compensation_millis)?; + + info!( + "Started node at time {}, cycle {}, period {}, thread {}", + now.to_utc_string(), + next_slot.get_cycle(config.periods_per_cycle), + next_slot.period, + next_slot.thread, + ); + if config.genesis_timestamp > now { + let (days, hours, mins, secs) = config + .genesis_timestamp + .saturating_sub(now) + .days_hours_mins_secs()?; + info!( + "{} days, {} hours, {} minutes, {} seconds remaining to genesis", + days, hours, mins, secs, + ) + } + + // add genesis blocks to stats + let genesis_addr = Address::from_public_key(&config.genesis_key.get_public_key()); + let mut final_block_stats = VecDeque::new(); + for thread in 0..config.thread_count { + final_block_stats.push_back(( + get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + Slot::new(0, thread), + )?, + genesis_addr, + false, + )) + } + + // desync detection timespan + let stats_desync_detection_timespan = + config.t0.checked_mul(config.periods_per_cycle * 2)?; + + //TODO: Factorize this code to create graph worker only one time a lot of fields are redundant. 
+ if let Some(BootstrapableGraph { final_blocks }) = init_graph { + // load final blocks + let final_blocks: Vec<(ActiveBlock, Storage)> = final_blocks + .into_iter() + .map(|export_b| export_b.to_active_block(&storage, config.thread_count)) + .collect::>()?; + + // compute latest_final_blocks_periods + let mut latest_final_blocks_periods: Vec<(BlockId, u64)> = + genesis_block_ids.iter().map(|id| (*id, 0u64)).collect(); + for (b, _) in &final_blocks { + if let Some(v) = latest_final_blocks_periods.get_mut(b.slot.thread as usize) { + if b.slot.period > v.1 { + *v = (b.block_id, b.slot.period); + } + } + } + + // generate graph + let mut res_graph = GraphWorker { + config: config.clone(), + command_receiver, + channels, + shared_state, + previous_slot, + next_slot, + next_instant, + wishlist: Default::default(), + final_block_stats, + protocol_blocks: Default::default(), + stale_block_stats: VecDeque::new(), + stats_desync_detection_timespan, + stats_history_timespan: std::cmp::max( + stats_desync_detection_timespan, + config.stats_timespan, + ), + launch_time: MassaTime::now(config.clock_compensation_millis)?, + sequence_counter: 0, + genesis_hashes: genesis_block_ids, + active_index: final_blocks.iter().map(|(b, _)| b.block_id).collect(), + incoming_index: Default::default(), + waiting_for_slot_index: Default::default(), + waiting_for_dependencies_index: Default::default(), + discarded_index: Default::default(), + best_parents: latest_final_blocks_periods.clone(), + latest_final_blocks_periods, + gi_head: Default::default(), + max_cliques: vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }], + to_propagate: Default::default(), + attack_attempts: Default::default(), + new_final_blocks: Default::default(), + new_stale_blocks: Default::default(), + storage, + block_statuses: final_blocks + .into_iter() + .map(|(b, s)| { + Ok(( + b.block_id, + BlockStatus::Active { + a_block: Box::new(b), + storage: s, + }, + )) + }) + .collect::>()?, + }; + + // claim parent refs + for (_b_id, block_status) in res_graph.block_statuses.iter_mut() { + if let BlockStatus::Active { + a_block, + storage: block_storage, + } = block_status + { + // claim parent refs + let n_claimed_parents = block_storage + .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) + .len(); + + if !a_block.is_final { + // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals + if n_claimed_parents != config.thread_count as usize { + return Err(GraphError::MissingBlock( + "block storage could not claim refs to all parent blocks".into(), + )); + } + } + } + } + + // list active block parents + let active_blocks_map: PreHashMap)> = res_graph + .block_statuses + .iter() + .filter_map(|(h, s)| { + if let BlockStatus::Active { a_block: a, .. } = s { + return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); + } + None + }) + .collect(); + // deduce children and descendants + for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { + // deduce children + for parent_id in &b_parents { + if let Some(BlockStatus::Active { + a_block: parent, .. 
+ }) = res_graph.block_statuses.get_mut(parent_id) + { + parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); + } + } + + // deduce descendants + let mut ancestors: VecDeque = b_parents.into_iter().collect(); + let mut visited: PreHashSet = Default::default(); + while let Some(ancestor_h) = ancestors.pop_back() { + if !visited.insert(ancestor_h) { + continue; + } + if let Some(BlockStatus::Active { a_block: ab, .. }) = + res_graph.block_statuses.get_mut(&ancestor_h) + { + ab.descendants.insert(b_id); + for (ancestor_parent_h, _) in ab.parents.iter() { + ancestors.push_front(*ancestor_parent_h); + } + } + } + } + Ok(res_graph) + } else { + Ok(GraphWorker { + config: config.clone(), + command_receiver, + channels, + shared_state, + previous_slot, + next_slot, + next_instant, + wishlist: Default::default(), + final_block_stats, + protocol_blocks: Default::default(), + stale_block_stats: VecDeque::new(), + stats_desync_detection_timespan, + stats_history_timespan: std::cmp::max( + stats_desync_detection_timespan, + config.stats_timespan, + ), + launch_time: MassaTime::now(config.clock_compensation_millis)?, + sequence_counter: 0, + block_statuses, + incoming_index: Default::default(), + waiting_for_slot_index: Default::default(), + waiting_for_dependencies_index: Default::default(), + active_index: genesis_block_ids.iter().copied().collect(), + discarded_index: Default::default(), + latest_final_blocks_periods: genesis_block_ids.iter().map(|h| (*h, 0)).collect(), + best_parents: genesis_block_ids.iter().map(|v| (*v, 0)).collect(), + genesis_hashes: genesis_block_ids, + gi_head: PreHashMap::default(), + max_cliques: vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }], + to_propagate: Default::default(), + attack_attempts: Default::default(), + new_final_blocks: Default::default(), + new_stale_blocks: Default::default(), + storage, + }) + + //TODO: Add notify execution + } + } +} diff --git a/massa-graph-2-worker/src/worker.rs b/massa-graph-2-worker/src/worker/main_loop.rs similarity index 61% rename from massa-graph-2-worker/src/worker.rs rename to massa-graph-2-worker/src/worker/main_loop.rs index 8b1531aba32..ad32fa66d62 100644 --- a/massa-graph-2-worker/src/worker.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -1,26 +1,16 @@ +use std::{sync::mpsc, time::Instant}; + use massa_graph::error::GraphResult; -use massa_graph_2_exports::{ - GraphChannels, GraphConfig, GraphController, GraphManager, GraphState, +use massa_models::{ + slot::Slot, + timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, }; -use massa_models::slot::Slot; -use massa_models::timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}; use massa_time::MassaTime; -use parking_lot::RwLock; -use std::sync::{mpsc, Arc}; -use std::thread; -use std::time::Instant; use tracing::log::warn; use crate::commands::GraphCommand; -use crate::controller::GraphControllerImpl; -use crate::manager::GraphManagerImpl; -pub struct GraphWorker { - command_receiver: mpsc::Receiver, - config: GraphConfig, - channels: GraphChannels, - shared_state: Arc>, -} +use super::GraphWorker; enum WaitingStatus { Ended, @@ -80,11 +70,6 @@ impl GraphWorker { now, ); - // ignore genesis - if next_slot.period == 0 { - next_slot.period = 1; - } - // protection against double-production on unexpected system clock adjustment if let Some(prev_slot) = previous_slot { if next_slot <= prev_slot { @@ -108,29 +93,12 @@ impl GraphWorker { (next_slot, next_instant) } - fn new( - 
command_receiver: mpsc::Receiver, - config: GraphConfig, - channels: GraphChannels, - shared_state: Arc>, - ) -> Self { - Self { - command_receiver, - config, - channels, - shared_state, - } - } - - fn run(&mut self) { - // TODO: Should we start from slot of final state after bootstrap ? - let prev_slot: Option = None; - let (mut next_slot, mut next_instant) = self.get_next_slot(prev_slot); + pub fn run(&mut self) { loop { - match self.wait_slot_or_command(next_instant) { + match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { //TODO: Desync, stats, block_db changed - (next_slot, next_instant) = self.get_next_slot(prev_slot); + (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); } WaitingStatus::Disconnected => { break; @@ -142,29 +110,3 @@ impl GraphWorker { } } } - -pub fn start_graph_worker( - config: GraphConfig, - channels: GraphChannels, -) -> (Box, Box) { - let (tx, rx) = mpsc::sync_channel(10); - let shared_state = Arc::new(RwLock::new(GraphState {})); - - let shared_state_cloned = shared_state.clone(); - let thread_graph = thread::Builder::new() - .name("graph worker".into()) - .spawn(move || { - let mut graph_worker = GraphWorker::new(rx, config, channels, shared_state_cloned); - graph_worker.run() - }) - .expect("Can't spawn thread graph."); - - let manager = GraphManagerImpl { - thread_graph: Some(thread_graph), - graph_command_sender: tx.clone(), - }; - - let controller = GraphControllerImpl::new(tx, shared_state); - - (Box::new(controller), Box::new(manager)) -} diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs new file mode 100644 index 00000000000..7f3beed5057 --- /dev/null +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -0,0 +1,118 @@ +use massa_graph::BootstrapableGraph; +use massa_graph_2_exports::{ + GraphChannels, GraphConfig, GraphController, GraphManager, GraphState, +}; +use massa_models::address::Address; +use massa_models::block::{BlockId, WrappedHeader}; +use massa_models::clique::Clique; +use massa_models::prehash::{PreHashMap, PreHashSet}; +use massa_models::slot::Slot; +use massa_storage::Storage; +use massa_time::MassaTime; +use parking_lot::RwLock; +use std::collections::VecDeque; +use std::sync::{mpsc, Arc}; +use std::thread; +use std::time::Instant; + +use crate::block_status::BlockStatus; +use crate::commands::GraphCommand; +use crate::controller::GraphControllerImpl; +use crate::manager::GraphManagerImpl; + +pub struct GraphWorker { + command_receiver: mpsc::Receiver, + config: GraphConfig, + channels: GraphChannels, + shared_state: Arc>, + /// Previous slot. 
+ previous_slot: Option, + /// Next slot + next_slot: Slot, + /// Next slot instant + next_instant: Instant, + /// blocks we want + wishlist: PreHashMap>, + /// Final block stats `(time, creator, is_from_protocol)` + final_block_stats: VecDeque<(MassaTime, Address, bool)>, + /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` + protocol_blocks: VecDeque<(MassaTime, BlockId)>, + /// Stale block timestamp + stale_block_stats: VecDeque, + /// the time span considered for stats + stats_history_timespan: MassaTime, + /// the time span considered for desynchronization detection + #[allow(dead_code)] + stats_desync_detection_timespan: MassaTime, + /// time at which the node was launched (used for desynchronization detection) + launch_time: MassaTime, + + /// Block ids of genesis blocks + genesis_hashes: Vec, + /// Used to limit the number of waiting and discarded blocks + sequence_counter: u64, + /// Every block we know about + block_statuses: PreHashMap, + /// Ids of incoming blocks/headers + incoming_index: PreHashSet, + /// ids of waiting for slot blocks/headers + waiting_for_slot_index: PreHashSet, + /// ids of waiting for dependencies blocks/headers + waiting_for_dependencies_index: PreHashSet, + /// ids of active blocks + active_index: PreHashSet, + /// ids of discarded blocks + discarded_index: PreHashSet, + /// One (block id, period) per thread + latest_final_blocks_periods: Vec<(BlockId, u64)>, + /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` + best_parents: Vec<(BlockId, u64)>, + /// Incompatibility graph: maps a block id to the block ids it is incompatible with + /// One entry per Active Block + gi_head: PreHashMap>, + /// All the cliques + max_cliques: Vec, + /// Blocks that need to be propagated + to_propagate: PreHashMap, + /// List of block ids we think are attack attempts + attack_attempts: Vec, + /// Newly final blocks + new_final_blocks: PreHashSet, + /// Newly stale block mapped to creator and slot + new_stale_blocks: PreHashMap, + /// Shared storage, + storage: Storage, +} + +mod init; +mod main_loop; + +pub fn start_graph_worker( + config: GraphConfig, + channels: GraphChannels, + init_graph: Option, + storage: Storage, +) -> (Box, Box) { + let (tx, rx) = mpsc::sync_channel(10); + let shared_state = Arc::new(RwLock::new(GraphState {})); + + let shared_state_cloned = shared_state.clone(); + let thread_graph = thread::Builder::new() + .name("graph worker".into()) + .spawn(move || { + let mut graph_worker = + //TODO: Better error management + GraphWorker::new(rx, config, channels, shared_state_cloned, init_graph, storage).expect("Failed to initialize graph worker"); + graph_worker.run() + }) + .expect("Can't spawn thread graph."); + + let manager = GraphManagerImpl { + thread_graph: Some(thread_graph), + graph_command_sender: tx.clone(), + }; + + let controller = GraphControllerImpl::new(tx, shared_state); + + (Box::new(controller), Box::new(manager)) +} diff --git a/massa-graph/Cargo.toml b/massa-graph/Cargo.toml index dac2300d26f..2e3e154013d 100644 --- a/massa-graph/Cargo.toml +++ b/massa-graph/Cargo.toml @@ -23,4 +23,5 @@ massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_signature = { path = "../massa-signature" } massa_serialization = { path = "../massa-serialization"} +massa_time = { path = "../massa-time" } diff --git a/massa-graph/src/error.rs b/massa-graph/src/error.rs index 43822b0c09e..b01a53f4eca 
100644 --- a/massa-graph/src/error.rs +++ b/massa-graph/src/error.rs @@ -2,6 +2,7 @@ use displaydoc::Display; use massa_execution_exports::ExecutionError; use massa_models::error::ModelsError; +use massa_time::TimeError; use std::array::TryFromSliceError; use thiserror::Error; @@ -39,6 +40,8 @@ pub enum GraphError { PosCycleUnavailable(String), /// Ledger error {0} LedgerError(#[from] LedgerError), + /// Massa time error {0} + MassaTimeError(#[from] TimeError), /// transaction error {0} TransactionError(String), } From f6eb3778cbc4fb2c15b88e260e68134178adf7a1 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 28 Sep 2022 13:58:57 +0200 Subject: [PATCH 07/40] Refactor init function. --- massa-graph-2-worker/src/worker/init.rs | 272 +++++++++++------------- 1 file changed, 122 insertions(+), 150 deletions(-) diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 081cfe1d1e4..5f18e9ceb59 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -158,7 +158,47 @@ impl GraphWorker { let stats_desync_detection_timespan = config.t0.checked_mul(config.periods_per_cycle * 2)?; - //TODO: Factorize this code to create graph worker only one time a lot of fields are redundant. + let mut res_graph = GraphWorker { + config: config.clone(), + command_receiver, + channels, + shared_state, + previous_slot, + next_slot, + next_instant, + wishlist: Default::default(), + final_block_stats, + protocol_blocks: Default::default(), + stale_block_stats: VecDeque::new(), + stats_desync_detection_timespan, + stats_history_timespan: std::cmp::max( + stats_desync_detection_timespan, + config.stats_timespan, + ), + launch_time: MassaTime::now(config.clock_compensation_millis)?, + sequence_counter: 0, + block_statuses, + incoming_index: Default::default(), + waiting_for_slot_index: Default::default(), + waiting_for_dependencies_index: Default::default(), + active_index: genesis_block_ids.iter().copied().collect(), + discarded_index: Default::default(), + latest_final_blocks_periods: genesis_block_ids.iter().map(|h| (*h, 0)).collect(), + best_parents: genesis_block_ids.iter().map(|v| (*v, 0)).collect(), + genesis_hashes: genesis_block_ids.clone(), + gi_head: PreHashMap::default(), + max_cliques: vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }], + to_propagate: Default::default(), + attack_attempts: Default::default(), + new_final_blocks: Default::default(), + new_stale_blocks: Default::default(), + storage: storage.clone(), + }; + if let Some(BootstrapableGraph { final_blocks }) = init_graph { // load final blocks let final_blocks: Vec<(ActiveBlock, Storage)> = final_blocks @@ -177,166 +217,98 @@ impl GraphWorker { } } - // generate graph - let mut res_graph = GraphWorker { - config: config.clone(), - command_receiver, - channels, - shared_state, - previous_slot, - next_slot, - next_instant, - wishlist: Default::default(), - final_block_stats, - protocol_blocks: Default::default(), - stale_block_stats: VecDeque::new(), - stats_desync_detection_timespan, - stats_history_timespan: std::cmp::max( - stats_desync_detection_timespan, - config.stats_timespan, - ), - launch_time: MassaTime::now(config.clock_compensation_millis)?, - sequence_counter: 0, - genesis_hashes: genesis_block_ids, - active_index: final_blocks.iter().map(|(b, _)| b.block_id).collect(), - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), 
- discarded_index: Default::default(), - best_parents: latest_final_blocks_periods.clone(), - latest_final_blocks_periods, - gi_head: Default::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - block_statuses: final_blocks - .into_iter() - .map(|(b, s)| { - Ok(( - b.block_id, - BlockStatus::Active { - a_block: Box::new(b), - storage: s, - }, - )) - }) - .collect::>()?, - }; + res_graph.active_index = final_blocks.iter().map(|(b, _)| b.block_id).collect(); + res_graph.best_parents = latest_final_blocks_periods.clone(); + res_graph.latest_final_blocks_periods = latest_final_blocks_periods; + res_graph.block_statuses = final_blocks + .into_iter() + .map(|(b, s)| { + Ok(( + b.block_id, + BlockStatus::Active { + a_block: Box::new(b), + storage: s, + }, + )) + }) + .collect::>()?; - // claim parent refs - for (_b_id, block_status) in res_graph.block_statuses.iter_mut() { - if let BlockStatus::Active { - a_block, - storage: block_storage, - } = block_status - { - // claim parent refs - let n_claimed_parents = block_storage - .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) - .len(); + res_graph.claim_parent_refs()?; + } + Ok(res_graph) + //TODO: Add notify execution + } - if !a_block.is_final { - // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals - if n_claimed_parents != config.thread_count as usize { - return Err(GraphError::MissingBlock( - "block storage could not claim refs to all parent blocks".into(), - )); - } + fn claim_parent_refs(&mut self) -> GraphResult<()> { + for (_b_id, block_status) in self.block_statuses.iter_mut() { + if let BlockStatus::Active { + a_block, + storage: block_storage, + } = block_status + { + // claim parent refs + let n_claimed_parents = block_storage + .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) + .len(); + + if !a_block.is_final { + // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals + if n_claimed_parents != self.config.thread_count as usize { + return Err(GraphError::MissingBlock( + "block storage could not claim refs to all parent blocks".into(), + )); } } } + } - // list active block parents - let active_blocks_map: PreHashMap)> = res_graph - .block_statuses - .iter() - .filter_map(|(h, s)| { - if let BlockStatus::Active { a_block: a, .. } = s { - return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); - } - None - }) - .collect(); - // deduce children and descendants - for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { - // deduce children - for parent_id in &b_parents { - if let Some(BlockStatus::Active { - a_block: parent, .. - }) = res_graph.block_statuses.get_mut(parent_id) - { - parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); - } + // list active block parents + let active_blocks_map: PreHashMap)> = self + .block_statuses + .iter() + .filter_map(|(h, s)| { + if let BlockStatus::Active { a_block: a, .. 
} = s { + return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); } + None + }) + .collect(); - // deduce descendants - let mut ancestors: VecDeque = b_parents.into_iter().collect(); - let mut visited: PreHashSet = Default::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - res_graph.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(b_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } + self.deduce_children_and_descendants(active_blocks_map); + Ok(()) + } + + fn deduce_children_and_descendants( + &mut self, + active_blocks_map: PreHashMap)>, + ) { + for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { + // deduce children + for parent_id in &b_parents { + if let Some(BlockStatus::Active { + a_block: parent, .. + }) = self.block_statuses.get_mut(parent_id) + { + parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); } } - Ok(res_graph) - } else { - Ok(GraphWorker { - config: config.clone(), - command_receiver, - channels, - shared_state, - previous_slot, - next_slot, - next_instant, - wishlist: Default::default(), - final_block_stats, - protocol_blocks: Default::default(), - stale_block_stats: VecDeque::new(), - stats_desync_detection_timespan, - stats_history_timespan: std::cmp::max( - stats_desync_detection_timespan, - config.stats_timespan, - ), - launch_time: MassaTime::now(config.clock_compensation_millis)?, - sequence_counter: 0, - block_statuses, - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), - active_index: genesis_block_ids.iter().copied().collect(), - discarded_index: Default::default(), - latest_final_blocks_periods: genesis_block_ids.iter().map(|h| (*h, 0)).collect(), - best_parents: genesis_block_ids.iter().map(|v| (*v, 0)).collect(), - genesis_hashes: genesis_block_ids, - gi_head: PreHashMap::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - }) - //TODO: Add notify execution + // deduce descendants + let mut ancestors: VecDeque = b_parents.into_iter().collect(); + let mut visited: PreHashSet = Default::default(); + while let Some(ancestor_h) = ancestors.pop_back() { + if !visited.insert(ancestor_h) { + continue; + } + if let Some(BlockStatus::Active { a_block: ab, .. }) = + self.block_statuses.get_mut(&ancestor_h) + { + ab.descendants.insert(b_id); + for (ancestor_parent_h, _) in ab.parents.iter() { + ancestors.push_front(*ancestor_parent_h); + } + } + } } } } From b72fe505318a27c537975b9c1280bdaba701d416 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 28 Sep 2022 15:59:32 +0200 Subject: [PATCH 08/40] Add getter function and fill the state. 
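
The read path changes shape here: getters on the controller now answer
from the shared `GraphState` behind `Arc<RwLock<_>>` instead of sending a
command to the worker thread and waiting for a reply. A minimal sketch of
the pattern, for context only (the `State` and `Controller` types below
are stand-ins, not the types added by this patch; only the
`Arc<parking_lot::RwLock<_>>` layout matches the code below):

    use std::sync::Arc;
    use parking_lot::RwLock;

    struct State {
        cliques: Vec<u64>,
    }

    struct Controller {
        shared_state: Arc<RwLock<State>>,
    }

    impl Controller {
        // Clone the answer out so the read guard is released immediately
        // and is never held across a channel send or an await point.
        fn get_cliques(&self) -> Vec<u64> {
            self.shared_state.read().cliques.clone()
        }
    }

The worker keeps the only write path, so as long as it mutates the state
under a single write guard, readers never see a half-applied update.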
---
 .../src/block_graph_export.rs                 |  29 ++
 .../src/block_status.rs                       |   4 +-
 massa-graph-2-exports/src/controller_trait.rs |   7 +-
 massa-graph-2-exports/src/lib.rs              |   5 +-
 massa-graph-2-exports/src/state.rs            |   2 -
 massa-graph-2-worker/src/block_graph/init.rs  |   1 -
 massa-graph-2-worker/src/block_graph/mod.rs   |   1 -
 massa-graph-2-worker/src/controller.rs        |  69 +++-
 massa-graph-2-worker/src/lib.rs               |   3 +-
 massa-graph-2-worker/src/state.rs             | 346 ++++++++++++++++++
 massa-graph-2-worker/src/worker/init.rs       |  79 ++--
 massa-graph-2-worker/src/worker/mod.rs        |  35 +-
 12 files changed, 498 insertions(+), 83 deletions(-)
 create mode 100644 massa-graph-2-exports/src/block_graph_export.rs
 rename {massa-graph-2-worker => massa-graph-2-exports}/src/block_status.rs (98%)
 delete mode 100644 massa-graph-2-exports/src/state.rs
 delete mode 100644 massa-graph-2-worker/src/block_graph/init.rs
 delete mode 100644 massa-graph-2-worker/src/block_graph/mod.rs
 create mode 100644 massa-graph-2-worker/src/state.rs

diff --git a/massa-graph-2-exports/src/block_graph_export.rs b/massa-graph-2-exports/src/block_graph_export.rs
new file mode 100644
index 00000000000..bd8f5d27069
--- /dev/null
+++ b/massa-graph-2-exports/src/block_graph_export.rs
@@ -0,0 +1,29 @@
+use massa_models::{
+    address::Address,
+    block::BlockId,
+    clique::Clique,
+    prehash::{PreHashMap, PreHashSet},
+    slot::Slot,
+};
+
+use crate::block_status::{DiscardReason, ExportCompiledBlock};
+
+/// Bootstrap compatible version of the block graph
+#[derive(Debug, Clone)]
+#[allow(clippy::type_complexity)]
+pub struct BlockGraphExport {
+    /// Genesis blocks.
+    pub genesis_blocks: Vec<BlockId>,
+    /// Map of active blocks, where blocks are in their exported version.
+    pub active_blocks: PreHashMap<BlockId, ExportCompiledBlock>,
+    /// Finite cache of discarded blocks, in exported version `(slot, creator_address, parents)`.
+    pub discarded_blocks: PreHashMap<BlockId, (DiscardReason, (Slot, Address, Vec<BlockId>))>,
+    /// Best parents hashes in each thread.
+    pub best_parents: Vec<(BlockId, u64)>,
+    /// Latest final period and block hash in each thread.
+    pub latest_final_blocks_periods: Vec<(BlockId, u64)>,
+    /// Head of the incompatibility graph.
+    pub gi_head: PreHashMap<BlockId, PreHashSet<BlockId>>,
+    /// List of maximal cliques of compatible blocks.
+    pub max_cliques: Vec<Clique>,
+}
diff --git a/massa-graph-2-worker/src/block_status.rs b/massa-graph-2-exports/src/block_status.rs
similarity index 98%
rename from massa-graph-2-worker/src/block_status.rs
rename to massa-graph-2-exports/src/block_status.rs
index bd3b9deda94..2138a969489 100644
--- a/massa-graph-2-worker/src/block_status.rs
+++ b/massa-graph-2-exports/src/block_status.rs
@@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize};
 
 #[derive(Debug, Clone)]
 #[allow(clippy::large_enum_variant)]
-pub(crate) enum HeaderOrBlock {
+pub enum HeaderOrBlock {
     Header(WrappedHeader),
     Block {
         id: BlockId,
@@ -42,7 +42,7 @@ pub enum DiscardReason {
 
 /// Enum used in `BlockGraph`'s state machine
 #[derive(Debug, Clone)]
-pub(crate) enum BlockStatus {
+pub enum BlockStatus {
     /// The block/header has reached consensus but no consensus-level check has been performed.
/// It will be processed during the next iteration Incoming(HeaderOrBlock), diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs index 1e9d306a14f..9265980ead9 100644 --- a/massa-graph-2-exports/src/controller_trait.rs +++ b/massa-graph-2-exports/src/controller_trait.rs @@ -1,4 +1,5 @@ -use massa_graph::{error::GraphResult, BlockGraphExport, BootstrapableGraph}; +use crate::block_graph_export::BlockGraphExport; +use massa_graph::{error::GraphResult, BootstrapableGraph}; use massa_models::{ api::BlockGraphStatus, block::{BlockHeader, BlockId}, @@ -17,7 +18,7 @@ pub trait GraphController: Send + Sync { end_slot: Option, ) -> GraphResult; - fn get_block_statuses(&self, ids: Vec) -> BlockGraphStatus; + fn get_block_statuses(&self, ids: Vec) -> Vec; fn get_cliques(&self) -> Vec; @@ -25,7 +26,7 @@ pub trait GraphController: Send + Sync { fn get_stats(&self) -> GraphResult; - fn get_best_parents(&self) -> &Vec<(BlockId, u64)>; + fn get_best_parents(&self) -> Vec<(BlockId, u64)>; fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option; diff --git a/massa-graph-2-exports/src/lib.rs b/massa-graph-2-exports/src/lib.rs index 35bd9e70a02..b2f52826cde 100644 --- a/massa-graph-2-exports/src/lib.rs +++ b/massa-graph-2-exports/src/lib.rs @@ -4,9 +4,10 @@ mod channels; mod controller_trait; mod settings; -mod state; + +pub mod block_graph_export; +pub mod block_status; pub use channels::GraphChannels; pub use controller_trait::{GraphController, GraphManager}; pub use settings::GraphConfig; -pub use state::GraphState; diff --git a/massa-graph-2-exports/src/state.rs b/massa-graph-2-exports/src/state.rs deleted file mode 100644 index 23781ce8458..00000000000 --- a/massa-graph-2-exports/src/state.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[derive(Default, Clone, Copy, Debug)] -pub struct GraphState {} diff --git a/massa-graph-2-worker/src/block_graph/init.rs b/massa-graph-2-worker/src/block_graph/init.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/massa-graph-2-worker/src/block_graph/init.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/massa-graph-2-worker/src/block_graph/mod.rs b/massa-graph-2-worker/src/block_graph/mod.rs deleted file mode 100644 index 6adc4f6e6b1..00000000000 --- a/massa-graph-2-worker/src/block_graph/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod init; diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index a8c6a729f32..a2c20002a67 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -1,9 +1,16 @@ -use massa_graph::{error::GraphResult, BlockGraphExport, BootstrapableGraph}; -use massa_graph_2_exports::{GraphController, GraphState}; +use massa_graph::{ + error::{GraphError, GraphResult}, + export_active_block::ExportActiveBlock, + BootstrapableGraph, +}; +use massa_graph_2_exports::{ + block_graph_export::BlockGraphExport, block_status::BlockStatus, GraphConfig, GraphController, +}; use massa_models::{ api::BlockGraphStatus, block::{BlockHeader, BlockId}, clique::Clique, + prehash::{CapacityAllocator, PreHashSet}, slot::Slot, stats::ConsensusStats, wrapped::Wrapped, @@ -12,7 +19,7 @@ use massa_storage::Storage; use parking_lot::RwLock; use std::sync::{mpsc::SyncSender, Arc}; -use crate::commands::GraphCommand; +use crate::{commands::GraphCommand, state::GraphState}; #[derive(Clone)] pub struct GraphControllerImpl { @@ -38,35 +45,73 @@ impl GraphController for GraphControllerImpl { start_slot: Option, end_slot: Option, ) -> GraphResult { - 
todo!() + self.shared_state + .read() + .extract_block_graph_part(start_slot, end_slot) } - fn get_block_statuses(&self, ids: Vec) -> BlockGraphStatus { - todo!() + fn get_block_statuses(&self, ids: Vec) -> Vec { + let read_shared_state = self.shared_state.read(); + ids.iter() + .map(|id| read_shared_state.get_block_status(id)) + .collect() } fn get_cliques(&self) -> Vec { - todo!() + self.shared_state.read().max_cliques.clone() } fn get_bootstrap_graph(&self) -> GraphResult { - todo!() + let read_shared_state = self.shared_state.read(); + let mut required_final_blocks: PreHashSet<_> = + read_shared_state.list_required_active_blocks()?; + required_final_blocks.retain(|b_id| { + if let Some(BlockStatus::Active { a_block, .. }) = + read_shared_state.block_statuses.get(b_id) + { + if a_block.is_final { + // filter only final actives + return true; + } + } + false + }); + let mut final_blocks: Vec = + Vec::with_capacity(required_final_blocks.len()); + for b_id in &required_final_blocks { + if let Some(BlockStatus::Active { a_block, storage }) = + read_shared_state.block_statuses.get(b_id) + { + final_blocks.push(ExportActiveBlock::from_active_block(a_block, storage)); + } else { + return Err(GraphError::ContainerInconsistency(format!( + "block {} was expected to be active but wasn't on bootstrap graph export", + b_id + ))); + } + } + + Ok(BootstrapableGraph { final_blocks }) } fn get_stats(&self) -> GraphResult { todo!() } - fn get_best_parents(&self) -> &Vec<(BlockId, u64)> { - todo!() + fn get_best_parents(&self) -> Vec<(BlockId, u64)> { + self.shared_state.read().best_parents.clone() } fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option { - todo!() + self.shared_state + .read() + .get_blockclique_block_at_slot(&slot) } fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { - todo!() + self.shared_state + .read() + .get_latest_blockclique_block_at_slot(&slot) } fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage) { diff --git a/massa-graph-2-worker/src/lib.rs b/massa-graph-2-worker/src/lib.rs index 308f42a074f..67e34577ed4 100644 --- a/massa-graph-2-worker/src/lib.rs +++ b/massa-graph-2-worker/src/lib.rs @@ -1,8 +1,7 @@ #![feature(deadline_api)] -mod block_graph; -mod block_status; mod commands; mod controller; mod manager; +mod state; mod worker; diff --git a/massa-graph-2-worker/src/state.rs b/massa-graph-2-worker/src/state.rs new file mode 100644 index 00000000000..8fa209e5826 --- /dev/null +++ b/massa-graph-2-worker/src/state.rs @@ -0,0 +1,346 @@ +use massa_graph::error::{GraphError, GraphResult}; +use massa_graph_2_exports::{ + block_graph_export::BlockGraphExport, + block_status::{BlockStatus, ExportCompiledBlock}, + GraphConfig, +}; +use massa_models::{ + active_block::ActiveBlock, + api::BlockGraphStatus, + block::BlockId, + clique::Clique, + prehash::{CapacityAllocator, PreHashMap, PreHashSet}, + slot::Slot, +}; +use massa_storage::Storage; + +#[derive(Clone, Debug)] +pub struct GraphState { + /// Configuration + pub config: GraphConfig, + /// Storage + pub storage: Storage, + /// Block ids of genesis blocks + pub genesis_hashes: Vec, + /// Incompatibility graph: maps a block id to the block ids it is incompatible with + /// One entry per Active Block + pub gi_head: PreHashMap>, + /// All the cliques + pub max_cliques: Vec, + /// ids of active blocks + pub active_index: PreHashSet, + /// One (block id, period) per thread + pub latest_final_blocks_periods: Vec<(BlockId, u64)>, + /// One `(block id, period)` per thread TODO not 
sure I understand the difference with `latest_final_blocks_periods` + pub best_parents: Vec<(BlockId, u64)>, + /// Every block we know about + pub block_statuses: PreHashMap, +} + +impl GraphState { + fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { + match self.block_statuses.get(&block_id) { + Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), + _ => None, + } + } + + /// get the blockclique (or final) block ID at a given slot, if any + pub(crate) fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option { + // List all blocks at this slot. + // The list should be small: make a copy of it to avoid holding the storage lock. + let blocks_at_slot = { + let storage_read = self.storage.read_blocks(); + let returned = match storage_read.get_blocks_by_slot(slot) { + Some(v) => v.clone(), + None => return None, + }; + returned + }; + + // search for the block in the blockclique + let search_in_blockclique = blocks_at_slot + .intersection( + &self + .max_cliques + .iter() + .find(|c| c.is_blockclique) + .expect("expected one clique to be the blockclique") + .block_ids, + ) + .next(); + if let Some(found_id) = search_in_blockclique { + return Some(*found_id); + } + + // block not found in the blockclique: search in the final blocks + blocks_at_slot + .into_iter() + .find(|b_id| match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, .. }) => a_block.is_final, + _ => false, + }) + } + + /// get the latest blockclique (or final) block ID at a given slot, if any + pub(crate) fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId { + let (mut best_block_id, mut best_block_period) = self + .latest_final_blocks_periods + .get(slot.thread as usize) + .unwrap_or_else(|| panic!("unexpected not found latest final block period")); + + self.max_cliques + .iter() + .find(|c| c.is_blockclique) + .expect("expected one clique to be the blockclique") + .block_ids + .iter() + .for_each(|id| match self.block_statuses.get(id) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => { + if a_block.is_final { + panic!( + "unexpected final block on getting latest blockclique block at slot" + ); + } + if a_block.slot.thread == slot.thread + && a_block.slot.period < slot.period + && a_block.slot.period > best_block_period + { + best_block_period = a_block.slot.period; + best_block_id = *id; + } + } + _ => { + panic!("expected to find only active block but found another status") + } + }); + best_block_id + } + + pub(crate) fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus { + match self.block_statuses.get(block_id) { + None => BlockGraphStatus::NotFound, + Some(BlockStatus::Active { a_block, .. }) => { + if a_block.is_final { + BlockGraphStatus::Final + } else if self + .max_cliques + .iter() + .find(|clique| clique.is_blockclique) + .expect("blockclique absent") + .block_ids + .contains(block_id) + { + BlockGraphStatus::ActiveInBlockclique + } else { + BlockGraphStatus::ActiveInAlternativeCliques + } + } + Some(BlockStatus::Discarded { .. }) => BlockGraphStatus::Discarded, + Some(BlockStatus::Incoming(_)) => BlockGraphStatus::Incoming, + Some(BlockStatus::WaitingForDependencies { .. 
}) => { + BlockGraphStatus::WaitingForDependencies + } + Some(BlockStatus::WaitingForSlot(_)) => BlockGraphStatus::WaitingForSlot, + } + } + pub(crate) fn list_required_active_blocks(&self) -> GraphResult> { + // list all active blocks + let mut retain_active: PreHashSet = + PreHashSet::::with_capacity(self.active_index.len()); + + let latest_final_blocks: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(hash, _)| *hash) + .collect(); + + // retain all non-final active blocks, + // the current "best parents", + // and the dependencies for both. + for block_id in self.active_index.iter() { + if let Some(BlockStatus::Active { + a_block: active_block, + .. + }) = self.block_statuses.get(block_id) + { + if !active_block.is_final + || self.best_parents.iter().any(|(b, _p)| b == block_id) + || latest_final_blocks.contains(block_id) + { + retain_active.extend(active_block.parents.iter().map(|(p, _)| *p)); + retain_active.insert(*block_id); + } + } + } + + // retain best parents + retain_active.extend(self.best_parents.iter().map(|(b, _p)| *b)); + + // retain last final blocks + retain_active.extend(self.latest_final_blocks_periods.iter().map(|(h, _)| *h)); + + for (thread, id) in latest_final_blocks.iter().enumerate() { + let mut current_block_id = *id; + while let Some((current_block, _)) = self.get_full_active_block(¤t_block_id) { + let parent_id = { + if !current_block.parents.is_empty() { + Some(current_block.parents[thread as usize].0) + } else { + None + } + }; + + // retain block + retain_active.insert(current_block_id); + + // stop traversing when reaching a block with period number low enough + // so that any of its operations will have their validity period expired at the latest final block in thread + // note: one more is kept because of the way we iterate + if current_block.slot.period + < self.latest_final_blocks_periods[thread] + .1 + .saturating_sub(self.config.operation_validity_periods) + { + break; + } + + // if not genesis, traverse parent + match parent_id { + Some(p_id) => current_block_id = p_id, + None => break, + } + } + } + + // grow with parents & fill thread holes twice + for _ in 0..2 { + // retain the parents of the selected blocks + let retain_clone = retain_active.clone(); + + for retain_h in retain_clone.into_iter() { + retain_active.extend( + self.get_full_active_block(&retain_h) + .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? + .0.parents + .iter() + .map(|(b_id, _p)| *b_id), + ) + } + + // find earliest kept slots in each thread + let mut earliest_retained_periods: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(_, p)| *p) + .collect(); + for retain_h in retain_active.iter() { + let retain_slot = &self + .get_full_active_block(retain_h) + .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? 
+                .0.slot;
+            earliest_retained_periods[retain_slot.thread as usize] = std::cmp::min(
+                earliest_retained_periods[retain_slot.thread as usize],
+                retain_slot.period,
+            );
+        }
+
+        // fill up from the latest final block back to the earliest for each thread
+        for thread in 0..self.config.thread_count {
+            let mut cursor = self.latest_final_blocks_periods[thread as usize].0; // hash of the latest final in that thread
+            while let Some((c_block, _)) = self.get_full_active_block(&cursor) {
+                if c_block.slot.period < earliest_retained_periods[thread as usize] {
+                    break;
+                }
+                retain_active.insert(cursor);
+                if c_block.parents.is_empty() {
+                    // genesis
+                    break;
+                }
+                cursor = c_block.parents[thread as usize].0;
+            }
+        }
+    }
+
+    Ok(retain_active)
+}
+
+    pub fn extract_block_graph_part(
+        &self,
+        slot_start: Option<Slot>,
+        slot_end: Option<Slot>,
+    ) -> GraphResult<BlockGraphExport> {
+        let mut export = BlockGraphExport {
+            genesis_blocks: self.genesis_hashes.clone(),
+            active_blocks: PreHashMap::with_capacity(self.block_statuses.len()),
+            discarded_blocks: PreHashMap::with_capacity(self.block_statuses.len()),
+            best_parents: self.best_parents.clone(),
+            latest_final_blocks_periods: self.latest_final_blocks_periods.clone(),
+            gi_head: self.gi_head.clone(),
+            max_cliques: self.max_cliques.clone(),
+        };
+
+        let filter = |&s| {
+            if let Some(s_start) = slot_start {
+                if s < s_start {
+                    return false;
+                }
+            }
+            if let Some(s_end) = slot_end {
+                if s >= s_end {
+                    return false;
+                }
+            }
+            true
+        };
+
+        for (hash, block) in self.block_statuses.iter() {
+            match block {
+                BlockStatus::Discarded {
+                    slot,
+                    creator,
+                    parents,
+                    reason,
+                    ..
+                } => {
+                    if filter(slot) {
+                        export
+                            .discarded_blocks
+                            .insert(*hash, (reason.clone(), (*slot, *creator, parents.clone())));
+                    }
+                }
+                BlockStatus::Active { a_block, storage } => {
+                    if filter(&a_block.slot) {
+                        let stored_block =
+                            storage.read_blocks().get(hash).cloned().ok_or_else(|| {
+                                GraphError::MissingBlock(format!(
+                                    "missing block in BlockGraphExport::extract_from: {}",
+                                    hash
+                                ))
+                            })?;
+                        export.active_blocks.insert(
+                            *hash,
+                            ExportCompiledBlock {
+                                header: stored_block.content.header,
+                                children: a_block
+                                    .children
+                                    .iter()
+                                    .map(|thread| {
+                                        thread.keys().copied().collect::<PreHashSet<BlockId>>()
+                                    })
+                                    .collect(),
+                                is_final: a_block.is_final,
+                            },
+                        );
+                    }
+                }
+                _ => continue,
+            }
+        }
+
+        Ok(export)
+    }
+}
diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs
index 5f18e9ceb59..25d14066ba8 100644
--- a/massa-graph-2-worker/src/worker/init.rs
+++ b/massa-graph-2-worker/src/worker/init.rs
@@ -7,7 +7,7 @@ use massa_graph::{
     error::{GraphError, GraphResult},
     BootstrapableGraph,
 };
-use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphState};
+use massa_graph_2_exports::{block_status::BlockStatus, GraphChannels, GraphConfig};
 use massa_hash::Hash;
 use massa_models::{
     active_block::ActiveBlock,
@@ -24,7 +24,7 @@ use massa_time::MassaTime;
 use parking_lot::RwLock;
 use tracing::log::info;
 
-use crate::{block_status::BlockStatus, commands::GraphCommand};
+use crate::{commands::GraphCommand, state::GraphState};
 
 use super::GraphWorker;
 
@@ -177,21 +177,10 @@ impl GraphWorker {
             ),
             launch_time: MassaTime::now(config.clock_compensation_millis)?,
             sequence_counter: 0,
-            block_statuses,
             incoming_index: Default::default(),
             waiting_for_slot_index: Default::default(),
             waiting_for_dependencies_index: Default::default(),
-            active_index: genesis_block_ids.iter().copied().collect(),
             discarded_index: Default::default(),
-            latest_final_blocks_periods:
genesis_block_ids.iter().map(|h| (*h, 0)).collect(), - best_parents: genesis_block_ids.iter().map(|v| (*v, 0)).collect(), - genesis_hashes: genesis_block_ids.clone(), - gi_head: PreHashMap::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }], to_propagate: Default::default(), attack_attempts: Default::default(), new_final_blocks: Default::default(), @@ -217,30 +206,47 @@ impl GraphWorker { } } - res_graph.active_index = final_blocks.iter().map(|(b, _)| b.block_id).collect(); - res_graph.best_parents = latest_final_blocks_periods.clone(); - res_graph.latest_final_blocks_periods = latest_final_blocks_periods; - res_graph.block_statuses = final_blocks - .into_iter() - .map(|(b, s)| { - Ok(( - b.block_id, - BlockStatus::Active { - a_block: Box::new(b), - storage: s, - }, - )) - }) - .collect::>()?; + { + let mut write_shared_state = res_graph.shared_state.write(); + write_shared_state.genesis_hashes = genesis_block_ids; + write_shared_state.active_index = + final_blocks.iter().map(|(b, _)| b.block_id).collect(); + write_shared_state.best_parents = latest_final_blocks_periods.clone(); + write_shared_state.latest_final_blocks_periods = latest_final_blocks_periods; + write_shared_state.block_statuses = final_blocks + .into_iter() + .map(|(b, s)| { + Ok(( + b.block_id, + BlockStatus::Active { + a_block: Box::new(b), + storage: s, + }, + )) + }) + .collect::>()?; + } res_graph.claim_parent_refs()?; + } else { + { + let mut write_shared_state = res_graph.shared_state.write(); + write_shared_state.active_index = genesis_block_ids.iter().copied().collect(); + write_shared_state.latest_final_blocks_periods = + genesis_block_ids.iter().map(|h| (*h, 0)).collect(); + write_shared_state.best_parents = + genesis_block_ids.iter().map(|v| (*v, 0)).collect(); + write_shared_state.genesis_hashes = genesis_block_ids; + write_shared_state.block_statuses = block_statuses; + } } Ok(res_graph) //TODO: Add notify execution } fn claim_parent_refs(&mut self) -> GraphResult<()> { - for (_b_id, block_status) in self.block_statuses.iter_mut() { + let mut write_shared_state = self.shared_state.write(); + for (_b_id, block_status) in write_shared_state.block_statuses.iter_mut() { if let BlockStatus::Active { a_block, storage: block_storage, @@ -263,7 +269,7 @@ impl GraphWorker { } // list active block parents - let active_blocks_map: PreHashMap)> = self + let active_blocks_map: PreHashMap)> = write_shared_state .block_statuses .iter() .filter_map(|(h, s)| { @@ -274,20 +280,12 @@ impl GraphWorker { }) .collect(); - self.deduce_children_and_descendants(active_blocks_map); - Ok(()) - } - - fn deduce_children_and_descendants( - &mut self, - active_blocks_map: PreHashMap)>, - ) { for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { // deduce children for parent_id in &b_parents { if let Some(BlockStatus::Active { a_block: parent, .. - }) = self.block_statuses.get_mut(parent_id) + }) = write_shared_state.block_statuses.get_mut(parent_id) { parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); } @@ -301,7 +299,7 @@ impl GraphWorker { continue; } if let Some(BlockStatus::Active { a_block: ab, .. 
}) = - self.block_statuses.get_mut(&ancestor_h) + write_shared_state.block_statuses.get_mut(&ancestor_h) { ab.descendants.insert(b_id); for (ancestor_parent_h, _) in ab.parents.iter() { @@ -310,5 +308,6 @@ impl GraphWorker { } } } + Ok(()) } } diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 7f3beed5057..22ac6f383c0 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -1,6 +1,6 @@ use massa_graph::BootstrapableGraph; use massa_graph_2_exports::{ - GraphChannels, GraphConfig, GraphController, GraphManager, GraphState, + block_status::BlockStatus, GraphChannels, GraphConfig, GraphController, GraphManager, }; use massa_models::address::Address; use massa_models::block::{BlockId, WrappedHeader}; @@ -15,10 +15,10 @@ use std::sync::{mpsc, Arc}; use std::thread; use std::time::Instant; -use crate::block_status::BlockStatus; use crate::commands::GraphCommand; use crate::controller::GraphControllerImpl; use crate::manager::GraphManagerImpl; +use crate::state::GraphState; pub struct GraphWorker { command_receiver: mpsc::Receiver, @@ -47,31 +47,16 @@ pub struct GraphWorker { /// time at which the node was launched (used for desynchronization detection) launch_time: MassaTime, - /// Block ids of genesis blocks - genesis_hashes: Vec, /// Used to limit the number of waiting and discarded blocks sequence_counter: u64, - /// Every block we know about - block_statuses: PreHashMap, /// Ids of incoming blocks/headers incoming_index: PreHashSet, /// ids of waiting for slot blocks/headers waiting_for_slot_index: PreHashSet, /// ids of waiting for dependencies blocks/headers waiting_for_dependencies_index: PreHashSet, - /// ids of active blocks - active_index: PreHashSet, /// ids of discarded blocks discarded_index: PreHashSet, - /// One (block id, period) per thread - latest_final_blocks_periods: Vec<(BlockId, u64)>, - /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` - best_parents: Vec<(BlockId, u64)>, - /// Incompatibility graph: maps a block id to the block ids it is incompatible with - /// One entry per Active Block - gi_head: PreHashMap>, - /// All the cliques - max_cliques: Vec, /// Blocks that need to be propagated to_propagate: PreHashMap, /// List of block ids we think are attack attempts @@ -94,7 +79,21 @@ pub fn start_graph_worker( storage: Storage, ) -> (Box, Box) { let (tx, rx) = mpsc::sync_channel(10); - let shared_state = Arc::new(RwLock::new(GraphState {})); + let shared_state = Arc::new(RwLock::new(GraphState { + storage: storage.clone(), + config: config.clone(), + max_cliques: vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }], + active_index: Default::default(), + latest_final_blocks_periods: Default::default(), + best_parents: Default::default(), + block_statuses: Default::default(), + genesis_hashes: Default::default(), + gi_head: Default::default(), + })); let shared_state_cloned = shared_state.clone(); let thread_graph = thread::Builder::new() From 3b4c9905c7b8e0a6975c7d397991cb97a9343f87 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 29 Sep 2022 10:17:04 +0200 Subject: [PATCH 09/40] Add commands manager. 
--- massa-graph-2-worker/src/commands.rs | 1 + massa-graph-2-worker/src/controller.rs | 14 ++++-- massa-graph-2-worker/src/worker/mod.rs | 1 + .../src/worker/process_commands.rs | 50 +++++++++++++++++++ 4 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 massa-graph-2-worker/src/worker/process_commands.rs diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs index f59bad59893..584f4b553d3 100644 --- a/massa-graph-2-worker/src/commands.rs +++ b/massa-graph-2-worker/src/commands.rs @@ -9,5 +9,6 @@ use massa_storage::Storage; pub enum GraphCommand { RegisterBlock(BlockId, Slot, Storage), RegisterBlockHeader(BlockId, Wrapped), + MarkInvalidBlock(BlockId, Wrapped), Stop, } diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index a2c20002a67..dab2ea3b5e0 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -115,14 +115,22 @@ impl GraphController for GraphControllerImpl { } fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage) { - todo!() + let _ = self.command_sender.try_send(GraphCommand::RegisterBlock( + block_id, + slot, + block_storage, + )); } fn register_block_header(&self, block_id: BlockId, header: Wrapped) { - todo!() + let _ = self + .command_sender + .try_send(GraphCommand::RegisterBlockHeader(block_id, header)); } fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { - todo!() + let _ = self + .command_sender + .try_send(GraphCommand::MarkInvalidBlock(block_id, header)); } } diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 22ac6f383c0..2fffac75290 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -71,6 +71,7 @@ pub struct GraphWorker { mod init; mod main_loop; +mod process_commands; pub fn start_graph_worker( config: GraphConfig, diff --git a/massa-graph-2-worker/src/worker/process_commands.rs b/massa-graph-2-worker/src/worker/process_commands.rs new file mode 100644 index 00000000000..77047df1c8a --- /dev/null +++ b/massa-graph-2-worker/src/worker/process_commands.rs @@ -0,0 +1,50 @@ +use massa_graph::error::GraphResult; + +use super::GraphWorker; + +impl GraphWorker { + // pub fn incoming_header( + // &mut self, + // block_id: BlockId, + // header: WrappedHeader, + // current_slot: Option, + // ) -> GraphResult<()> { + // // ignore genesis blocks + // if self.genesis_hashes.contains(&block_id) { + // return Ok(()); + // } + + // debug!( + // "received header {} for slot {}", + // block_id, header.content.slot + // ); + // massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); + // let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); + // match self.block_statuses.entry(block_id) { + // // if absent => add as Incoming, call rec_ack on it + // hash_map::Entry::Vacant(vac) => { + // to_ack.insert((header.content.slot, block_id)); + // vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); + // self.incoming_index.insert(block_id); + // } + // hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { + // BlockStatus::Discarded { + // sequence_number, .. + // } => { + // // promote if discarded + // *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); + // } + // BlockStatus::WaitingForDependencies { .. 
} => { + // // promote in dependencies + // self.promote_dep_tree(block_id)?; + // } + // _ => {} + // }, + // } + + // // process + // self.rec_process(to_ack, current_slot)?; + + // Ok(()) + // } +} From 4da39722159f5759b4ff4702a329a23f52ec88b2 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 29 Sep 2022 17:22:46 +0200 Subject: [PATCH 10/40] Add lock for state. --- Cargo.lock | 4 + massa-graph-2-exports/Cargo.toml | 1 + massa-graph-2-exports/src/channels.rs | 5 +- massa-graph-2-worker/Cargo.toml | 5 +- massa-graph-2-worker/src/controller.rs | 4 +- massa-graph-2-worker/src/lib.rs | 1 + massa-graph-2-worker/src/state.rs | 2 +- massa-graph-2-worker/src/worker/graph.rs | 974 ++++++++++++++++++ massa-graph-2-worker/src/worker/main_loop.rs | 1 + massa-graph-2-worker/src/worker/mod.rs | 2 + .../src/worker/process_commands.rs | 94 +- .../src/worker/verifications.rs | 393 +++++++ 12 files changed, 1439 insertions(+), 47 deletions(-) create mode 100644 massa-graph-2-worker/src/worker/graph.rs create mode 100644 massa-graph-2-worker/src/worker/verifications.rs diff --git a/Cargo.lock b/Cargo.lock index cdb1563533b..7084d163b6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1925,6 +1925,7 @@ dependencies = [ "displaydoc", "massa_graph", "massa_models", + "massa_pos_exports", "massa_signature", "massa_storage", "massa_time", @@ -1940,9 +1941,12 @@ dependencies = [ "massa_graph", "massa_graph_2_exports", "massa_hash", + "massa_logging", "massa_models", + "massa_signature", "massa_storage", "massa_time", + "num", "parking_lot", "serde 1.0.144", "serde_json", diff --git a/massa-graph-2-exports/Cargo.toml b/massa-graph-2-exports/Cargo.toml index 3222e553946..6c5db29c044 100644 --- a/massa-graph-2-exports/Cargo.toml +++ b/massa-graph-2-exports/Cargo.toml @@ -13,6 +13,7 @@ serde_json = "1.0" #custom modules massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } +massa_pos_exports = { path = "../massa-pos-exports" } massa_storage = { path = "../massa-storage" } massa_time = { path = "../massa-time" } massa_signature = { path = "../massa-signature" } \ No newline at end of file diff --git a/massa-graph-2-exports/src/channels.rs b/massa-graph-2-exports/src/channels.rs index b5a693f9481..9ea6b2d2bd9 100644 --- a/massa-graph-2-exports/src/channels.rs +++ b/massa-graph-2-exports/src/channels.rs @@ -1,6 +1,9 @@ use std::sync::mpsc::Receiver; +use massa_pos_exports::SelectorController; /// Contains a reference to the pool, selector and execution controller /// Contains a channel to send info to protocol #[derive(Clone)] -pub struct GraphChannels {} +pub struct GraphChannels { + pub selector_controller: Box, +} diff --git a/massa-graph-2-worker/Cargo.toml b/massa-graph-2-worker/Cargo.toml index 5beda1a6ab8..5d76d822b13 100644 --- a/massa-graph-2-worker/Cargo.toml +++ b/massa-graph-2-worker/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" [dependencies] displaydoc = "0.2" +num = { version = "0.4", features = ["serde"] } tracing = "0.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -17,5 +18,7 @@ massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } +massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } -massa_hash = { path = "../massa-hash" } \ No newline at end of file +massa_hash = { path = "../massa-hash" } +massa_logging = { path = "../massa-logging" } diff --git 
a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index dab2ea3b5e0..298494c0ba2 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -4,13 +4,13 @@ use massa_graph::{ BootstrapableGraph, }; use massa_graph_2_exports::{ - block_graph_export::BlockGraphExport, block_status::BlockStatus, GraphConfig, GraphController, + block_graph_export::BlockGraphExport, block_status::BlockStatus, GraphController, }; use massa_models::{ api::BlockGraphStatus, block::{BlockHeader, BlockId}, clique::Clique, - prehash::{CapacityAllocator, PreHashSet}, + prehash::PreHashSet, slot::Slot, stats::ConsensusStats, wrapped::Wrapped, diff --git a/massa-graph-2-worker/src/lib.rs b/massa-graph-2-worker/src/lib.rs index 67e34577ed4..fd4291bf512 100644 --- a/massa-graph-2-worker/src/lib.rs +++ b/massa-graph-2-worker/src/lib.rs @@ -1,4 +1,5 @@ #![feature(deadline_api)] +#![feature(map_first_last)] mod commands; mod controller; diff --git a/massa-graph-2-worker/src/state.rs b/massa-graph-2-worker/src/state.rs index 8fa209e5826..30d7201421e 100644 --- a/massa-graph-2-worker/src/state.rs +++ b/massa-graph-2-worker/src/state.rs @@ -39,7 +39,7 @@ pub struct GraphState { impl GraphState { fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { - match self.block_statuses.get(&block_id) { + match self.block_statuses.get(block_id) { Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), _ => None, } diff --git a/massa-graph-2-worker/src/worker/graph.rs b/massa-graph-2-worker/src/worker/graph.rs new file mode 100644 index 00000000000..c1a9d5f3864 --- /dev/null +++ b/massa-graph-2-worker/src/worker/graph.rs @@ -0,0 +1,974 @@ +use std::collections::{BTreeSet, VecDeque}; + +use crate::{worker::verifications::HeaderCheckOutcome, state::GraphState}; + +use super::GraphWorker; +use massa_graph::error::{GraphError, GraphResult}; +use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason, HeaderOrBlock}; +use massa_logging::massa_trace; +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::BlockId, + clique::Clique, + prehash::{PreHashMap, PreHashSet}, + slot::Slot, +}; +use massa_signature::PublicKey; +use massa_storage::Storage; +use parking_lot::RwLockWriteGuard; +use tracing::log::{debug, info}; + +impl GraphWorker { + /// acknowledge a set of items recursively + pub fn rec_process( + &mut self, + mut to_ack: BTreeSet<(Slot, BlockId)>, + current_slot: Option, + mut write_shared_state: &mut RwLockWriteGuard, + ) -> GraphResult<()> { + // order processing by (slot, hash) + while let Some((_slot, hash)) = to_ack.pop_first() { + to_ack.extend(self.process(hash, current_slot, write_shared_state)?) 
+ } + Ok(()) + } + + /// Acknowledge a single item, return a set of items to re-ack + pub fn process( + &mut self, + block_id: BlockId, + current_slot: Option, + mut write_shared_state: &mut RwLockWriteGuard, + ) -> GraphResult> { + // list items to reprocess + let mut reprocess = BTreeSet::new(); + + massa_trace!("consensus.block_graph.process", { "block_id": block_id }); + // control all the waiting states and try to get a valid block + let ( + valid_block_creator, + valid_block_slot, + valid_block_parents_hash_period, + valid_block_incomp, + valid_block_inherited_incomp_count, + valid_block_storage, + valid_block_fitness, + ) = match write_shared_state.block_statuses.get(&block_id) { + None => return Ok(BTreeSet::new()), // disappeared before being processed: do nothing + + // discarded: do nothing + Some(BlockStatus::Discarded { .. }) => { + massa_trace!("consensus.block_graph.process.discarded", { + "block_id": block_id + }); + return Ok(BTreeSet::new()); + } + + // already active: do nothing + Some(BlockStatus::Active { .. }) => { + massa_trace!("consensus.block_graph.process.active", { + "block_id": block_id + }); + return Ok(BTreeSet::new()); + } + + // incoming header + Some(BlockStatus::Incoming(HeaderOrBlock::Header(_))) => { + massa_trace!("consensus.block_graph.process.incoming_header", { + "block_id": block_id + }); + // remove header + let header = if let Some(BlockStatus::Incoming(HeaderOrBlock::Header(header))) = + write_shared_state.block_statuses.remove(&block_id) + { + self.incoming_index.remove(&block_id); + header + } else { + return Err(GraphError::ContainerInconsistency(format!( + "inconsistency inside block statuses removing incoming header {}", + block_id + ))); + }; + match self.check_header(&block_id, &header, current_slot, &write_shared_state)? { + HeaderCheckOutcome::Proceed { .. 
} => { + // set as waiting dependencies + let mut dependencies = PreHashSet::::default(); + dependencies.insert(block_id); // add self as unsatisfied + write_shared_state.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id, write_shared_state)?; + + massa_trace!( + "consensus.block_graph.process.incoming_header.waiting_for_self", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForDependencies(mut dependencies) => { + // set as waiting dependencies + dependencies.insert(block_id); // add self as unsatisfied + massa_trace!("consensus.block_graph.process.incoming_header.waiting_for_dependencies", {"block_id": block_id, "dependencies": dependencies}); + + write_shared_state.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id, write_shared_state)?; + + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForSlot => { + // make it wait for slot + write_shared_state.block_statuses.insert( + block_id, + BlockStatus::WaitingForSlot(HeaderOrBlock::Header(header)), + ); + self.waiting_for_slot_index.insert(block_id); + + massa_trace!( + "consensus.block_graph.process.incoming_header.waiting_for_slot", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::Discard(reason) => { + self.maybe_note_attack_attempt(&reason, &block_id); + massa_trace!("consensus.block_graph.process.incoming_header.discarded", {"block_id": block_id, "reason": reason}); + // count stales + if reason == DiscardReason::Stale { + self.new_stale_blocks + .insert(block_id, (header.creator_address, header.content.slot)); + } + // discard + write_shared_state.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents, + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + + return Ok(BTreeSet::new()); + } + } + } + + // incoming block + Some(BlockStatus::Incoming(HeaderOrBlock::Block { id: block_id, .. })) => { + let block_id = *block_id; + massa_trace!("consensus.block_graph.process.incoming_block", { + "block_id": block_id + }); + let (slot, storage) = + if let Some(BlockStatus::Incoming(HeaderOrBlock::Block { + slot, storage, .. + })) = write_shared_state.block_statuses.remove(&block_id) + { + self.incoming_index.remove(&block_id); + (slot, storage) + } else { + return Err(GraphError::ContainerInconsistency(format!( + "inconsistency inside block statuses removing incoming block {}", + block_id + ))); + }; + let stored_block = storage + .read_blocks() + .get(&block_id) + .cloned() + .expect("incoming block not found in storage"); + + match self.check_header(&block_id, &stored_block.content.header, current_slot, &write_shared_state)? 
{ + HeaderCheckOutcome::Proceed { + parents_hash_period, + incompatibilities, + inherited_incompatibilities_count, + fitness, + } => { + // block is valid: remove it from Incoming and return it + massa_trace!("consensus.block_graph.process.incoming_block.valid", { + "block_id": block_id + }); + ( + stored_block.content.header.creator_public_key, + slot, + parents_hash_period, + incompatibilities, + inherited_incompatibilities_count, + storage, + fitness, + ) + } + HeaderCheckOutcome::WaitForDependencies(dependencies) => { + // set as waiting dependencies + write_shared_state.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }, + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id, write_shared_state)?; + massa_trace!( + "consensus.block_graph.process.incoming_block.waiting_for_dependencies", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForSlot => { + // set as waiting for slot + write_shared_state.block_statuses.insert( + block_id, + BlockStatus::WaitingForSlot(HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }), + ); + self.waiting_for_slot_index.insert(block_id); + + massa_trace!( + "consensus.block_graph.process.incoming_block.waiting_for_slot", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::Discard(reason) => { + self.maybe_note_attack_attempt(&reason, &block_id); + massa_trace!("consensus.block_graph.process.incoming_block.discarded", {"block_id": block_id, "reason": reason}); + // count stales + if reason == DiscardReason::Stale { + self.new_stale_blocks.insert( + block_id, + ( + stored_block.content.header.creator_address, + stored_block.content.header.content.slot, + ), + ); + } + // add to discard + write_shared_state.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: stored_block.content.header.content.slot, + creator: stored_block.creator_address, + parents: stored_block.content.header.content.parents.clone(), + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + + return Ok(BTreeSet::new()); + } + } + } + + Some(BlockStatus::WaitingForSlot(header_or_block)) => { + massa_trace!("consensus.block_graph.process.waiting_for_slot", { + "block_id": block_id + }); + let slot = header_or_block.get_slot(); + if Some(slot) > current_slot { + massa_trace!( + "consensus.block_graph.process.waiting_for_slot.in_the_future", + { "block_id": block_id } + ); + // in the future: ignore + return Ok(BTreeSet::new()); + } + // send back as incoming and ask for reprocess + if let Some(BlockStatus::WaitingForSlot(header_or_block)) = + write_shared_state.block_statuses.remove(&block_id) + { + self.waiting_for_slot_index.remove(&block_id); + write_shared_state.block_statuses + .insert(block_id, BlockStatus::Incoming(header_or_block)); + self.incoming_index.insert(block_id); + reprocess.insert((slot, block_id)); + massa_trace!( + "consensus.block_graph.process.waiting_for_slot.reprocess", + { "block_id": block_id } + ); + return Ok(reprocess); + } else { + return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); + }; + } + + 
Some(BlockStatus::WaitingForDependencies {
+                unsatisfied_dependencies,
+                ..
+            }) => {
+                massa_trace!("consensus.block_graph.process.waiting_for_dependencies", {
+                    "block_id": block_id
+                });
+                if !unsatisfied_dependencies.is_empty() {
+                    // still has unsatisfied dependencies: ignore
+                    return Ok(BTreeSet::new());
+                }
+                // send back as incoming and ask for reprocess
+                if let Some(BlockStatus::WaitingForDependencies {
+                    header_or_block, ..
+                }) = write_shared_state.block_statuses.remove(&block_id)
+                {
+                    self.waiting_for_dependencies_index.remove(&block_id);
+                    reprocess.insert((header_or_block.get_slot(), block_id));
+                    write_shared_state.block_statuses
+                        .insert(block_id, BlockStatus::Incoming(header_or_block));
+                    self.incoming_index.insert(block_id);
+                    massa_trace!(
+                        "consensus.block_graph.process.waiting_for_dependencies.reprocess",
+                        { "block_id": block_id }
+                    );
+                    return Ok(reprocess);
+                } else {
+                    return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for dependencies header or block {}", block_id)));
+                }
+            }
+        };
+
+        // add block to graph
+        self.add_block_to_graph(
+            block_id,
+            valid_block_parents_hash_period,
+            valid_block_creator,
+            valid_block_slot,
+            valid_block_incomp,
+            valid_block_inherited_incomp_count,
+            valid_block_fitness,
+            valid_block_storage,
+            write_shared_state,
+        )?;
+
+        // if the block was added, update linked dependencies and mark satisfied ones for recheck
+        if let Some(BlockStatus::Active { storage, .. }) = write_shared_state.block_statuses.get(&block_id) {
+            massa_trace!("consensus.block_graph.process.is_active", {
+                "block_id": block_id
+            });
+            self.to_propagate.insert(block_id, storage.clone());
+            for itm_block_id in self.waiting_for_dependencies_index.iter() {
+                if let Some(BlockStatus::WaitingForDependencies {
+                    header_or_block,
+                    unsatisfied_dependencies,
+                    ..
+                }) = write_shared_state.block_statuses.get_mut(itm_block_id)
+                {
+                    if unsatisfied_dependencies.remove(&block_id) {
+                        // a dependency was satisfied: retry
+                        reprocess.insert((header_or_block.get_slot(), *itm_block_id));
+                    }
+                }
+            }
+        }
+
+        Ok(reprocess)
+    }
+
+    pub fn promote_dep_tree(&mut self, hash: BlockId, mut write_shared_state: &mut RwLockWriteGuard<GraphState>) -> GraphResult<()> {
+        let mut to_explore = vec![hash];
+        let mut to_promote: PreHashMap<BlockId, (Slot, u64)> = PreHashMap::default();
+        while let Some(h) = to_explore.pop() {
+            if to_promote.contains_key(&h) {
+                continue;
+            }
+            if let Some(BlockStatus::WaitingForDependencies {
+                header_or_block,
+                unsatisfied_dependencies,
+                sequence_number,
+                ..
+            }) = write_shared_state.block_statuses.get(&h)
+            {
+                // promote current block
+                to_promote.insert(h, (header_or_block.get_slot(), *sequence_number));
+                // register dependencies for exploration
+                to_explore.extend(unsatisfied_dependencies);
+            }
+        }
+
+        let mut to_promote: Vec<(Slot, u64, BlockId)> = to_promote
+            .into_iter()
+            .map(|(h, (slot, seq))| (slot, seq, h))
+            .collect();
+        to_promote.sort_unstable(); // last ones should have the highest seq number
+        for (_slot, _seq, h) in to_promote.into_iter() {
+            if let Some(BlockStatus::WaitingForDependencies {
+                sequence_number, ..
+            }) = write_shared_state.block_statuses.get_mut(&h)
+            {
+                self.sequence_counter += 1;
+                *sequence_number = self.sequence_counter;
+            }
+        }
+        Ok(())
+    }
+
+    /// Computes max cliques of compatible blocks
+    pub fn compute_max_cliques(&self, read_shared_state: &GraphState) -> Vec<PreHashSet<BlockId>> {
+        let mut max_cliques: Vec<PreHashSet<BlockId>> = Vec::new();
+
+        // algorithm adapted from IK_GPX as summarized in:
+        //   Cazals et al., "A note on the problem of reporting maximal cliques"
+        //   Theoretical Computer Science, 2008
+        //   https://doi.org/10.1016/j.tcs.2008.05.010
+
+        // stack: r, p, x
+        let mut stack: Vec<(
+            PreHashSet<BlockId>,
+            PreHashSet<BlockId>,
+            PreHashSet<BlockId>,
+        )> = vec![(
+            PreHashSet::<BlockId>::default(),
+            read_shared_state.gi_head.keys().cloned().collect(),
+            PreHashSet::<BlockId>::default(),
+        )];
+        while let Some((r, mut p, mut x)) = stack.pop() {
+            if p.is_empty() && x.is_empty() {
+                max_cliques.push(r);
+                continue;
+            }
+            // choose the pivot vertex following the GPX scheme:
+            // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI))
+            let &u_p = p
+                .union(&x)
+                .max_by_key(|&u| {
+                    p.difference(&(&read_shared_state.gi_head[u] | &vec![*u].into_iter().collect()))
+                        .count()
+                })
+                .unwrap(); // p was checked to be non-empty before
+
+            // iterate over u_set = (p /\ Neighbors(u_p, GI))
+            let u_set: PreHashSet<BlockId> =
+                &p & &(&read_shared_state.gi_head[&u_p] | &vec![u_p].into_iter().collect());
+            for u_i in u_set.into_iter() {
+                p.remove(&u_i);
+                let u_i_set: PreHashSet<BlockId> = vec![u_i].into_iter().collect();
+                let comp_n_u_i: PreHashSet<BlockId> = &read_shared_state.gi_head[&u_i] | &u_i_set;
+                stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i));
+                x.insert(u_i);
+            }
+        }
+        if max_cliques.is_empty() {
+            // make sure at least one clique remains
+            max_cliques = vec![PreHashSet::<BlockId>::default()];
+        }
+        max_cliques
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn add_block_to_graph(
+        &mut self,
+        add_block_id: BlockId,
+        parents_hash_period: Vec<(BlockId, u64)>,
+        add_block_creator: PublicKey,
+        add_block_slot: Slot,
+        incomp: PreHashSet<BlockId>,
+        inherited_incomp_count: usize,
+        fitness: u64,
+        mut storage: Storage,
+        mut write_shared_state: &mut RwLockWriteGuard<GraphState>,
+    ) -> GraphResult<()> {
+        massa_trace!("consensus.block_graph.add_block_to_graph", {
+            "block_id": add_block_id
+        });
+
+        // Ensure block parents are claimed by the block's storage.
+        // Note that operations and endorsements should already be there (claimed in Protocol).
+        storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect());
+
+        // add block to status structure
+        write_shared_state.block_statuses.insert(
+            add_block_id,
+            BlockStatus::Active {
+                a_block: Box::new(ActiveBlock {
+                    creator_address: Address::from_public_key(&add_block_creator),
+                    parents: parents_hash_period.clone(),
+                    descendants: PreHashSet::<BlockId>::default(),
+                    block_id: add_block_id,
+                    children: vec![Default::default(); self.config.thread_count as usize],
+                    is_final: false,
+                    slot: add_block_slot,
+                    fitness,
+                }),
+                storage,
+            },
+        );
+        write_shared_state.active_index.insert(add_block_id);
+
+        // add as child to parents
+        for (parent_h, _parent_period) in parents_hash_period.iter() {
+            if let Some(BlockStatus::Active {
+                a_block: a_parent, ..
+            }) = write_shared_state.block_statuses.get_mut(parent_h)
+            {
+                a_parent.children[add_block_slot.thread as usize]
+                    .insert(add_block_id, add_block_slot.period);
+            } else {
+                return Err(GraphError::ContainerInconsistency(format!(
+                    "inconsistency inside block statuses adding child {} of block {}",
+                    add_block_id, parent_h
+                )));
+            }
+        }
+
+        // add as descendant to ancestors. Note: descendants are never removed.
+        {
+            let mut ancestors: VecDeque<BlockId> =
+                parents_hash_period.iter().map(|(h, _)| *h).collect();
+            let mut visited = PreHashSet::<BlockId>::default();
+            while let Some(ancestor_h) = ancestors.pop_back() {
+                if !visited.insert(ancestor_h) {
+                    continue;
+                }
+                if let Some(BlockStatus::Active { a_block: ab, .. }) =
+                    write_shared_state.block_statuses.get_mut(&ancestor_h)
+                {
+                    ab.descendants.insert(add_block_id);
+                    for (ancestor_parent_h, _) in ab.parents.iter() {
+                        ancestors.push_front(*ancestor_parent_h);
+                    }
+                }
+            }
+        }
+
+        // add incompatibilities to gi_head
+        massa_trace!(
+            "consensus.block_graph.add_block_to_graph.add_incompatibilities",
+            {}
+        );
+        for incomp_h in incomp.iter() {
+            write_shared_state.gi_head
+                .get_mut(incomp_h)
+                .ok_or_else(|| {
+                    GraphError::MissingBlock(format!(
+                        "missing block when adding incomp to gi_head: {}",
+                        incomp_h
+                    ))
+                })?
+                .insert(add_block_id);
+        }
+        write_shared_state.gi_head.insert(add_block_id, incomp.clone());
+
+        // max cliques update
+        massa_trace!(
+            "consensus.block_graph.add_block_to_graph.max_cliques_update",
+            {}
+        );
+        if incomp.len() == inherited_incomp_count {
+            // clique optimization routine:
+            //   the block only has incompatibilities inherited from its parents
+            //   therefore it is not forking and can simply be added to the cliques it is compatible with
+            write_shared_state.max_cliques
+                .iter_mut()
+                .filter(|c| incomp.is_disjoint(&c.block_ids))
+                .for_each(|c| {
+                    c.block_ids.insert(add_block_id);
+                });
+        } else {
+            // fully recompute max cliques
+            massa_trace!(
+                "consensus.block_graph.add_block_to_graph.clique_full_computing",
+                { "hash": add_block_id }
+            );
+            let before = write_shared_state.max_cliques.len();
+            write_shared_state.max_cliques = self
+                .compute_max_cliques(&write_shared_state)
+                .into_iter()
+                .map(|c| Clique {
+                    block_ids: c,
+                    fitness: 0,
+                    is_blockclique: false,
+                })
+                .collect();
+            let after = write_shared_state.max_cliques.len();
+            if before != after {
+                massa_trace!(
+                    "consensus.block_graph.add_block_to_graph.clique_full_computing more than one clique",
+                    { "cliques": write_shared_state.max_cliques, "gi_head": write_shared_state.gi_head }
+                );
+                // gi_head
+                debug!(
+                    "clique number went from {} to {} after adding {}",
+                    before, after, add_block_id
+                );
+            }
+        }
+
+        // compute clique fitnesses and find blockclique
+        massa_trace!("consensus.block_graph.add_block_to_graph.compute_clique_fitnesses_and_find_blockclique", {});
+        // note: clique_fitnesses is pair (fitness, -hash_sum) where the second parameter is negative for sorting
+        {
+            let mut blockclique_i = 0usize;
+            let mut max_clique_fitness = (0u64, num::BigInt::default());
+            for (clique_i, clique) in write_shared_state.max_cliques.iter_mut().enumerate() {
+                clique.fitness = 0;
+                clique.is_blockclique = false;
+                let mut sum_hash = num::BigInt::default();
+                for block_h in clique.block_ids.iter() {
+                    let fitness = match write_shared_state.block_statuses.get(block_h) {
+                        Some(BlockStatus::Active { a_block, storage }) => a_block.fitness,
+                        _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))),
+                    };
+                    clique.fitness = clique
+                        .fitness
+                        .checked_add(fitness)
+                        .ok_or(GraphError::FitnessOverflow)?;
+                    sum_hash -=
+                        num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes());
+                }
+                let cur_fit = (clique.fitness, sum_hash);
+                if cur_fit > max_clique_fitness {
+                    blockclique_i = clique_i;
+                    max_clique_fitness = cur_fit;
+                }
+            }
+            write_shared_state.max_cliques[blockclique_i].is_blockclique = true;
+        }
+
+        // update best parents
+        massa_trace!(
+            "consensus.block_graph.add_block_to_graph.update_best_parents",
+            {}
+        );
+        {
+            // find blockclique
+            let blockclique_i = write_shared_state
+                .max_cliques
+                .iter()
+                .position(|c| c.is_blockclique)
+                .unwrap_or_default();
+            let blockclique = &write_shared_state.max_cliques[blockclique_i];
+
+            // init best parents as latest_final_blocks_periods
+            write_shared_state.best_parents = write_shared_state.latest_final_blocks_periods.clone();
+            // for each blockclique block, set it as best_parent in its own thread
+            // if its period is higher than the current best_parent in that thread
+            for block_h in blockclique.block_ids.iter() {
+                let b_slot = match write_shared_state.block_statuses.get(block_h) {
+                    Some(BlockStatus::Active { a_block, storage: _ }) => a_block.slot,
+                    _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h))),
+                };
+                if b_slot.period > write_shared_state.best_parents[b_slot.thread as usize].1 {
+                    write_shared_state.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period);
+                }
+            }
+        }
+
+        // list stale blocks
+        massa_trace!(
+            "consensus.block_graph.add_block_to_graph.list_stale_blocks",
+            {}
+        );
+        let stale_blocks = {
+            let blockclique_i = write_shared_state
+                .max_cliques
+                .iter()
+                .position(|c| c.is_blockclique)
+                .unwrap_or_default();
+            let fitness_threshold = write_shared_state.max_cliques[blockclique_i]
+                .fitness
+                .saturating_sub(self.config.delta_f0);
+            // iterate from largest to smallest to minimize reallocations
+            let mut indices: Vec<usize> = (0..write_shared_state.max_cliques.len()).collect();
+            indices
+                .sort_unstable_by_key(|&i| std::cmp::Reverse(write_shared_state.max_cliques[i].block_ids.len()));
+            let mut high_set = PreHashSet::<BlockId>::default();
+            let mut low_set = PreHashSet::<BlockId>::default();
+            for clique_i in indices.into_iter() {
+                if write_shared_state.max_cliques[clique_i].fitness >= fitness_threshold {
+                    high_set.extend(&write_shared_state.max_cliques[clique_i].block_ids);
+                } else {
+                    low_set.extend(&write_shared_state.max_cliques[clique_i].block_ids);
+                }
+            }
+            write_shared_state.max_cliques.retain(|c| c.fitness >= fitness_threshold);
+            &low_set - &high_set
+        };
+        // mark stale blocks
+        massa_trace!(
+            "consensus.block_graph.add_block_to_graph.mark_stale_blocks",
+            {}
+        );
+        for stale_block_hash in stale_blocks.into_iter() {
+            if let Some(BlockStatus::Active {
+                a_block: active_block,
+                storage: _storage,
+            }) = write_shared_state.block_statuses.remove(&stale_block_hash)
+            {
+                write_shared_state.active_index.remove(&stale_block_hash);
+                if active_block.is_final {
+                    return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, stale_block_hash)));
+                }
+
+                // remove from gi_head
+                if let Some(other_incomps) = write_shared_state.gi_head.remove(&stale_block_hash) {
+                    for other_incomp in other_incomps.into_iter() {
+                        if let Some(other_incomp_lst) = write_shared_state.gi_head.get_mut(&other_incomp) {
+                            other_incomp_lst.remove(&stale_block_hash);
+                        }
+                    }
+                }
+
+                // remove from cliques
+                let stale_block_fitness = active_block.fitness;
+                write_shared_state.max_cliques.iter_mut().for_each(|c| {
+                    if c.block_ids.remove(&stale_block_hash) {
+                        c.fitness -= stale_block_fitness;
+                    }
+                });
+                write_shared_state.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques
+                if write_shared_state.max_cliques.is_empty() {
+                    // make sure at least one clique remains
+                    write_shared_state.max_cliques = vec![Clique {
+                        block_ids: PreHashSet::<BlockId>::default(),
+                        fitness: 0,
+                        is_blockclique: true,
+                    }];
+                }
+
+                // remove from parent's children
+                for (parent_h, _parent_period) in active_block.parents.iter() {
+                    if let Some(BlockStatus::Active {
+                        a_block: parent_active_block,
+                        ..
+                    }) = write_shared_state.block_statuses.get_mut(parent_h)
+                    {
+                        parent_active_block.children[active_block.slot.thread as usize]
+                            .remove(&stale_block_hash);
+                    }
+                }
+
+                massa_trace!("consensus.block_graph.add_block_to_graph.stale", {
+                    "hash": stale_block_hash
+                });
+
+                // mark as stale
+                self.new_stale_blocks.insert(
+                    stale_block_hash,
+                    (active_block.creator_address, active_block.slot),
+                );
+                write_shared_state.block_statuses.insert(
+                    stale_block_hash,
+                    BlockStatus::Discarded {
+                        slot: active_block.slot,
+                        creator: active_block.creator_address,
+                        parents: active_block.parents.iter().map(|(h, _)| *h).collect(),
+                        reason: DiscardReason::Stale,
+                        sequence_number: {
+                            self.sequence_counter += 1;
+                            self.sequence_counter
+                        },
+                    },
+                );
+                self.discarded_index.insert(stale_block_hash);
+            } else {
+                return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, stale_block_hash)));
+            }
+        }
+
+        // list final blocks
+        massa_trace!(
+            "consensus.block_graph.add_block_to_graph.list_final_blocks",
+            {}
+        );
+        let final_blocks = {
+            // short-circuiting intersection of cliques from smallest to largest
+            let mut indices: Vec<usize> = (0..write_shared_state.max_cliques.len()).collect();
+            indices.sort_unstable_by_key(|&i| write_shared_state.max_cliques[i].block_ids.len());
+            let mut final_candidates = write_shared_state.max_cliques[indices[0]].block_ids.clone();
+            for i in 1..indices.len() {
+                final_candidates.retain(|v| write_shared_state.max_cliques[i].block_ids.contains(v));
+                if final_candidates.is_empty() {
+                    break;
+                }
+            }
+
+            // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest)
+            massa_trace!(
+                "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict",
+                {}
+            );
+            indices.retain(|&i| write_shared_state.max_cliques[i].fitness > self.config.delta_f0);
+            indices.sort_unstable_by_key(|&i| std::cmp::Reverse(write_shared_state.max_cliques[i].fitness));
+
+            let mut final_blocks = PreHashSet::<BlockId>::default();
+            for clique_i in indices.into_iter() {
+                massa_trace!(
+                    "consensus.block_graph.add_block_to_graph.list_final_blocks.loop",
+                    { "clique_i": clique_i }
+                );
+                // check in cliques from highest to lowest fitness
+                if final_candidates.is_empty() {
+                    // no more final candidates
+                    break;
+                }
+                let clique = &write_shared_state.max_cliques[clique_i];
+
+                // compute the total fitness of all the descendants of the candidate within the clique
+                let loc_candidates = final_candidates.clone();
+                for candidate_h in loc_candidates.into_iter() {
+                    let descendants = match write_shared_state.block_statuses.get(&candidate_h) {
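+                        // finality rule: a candidate becomes final once the total fitness
+                        // of its descendants within a single high-fitness clique exceeds
+                        // `delta_f0` (the `desc_fit > delta_f0` test below)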
+ Some(BlockStatus::Active { a_block, storage: _ }) => &a_block.descendants, + _ => return Err(GraphError::MissingBlock(format!( + "missing block when computing total fitness of descendants: {}", + candidate_h + ))), + }; + let desc_fit: u64 = descendants + .intersection(&clique.block_ids) + .map(|h| { + if let Some(BlockStatus::Active { a_block: ab, .. }) = + write_shared_state.block_statuses.get(h) + { + return ab.fitness; + } + 0 + }) + .sum(); + if desc_fit > self.config.delta_f0 { + // candidate is final + final_candidates.remove(&candidate_h); + final_blocks.insert(candidate_h); + } + } + } + final_blocks + }; + + // mark final blocks and update latest_final_blocks_periods + massa_trace!( + "consensus.block_graph.add_block_to_graph.mark_final_blocks", + {} + ); + for final_block_hash in final_blocks.into_iter() { + // remove from gi_head + if let Some(other_incomps) = write_shared_state.gi_head.remove(&final_block_hash) { + for other_incomp in other_incomps.into_iter() { + if let Some(other_incomp_lst) = write_shared_state.gi_head.get_mut(&other_incomp) { + other_incomp_lst.remove(&final_block_hash); + } + } + } + + // mark as final and update latest_final_blocks_periods + if let Some(BlockStatus::Active { + a_block: final_block, + .. + }) = write_shared_state.block_statuses.get_mut(&final_block_hash) + { + massa_trace!("consensus.block_graph.add_block_to_graph.final", { + "hash": final_block_hash + }); + final_block.is_final = true; + // remove from cliques + let final_block_fitness = final_block.fitness; + write_shared_state.max_cliques.iter_mut().for_each(|c| { + if c.block_ids.remove(&final_block_hash) { + c.fitness -= final_block_fitness; + } + }); + write_shared_state.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if write_shared_state.max_cliques.is_empty() { + // make sure at least one clique remains + write_shared_state.max_cliques = vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }]; + } + // update latest final blocks + if final_block.slot.period + > write_shared_state.latest_final_blocks_periods[final_block.slot.thread as usize].1 + { + write_shared_state.latest_final_blocks_periods[final_block.slot.thread as usize] = + (final_block_hash, final_block.slot.period); + } + // update new final blocks list + self.new_final_blocks.insert(final_block_hash); + } else { + return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, final_block_hash))); + } + } + + massa_trace!("consensus.block_graph.add_block_to_graph.end", {}); + Ok(()) + } + + /// Note an attack attempt if the discard reason indicates one. + fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { + massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason}); + // If invalid, note the attack attempt. 
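+        // only `DiscardReason::Invalid` is treated as a potential attack attempt;
+        // other reasons (e.g. `Stale`) also occur during normal operation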
+        if let DiscardReason::Invalid(reason) = reason {
+            info!(
+                "consensus.block_graph.maybe_note_attack_attempt DiscardReason::Invalid:{}",
+                reason
+            );
+            self.attack_attempts.push(*hash);
+        }
+    }
+
+    /// Gets a block and all its descendants
+    ///
+    /// # Arguments
+    /// * `block_id`: id of the given block
+    pub fn get_active_block_and_descendants(
+        &self,
+        block_id: &BlockId,
+        read_shared_state: &GraphState,
+    ) -> GraphResult<PreHashSet<BlockId>> {
+        let mut to_visit = vec![*block_id];
+        let mut result = PreHashSet::<BlockId>::default();
+        while let Some(visit_h) = to_visit.pop() {
+            if !result.insert(visit_h) {
+                continue; // already visited
+            }
+            match read_shared_state.block_statuses.get(&visit_h) {
+                Some(BlockStatus::Active { a_block, .. }) => {
+                    a_block.as_ref()
+                        .children.iter()
+                        .for_each(|thread_children| to_visit.extend(thread_children.keys()))
+                },
+                _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h))),
+            }
+        }
+        Ok(result)
+    }
+}
diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs
index ad32fa66d62..a5288847353 100644
--- a/massa-graph-2-worker/src/worker/main_loop.rs
+++ b/massa-graph-2-worker/src/worker/main_loop.rs
@@ -21,6 +21,7 @@ enum WaitingStatus {
 impl GraphWorker {
     fn manage_command(&self, command: GraphCommand) -> GraphResult<()> {
         match command {
+            GraphCommand::RegisterBlockHeader(_, _) => {}
             GraphCommand::RegisterBlock(_, _, _) => {
                 // TODO
             }
diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs
index 2fffac75290..f70933affc1 100644
--- a/massa-graph-2-worker/src/worker/mod.rs
+++ b/massa-graph-2-worker/src/worker/mod.rs
@@ -69,9 +69,11 @@ pub struct GraphWorker {
     storage: Storage,
 }
 
+mod graph;
 mod init;
 mod main_loop;
 mod process_commands;
+mod verifications;
 
 pub fn start_graph_worker(
     config: GraphConfig,
diff --git a/massa-graph-2-worker/src/worker/process_commands.rs b/massa-graph-2-worker/src/worker/process_commands.rs
index 77047df1c8a..da47cb9e6cf 100644
--- a/massa-graph-2-worker/src/worker/process_commands.rs
+++ b/massa-graph-2-worker/src/worker/process_commands.rs
@@ -1,50 +1,60 @@
-use massa_graph::error::GraphResult;
+use std::collections::{hash_map, BTreeSet};
 
 use super::GraphWorker;
+use massa_graph::error::GraphResult;
+use massa_graph_2_exports::block_status::{BlockStatus, HeaderOrBlock};
+use massa_logging::massa_trace;
+use massa_models::{
+    block::{BlockId, WrappedHeader},
+    slot::Slot,
+};
+use tracing::log::debug;
 
 impl GraphWorker {
-    // pub fn incoming_header(
-    //     &mut self,
-    //     block_id: BlockId,
-    //     header: WrappedHeader,
-    //     current_slot: Option<Slot>,
-    // ) -> GraphResult<()> {
-    //     // ignore genesis blocks
-    //     if self.genesis_hashes.contains(&block_id) {
-    //         return Ok(());
-    //     }
+    pub fn register_block_header(
+        &mut self,
+        block_id: BlockId,
+        header: WrappedHeader,
+        current_slot: Option<Slot>,
+    ) -> GraphResult<()> {
+        let mut write_shared_state = self.shared_state.write();
+        // ignore genesis blocks
+        if write_shared_state.genesis_hashes.contains(&block_id) {
+            return Ok(());
+        }
 
-    //     debug!(
-    //         "received header {} for slot {}",
-    //         block_id, header.content.slot
-    //     );
-    //     massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header});
-    //     let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new();
-    //     match self.block_statuses.entry(block_id) {
-    //         // if absent => add as Incoming, call rec_ack on it
-    //         hash_map::Entry::Vacant(vac) => {
-    //             to_ack.insert((header.content.slot, block_id));
-    //             vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header)));
-    //             self.incoming_index.insert(block_id);
-    //         }
-    //         hash_map::Entry::Occupied(mut occ) => match occ.get_mut() {
-    //             BlockStatus::Discarded {
-    //                 sequence_number, ..
-    //             } => {
-    //                 // promote if discarded
-    //                 *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter);
-    //             }
-    //             BlockStatus::WaitingForDependencies { .. } => {
-    //                 // promote in dependencies
-    //                 self.promote_dep_tree(block_id)?;
-    //             }
-    //             _ => {}
-    //         },
-    //     }
+        debug!(
+            "received header {} for slot {}",
+            block_id, header.content.slot
+        );
+        massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header});
+        let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new();
+        match write_shared_state.block_statuses.entry(block_id) {
+            // if absent => add as Incoming, call rec_ack on it
+            hash_map::Entry::Vacant(vac) => {
+                to_ack.insert((header.content.slot, block_id));
+                vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header)));
+                self.incoming_index.insert(block_id);
+            }
+            hash_map::Entry::Occupied(mut occ) => match occ.get_mut() {
+                BlockStatus::Discarded {
+                    sequence_number, ..
+                } => {
+                    // promote if discarded
+                    self.sequence_counter += 1;
+                    *sequence_number = self.sequence_counter;
+                }
+                BlockStatus::WaitingForDependencies { .. } => {
+                    // promote in dependencies
+                    self.promote_dep_tree(block_id, &mut write_shared_state)?;
+                }
+                _ => {}
+            },
+        }
 
-    //     // process
-    //     self.rec_process(to_ack, current_slot)?;
+        // process
+        self.rec_process(to_ack, current_slot, &mut write_shared_state)?;
 
-    //     Ok(())
-    // }
+        Ok(())
+    }
 }
diff --git a/massa-graph-2-worker/src/worker/verifications.rs b/massa-graph-2-worker/src/worker/verifications.rs
new file mode 100644
index 00000000000..eb8be068c06
--- /dev/null
+++ b/massa-graph-2-worker/src/worker/verifications.rs
@@ -0,0 +1,393 @@
+use crate::state::GraphState;
+
+use super::GraphWorker;
+use massa_graph::error::{GraphError, GraphResult};
+use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason};
+use massa_logging::massa_trace;
+use massa_models::{
+    block::{BlockId, WrappedHeader},
+    prehash::PreHashSet,
+    slot::Slot,
+};
+
+/// Possible output of a header check
+#[derive(Debug)]
+pub enum HeaderCheckOutcome {
+    /// it's ok and here are some useful values
+    Proceed {
+        /// one (parent block id, parent's period) per thread
+        parents_hash_period: Vec<(BlockId, u64)>,
+        /// blocks that header is incompatible with
+        incompatibilities: PreHashSet<BlockId>,
+        /// number of incompatibilities that are inherited from the parents
+        inherited_incompatibilities_count: usize,
+        /// fitness
+        fitness: u64,
+    },
+    /// there is something wrong with that header
+    Discard(DiscardReason),
+    /// it must wait for its slot to be fully processed
+    WaitForSlot,
+    /// it must wait for these block ids to be fully processed
+    WaitForDependencies(PreHashSet<BlockId>),
+}
+
+/// Possible outcomes of endorsements check
+#[derive(Debug)]
+pub enum EndorsementsCheckOutcome {
+    /// Everything is ok
+    Proceed,
+    /// There is something wrong with that endorsement
+    Discard(DiscardReason),
+    /// It must wait for its slot to be fully processed
+    WaitForSlot,
+}
+
+impl GraphWorker {
+    /// Process an incoming header.
+    ///
+    /// Checks performed:
+    /// - Number of parents matches thread count.
+    /// - Slot above 0.
+    /// - Valid thread.
+    /// - Check that the block is newer than the latest final one in its thread.
+    /// - Check that the block slot is not too far into the future,
+    ///   as determined by the configuration `future_block_processing_max_periods`.
+    /// - Check if it was the creator's turn to create this block.
+    /// - TODO: check for double staking.
+    /// - Check parents are present.
+    /// - Check the topological consistency of the parents.
+    /// - Check endorsements.
+    /// - Run the thread incompatibility test.
+    /// - Run the grandpa incompatibility test.
+    /// - Check if the block is incompatible with a parent.
+    /// - Check if the block is incompatible with a final block.
+    pub fn check_header(
+        &self,
+        block_id: &BlockId,
+        header: &WrappedHeader,
+        current_slot: Option<Slot>,
+        read_shared_state: &GraphState,
+    ) -> GraphResult<HeaderCheckOutcome> {
+        massa_trace!("consensus.block_graph.check_header", {
+            "block_id": block_id
+        });
+        let mut parents: Vec<(BlockId, u64)> =
+            Vec::with_capacity(self.config.thread_count as usize);
+        let mut incomp = PreHashSet::<BlockId>::default();
+        let mut missing_deps = PreHashSet::<BlockId>::default();
+        let creator_addr = header.creator_address;
+
+        // check that the block is newer than the latest final block in that thread
+        // Note: this excludes genesis blocks
+        if header.content.slot.period
+            <= read_shared_state.latest_final_blocks_periods[header.content.slot.thread as usize].1
+        {
+            return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale));
+        }
+
+        // check if block slot is too much in the future
+        if let Some(cur_slot) = current_slot {
+            if header.content.slot.period
+                > cur_slot
+                    .period
+                    .saturating_add(self.config.future_block_processing_max_periods)
+            {
+                return Ok(HeaderCheckOutcome::WaitForSlot);
+            }
+        }
+
+        // check if it was the creator's turn to create this block
+        // (step 1 in consensus/pos.md)
+        let slot_draw_address = match self.channels.selector_controller.get_producer(header.content.slot) {
+            Ok(draw) => draw,
+            Err(_) => return Ok(HeaderCheckOutcome::WaitForSlot), // TODO properly handle PoS errors
+        };
+        if creator_addr != slot_draw_address {
+            // it was not the creator's turn to create a block for this slot
+            return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid(
+                format!("Bad creator turn for the slot:{}", header.content.slot),
+            )));
+        }
+
+        // check if block is in the future: queue it
+        // note: do it after testing signature + draw to prevent queue flooding/DoS
+        // note: Some(x) > None
+        if Some(header.content.slot) > current_slot {
+            return Ok(HeaderCheckOutcome::WaitForSlot);
+        }
+
+        // Note: here we will check if we already have a block for that slot
+        // and if someone double staked, they will be denounced
+
+        // list parents and ensure they are present
+        let parent_set: PreHashSet<BlockId> = header.content.parents.iter().copied().collect();
+        for parent_thread in 0u8..self.config.thread_count {
+            let parent_hash = header.content.parents[parent_thread as usize];
+            match read_shared_state.block_statuses.get(&parent_hash) {
+                Some(BlockStatus::Discarded { reason, .. }) => {
+                    // parent is discarded
+                    return Ok(HeaderCheckOutcome::Discard(match reason {
+                        DiscardReason::Invalid(invalid_reason) => DiscardReason::Invalid(format!(
+                            "discarded because a parent was discarded for the following reason: {}",
+                            invalid_reason
+                        )),
+                        r => r.clone(),
+                    }));
+                }
+                Some(BlockStatus::Active {
+                    a_block: parent, ..
+ }) => { + // parent is active + + // check that the parent is from an earlier slot in the right thread + if parent.slot.thread != parent_thread || parent.slot >= header.content.slot { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + format!( + "Bad parent {} in thread:{} or slot:{} for {}.", + parent_hash, parent_thread, parent.slot, header.content.slot + ), + ))); + } + + // inherit parent incompatibilities + // and ensure parents are mutually compatible + if let Some(p_incomp) = read_shared_state.gi_head.get(&parent_hash) { + if !p_incomp.is_disjoint(&parent_set) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "Parent not mutually compatible".to_string(), + ))); + } + incomp.extend(p_incomp); + } + + parents.push((parent_hash, parent.slot.period)); + } + _ => { + // parent is missing or queued + if read_shared_state.genesis_hashes.contains(&parent_hash) { + // forbid depending on discarded genesis block + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + missing_deps.insert(parent_hash); + } + } + } + if !missing_deps.is_empty() { + return Ok(HeaderCheckOutcome::WaitForDependencies(missing_deps)); + } + let inherited_incomp_count = incomp.len(); + + // check the topological consistency of the parents + { + let mut gp_max_slots = vec![0u64; self.config.thread_count as usize]; + for parent_i in 0..self.config.thread_count { + let (parent_h, parent_period) = parents[parent_i as usize]; + let parent = match read_shared_state.block_statuses.get(&parent_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => a_block, + _ => return Err(GraphError::ContainerInconsistency(format!( + "inconsistency inside block statuses searching parent {} of block {}", + parent_h, block_id + ))), + }; + if parent_period < gp_max_slots[parent_i as usize] { + // a parent is earlier than a block known by another parent in that thread + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "a parent is earlier than a block known by another parent in that thread" + .to_string(), + ))); + } + gp_max_slots[parent_i as usize] = parent_period; + if parent_period == 0 { + // genesis + continue; + } + for gp_i in 0..self.config.thread_count { + if gp_i == parent_i { + continue; + } + let gp_h = parent.parents[gp_i as usize].0; + match read_shared_state.block_statuses.get(&gp_h) { + // this grandpa is discarded + Some(BlockStatus::Discarded { reason, .. }) => { + return Ok(HeaderCheckOutcome::Discard(reason.clone())); + } + // this grandpa is active + Some(BlockStatus::Active { a_block: gp, .. }) => { + if gp.slot.period > gp_max_slots[gp_i as usize] { + if gp_i < parent_i { + return Ok(HeaderCheckOutcome::Discard( + DiscardReason::Invalid( + "grandpa error: gp_i < parent_i".to_string(), + ), + )); + } + gp_max_slots[gp_i as usize] = gp.slot.period; + } + } + // this grandpa is missing, assume stale + _ => return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)), + } + } + } + } + + // get parent in own thread + let parent_in_own_thread = match read_shared_state + .block_statuses + .get(&parents[header.content.slot.thread as usize].0) + { + Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + _ => None, + } + .ok_or_else(|| { + GraphError::ContainerInconsistency(format!( + "inconsistency inside block statuses searching parent {} in own thread of block {}", + parents[header.content.slot.thread as usize].0, block_id + )) + })?; + + // check endorsements + match self.check_endorsements(header)? 
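+        // fold the endorsement verdict into the header verdict: only `Proceed`
+        // continues, `Discard` and `WaitForSlot` short-circuit the whole check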
{ + EndorsementsCheckOutcome::Proceed => {} + EndorsementsCheckOutcome::Discard(reason) => { + return Ok(HeaderCheckOutcome::Discard(reason)) + } + EndorsementsCheckOutcome::WaitForSlot => return Ok(HeaderCheckOutcome::WaitForSlot), + } + + // thread incompatibility test + parent_in_own_thread.children[header.content.slot.thread as usize] + .keys() + .filter(|&sibling_h| sibling_h != block_id) + .try_for_each(|&sibling_h| { + incomp.extend(self.get_active_block_and_descendants(&sibling_h, read_shared_state)?); + GraphResult::<()>::Ok(()) + })?; + + // grandpa incompatibility test + for tau in (0u8..self.config.thread_count).filter(|&t| t != header.content.slot.thread) { + // for each parent in a different thread tau + // traverse parent's descendants in tau + let mut to_explore = vec![(0usize, header.content.parents[tau as usize])]; + while let Some((cur_gen, cur_h)) = to_explore.pop() { + let cur_b = match read_shared_state.block_statuses.get(&cur_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + _ => None, + }.ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?; + + // traverse but do not check up to generation 1 + if cur_gen <= 1 { + to_explore.extend( + cur_b.children[tau as usize] + .keys() + .map(|&c_h| (cur_gen + 1, c_h)), + ); + continue; + } + + let parent_id = { + self.storage + .read_blocks() + .get(&cur_b.block_id) + .ok_or_else(|| { + GraphError::MissingBlock(format!( + "missing block in grandpa incomp test: {}", + cur_b.block_id + )) + })? + .content + .header + .content + .parents[header.content.slot.thread as usize] + }; + + // check if the parent in tauB has a strictly lower period number than B's parent in tauB + // note: cur_b cannot be genesis at gen > 1 + let parent_period = match read_shared_state.block_statuses.get(&parent_id) { + Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + _ => None, + }.ok_or_else(|| + GraphError::ContainerInconsistency( + format!("inconsistency inside block statuses searching {} check if the parent in tauB has a strictly lower period number than B's parent in tauB while checking grandpa incompatibility of block {}", + parent_id, + block_id) + ))?.slot.period; + if parent_period < parent_in_own_thread.slot.period { + // GPI detected + incomp.extend(self.get_active_block_and_descendants(&cur_h, read_shared_state)?); + } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse + } + } + + // check if the block is incompatible with a parent + if !incomp.is_disjoint(&parents.iter().map(|(h, _p)| *h).collect()) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "Block incompatible with a parent".to_string(), + ))); + } + + // check if the block is incompatible with a final block + if !incomp.is_disjoint( + &read_shared_state + .active_index + .iter() + .filter_map(|h| { + if let Some(BlockStatus::Active { a_block: a, .. 
}) = read_shared_state.block_statuses.get(h) + { + if a.is_final { + return Some(*h); + } + } + None + }) + .collect(), + ) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + massa_trace!("consensus.block_graph.check_header.ok", { + "block_id": block_id + }); + + Ok(HeaderCheckOutcome::Proceed { + parents_hash_period: parents, + incompatibilities: incomp, + inherited_incompatibilities_count: inherited_incomp_count, + fitness: header.get_fitness(), + }) + } + + /// check endorsements: + /// * endorser was selected for that (slot, index) + /// * endorsed slot is `parent_in_own_thread` slot + pub fn check_endorsements( + &self, + header: &WrappedHeader, + ) -> GraphResult { + // check endorsements + let endorsement_draws = match self.channels.selector_controller.get_selection(header.content.slot) { + Ok(sel) => sel.endorsements, + Err(_) => return Ok(EndorsementsCheckOutcome::WaitForSlot), + }; + for endorsement in header.content.endorsements.iter() { + // check that the draw is correct + if endorsement.creator_address != endorsement_draws[endorsement.content.index as usize] + { + return Ok(EndorsementsCheckOutcome::Discard(DiscardReason::Invalid( + format!( + "endorser draw mismatch for header in slot: {}", + header.content.slot + ), + ))); + } + + // note that the following aspects are checked in protocol + // * signature + // * index reuse + // * slot matching the block's + // * the endorsed block is the containing block's parent + } + + Ok(EndorsementsCheckOutcome::Proceed) + } +} From d69a802797883aec3a348fd28a3b99d96f3d9008 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 30 Sep 2022 12:19:26 +0200 Subject: [PATCH 11/40] Move all variable in the state and the functions that process it. --- massa-graph-2-exports/src/channels.rs | 1 - .../src/{state.rs => state/mod.rs} | 42 +++- .../src/{worker/graph.rs => state/process.rs} | 202 +++++++++--------- .../src/{worker => state}/verifications.rs | 46 ++-- massa-graph-2-worker/src/worker/init.rs | 10 - massa-graph-2-worker/src/worker/mod.rs | 35 +-- .../src/worker/process_commands.rs | 9 +- 7 files changed, 187 insertions(+), 158 deletions(-) rename massa-graph-2-worker/src/{state.rs => state/mod.rs} (89%) rename massa-graph-2-worker/src/{worker/graph.rs => state/process.rs} (83%) rename massa-graph-2-worker/src/{worker => state}/verifications.rs (92%) diff --git a/massa-graph-2-exports/src/channels.rs b/massa-graph-2-exports/src/channels.rs index 9ea6b2d2bd9..4b390932514 100644 --- a/massa-graph-2-exports/src/channels.rs +++ b/massa-graph-2-exports/src/channels.rs @@ -1,4 +1,3 @@ -use std::sync::mpsc::Receiver; use massa_pos_exports::SelectorController; /// Contains a reference to the pool, selector and execution controller diff --git a/massa-graph-2-worker/src/state.rs b/massa-graph-2-worker/src/state/mod.rs similarity index 89% rename from massa-graph-2-worker/src/state.rs rename to massa-graph-2-worker/src/state/mod.rs index 30d7201421e..adf1a9b66a7 100644 --- a/massa-graph-2-worker/src/state.rs +++ b/massa-graph-2-worker/src/state/mod.rs @@ -2,10 +2,11 @@ use massa_graph::error::{GraphError, GraphResult}; use massa_graph_2_exports::{ block_graph_export::BlockGraphExport, block_status::{BlockStatus, ExportCompiledBlock}, - GraphConfig, + GraphChannels, GraphConfig, }; use massa_models::{ active_block::ActiveBlock, + address::Address, api::BlockGraphStatus, block::BlockId, clique::Clique, @@ -14,10 +15,15 @@ use massa_models::{ }; use massa_storage::Storage; -#[derive(Clone, Debug)] +mod process; +mod verifications; 
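+
+// `GraphState` is shared between the worker thread and its controller behind a
+// `parking_lot` read-write lock. A minimal usage sketch (assuming, as in the
+// worker code of this patch series, a handle of type
+// `Arc<parking_lot::RwLock<GraphState>>` named `shared_state`):
+//
+//     let status = shared_state.read().get_block_status(&block_id);
+//     shared_state.write().new_sequence_number();
+//
+// A read guard must be dropped before taking the write lock, since the lock is
+// not reentrant.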
+
+#[derive(Clone)]
 pub struct GraphState {
     /// Configuration
     pub config: GraphConfig,
+    /// Channels to communicate with other modules
+    pub channels: GraphChannels,
     /// Storage
     pub storage: Storage,
     /// Block ids of genesis blocks
@@ -35,9 +41,32 @@ pub struct GraphState {
     pub best_parents: Vec<(BlockId, u64)>,
     /// Every block we know about
     pub block_statuses: PreHashMap<BlockId, BlockStatus>,
+    /// Ids of incoming blocks/headers
+    pub incoming_index: PreHashSet<BlockId>,
+    /// Used to limit the number of waiting and discarded blocks
+    pub sequence_counter: u64,
+    /// ids of waiting for slot blocks/headers
+    pub waiting_for_slot_index: PreHashSet<BlockId>,
+    /// ids of waiting for dependencies blocks/headers
+    pub waiting_for_dependencies_index: PreHashSet<BlockId>,
+    /// ids of discarded blocks
+    pub discarded_index: PreHashSet<BlockId>,
+    /// Blocks that need to be propagated
+    pub to_propagate: PreHashMap<BlockId, Storage>,
+    /// List of block ids we think are attack attempts
+    pub attack_attempts: Vec<BlockId>,
+    /// Newly final blocks
+    pub new_final_blocks: PreHashSet<BlockId>,
+    /// Newly stale block mapped to creator and slot
+    pub new_stale_blocks: PreHashMap<BlockId, (Address, Slot)>,
 }
 
 impl GraphState {
+    pub fn new_sequence_number(&mut self) -> u64 {
+        self.sequence_counter += 1;
+        self.sequence_counter
+    }
+
     fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> {
         match self.block_statuses.get(block_id) {
             Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)),
@@ -46,7 +75,7 @@ impl GraphState {
     }
 
     /// get the blockclique (or final) block ID at a given slot, if any
-    pub(crate) fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option<BlockId> {
+    pub fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option<BlockId> {
         // List all blocks at this slot.
         // The list should be small: make a copy of it to avoid holding the storage lock.
         let blocks_at_slot = {
@@ -83,7 +112,7 @@ impl GraphState {
     }
 
     /// get the latest blockclique (or final) block ID at a given slot, if any
-    pub(crate) fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId {
+    pub fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId {
         let (mut best_block_id, mut best_block_period) = self
            .latest_final_blocks_periods
            .get(slot.thread as usize)
@@ -120,7 +149,7 @@ impl GraphState {
         best_block_id
     }
 
-    pub(crate) fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus {
+    pub fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus {
         match self.block_statuses.get(block_id) {
             None => BlockGraphStatus::NotFound,
             Some(BlockStatus::Active { a_block, .. }) => {
@@ -147,7 +176,8 @@ impl GraphState {
             Some(BlockStatus::WaitingForSlot(_)) => BlockGraphStatus::WaitingForSlot,
         }
     }
-    pub(crate) fn list_required_active_blocks(&self) -> GraphResult<PreHashSet<BlockId>> {
+
+    pub fn list_required_active_blocks(&self) -> GraphResult<PreHashSet<BlockId>> {
         // list all active blocks
         let mut retain_active: PreHashSet<BlockId> =
             PreHashSet::<BlockId>::with_capacity(self.active_index.len());
diff --git a/massa-graph-2-worker/src/worker/graph.rs b/massa-graph-2-worker/src/state/process.rs
similarity index 83%
rename from massa-graph-2-worker/src/worker/graph.rs
rename to massa-graph-2-worker/src/state/process.rs
index c1a9d5f3864..6fe44953332 100644
--- a/massa-graph-2-worker/src/worker/graph.rs
+++ b/massa-graph-2-worker/src/state/process.rs
@@ -1,8 +1,5 @@
 use std::collections::{BTreeSet, VecDeque};
 
-use crate::{worker::verifications::HeaderCheckOutcome, state::GraphState};
-
-use super::GraphWorker;
 use massa_graph::error::{GraphError, GraphResult};
 use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason, HeaderOrBlock};
 use massa_logging::massa_trace;
@@ -16,20 +13,22 @@ use massa_models::{
 };
 use massa_signature::PublicKey;
 use massa_storage::Storage;
-use parking_lot::RwLockWriteGuard;
 use tracing::log::{debug, info};
 
-impl GraphWorker {
+use crate::state::verifications::HeaderCheckOutcome;
+
+use super::GraphState;
+
+impl GraphState {
     /// acknowledge a set of items recursively
     pub fn rec_process(
         &mut self,
         mut to_ack: BTreeSet<(Slot, BlockId)>,
         current_slot: Option<Slot>,
-        mut write_shared_state: &mut RwLockWriteGuard<GraphState>,
     ) -> GraphResult<()> {
         // order processing by (slot, hash)
         while let Some((_slot, hash)) = to_ack.pop_first() {
-            to_ack.extend(self.process(hash, current_slot, write_shared_state)?)
+            to_ack.extend(self.process(hash, current_slot)?)
         }
         Ok(())
     }
@@ -39,7 +38,6 @@ impl GraphWorker {
         &mut self,
         block_id: BlockId,
         current_slot: Option<Slot>,
-        mut write_shared_state: &mut RwLockWriteGuard<GraphState>,
     ) -> GraphResult<BTreeSet<(Slot, BlockId)>> {
         // list items to reprocess
         let mut reprocess = BTreeSet::new();
@@ -54,7 +52,7 @@ impl GraphWorker {
             valid_block_inherited_incomp_count,
             valid_block_storage,
             valid_block_fitness,
-        ) = match write_shared_state.block_statuses.get(&block_id) {
+        ) = match self.block_statuses.get(&block_id) {
             None => return Ok(BTreeSet::new()), // disappeared before being processed: do nothing
 
             // discarded: do nothing
@@ -80,7 +78,7 @@ impl GraphWorker {
                 });
                 // remove header
                 let header = if let Some(BlockStatus::Incoming(HeaderOrBlock::Header(header))) =
-                    write_shared_state.block_statuses.remove(&block_id)
+                    self.block_statuses.remove(&block_id)
                 {
                     self.incoming_index.remove(&block_id);
                     header
@@ -90,12 +88,12 @@ impl GraphWorker {
                     block_id
                 )));
                 };
-                match self.check_header(&block_id, &header, current_slot, &write_shared_state)? {
+                match self.check_header(&block_id, &header, current_slot, &self)? {
                     HeaderCheckOutcome::Proceed { ..
} => { // set as waiting dependencies let mut dependencies = PreHashSet::::default(); dependencies.insert(block_id); // add self as unsatisfied - write_shared_state.block_statuses.insert( + self.block_statuses.insert( block_id, BlockStatus::WaitingForDependencies { header_or_block: HeaderOrBlock::Header(header), @@ -107,7 +105,7 @@ impl GraphWorker { }, ); self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id, write_shared_state)?; + self.promote_dep_tree(block_id)?; massa_trace!( "consensus.block_graph.process.incoming_header.waiting_for_self", @@ -120,7 +118,7 @@ impl GraphWorker { dependencies.insert(block_id); // add self as unsatisfied massa_trace!("consensus.block_graph.process.incoming_header.waiting_for_dependencies", {"block_id": block_id, "dependencies": dependencies}); - write_shared_state.block_statuses.insert( + self.block_statuses.insert( block_id, BlockStatus::WaitingForDependencies { header_or_block: HeaderOrBlock::Header(header), @@ -132,13 +130,13 @@ impl GraphWorker { }, ); self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id, write_shared_state)?; + self.promote_dep_tree(block_id)?; return Ok(BTreeSet::new()); } HeaderCheckOutcome::WaitForSlot => { // make it wait for slot - write_shared_state.block_statuses.insert( + self.block_statuses.insert( block_id, BlockStatus::WaitingForSlot(HeaderOrBlock::Header(header)), ); @@ -159,7 +157,7 @@ impl GraphWorker { .insert(block_id, (header.creator_address, header.content.slot)); } // discard - write_shared_state.block_statuses.insert( + self.block_statuses.insert( block_id, BlockStatus::Discarded { slot: header.content.slot, @@ -188,7 +186,7 @@ impl GraphWorker { let (slot, storage) = if let Some(BlockStatus::Incoming(HeaderOrBlock::Block { slot, storage, .. - })) = write_shared_state.block_statuses.remove(&block_id) + })) = self.block_statuses.remove(&block_id) { self.incoming_index.remove(&block_id); (slot, storage) @@ -204,7 +202,12 @@ impl GraphWorker { .cloned() .expect("incoming block not found in storage"); - match self.check_header(&block_id, &stored_block.content.header, current_slot, &write_shared_state)? { + match self.check_header( + &block_id, + &stored_block.content.header, + current_slot, + &self, + )? 
{ HeaderCheckOutcome::Proceed { parents_hash_period, incompatibilities, @@ -227,7 +230,7 @@ impl GraphWorker { } HeaderCheckOutcome::WaitForDependencies(dependencies) => { // set as waiting dependencies - write_shared_state.block_statuses.insert( + self.block_statuses.insert( block_id, BlockStatus::WaitingForDependencies { header_or_block: HeaderOrBlock::Block { @@ -243,7 +246,7 @@ impl GraphWorker { }, ); self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id, write_shared_state)?; + self.promote_dep_tree(block_id)?; massa_trace!( "consensus.block_graph.process.incoming_block.waiting_for_dependencies", { "block_id": block_id } @@ -252,7 +255,7 @@ impl GraphWorker { } HeaderCheckOutcome::WaitForSlot => { // set as waiting for slot - write_shared_state.block_statuses.insert( + self.block_statuses.insert( block_id, BlockStatus::WaitingForSlot(HeaderOrBlock::Block { id: block_id, @@ -282,7 +285,7 @@ impl GraphWorker { ); } // add to discard - write_shared_state.block_statuses.insert( + self.block_statuses.insert( block_id, BlockStatus::Discarded { slot: stored_block.content.header.content.slot, @@ -317,10 +320,10 @@ impl GraphWorker { } // send back as incoming and ask for reprocess if let Some(BlockStatus::WaitingForSlot(header_or_block)) = - write_shared_state.block_statuses.remove(&block_id) + self.block_statuses.remove(&block_id) { self.waiting_for_slot_index.remove(&block_id); - write_shared_state.block_statuses + self.block_statuses .insert(block_id, BlockStatus::Incoming(header_or_block)); self.incoming_index.insert(block_id); reprocess.insert((slot, block_id)); @@ -348,11 +351,11 @@ impl GraphWorker { // send back as incoming and ask for reprocess if let Some(BlockStatus::WaitingForDependencies { header_or_block, .. - }) = write_shared_state.block_statuses.remove(&block_id) + }) = self.block_statuses.remove(&block_id) { self.waiting_for_dependencies_index.remove(&block_id); reprocess.insert((header_or_block.get_slot(), block_id)); - write_shared_state.block_statuses + self.block_statuses .insert(block_id, BlockStatus::Incoming(header_or_block)); self.incoming_index.insert(block_id); massa_trace!( @@ -376,11 +379,10 @@ impl GraphWorker { valid_block_inherited_incomp_count, valid_block_fitness, valid_block_storage, - write_shared_state, )?; // if the block was added, update linked dependencies and mark satisfied ones for recheck - if let Some(BlockStatus::Active { storage, .. }) = write_shared_state.block_statuses.get(&block_id) { + if let Some(BlockStatus::Active { storage, .. }) = self.block_statuses.get(&block_id) { massa_trace!("consensus.block_graph.process.is_active", { "block_id": block_id }); @@ -390,7 +392,7 @@ impl GraphWorker { header_or_block, unsatisfied_dependencies, .. - }) = write_shared_state.block_statuses.get_mut(itm_block_id) + }) = self.block_statuses.get_mut(itm_block_id) { if unsatisfied_dependencies.remove(&block_id) { // a dependency was satisfied: retry @@ -403,7 +405,7 @@ impl GraphWorker { Ok(reprocess) } - pub fn promote_dep_tree(&mut self, hash: BlockId, mut write_shared_state: &mut RwLockWriteGuard) -> GraphResult<()> { + pub fn promote_dep_tree(&mut self, hash: BlockId) -> GraphResult<()> { let mut to_explore = vec![hash]; let mut to_promote: PreHashMap = PreHashMap::default(); while let Some(h) = to_explore.pop() { @@ -415,7 +417,7 @@ impl GraphWorker { unsatisfied_dependencies, sequence_number, .. 
- }) = write_shared_state.block_statuses.get(&h) + }) = self.block_statuses.get(&h) { // promote current block to_promote.insert(h, (header_or_block.get_slot(), *sequence_number)); @@ -432,7 +434,7 @@ impl GraphWorker { for (_slot, _seq, h) in to_promote.into_iter() { if let Some(BlockStatus::WaitingForDependencies { sequence_number, .. - }) = write_shared_state.block_statuses.get_mut(&h) + }) = self.block_statuses.get_mut(&h) { self.sequence_counter += 1; *sequence_number = self.sequence_counter; @@ -504,7 +506,6 @@ impl GraphWorker { inherited_incomp_count: usize, fitness: u64, mut storage: Storage, - mut write_shared_state: &mut RwLockWriteGuard, ) -> GraphResult<()> { massa_trace!("consensus.block_graph.add_block_to_graph", { "block_id": add_block_id @@ -515,7 +516,7 @@ impl GraphWorker { storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect()); // add block to status structure - write_shared_state.block_statuses.insert( + self.block_statuses.insert( add_block_id, BlockStatus::Active { a_block: Box::new(ActiveBlock { @@ -531,13 +532,13 @@ impl GraphWorker { storage, }, ); - write_shared_state.active_index.insert(add_block_id); + self.active_index.insert(add_block_id); // add as child to parents for (parent_h, _parent_period) in parents_hash_period.iter() { if let Some(BlockStatus::Active { a_block: a_parent, .. - }) = write_shared_state.block_statuses.get_mut(parent_h) + }) = self.block_statuses.get_mut(parent_h) { a_parent.children[add_block_slot.thread as usize] .insert(add_block_id, add_block_slot.period); @@ -559,7 +560,7 @@ impl GraphWorker { continue; } if let Some(BlockStatus::Active { a_block: ab, .. }) = - write_shared_state.block_statuses.get_mut(&ancestor_h) + self.block_statuses.get_mut(&ancestor_h) { ab.descendants.insert(add_block_id); for (ancestor_parent_h, _) in ab.parents.iter() { @@ -575,7 +576,7 @@ impl GraphWorker { {} ); for incomp_h in incomp.iter() { - write_shared_state.gi_head + self.gi_head .get_mut(incomp_h) .ok_or_else(|| { GraphError::MissingBlock(format!( @@ -585,7 +586,7 @@ impl GraphWorker { })? 
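+                // every member of `incomp` must already be present in `gi_head`;
+                // a missing entry means the graph is inconsistent, hence `MissingBlock`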
.insert(add_block_id); } - write_shared_state.gi_head.insert(add_block_id, incomp.clone()); + self.gi_head.insert(add_block_id, incomp.clone()); // max cliques update massa_trace!( @@ -596,7 +597,7 @@ impl GraphWorker { // clique optimization routine: // the block only has incompatibilities inherited from its parents // therefore it is not forking and can simply be added to the cliques it is compatible with - write_shared_state.max_cliques + self.max_cliques .iter_mut() .filter(|c| incomp.is_disjoint(&c.block_ids)) .for_each(|c| { @@ -608,9 +609,9 @@ impl GraphWorker { "consensus.block_graph.add_block_to_graph.clique_full_computing", { "hash": add_block_id } ); - let before = write_shared_state.max_cliques.len(); - write_shared_state.max_cliques = self - .compute_max_cliques(&write_shared_state) + let before = self.max_cliques.len(); + self.max_cliques = self + .compute_max_cliques(&self) .into_iter() .map(|c| Clique { block_ids: c, @@ -618,11 +619,11 @@ impl GraphWorker { is_blockclique: false, }) .collect(); - let after = write_shared_state.max_cliques.len(); + let after = self.max_cliques.len(); if before != after { massa_trace!( "consensus.block_graph.add_block_to_graph.clique_full_computing more than one clique", - { "cliques": write_shared_state.max_cliques, "gi_head": write_shared_state.gi_head } + { "cliques": self.max_cliques, "gi_head": self.gi_head } ); // gi_head debug!( @@ -638,13 +639,13 @@ impl GraphWorker { { let mut blockclique_i = 0usize; let mut max_clique_fitness = (0u64, num::BigInt::default()); - for (clique_i, clique) in write_shared_state.max_cliques.iter_mut().enumerate() { + for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { clique.fitness = 0; clique.is_blockclique = false; let mut sum_hash = num::BigInt::default(); for block_h in clique.block_ids.iter() { - let fitness = match write_shared_state.block_statuses.get(block_h) { - Some(BlockStatus::Active { a_block, storage }) => a_block.fitness, + let fitness = match self.block_statuses.get(block_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => a_block.fitness, _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))), }; clique.fitness = clique @@ -660,7 +661,7 @@ impl GraphWorker { max_clique_fitness = cur_fit; } } - write_shared_state.max_cliques[blockclique_i].is_blockclique = true; + self.max_cliques[blockclique_i].is_blockclique = true; } // update best parents @@ -670,24 +671,24 @@ impl GraphWorker { ); { // find blockclique - let blockclique_i = write_shared_state + let blockclique_i = self .max_cliques .iter() .position(|c| c.is_blockclique) .unwrap_or_default(); - let blockclique = &write_shared_state.max_cliques[blockclique_i]; + let blockclique = &self.max_cliques[blockclique_i]; // init best parents as latest_final_blocks_periods - write_shared_state.best_parents = write_shared_state.latest_final_blocks_periods.clone(); + self.best_parents = self.latest_final_blocks_periods.clone(); // for each blockclique block, set it as best_parent in its own thread // if its period is higher than the current best_parent in that thread for block_h in blockclique.block_ids.iter() { - let b_slot = match write_shared_state.block_statuses.get(block_h) { + let b_slot = match self.block_statuses.get(block_h) { Some(BlockStatus::Active { a_block, storage: _ }) => a_block.slot, _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating best 
parents while adding {} - missing {}", add_block_id, block_h))), }; - if b_slot.period > write_shared_state.best_parents[b_slot.thread as usize].1 { - write_shared_state.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); + if b_slot.period > self.best_parents[b_slot.thread as usize].1 { + self.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); } } } @@ -698,28 +699,28 @@ impl GraphWorker { {} ); let stale_blocks = { - let blockclique_i = write_shared_state + let blockclique_i = self .max_cliques .iter() .position(|c| c.is_blockclique) .unwrap_or_default(); - let fitness_threshold = write_shared_state.max_cliques[blockclique_i] + let fitness_threshold = self.max_cliques[blockclique_i] .fitness .saturating_sub(self.config.delta_f0); // iterate from largest to smallest to minimize reallocations - let mut indices: Vec = (0..write_shared_state.max_cliques.len()).collect(); + let mut indices: Vec = (0..self.max_cliques.len()).collect(); indices - .sort_unstable_by_key(|&i| std::cmp::Reverse(write_shared_state.max_cliques[i].block_ids.len())); + .sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); let mut high_set = PreHashSet::::default(); let mut low_set = PreHashSet::::default(); for clique_i in indices.into_iter() { - if write_shared_state.max_cliques[clique_i].fitness >= fitness_threshold { - high_set.extend(&write_shared_state.max_cliques[clique_i].block_ids); + if self.max_cliques[clique_i].fitness >= fitness_threshold { + high_set.extend(&self.max_cliques[clique_i].block_ids); } else { - low_set.extend(&write_shared_state.max_cliques[clique_i].block_ids); + low_set.extend(&self.max_cliques[clique_i].block_ids); } } - write_shared_state.max_cliques.retain(|c| c.fitness >= fitness_threshold); + self.max_cliques.retain(|c| c.fitness >= fitness_threshold); &low_set - &high_set }; // mark stale blocks @@ -731,17 +732,17 @@ impl GraphWorker { if let Some(BlockStatus::Active { a_block: active_block, storage: _storage, - }) = write_shared_state.block_statuses.remove(&stale_block_hash) + }) = self.block_statuses.remove(&stale_block_hash) { - write_shared_state.active_index.remove(&stale_block_hash); + self.active_index.remove(&stale_block_hash); if active_block.is_final { return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, stale_block_hash))); } // remove from gi_head - if let Some(other_incomps) = write_shared_state.gi_head.remove(&stale_block_hash) { + if let Some(other_incomps) = self.gi_head.remove(&stale_block_hash) { for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = write_shared_state.gi_head.get_mut(&other_incomp) { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { other_incomp_lst.remove(&stale_block_hash); } } @@ -749,15 +750,15 @@ impl GraphWorker { // remove from cliques let stale_block_fitness = active_block.fitness; - write_shared_state.max_cliques.iter_mut().for_each(|c| { + self.max_cliques.iter_mut().for_each(|c| { if c.block_ids.remove(&stale_block_hash) { c.fitness -= stale_block_fitness; } }); - write_shared_state.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if write_shared_state.max_cliques.is_empty() { + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { // make sure at least one clique remains - write_shared_state.max_cliques = vec![Clique { + 
self.max_cliques = vec![Clique { block_ids: PreHashSet::::default(), fitness: 0, is_blockclique: true, @@ -769,7 +770,7 @@ impl GraphWorker { if let Some(BlockStatus::Active { a_block: parent_active_block, .. - }) = write_shared_state.block_statuses.get_mut(parent_h) + }) = self.block_statuses.get_mut(parent_h) { parent_active_block.children[active_block.slot.thread as usize] .remove(&stale_block_hash); @@ -785,7 +786,7 @@ impl GraphWorker { stale_block_hash, (active_block.creator_address, active_block.slot), ); - write_shared_state.block_statuses.insert( + self.block_statuses.insert( stale_block_hash, BlockStatus::Discarded { slot: active_block.slot, @@ -811,11 +812,11 @@ impl GraphWorker { ); let final_blocks = { // short-circuiting intersection of cliques from smallest to largest - let mut indices: Vec = (0..write_shared_state.max_cliques.len()).collect(); - indices.sort_unstable_by_key(|&i| write_shared_state.max_cliques[i].block_ids.len()); - let mut final_candidates = write_shared_state.max_cliques[indices[0]].block_ids.clone(); + let mut indices: Vec = (0..self.max_cliques.len()).collect(); + indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); + let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); for i in 1..indices.len() { - final_candidates.retain(|v| write_shared_state.max_cliques[i].block_ids.contains(v)); + final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); if final_candidates.is_empty() { break; } @@ -826,8 +827,8 @@ impl GraphWorker { "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", {} ); - indices.retain(|&i| write_shared_state.max_cliques[i].fitness > self.config.delta_f0); - indices.sort_unstable_by_key(|&i| std::cmp::Reverse(write_shared_state.max_cliques[i].fitness)); + indices.retain(|&i| self.max_cliques[i].fitness > self.config.delta_f0); + indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); let mut final_blocks = PreHashSet::::default(); for clique_i in indices.into_iter() { @@ -840,23 +841,28 @@ impl GraphWorker { // no more final candidates break; } - let clique = &write_shared_state.max_cliques[clique_i]; + let clique = &self.max_cliques[clique_i]; // compute the total fitness of all the descendants of the candidate within the clique let loc_candidates = final_candidates.clone(); for candidate_h in loc_candidates.into_iter() { - let descendants = match write_shared_state.block_statuses.get(&candidate_h) { - Some(BlockStatus::Active { a_block, storage: _ }) => &a_block.descendants, - _ => return Err(GraphError::MissingBlock(format!( - "missing block when computing total fitness of descendants: {}", - candidate_h - ))), + let descendants = match self.block_statuses.get(&candidate_h) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => &a_block.descendants, + _ => { + return Err(GraphError::MissingBlock(format!( + "missing block when computing total fitness of descendants: {}", + candidate_h + ))) + } }; let desc_fit: u64 = descendants .intersection(&clique.block_ids) .map(|h| { if let Some(BlockStatus::Active { a_block: ab, .. 
}) = - write_shared_state.block_statuses.get(h) + self.block_statuses.get(h) { return ab.fitness; } @@ -880,9 +886,9 @@ impl GraphWorker { ); for final_block_hash in final_blocks.into_iter() { // remove from gi_head - if let Some(other_incomps) = write_shared_state.gi_head.remove(&final_block_hash) { + if let Some(other_incomps) = self.gi_head.remove(&final_block_hash) { for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = write_shared_state.gi_head.get_mut(&other_incomp) { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { other_incomp_lst.remove(&final_block_hash); } } @@ -892,7 +898,7 @@ impl GraphWorker { if let Some(BlockStatus::Active { a_block: final_block, .. - }) = write_shared_state.block_statuses.get_mut(&final_block_hash) + }) = self.block_statuses.get_mut(&final_block_hash) { massa_trace!("consensus.block_graph.add_block_to_graph.final", { "hash": final_block_hash @@ -900,15 +906,15 @@ impl GraphWorker { final_block.is_final = true; // remove from cliques let final_block_fitness = final_block.fitness; - write_shared_state.max_cliques.iter_mut().for_each(|c| { + self.max_cliques.iter_mut().for_each(|c| { if c.block_ids.remove(&final_block_hash) { c.fitness -= final_block_fitness; } }); - write_shared_state.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if write_shared_state.max_cliques.is_empty() { + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { // make sure at least one clique remains - write_shared_state.max_cliques = vec![Clique { + self.max_cliques = vec![Clique { block_ids: PreHashSet::::default(), fitness: 0, is_blockclique: true, @@ -916,9 +922,9 @@ impl GraphWorker { } // update latest final blocks if final_block.slot.period - > write_shared_state.latest_final_blocks_periods[final_block.slot.thread as usize].1 + > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 { - write_shared_state.latest_final_blocks_periods[final_block.slot.thread as usize] = + self.latest_final_blocks_periods[final_block.slot.thread as usize] = (final_block_hash, final_block.slot.period); } // update new final blocks list @@ -952,7 +958,7 @@ impl GraphWorker { pub fn get_active_block_and_descendants( &self, block_id: &BlockId, - read_shared_state: &GraphState + read_shared_state: &GraphState, ) -> GraphResult> { let mut to_visit = vec![*block_id]; let mut result = PreHashSet::::default(); diff --git a/massa-graph-2-worker/src/worker/verifications.rs b/massa-graph-2-worker/src/state/verifications.rs similarity index 92% rename from massa-graph-2-worker/src/worker/verifications.rs rename to massa-graph-2-worker/src/state/verifications.rs index eb8be068c06..6a371f9b909 100644 --- a/massa-graph-2-worker/src/worker/verifications.rs +++ b/massa-graph-2-worker/src/state/verifications.rs @@ -1,6 +1,5 @@ -use crate::state::GraphState; +use super::GraphState; -use super::GraphWorker; use massa_graph::error::{GraphError, GraphResult}; use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason}; use massa_logging::massa_trace; @@ -43,7 +42,7 @@ pub enum EndorsementsCheckOutcome { WaitForSlot, } -impl GraphWorker { +impl GraphState { /// Process an incoming header. 
/// /// Checks performed: @@ -99,7 +98,11 @@ impl GraphWorker { // check if it was the creator's turn to create this block // (step 1 in consensus/pos.md) - let slot_draw_address = match self.channels.selector_controller.get_producer(header.content.slot) { + let slot_draw_address = match self + .channels + .selector_controller + .get_producer(header.content.slot) + { Ok(draw) => draw, Err(_) => return Ok(HeaderCheckOutcome::WaitForSlot), // TODO properly handle PoS errors }; @@ -184,11 +187,16 @@ impl GraphWorker { for parent_i in 0..self.config.thread_count { let (parent_h, parent_period) = parents[parent_i as usize]; let parent = match read_shared_state.block_statuses.get(&parent_h) { - Some(BlockStatus::Active { a_block, storage: _ }) => a_block, - _ => return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses searching parent {} of block {}", - parent_h, block_id - ))), + Some(BlockStatus::Active { + a_block, + storage: _, + }) => a_block, + _ => { + return Err(GraphError::ContainerInconsistency(format!( + "inconsistency inside block statuses searching parent {} of block {}", + parent_h, block_id + ))) + } }; if parent_period < gp_max_slots[parent_i as usize] { // a parent is earlier than a block known by another parent in that thread @@ -237,7 +245,10 @@ impl GraphWorker { .block_statuses .get(&parents[header.content.slot.thread as usize].0) { - Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + Some(BlockStatus::Active { + a_block, + storage: _, + }) => Some(a_block), _ => None, } .ok_or_else(|| { @@ -261,7 +272,8 @@ impl GraphWorker { .keys() .filter(|&sibling_h| sibling_h != block_id) .try_for_each(|&sibling_h| { - incomp.extend(self.get_active_block_and_descendants(&sibling_h, read_shared_state)?); + incomp + .extend(self.get_active_block_and_descendants(&sibling_h, read_shared_state)?); GraphResult::<()>::Ok(()) })?; @@ -315,7 +327,8 @@ impl GraphWorker { ))?.slot.period; if parent_period < parent_in_own_thread.slot.period { // GPI detected - incomp.extend(self.get_active_block_and_descendants(&cur_h, read_shared_state)?); + incomp + .extend(self.get_active_block_and_descendants(&cur_h, read_shared_state)?); } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse } } @@ -333,7 +346,8 @@ impl GraphWorker { .active_index .iter() .filter_map(|h| { - if let Some(BlockStatus::Active { a_block: a, .. }) = read_shared_state.block_statuses.get(h) + if let Some(BlockStatus::Active { a_block: a, .. 
}) = + read_shared_state.block_statuses.get(h) { if a.is_final { return Some(*h); @@ -365,7 +379,11 @@ impl GraphWorker { header: &WrappedHeader, ) -> GraphResult { // check endorsements - let endorsement_draws = match self.channels.selector_controller.get_selection(header.content.slot) { + let endorsement_draws = match self + .channels + .selector_controller + .get_selection(header.content.slot) + { Ok(sel) => sel.endorsements, Err(_) => return Ok(EndorsementsCheckOutcome::WaitForSlot), }; diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 25d14066ba8..6790cd0b251 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -13,7 +13,6 @@ use massa_models::{ active_block::ActiveBlock, address::Address, block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, - clique::Clique, prehash::{PreHashMap, PreHashSet}, slot::Slot, timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, @@ -176,15 +175,6 @@ impl GraphWorker { config.stats_timespan, ), launch_time: MassaTime::now(config.clock_compensation_millis)?, - sequence_counter: 0, - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), - discarded_index: Default::default(), - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), storage: storage.clone(), }; diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index f70933affc1..1750511fc52 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -1,7 +1,5 @@ use massa_graph::BootstrapableGraph; -use massa_graph_2_exports::{ - block_status::BlockStatus, GraphChannels, GraphConfig, GraphController, GraphManager, -}; +use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager}; use massa_models::address::Address; use massa_models::block::{BlockId, WrappedHeader}; use massa_models::clique::Clique; @@ -46,34 +44,13 @@ pub struct GraphWorker { stats_desync_detection_timespan: MassaTime, /// time at which the node was launched (used for desynchronization detection) launch_time: MassaTime, - - /// Used to limit the number of waiting and discarded blocks - sequence_counter: u64, - /// Ids of incoming blocks/headers - incoming_index: PreHashSet, - /// ids of waiting for slot blocks/headers - waiting_for_slot_index: PreHashSet, - /// ids of waiting for dependencies blocks/headers - waiting_for_dependencies_index: PreHashSet, - /// ids of discarded blocks - discarded_index: PreHashSet, - /// Blocks that need to be propagated - to_propagate: PreHashMap, - /// List of block ids we think are attack attempts - attack_attempts: Vec, - /// Newly final blocks - new_final_blocks: PreHashSet, - /// Newly stale block mapped to creator and slot - new_stale_blocks: PreHashMap, /// Shared storage, storage: Storage, } -mod graph; mod init; mod main_loop; mod process_commands; -mod verifications; pub fn start_graph_worker( config: GraphConfig, @@ -85,11 +62,21 @@ pub fn start_graph_worker( let shared_state = Arc::new(RwLock::new(GraphState { storage: storage.clone(), config: config.clone(), + channels: channels.clone(), max_cliques: vec![Clique { block_ids: PreHashSet::::default(), fitness: 0, is_blockclique: true, }], + sequence_counter: 0, + waiting_for_slot_index: Default::default(), + 
waiting_for_dependencies_index: Default::default(), + discarded_index: Default::default(), + to_propagate: Default::default(), + attack_attempts: Default::default(), + new_final_blocks: Default::default(), + new_stale_blocks: Default::default(), + incoming_index: Default::default(), active_index: Default::default(), latest_final_blocks_periods: Default::default(), best_parents: Default::default(), diff --git a/massa-graph-2-worker/src/worker/process_commands.rs b/massa-graph-2-worker/src/worker/process_commands.rs index da47cb9e6cf..17626e88c0b 100644 --- a/massa-graph-2-worker/src/worker/process_commands.rs +++ b/massa-graph-2-worker/src/worker/process_commands.rs @@ -34,26 +34,25 @@ impl GraphWorker { hash_map::Entry::Vacant(vac) => { to_ack.insert((header.content.slot, block_id)); vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); - self.incoming_index.insert(block_id); + write_shared_state.incoming_index.insert(block_id); } hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { BlockStatus::Discarded { sequence_number, .. } => { // promote if discarded - self.sequence_counter += 1; - *sequence_number = self.sequence_counter; + write_shared_state.new_sequence_number(); } BlockStatus::WaitingForDependencies { .. } => { // promote in dependencies - self.promote_dep_tree(block_id, &mut write_shared_state)?; + write_shared_state.promote_dep_tree(block_id)?; } _ => {} }, } // process - self.rec_process(to_ack, current_slot, &mut write_shared_state)?; + write_shared_state.rec_process(to_ack, current_slot)?; Ok(()) } From c9cdba1491d96278e24eb9383fa9bf22e1ac3a0c Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Mon, 10 Oct 2022 10:13:30 +0200 Subject: [PATCH 12/40] Add slot tick to new version of consensus. Need to fix errors with protocol sender. 
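The slot tick added here follows the usual deadline pattern: the worker computes
the next slot and the wall-clock instant of that slot, sleeps until that instant,
then fires `slot_tick`. A minimal sketch of the deadline computation, assuming
the `massa_models::timeslots` helpers imported in `main_loop.rs` (error handling
elided; this is an illustration, not the exact worker code):

    let now = MassaTime::now(config.clock_compensation_millis)?;
    // slot whose timestamp is closest to `now`
    let next_slot = get_closest_slot_to_timestamp(
        config.thread_count,
        config.t0,
        config.genesis_timestamp,
        now,
    );
    // wall-clock deadline at which to fire slot_tick(next_slot)
    let next_instant = get_block_slot_timestamp(
        config.thread_count,
        config.t0,
        config.genesis_timestamp,
        next_slot,
    )?
    .estimate_instant(config.clock_compensation_millis)?;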
--- Cargo.lock | 4 + massa-graph-2-exports/Cargo.toml | 4 + massa-graph-2-exports/src/channels.rs | 10 + massa-graph-2-exports/src/events.rs | 6 + massa-graph-2-exports/src/lib.rs | 1 + massa-graph-2-worker/src/controller.rs | 2 +- massa-graph-2-worker/src/state/mod.rs | 70 +++++- massa-graph-2-worker/src/state/process.rs | 26 ++ massa-graph-2-worker/src/worker/init.rs | 45 +++- massa-graph-2-worker/src/worker/main_loop.rs | 227 +++++++++++++++++- massa-graph-2-worker/src/worker/mod.rs | 21 +- .../src/worker/process_commands.rs | 35 ++- massa-graph-2-worker/src/worker/stats.rs | 33 +++ massa-graph-2-worker/src/worker/tick.rs | 56 +++++ 14 files changed, 528 insertions(+), 12 deletions(-) create mode 100644 massa-graph-2-exports/src/events.rs create mode 100644 massa-graph-2-worker/src/worker/stats.rs create mode 100644 massa-graph-2-worker/src/worker/tick.rs diff --git a/Cargo.lock b/Cargo.lock index 56cdd983229..fef91e76e43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1927,10 +1927,14 @@ dependencies = [ name = "massa_graph_2_exports" version = "0.1.0" dependencies = [ + "crossbeam-channel", "displaydoc", + "massa_execution_exports", "massa_graph", "massa_models", + "massa_pool_exports", "massa_pos_exports", + "massa_protocol_exports", "massa_signature", "massa_storage", "massa_time", diff --git a/massa-graph-2-exports/Cargo.toml b/massa-graph-2-exports/Cargo.toml index 6c5db29c044..3cb982dc969 100644 --- a/massa-graph-2-exports/Cargo.toml +++ b/massa-graph-2-exports/Cargo.toml @@ -7,13 +7,17 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +crossbeam-channel = "0.5.6" displaydoc = "0.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" #custom modules +massa_execution_exports = { path = "../massa-execution-exports" } massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } +massa_pool_exports = { path = "../massa-pool-exports" } massa_pos_exports = { path = "../massa-pos-exports" } +massa_protocol_exports ={ path = "../massa-protocol-exports" } massa_storage = { path = "../massa-storage" } massa_time = { path = "../massa-time" } massa_signature = { path = "../massa-signature" } \ No newline at end of file diff --git a/massa-graph-2-exports/src/channels.rs b/massa-graph-2-exports/src/channels.rs index 4b390932514..28e564ac350 100644 --- a/massa-graph-2-exports/src/channels.rs +++ b/massa-graph-2-exports/src/channels.rs @@ -1,8 +1,18 @@ +use crossbeam_channel::Sender; +use massa_execution_exports::ExecutionController; +use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; +use massa_protocol_exports::ProtocolCommandSender; + +use crate::events::GraphEvent; /// Contains a reference to the pool, selector and execution controller /// Contains a channel to send info to protocol #[derive(Clone)] pub struct GraphChannels { + pub execution_controller: Box, pub selector_controller: Box, + pub pool_command_sender: Box, + pub controller_event_tx: Sender, + pub protocol_command_sender: ProtocolCommandSender, } diff --git a/massa-graph-2-exports/src/events.rs b/massa-graph-2-exports/src/events.rs new file mode 100644 index 00000000000..bf38b3f00a6 --- /dev/null +++ b/massa-graph-2-exports/src/events.rs @@ -0,0 +1,6 @@ +/// Events that are emitted by graph. 
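+/// For now the worker emits a single variant, `NeedSync`, raised from the slot
+/// tick when the recent final-block history suggests the node is desynchronized.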
+#[derive(Debug, Clone)]
+pub enum GraphEvent {
+    /// probable desynchronization detected, need re-synchronization
+    NeedSync,
+}
diff --git a/massa-graph-2-exports/src/lib.rs b/massa-graph-2-exports/src/lib.rs
index b2f52826cde..b7a285a0058 100644
--- a/massa-graph-2-exports/src/lib.rs
+++ b/massa-graph-2-exports/src/lib.rs
@@ -7,6 +7,7 @@ mod settings;
 pub mod block_graph_export;
 pub mod block_status;
+pub mod events;
 pub use channels::GraphChannels;
 pub use controller_trait::{GraphController, GraphManager};
diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs
index 298494c0ba2..5d0fb7557b3 100644
--- a/massa-graph-2-worker/src/controller.rs
+++ b/massa-graph-2-worker/src/controller.rs
@@ -95,7 +95,7 @@ impl GraphController for GraphControllerImpl {
     }
 
     fn get_stats(&self) -> GraphResult<ConsensusStats> {
-        todo!()
+        self.get_stats()
     }
 
     fn get_best_parents(&self) -> Vec<(BlockId, u64)> {
diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-graph-2-worker/src/state/mod.rs
index adf1a9b66a7..0df94cab9b1 100644
--- a/massa-graph-2-worker/src/state/mod.rs
+++ b/massa-graph-2-worker/src/state/mod.rs
@@ -1,14 +1,16 @@
+use std::collections::HashMap;
+
 use massa_graph::error::{GraphError, GraphResult};
 use massa_graph_2_exports::{
     block_graph_export::BlockGraphExport,
-    block_status::{BlockStatus, ExportCompiledBlock},
+    block_status::{BlockStatus, ExportCompiledBlock, HeaderOrBlock},
     GraphChannels, GraphConfig,
 };
 use massa_models::{
     active_block::ActiveBlock,
     address::Address,
     api::BlockGraphStatus,
-    block::BlockId,
+    block::{BlockId, WrappedHeader},
     clique::Clique,
     prehash::{CapacityAllocator, PreHashMap, PreHashSet},
     slot::Slot,
@@ -67,13 +69,17 @@ impl GraphState {
         self.sequence_counter
     }
 
-    fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> {
+    pub fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> {
         match self.block_statuses.get(block_id) {
            Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)),
            _ => None,
        }
    }
 
+    pub fn get_clique_count(&self) -> usize {
+        self.max_cliques.len()
+    }
+
     /// get the blockclique (or final) block ID at a given slot, if any
     pub fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option<BlockId> {
         // List all blocks at this slot.
@@ -373,4 +379,62 @@ impl GraphState {
 
         Ok(export)
     }
+
+    /// Gets all stored final blocks, not only the still-useful ones
+    /// This is used when initializing Execution from Consensus.
+    /// Since the Execution bootstrap snapshot is older than the Consensus snapshot,
+    /// we might need to signal older final blocks for Execution to catch up.
+    pub fn get_all_final_blocks(&self) -> HashMap<BlockId, (Slot, Storage)> {
+        self.active_index
+            .iter()
+            .map(|b_id| {
+                let block_infos = match self.block_statuses.get(&b_id) {
+                    Some(BlockStatus::Active { a_block, storage }) => {
+                        (a_block.slot, storage.clone())
+                    }
+                    _ => panic!("active block missing"),
+                };
+                (*b_id, block_infos)
+            })
+            .collect()
+    }
+
+    /// get the clique of highest fitness
+    pub fn get_blockclique(&self) -> &PreHashSet<BlockId> {
+        &self
+            .max_cliques
+            .iter()
+            .find(|c| c.is_blockclique)
+            .expect("blockclique missing")
+            .block_ids
+    }
+
+    /// get the current block wish list, including the operations hash.
+    pub fn get_block_wishlist(&self) -> GraphResult<PreHashMap<BlockId, Option<WrappedHeader>>> {
+        let mut wishlist = PreHashMap::<BlockId, Option<WrappedHeader>>::default();
+        for block_id in self.waiting_for_dependencies_index.iter() {
+            if let Some(BlockStatus::WaitingForDependencies {
+                unsatisfied_dependencies,
+                ..
+ }) = self.block_statuses.get(block_id) + { + for unsatisfied_h in unsatisfied_dependencies.iter() { + match self.block_statuses.get(unsatisfied_h) { + Some(BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + .. + }) => { + wishlist.insert(header.id, Some(header.clone())); + } + None => { + wishlist.insert(*unsatisfied_h, None); + } + _ => {} + } + } + } + } + + Ok(wishlist) + } } diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index 6fe44953332..015ba1897e6 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -977,4 +977,30 @@ impl GraphState { } Ok(result) } + + /// signal new slot + pub fn slot_tick(&mut self, current_slot: Option) -> GraphResult<()> { + // list all elements for which the time has come + let to_process: BTreeSet<(Slot, BlockId)> = self + .waiting_for_slot_index + .iter() + .filter_map(|b_id| match self.block_statuses.get(b_id) { + Some(BlockStatus::WaitingForSlot(header_or_block)) => { + let slot = header_or_block.get_slot(); + if Some(slot) <= current_slot { + Some((slot, *b_id)) + } else { + None + } + } + _ => None, + }) + .collect(); + + massa_trace!("consensus.block_graph.slot_tick", {}); + // process those elements + self.rec_process(to_process, current_slot)?; + + Ok(()) + } } diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 6790cd0b251..bca76a9f0d6 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -1,5 +1,5 @@ use std::{ - collections::VecDeque, + collections::{HashMap, VecDeque}, sync::{mpsc, Arc}, }; @@ -126,6 +126,12 @@ impl GraphWorker { next_slot.period, next_slot.thread, ); + let latest_final_periods: Vec = shared_state + .read() + .latest_final_blocks_periods + .iter() + .map(|(_block_id, period)| *period) + .collect(); if config.genesis_timestamp > now { let (days, hours, mins, secs) = config .genesis_timestamp @@ -175,6 +181,8 @@ impl GraphWorker { config.stats_timespan, ), launch_time: MassaTime::now(config.clock_compensation_millis)?, + latest_final_periods, + prev_blockclique: Default::default(), storage: storage.clone(), }; @@ -230,8 +238,41 @@ impl GraphWorker { write_shared_state.block_statuses = block_statuses; } } + + // Notify execution module of current blockclique and all final blocks. + // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync + // because the two modules run concurrently and out of sync. 
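+        // Concretely (a sketch of the shapes involved below, reconstructed from
+        // the calls that follow): finals are keyed by slot as HashMap<Slot, BlockId>,
+        // the blockclique is wrapped in Some(_) to force a full refresh on the
+        // execution side, and `block_storage` carries one owned Storage clone per
+        // referenced block.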
+        {
+            let read_shared_state = res_graph.shared_state.read();
+            let mut block_storage: PreHashMap<BlockId, Storage> = Default::default();
+            let notify_finals: HashMap<Slot, BlockId> = read_shared_state
+                .get_all_final_blocks()
+                .into_iter()
+                .map(|(b_id, block_infos)| {
+                    block_storage.insert(b_id, block_infos.1);
+                    (block_infos.0, b_id)
+                })
+                .collect();
+            let notify_blockclique: HashMap<Slot, BlockId> = read_shared_state
+                .get_blockclique()
+                .iter()
+                .map(|b_id| {
+                    let (a_block, storage) = read_shared_state
+                        .get_full_active_block(b_id)
+                        .expect("active block missing from block_db");
+                    let slot = a_block.slot;
+                    block_storage.insert(*b_id, storage.clone());
+                    (slot, *b_id)
+                })
+                .collect();
+            res_graph.prev_blockclique = notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect();
+            res_graph
+                .channels
+                .execution_controller
+                .update_blockclique_status(notify_finals, Some(notify_blockclique), block_storage);
+        }
+
         Ok(res_graph)
-        //TODO: Add notify execution
     }
 
     fn claim_parent_refs(&mut self) -> GraphResult<()> {
diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs
index a5288847353..aaa7fa28942 100644
--- a/massa-graph-2-worker/src/worker/main_loop.rs
+++ b/massa-graph-2-worker/src/worker/main_loop.rs
@@ -1,10 +1,15 @@
-use std::{sync::mpsc, time::Instant};
+use std::{collections::HashMap, mem, sync::mpsc, time::Instant};
 
 use massa_graph::error::GraphResult;
+use massa_graph_2_exports::block_status::BlockStatus;
+use massa_logging::massa_trace;
 use massa_models::{
+    block::{BlockId, WrappedHeader},
+    prehash::{PreHashMap, PreHashSet},
     slot::Slot,
     timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp},
 };
+use massa_storage::Storage;
 use massa_time::MassaTime;
 use tracing::log::warn;
 
@@ -94,11 +99,231 @@ impl GraphWorker {
         (next_slot, next_instant)
     }
 
+    /// Notify execution about blockclique changes and finalized blocks.
+    fn notify_execution(&mut self, finalized_blocks: HashMap<Slot, BlockId>) {
+        let read_shared_state = self.shared_state.read();
+        // List new block storage instances that Execution doesn't know about.
+        // These are blocks that have not been sent to Execution before, i.e. that
+        // were not in the previous blockclique.
+        let mut new_blocks_storage: PreHashMap<BlockId, Storage> = finalized_blocks
+            .iter()
+            .filter_map(|(_slot, b_id)| {
+                if self.prev_blockclique.contains_key(b_id) {
+                    // was previously sent as a blockclique element
+                    return None;
+                }
+                let storage = match read_shared_state.block_statuses.get(b_id) {
+                    Some(BlockStatus::Active {
+                        a_block: _,
+                        storage,
+                    }) => storage,
+                    _ => panic!("final block not found in active blocks"),
+                };
+                Some((*b_id, storage.clone()))
+            })
+            .collect();
+
+        // Get new blockclique block list with slots.
+        let mut blockclique_changed = false;
+        let new_blockclique: PreHashMap<BlockId, Slot> = read_shared_state
+            .get_blockclique()
+            .iter()
+            .map(|b_id| {
+                if let Some(slot) = self.prev_blockclique.remove(b_id) {
+                    // The block was already sent in the previous blockclique:
+                    // the slot can be gathered from there without locking Storage.
+                    // Note: the block is removed from self.prev_blockclique.
+                    (*b_id, slot)
+                } else {
+                    // The block was not present in the previous blockclique:
+                    // the blockclique has changed => get the block's slot by querying Storage.
+                    blockclique_changed = true;
+                    let (slot, storage) = match read_shared_state.block_statuses.get(b_id) {
+                        Some(BlockStatus::Active { a_block, storage }) => (a_block.slot, storage),
+                        _ => panic!("blockclique block not found in active blocks"),
+                    };
+                    new_blocks_storage.insert(*b_id, storage.clone());
+                    (*b_id, slot)
+                }
+            })
+            .collect();
+        if !self.prev_blockclique.is_empty() {
+            // All elements present in the new blockclique have been removed from `prev_blockclique` above.
+            // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore.
+            // In that case, we mark the blockclique as having changed.
+            blockclique_changed = true;
+        }
+        // Overwrite previous blockclique.
+        // Should still be done even if unchanged because elements were removed from it above.
+        self.prev_blockclique = new_blockclique.clone();
+
+        if finalized_blocks.is_empty() && !blockclique_changed {
+            // There are no changes (neither block finalizations nor blockclique changes) to send to execution.
+            return;
+        }
+
+        // Notify execution of block finalizations and blockclique changes
+        self.channels
+            .execution_controller
+            .update_blockclique_status(
+                finalized_blocks,
+                if blockclique_changed {
+                    Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect())
+                } else {
+                    None
+                },
+                new_blocks_storage,
+            );
+    }
+
+    /// Call this when the block database has changed:
+    /// processing of final blocks, pruning.
+    ///
+    /// 1. Propagate blocks
+    /// 2. Notify of attack attempts
+    /// 3. Get new final blocks
+    /// 4. Get blockclique
+    /// 5. Notify Execution
+    /// 6. Process new final blocks
+    /// 7. Notify pool of new final ops
+    /// 8. Notify PoS of final blocks
+    /// 9. Notify protocol of block wish list
+    /// 10. Note new latest final periods (prune graph if changed)
+    /// 11. Add stale blocks to stats
+    pub fn block_db_changed(&mut self) -> GraphResult<()> {
+        let final_block_slots = {
+            let mut write_shared_state = self.shared_state.write();
+            massa_trace!("consensus.consensus_worker.block_db_changed", {});
+
+            // Propagate new blocks
+            for (block_id, storage) in mem::take(&mut write_shared_state.to_propagate).into_iter() {
+                massa_trace!("consensus.consensus_worker.block_db_changed.integrated", {
+                    "block_id": block_id
+                });
+                self.channels
+                    .protocol_command_sender
+                    .integrated_block(block_id, storage)
+                    .await?;
+            }
+
+            // Notify protocol of attack attempts.
+ for hash in mem::take(&mut write_shared_state.attack_attempts).into_iter() { + self.channels + .protocol_command_sender + .notify_block_attack(hash) + .await?; + massa_trace!("consensus.consensus_worker.block_db_changed.attack", { + "hash": hash + }); + } + + // manage finalized blocks + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + let finalized_blocks = mem::take(&mut write_shared_state.new_final_blocks); + let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); + for b_id in finalized_blocks { + if let Some(BlockStatus::Active { + a_block, + storage: _, + }) = write_shared_state.block_statuses.get(&b_id) + { + // add to final blocks to notify execution + final_block_slots.insert(a_block.slot, b_id); + + // add to stats + let block_is_from_protocol = self + .protocol_blocks + .iter() + .any(|(_, block_id)| block_id == &b_id); + self.final_block_stats.push_back(( + timestamp, + a_block.creator_address, + block_is_from_protocol, + )); + } + } + + // add stale blocks to stats + let new_stale_block_ids_creators_slots = + mem::take(&mut write_shared_state.new_stale_blocks); + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { + self.stale_block_stats.push_back(timestamp); + } + final_block_slots + }; + + // notify execution + self.notify_execution(final_block_slots); + + // notify protocol of block wishlist + { + let read_shared_state = self.shared_state.read(); + let new_wishlist = read_shared_state.get_block_wishlist()?; + let new_blocks: PreHashMap> = new_wishlist + .iter() + .filter_map(|(id, header)| { + if !self.wishlist.contains_key(id) { + Some((*id, header.clone())) + } else { + None + } + }) + .collect(); + let remove_blocks: PreHashSet = self + .wishlist + .iter() + .filter_map(|(id, _)| { + if !new_wishlist.contains_key(id) { + Some(*id) + } else { + None + } + }) + .collect(); + if !new_blocks.is_empty() || !remove_blocks.is_empty() { + massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); + self.channels + .protocol_command_sender + .send_wishlist_delta(new_blocks, remove_blocks) + .await?; + self.wishlist = new_wishlist; + } + + // note new latest final periods + let latest_final_periods: Vec = read_shared_state + .latest_final_blocks_periods + .iter() + .map(|(_block_id, period)| *period) + .collect(); + // if changed... + if self.latest_final_periods != latest_final_periods { + // signal new last final periods to pool + self.channels + .pool_command_sender + .notify_final_cs_periods(&latest_final_periods); + // update final periods + self.latest_final_periods = latest_final_periods; + } + }; + + /* + TODO add this again + let creator_addr = Address::from_public_key(&b_creator); + if self.staking_keys.contains_key(&creator_addr) { + warn!("block {} that was produced by our address {} at slot {} became stale. 
This is probably due to a temporary desynchronization.", b_id, creator_addr, b_slot); + } + */ + + Ok(()) + } + pub fn run(&mut self) { loop { match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { //TODO: Desync, stats, block_db changed + self.slot_tick(self.next_slot); + self.previous_slot = Some(self.next_slot); (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); } WaitingStatus::Disconnected => { diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 1750511fc52..8ba4242de13 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -1,4 +1,5 @@ use massa_graph::BootstrapableGraph; +use massa_graph_2_exports::events::GraphEvent; use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager}; use massa_models::address::Address; use massa_models::block::{BlockId, WrappedHeader}; @@ -9,6 +10,7 @@ use massa_storage::Storage; use massa_time::MassaTime; use parking_lot::RwLock; use std::collections::VecDeque; +use std::sync::mpsc::Receiver; use std::sync::{mpsc, Arc}; use std::thread; use std::time::Instant; @@ -40,10 +42,13 @@ pub struct GraphWorker { /// the time span considered for stats stats_history_timespan: MassaTime, /// the time span considered for desynchronization detection - #[allow(dead_code)] stats_desync_detection_timespan: MassaTime, + /// save latest final periods + latest_final_periods: Vec, /// time at which the node was launched (used for desynchronization detection) launch_time: MassaTime, + /// previous blockclique notified to Execution + prev_blockclique: PreHashMap, /// Shared storage, storage: Storage, } @@ -51,6 +56,8 @@ pub struct GraphWorker { mod init; mod main_loop; mod process_commands; +mod stats; +mod tick; pub fn start_graph_worker( config: GraphConfig, @@ -89,9 +96,15 @@ pub fn start_graph_worker( let thread_graph = thread::Builder::new() .name("graph worker".into()) .spawn(move || { - let mut graph_worker = - //TODO: Better error management - GraphWorker::new(rx, config, channels, shared_state_cloned, init_graph, storage).expect("Failed to initialize graph worker"); + let mut graph_worker = GraphWorker::new( + rx, + config, + channels, + shared_state_cloned, + init_graph, + storage, + ) + .unwrap(); graph_worker.run() }) .expect("Can't spawn thread graph."); diff --git a/massa-graph-2-worker/src/worker/process_commands.rs b/massa-graph-2-worker/src/worker/process_commands.rs index 17626e88c0b..ffe98237d34 100644 --- a/massa-graph-2-worker/src/worker/process_commands.rs +++ b/massa-graph-2-worker/src/worker/process_commands.rs @@ -1,3 +1,4 @@ +use std::cmp::max; use std::collections::{hash_map, BTreeSet}; use super::GraphWorker; @@ -7,10 +8,41 @@ use massa_logging::massa_trace; use massa_models::{ block::{BlockId, WrappedHeader}, slot::Slot, + stats::ConsensusStats, }; +use massa_time::MassaTime; use tracing::log::debug; impl GraphWorker { + pub fn get_stats(&self) -> GraphResult { + let timespan_end = max( + self.launch_time, + MassaTime::now(self.config.clock_compensation_millis)?, + ); + let timespan_start = max( + timespan_end.saturating_sub(self.config.stats_timespan), + self.launch_time, + ); + let final_block_count = self + .final_block_stats + .iter() + .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end) + .count() as u64; + let stale_block_count = self + .stale_block_stats + .iter() + .filter(|t| **t >= timespan_start && **t < timespan_end) + .count() as u64; + let clique_count = 
self.shared_state.read().get_clique_count() as u64; + Ok(ConsensusStats { + final_block_count, + stale_block_count, + clique_count, + start_timespan: timespan_start, + end_timespan: timespan_end, + }) + } + pub fn register_block_header( &mut self, block_id: BlockId, @@ -41,7 +73,8 @@ impl GraphWorker { sequence_number, .. } => { // promote if discarded - write_shared_state.new_sequence_number(); + //TODO: Readd this + //*sequence_number = write_shared_state.new_sequence_number(); } BlockStatus::WaitingForDependencies { .. } => { // promote in dependencies diff --git a/massa-graph-2-worker/src/worker/stats.rs b/massa-graph-2-worker/src/worker/stats.rs new file mode 100644 index 00000000000..31d7e6a40b3 --- /dev/null +++ b/massa-graph-2-worker/src/worker/stats.rs @@ -0,0 +1,33 @@ +use massa_graph::error::GraphResult; +use massa_time::MassaTime; + +use super::GraphWorker; + +impl GraphWorker { + pub fn prune_stats(&mut self) -> GraphResult<()> { + let start_time = MassaTime::now(self.config.clock_compensation_millis)? + .saturating_sub(self.stats_history_timespan); + while let Some((t, _, _)) = self.final_block_stats.front() { + if t < &start_time { + self.final_block_stats.pop_front(); + } else { + break; + } + } + while let Some(t) = self.stale_block_stats.front() { + if t < &start_time { + self.stale_block_stats.pop_front(); + } else { + break; + } + } + while let Some((t, _)) = self.protocol_blocks.front() { + if t < &start_time { + self.protocol_blocks.pop_front(); + } else { + break; + } + } + Ok(()) + } +} diff --git a/massa-graph-2-worker/src/worker/tick.rs b/massa-graph-2-worker/src/worker/tick.rs new file mode 100644 index 00000000000..6c56d8a5b05 --- /dev/null +++ b/massa-graph-2-worker/src/worker/tick.rs @@ -0,0 +1,56 @@ +use massa_graph::error::GraphResult; +use massa_graph_2_exports::events::GraphEvent; +use massa_logging::massa_trace; +use massa_models::slot::Slot; +use massa_time::MassaTime; +use std::cmp::max; +use tracing::{info, log::warn}; + +use super::GraphWorker; + +impl GraphWorker { + pub fn slot_tick(&mut self, slot: Slot) -> GraphResult<()> { + let now = MassaTime::now(self.config.clock_compensation_millis)?; + massa_trace!("consensus.consensus_worker.slot_tick", { "slot": slot }); + + let previous_cycle = self + .previous_slot + .map(|s| s.get_cycle(self.config.periods_per_cycle)); + let observed_cycle = slot.get_cycle(self.config.periods_per_cycle); + if previous_cycle.is_none() { + // first cycle observed + info!("Massa network has started ! 
🎉") + } + if previous_cycle < Some(observed_cycle) { + info!("Started cycle {}", observed_cycle); + } + + // check if there are any final blocks is coming from protocol + // if none => we are probably desync + #[cfg(not(feature = "sandbox"))] + if now + > max(self.config.genesis_timestamp, self.launch_time) + .saturating_add(self.stats_desync_detection_timespan) + && !self + .final_block_stats + .iter() + .any(|(time, _, is_from_protocol)| { + time > &now.saturating_sub(self.stats_desync_detection_timespan) + && *is_from_protocol + }) + { + warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); + let _ = self.channels.controller_event_tx.send(GraphEvent::NeedSync); + } + + // signal tick to block graph + self.shared_state.write().slot_tick(Some(slot))?; + + // take care of block db changes + self.block_db_changed()?; + + // prune stats + self.prune_stats()?; + Ok(()) + } +} From 86dd52d2592910765c9f5276beb418ccfc9dc959 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 11 Oct 2022 09:56:46 +0200 Subject: [PATCH 13/40] Fix send protocol. --- Cargo.lock | 1 + massa-api/src/public.rs | 2 +- .../src/consensus_worker.rs | 9 +++------ massa-graph-2-worker/src/worker/main_loop.rs | 9 +++------ massa-graph/Cargo.toml | 1 + massa-graph/src/error.rs | 3 +++ .../src/protocol_controller.rs | 20 ++++++++----------- massa-protocol-exports/src/tests/tools.rs | 1 - .../src/tests/ask_block_scenarios.rs | 7 ------- .../src/tests/ban_nodes_scenarios.rs | 3 --- .../src/tests/operations_scenarios.rs | 7 ------- massa-protocol-worker/src/tests/scenarios.rs | 4 ---- 12 files changed, 20 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fef91e76e43..9867625a1bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1911,6 +1911,7 @@ dependencies = [ "massa_logging", "massa_models", "massa_pos_exports", + "massa_protocol_exports", "massa_serialization", "massa_signature", "massa_storage", diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 87ac7aa23e9..8193d8252fc 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -880,7 +880,7 @@ impl Endpoints for API { to_send.store_operations(verified_ops.clone()); let ids: Vec = verified_ops.iter().map(|op| op.id).collect(); cmd_sender.add_operations(to_send.clone()); - protocol_sender.propagate_operations(to_send).await?; + protocol_sender.propagate_operations(to_send)?; Ok(ids) }; Box::pin(closure()) diff --git a/massa-consensus-worker/src/consensus_worker.rs b/massa-consensus-worker/src/consensus_worker.rs index 02382805693..7d65a106b2f 100644 --- a/massa-consensus-worker/src/consensus_worker.rs +++ b/massa-consensus-worker/src/consensus_worker.rs @@ -654,16 +654,14 @@ impl ConsensusWorker { }); self.channels .protocol_command_sender - .integrated_block(block_id, storage) - .await?; + .integrated_block(block_id, storage)?; } // Notify protocol of attack attempts. 
for hash in self.block_db.get_attack_attempts().into_iter() { self.channels .protocol_command_sender - .notify_block_attack(hash) - .await?; + .notify_block_attack(hash)?; massa_trace!("consensus.consensus_worker.block_db_changed.attack", { "hash": hash }); @@ -721,8 +719,7 @@ impl ConsensusWorker { massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); self.channels .protocol_command_sender - .send_wishlist_delta(new_blocks, remove_blocks) - .await?; + .send_wishlist_delta(new_blocks, remove_blocks)?; self.wishlist = new_wishlist; } diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index aaa7fa28942..0145e2d5deb 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -201,16 +201,14 @@ impl GraphWorker { }); self.channels .protocol_command_sender - .integrated_block(block_id, storage) - .await?; + .integrated_block(block_id, storage)?; } // Notify protocol of attack attempts. for hash in mem::take(&mut write_shared_state.attack_attempts).into_iter() { self.channels .protocol_command_sender - .notify_block_attack(hash) - .await?; + .notify_block_attack(hash)?; massa_trace!("consensus.consensus_worker.block_db_changed.attack", { "hash": hash }); @@ -284,8 +282,7 @@ impl GraphWorker { massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); self.channels .protocol_command_sender - .send_wishlist_delta(new_blocks, remove_blocks) - .await?; + .send_wishlist_delta(new_blocks, remove_blocks)?; self.wishlist = new_wishlist; } diff --git a/massa-graph/Cargo.toml b/massa-graph/Cargo.toml index 2e3e154013d..d8277889fa2 100644 --- a/massa-graph/Cargo.toml +++ b/massa-graph/Cargo.toml @@ -22,6 +22,7 @@ massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_signature = { path = "../massa-signature" } +massa_protocol_exports = { path = "../massa-protocol-exports" } massa_serialization = { path = "../massa-serialization"} massa_time = { path = "../massa-time" } diff --git a/massa-graph/src/error.rs b/massa-graph/src/error.rs index b01a53f4eca..36e12b98e5f 100644 --- a/massa-graph/src/error.rs +++ b/massa-graph/src/error.rs @@ -2,6 +2,7 @@ use displaydoc::Display; use massa_execution_exports::ExecutionError; use massa_models::error::ModelsError; +use massa_protocol_exports::ProtocolError; use massa_time::TimeError; use std::array::TryFromSliceError; use thiserror::Error; @@ -44,6 +45,8 @@ pub enum GraphError { MassaTimeError(#[from] TimeError), /// transaction error {0} TransactionError(String), + /// Protocol error {0} + ProtocolError(#[from] ProtocolError), } /// Internal error diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index 502e7a290ff..e9e854781ec 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -100,7 +100,7 @@ impl ProtocolCommandSender { /// # Arguments /// * `block_id`: ID of the block /// * `storage`: Storage instance containing references to the block and all its dependencies - pub async fn integrated_block( + pub fn integrated_block( &mut self, block_id: BlockId, storage: Storage, @@ -109,34 +109,31 @@ impl ProtocolCommandSender { "block_id": block_id }); self.0 - .send(ProtocolCommand::IntegratedBlock { 
block_id, storage }) - .await + .blocking_send(ProtocolCommand::IntegratedBlock { block_id, storage }) .map_err(|_| ProtocolError::ChannelError("block_integrated command send error".into())) } /// Notify to protocol an attack attempt. - pub async fn notify_block_attack(&mut self, block_id: BlockId) -> Result<(), ProtocolError> { + pub fn notify_block_attack(&mut self, block_id: BlockId) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.notify_block_attack", { "block_id": block_id }); self.0 - .send(ProtocolCommand::AttackBlockDetected(block_id)) - .await + .blocking_send(ProtocolCommand::AttackBlockDetected(block_id)) .map_err(|_| { ProtocolError::ChannelError("notify_block_attack command send error".into()) }) } /// update the block wish list - pub async fn send_wishlist_delta( + pub fn send_wishlist_delta( &mut self, new: PreHashMap>, remove: PreHashSet, ) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.send_wishlist_delta", { "new": new, "remove": remove }); self.0 - .send(ProtocolCommand::WishlistDelta { new, remove }) - .await + .blocking_send(ProtocolCommand::WishlistDelta { new, remove }) .map_err(|_| { ProtocolError::ChannelError("send_wishlist_delta command send error".into()) }) @@ -145,13 +142,12 @@ impl ProtocolCommandSender { /// Propagate a batch of operation ids (from pool). /// /// note: Full `OperationId` is replaced by a `OperationPrefixId` later by the worker. - pub async fn propagate_operations(&mut self, operations: Storage) -> Result<(), ProtocolError> { + pub fn propagate_operations(&mut self, operations: Storage) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.propagate_operations", { "operations": operations.get_op_refs() }); self.0 - .send(ProtocolCommand::PropagateOperations(operations)) - .await + .blocking_send(ProtocolCommand::PropagateOperations(operations)) .map_err(|_| { ProtocolError::ChannelError("propagate_operation command send error".into()) }) diff --git a/massa-protocol-exports/src/tests/tools.rs b/massa-protocol-exports/src/tests/tools.rs index 80d6d57f907..03c06ed9788 100644 --- a/massa-protocol-exports/src/tests/tools.rs +++ b/massa-protocol-exports/src/tests/tools.rs @@ -185,7 +185,6 @@ pub async fn send_and_propagate_block( .collect(), PreHashSet::::default(), ) - .await .unwrap(); // Send block info to protocol. diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index bbe8c4f6a8f..123a57620be 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -61,7 +61,6 @@ async fn test_full_ask_block_workflow() { .collect(), PreHashSet::::default(), ) - .await .unwrap(); // assert it was asked to node A, then B @@ -178,7 +177,6 @@ async fn test_empty_block() { .collect(), PreHashSet::::default(), ) - .await .unwrap(); // assert it was asked to node A, then B @@ -291,7 +289,6 @@ async fn test_someone_knows_it() { .collect(), PreHashSet::::default(), ) - .await .unwrap(); assert_hash_asked_to_node(hash_1, node_c.id, &mut network_controller).await; @@ -377,7 +374,6 @@ async fn test_dont_want_it_anymore() { .collect(), PreHashSet::::default(), ) - .await .unwrap(); // assert it was asked to node A @@ -386,7 +382,6 @@ async fn test_dont_want_it_anymore() { // we don't want it anymore protocol_command_sender .send_wishlist_delta(Default::default(), vec![hash_1].into_iter().collect()) - .await .unwrap(); // 7. 
Make sure protocol did not send additional ask for block commands. @@ -454,7 +449,6 @@ async fn test_no_one_has_it() { .collect(), PreHashSet::::default(), ) - .await .unwrap(); // assert it was asked to node A @@ -545,7 +539,6 @@ async fn test_multiple_blocks_without_a_priori() { .collect(), PreHashSet::::default(), ) - .await .unwrap(); let list = asked_list(&mut network_controller).await; diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index fec4bb7dfac..46fb541fafc 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -163,7 +163,6 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .collect(), PreHashSet::::default(), ) - .await .unwrap(); tools::assert_hash_asked_to_node(block.id, to_ban_node.id, &mut network_controller) @@ -287,7 +286,6 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h .collect(), PreHashSet::::default(), ) - .await .expect("Failed to ask for block."); // 6. Make sure protocol did not ask for the block from the banned node. @@ -473,7 +471,6 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { // Simulate consensus notifying an attack attempt. protocol_command_sender .notify_block_attack(expected_hash) - .await .expect("Failed to ask for block."); // Make sure all initial nodes are banned. diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 55e70e2c0a7..6c39b788d18 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -165,7 +165,6 @@ async fn test_protocol_propagates_operations_to_active_nodes() { storage.store_operations(vec![operation.clone()]); protocol_command_sender .propagate_operations(storage) - .await .unwrap(); loop { @@ -239,7 +238,6 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ storage.store_operations(vec![operation.clone()]); protocol_command_sender .propagate_operations(storage) - .await .unwrap(); loop { @@ -372,7 +370,6 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo storage.store_operations(vec![operation.clone()]); protocol_command_sender .propagate_operations(storage) - .await .unwrap(); let expected_operation_id_2 = operation.id; @@ -455,7 +452,6 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .collect(), PreHashSet::::default(), ) - .await .unwrap(); assert_hash_asked_to_node(block.id, nodes[0].id, &mut network_controller).await; @@ -480,7 +476,6 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ storage.store_operations(vec![operation.clone()]); protocol_command_sender .propagate_operations(storage) - .await .unwrap(); match network_controller @@ -569,7 +564,6 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .collect(), PreHashSet::::default(), ) - .await .unwrap(); // assert it was asked to node A, then B, then C. 
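The test changes in this patch are all the same mechanical rewrite: since
`ProtocolCommandSender` now wraps the channel's `blocking_send`, call sites drop
`.await` and become plain synchronous calls. For illustration (hypothetical
surrounding test code):

    // before: protocol_command_sender.propagate_operations(storage).await.unwrap();
    // after:
    protocol_command_sender.propagate_operations(storage).unwrap();

Note that tokio's `blocking_send` panics if called from within an asynchronous
execution context, so these senders must only be driven from dedicated
(non-async) worker threads.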
@@ -608,7 +602,6 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ storage.store_operations(vec![op_2.clone()]); protocol_command_sender .propagate_operations(storage) - .await .unwrap(); match network_controller diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs index 829c59f0c55..55d1d24e4dd 100644 --- a/massa-protocol-worker/src/tests/scenarios.rs +++ b/massa-protocol-worker/src/tests/scenarios.rs @@ -72,7 +72,6 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { .collect(), PreHashSet::::default(), ) - .await .expect("Failed to ask for block."); // 6. Check that protocol asks the node for the full block. @@ -143,7 +142,6 @@ async fn test_protocol_sends_blocks_when_asked_for() { storage.store_block(block.clone()); protocol_command_sender .integrated_block(expected_hash, storage.clone()) - .await .unwrap(); // 3. Simulate two nodes asking for a block. @@ -260,7 +258,6 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f let _op_ids = ref_block.content.operations.clone(); protocol_command_sender .integrated_block(ref_hash, storage) - .await .expect("Failed to ask for block."); // 6. Check that protocol propagates the header to the right nodes. @@ -369,7 +366,6 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl let _op_ids = ref_block.content.operations.clone(); protocol_command_sender .integrated_block(ref_hash, storage) - .await .expect("Failed to ask for block."); // 6. Check that protocol propagates the header to the right nodes. From 91e1ea80e3c5734defab9db75b301367d1182ce2 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 11 Oct 2022 11:58:04 +0200 Subject: [PATCH 14/40] Add process of block and block header --- massa-graph-2-exports/src/channels.rs | 2 + massa-graph-2-worker/src/controller.rs | 3 +- massa-graph-2-worker/src/state/mod.rs | 5 +- massa-graph-2-worker/src/state/process.rs | 6 +- .../src/state/process_commands.rs | 136 ++++++++++++++++++ massa-graph-2-worker/src/worker/init.rs | 54 ++++--- massa-graph-2-worker/src/worker/main_loop.rs | 51 +++++-- massa-graph-2-worker/src/worker/mod.rs | 4 +- .../src/worker/process_commands.rs | 57 +------- massa-graph-2-worker/src/worker/tick.rs | 36 ++--- 10 files changed, 239 insertions(+), 115 deletions(-) create mode 100644 massa-graph-2-worker/src/state/process_commands.rs diff --git a/massa-graph-2-exports/src/channels.rs b/massa-graph-2-exports/src/channels.rs index 28e564ac350..792e14984fc 100644 --- a/massa-graph-2-exports/src/channels.rs +++ b/massa-graph-2-exports/src/channels.rs @@ -1,3 +1,5 @@ +use std::sync::mpsc::Receiver; + use crossbeam_channel::Sender; use massa_execution_exports::ExecutionController; use massa_pool_exports::PoolController; diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index 5d0fb7557b3..8704b973803 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -95,7 +95,8 @@ impl GraphController for GraphControllerImpl { } fn get_stats(&self) -> GraphResult { - self.get_stats() + //TODO: Get the stats here + todo!() } fn get_best_parents(&self) -> Vec<(BlockId, u64)> { diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-graph-2-worker/src/state/mod.rs index 0df94cab9b1..c37210a335e 100644 --- a/massa-graph-2-worker/src/state/mod.rs +++ b/massa-graph-2-worker/src/state/mod.rs @@ -18,6 +18,7 @@ use massa_models::{ use 
massa_storage::Storage; mod process; +mod process_commands; mod verifications; #[derive(Clone)] @@ -224,7 +225,7 @@ impl GraphState { while let Some((current_block, _)) = self.get_full_active_block(¤t_block_id) { let parent_id = { if !current_block.parents.is_empty() { - Some(current_block.parents[thread as usize].0) + Some(current_block.parents[thread].0) } else { None } @@ -388,7 +389,7 @@ impl GraphState { self.active_index .iter() .map(|b_id| { - let block_infos = match self.block_statuses.get(&b_id) { + let block_infos = match self.block_statuses.get(b_id) { Some(BlockStatus::Active { a_block, storage }) => { (a_block.slot, storage.clone()) } diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index 015ba1897e6..5c9fdc3aa0a 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -88,7 +88,7 @@ impl GraphState { block_id ))); }; - match self.check_header(&block_id, &header, current_slot, &self)? { + match self.check_header(&block_id, &header, current_slot, self)? { HeaderCheckOutcome::Proceed { .. } => { // set as waiting dependencies let mut dependencies = PreHashSet::::default(); @@ -206,7 +206,7 @@ impl GraphState { &block_id, &stored_block.content.header, current_slot, - &self, + self, )? { HeaderCheckOutcome::Proceed { parents_hash_period, @@ -611,7 +611,7 @@ impl GraphState { ); let before = self.max_cliques.len(); self.max_cliques = self - .compute_max_cliques(&self) + .compute_max_cliques(self) .into_iter() .map(|c| Clique { block_ids: c, diff --git a/massa-graph-2-worker/src/state/process_commands.rs b/massa-graph-2-worker/src/state/process_commands.rs new file mode 100644 index 00000000000..5de43fa1797 --- /dev/null +++ b/massa-graph-2-worker/src/state/process_commands.rs @@ -0,0 +1,136 @@ +use std::collections::{hash_map::Entry, BTreeSet}; + +use massa_graph::error::GraphResult; +use massa_graph_2_exports::block_status::{BlockStatus, HeaderOrBlock}; +use massa_logging::massa_trace; +use massa_models::{ + block::{BlockId, WrappedHeader}, + slot::Slot, +}; +use massa_storage::Storage; +use tracing::debug; + +use super::GraphState; + +impl GraphState { + pub fn register_block_header( + &mut self, + block_id: BlockId, + header: WrappedHeader, + current_slot: Option, + ) -> GraphResult<()> { + // ignore genesis blocks + if self.genesis_hashes.contains(&block_id) { + return Ok(()); + } + + debug!( + "received header {} for slot {}", + block_id, header.content.slot + ); + massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); + let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); + match self.block_statuses.entry(block_id) { + // if absent => add as Incoming, call rec_ack on it + Entry::Vacant(vac) => { + to_ack.insert((header.content.slot, block_id)); + vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); + self.incoming_index.insert(block_id); + } + Entry::Occupied(mut occ) => match occ.get_mut() { + BlockStatus::Discarded { + sequence_number, .. + } => { + // promote if discarded + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + BlockStatus::WaitingForDependencies { .. } => { + // promote in dependencies + self.promote_dep_tree(block_id)?; + } + _ => {} + }, + } + + // process + self.rec_process(to_ack, current_slot)?; + + Ok(()) + } + + /// A new block has come + /// + /// Checks performed: + /// - Ignore genesis blocks. + /// - See `process`. 
+ pub fn register_block( + &mut self, + block_id: BlockId, + slot: Slot, + current_slot: Option, + storage: Storage, + ) -> GraphResult<()> { + // ignore genesis blocks + if self.genesis_hashes.contains(&block_id) { + return Ok(()); + } + + debug!("received block {} for slot {}", block_id, slot); + + let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); + match self.block_statuses.entry(block_id) { + // if absent => add as Incoming, call rec_ack on it + Entry::Vacant(vac) => { + to_ack.insert((slot, block_id)); + vac.insert(BlockStatus::Incoming(HeaderOrBlock::Block { + id: block_id, + slot, + storage, + })); + self.incoming_index.insert(block_id); + } + Entry::Occupied(mut occ) => match occ.get_mut() { + BlockStatus::Discarded { + sequence_number, .. + } => { + // promote if discarded + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + BlockStatus::WaitingForSlot(header_or_block) => { + // promote to full block + *header_or_block = HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }; + } + BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + .. + } => { + // promote to full block and satisfy self-dependency + if unsatisfied_dependencies.remove(&block_id) { + // a dependency was satisfied: process + to_ack.insert((slot, block_id)); + } + *header_or_block = HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }; + // promote in dependencies + self.promote_dep_tree(block_id)?; + } + _ => return Ok(()), + }, + } + + // process + self.rec_process(to_ack, current_slot)?; + + Ok(()) + } +} diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index bca76a9f0d6..6956b319ec1 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -30,12 +30,12 @@ use super::GraphWorker; /// Creates genesis block in given thread. /// /// # Arguments -/// * `cfg`: consensus configuration +/// * `cfg`: graph configuration /// * `thread_number`: thread in which we want a genesis block -pub fn create_genesis_block( - cfg: &GraphConfig, - thread_number: u8, -) -> GraphResult<(BlockId, WrappedBlock)> { +/// +/// # Returns +/// A genesis block +pub fn create_genesis_block(cfg: &GraphConfig, thread_number: u8) -> GraphResult { let keypair = &cfg.genesis_key; let header = BlockHeader::new_wrapped( BlockHeader { @@ -48,23 +48,32 @@ pub fn create_genesis_block( keypair, )?; - Ok(( - header.id, - Block::new_wrapped( - Block { - header, - operations: Default::default(), - }, - BlockSerializer::new(), - keypair, - )?, - )) + Ok(Block::new_wrapped( + Block { + header, + operations: Default::default(), + }, + BlockSerializer::new(), + keypair, + )?) } impl GraphWorker { + /// Creates a new Graph worker. 
+ /// + /// # Arguments + /// * `config`: graph configuration + /// * `command_receiver`: channel to receive commands from controller + /// * `channels`: channels to communicate with other workers + /// * `shared_state`: shared state with the controller + /// * `init_graph`: Optional graph of blocks to init the worker + /// * `storage`: shared storage + /// + /// # Returns: + /// A GraphWorker, to interact with it use the `GraphController` pub fn new( - command_receiver: mpsc::Receiver, config: GraphConfig, + command_receiver: mpsc::Receiver, channels: GraphChannels, shared_state: Arc>, init_graph: Option, @@ -79,19 +88,19 @@ impl GraphWorker { now, ) .expect("Couldn't get the init slot consensus."); - // load genesis blocks + // load genesis blocks let mut block_statuses = PreHashMap::default(); let mut genesis_block_ids = Vec::with_capacity(config.thread_count as usize); for thread in 0u8..config.thread_count { - let (block_id, block) = create_genesis_block(&config, thread).map_err(|err| { + let block = create_genesis_block(&config, thread).map_err(|err| { GraphError::GenesisCreationError(format!("genesis error {}", err)) })?; let mut storage = storage.clone_without_refs(); storage.store_block(block.clone()); - genesis_block_ids.push(block_id); + genesis_block_ids.push(block.id); block_statuses.insert( - block_id, + block.id, BlockStatus::Active { a_block: Box::new(ActiveBlock { creator_address: block.creator_address, @@ -99,7 +108,7 @@ impl GraphWorker { children: vec![PreHashMap::default(); config.thread_count as usize], descendants: Default::default(), is_final: true, - block_id, + block_id: block.id, slot: block.content.header.content.slot, fitness: block.get_fitness(), }), @@ -275,6 +284,7 @@ impl GraphWorker { Ok(res_graph) } + /// Internal function used at initialization of the `GraphWorker` to link blocks with their parents fn claim_parent_refs(&mut self) -> GraphResult<()> { let mut write_shared_state = self.shared_state.write(); for (_b_id, block_status) in write_shared_state.block_statuses.iter_mut() { diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index 0145e2d5deb..fafea3a8a06 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -24,17 +24,36 @@ enum WaitingStatus { } impl GraphWorker { - fn manage_command(&self, command: GraphCommand) -> GraphResult<()> { + fn manage_command(&mut self, command: GraphCommand) -> GraphResult<()> { match command { - GraphCommand::RegisterBlockHeader(_, _) => {} - GraphCommand::RegisterBlock(_, _, _) => { - // TODO + GraphCommand::RegisterBlockHeader(block_id, header) => { + { + let mut write_shared_state = self.shared_state.write(); + write_shared_state.register_block_header( + block_id, + header, + self.previous_slot, + )?; + } + self.block_db_changed() + } + GraphCommand::RegisterBlock(block_id, slot, block_storage) => { + { + let mut write_shared_state = self.shared_state.write(); + write_shared_state.register_block( + block_id, + slot, + self.previous_slot, + block_storage, + )?; + } + self.block_db_changed() } _ => { + Ok(()) // TODO } } - Ok(()) } /// Wait and interrupt or wait until an instant or a stop signal @@ -43,14 +62,13 @@ impl GraphWorker { /// Returns the error of the process of the command if any. /// Returns true if we reached the instant. /// Returns false if we were interrupted by a command. 
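For context on the hunk below: `wait_slot_or_command` blocks on the command channel until either the next slot's instant is reached or a command arrives, and it now takes `&mut self` because handling a command mutates worker state. `recv_deadline` on `std::sync::mpsc` is a nightly-only API at the time of writing, so outside this codebase the same deadline wait can be emulated on stable Rust with `recv_timeout`; a rough sketch with illustrative names:

    use std::sync::mpsc::{sync_channel, Receiver, RecvTimeoutError};
    use std::time::{Duration, Instant};

    enum Waiting {
        Ended,        // deadline reached: run the slot tick
        Interrupted,  // a command arrived first
        Disconnected, // all senders dropped: shut down
    }

    /// Stable-Rust approximation of `recv_deadline`: turn the absolute
    /// deadline into a relative timeout for `recv_timeout`.
    fn wait_slot_or_command<T>(rx: &Receiver<T>, deadline: Instant) -> (Waiting, Option<T>) {
        let timeout = deadline.saturating_duration_since(Instant::now());
        match rx.recv_timeout(timeout) {
            Ok(cmd) => (Waiting::Interrupted, Some(cmd)),
            Err(RecvTimeoutError::Timeout) => (Waiting::Ended, None),
            Err(RecvTimeoutError::Disconnected) => (Waiting::Disconnected, None),
        }
    }

    fn main() {
        let (tx, rx) = sync_channel::<u8>(1);
        tx.send(7).unwrap();
        let (status, cmd) = wait_slot_or_command(&rx, Instant::now() + Duration::from_millis(50));
        assert!(matches!(status, Waiting::Interrupted));
        assert_eq!(cmd, Some(7));
    }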
- fn wait_slot_or_command(&self, deadline: Instant) -> WaitingStatus { + fn wait_slot_or_command(&mut self, deadline: Instant) -> WaitingStatus { match self.command_receiver.recv_deadline(deadline) { // message received => manage it Ok(command) => { - match self.manage_command(command) { - Err(err) => warn!("Error in graph: {}", err), - Ok(()) => {} - }; + if let Err(err) = self.manage_command(command) { + warn!("Error in graph: {}", err); + } WaitingStatus::Interrupted } // timeout => continue main loop @@ -100,6 +118,9 @@ impl GraphWorker { } /// Notify execution about blockclique changes and finalized blocks. + /// + /// # Arguments: + /// * `finalized_blocks`: Block that became final and need to be send to execution fn notify_execution(&mut self, finalized_blocks: HashMap) { let read_shared_state = self.shared_state.read(); // List new block storage instances that Execution doesn't know about. @@ -314,13 +335,19 @@ impl GraphWorker { Ok(()) } + /// Runs in loop forever. This loop must stop every slot to perform operations on stats and graph + /// but can be stopped anytime by a command received. pub fn run(&mut self) { loop { match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { - //TODO: Desync, stats, block_db changed - self.slot_tick(self.next_slot); self.previous_slot = Some(self.next_slot); + if let Err(err) = self.slot_tick(self.next_slot) { + warn!("Error while processing block tick: {}", err); + } + if let Err(err) = self.stats_tick() { + warn!("Error while processing stats tick: {}", err); + } (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); } WaitingStatus::Disconnected => { diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 8ba4242de13..c27346262d7 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -1,5 +1,4 @@ use massa_graph::BootstrapableGraph; -use massa_graph_2_exports::events::GraphEvent; use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager}; use massa_models::address::Address; use massa_models::block::{BlockId, WrappedHeader}; @@ -10,7 +9,6 @@ use massa_storage::Storage; use massa_time::MassaTime; use parking_lot::RwLock; use std::collections::VecDeque; -use std::sync::mpsc::Receiver; use std::sync::{mpsc, Arc}; use std::thread; use std::time::Instant; @@ -97,8 +95,8 @@ pub fn start_graph_worker( .name("graph worker".into()) .spawn(move || { let mut graph_worker = GraphWorker::new( - rx, config, + rx, channels, shared_state_cloned, init_graph, diff --git a/massa-graph-2-worker/src/worker/process_commands.rs b/massa-graph-2-worker/src/worker/process_commands.rs index ffe98237d34..3aa001294ec 100644 --- a/massa-graph-2-worker/src/worker/process_commands.rs +++ b/massa-graph-2-worker/src/worker/process_commands.rs @@ -1,17 +1,9 @@ use std::cmp::max; -use std::collections::{hash_map, BTreeSet}; use super::GraphWorker; use massa_graph::error::GraphResult; -use massa_graph_2_exports::block_status::{BlockStatus, HeaderOrBlock}; -use massa_logging::massa_trace; -use massa_models::{ - block::{BlockId, WrappedHeader}, - slot::Slot, - stats::ConsensusStats, -}; +use massa_models::stats::ConsensusStats; use massa_time::MassaTime; -use tracing::log::debug; impl GraphWorker { pub fn get_stats(&self) -> GraphResult { @@ -42,51 +34,4 @@ impl GraphWorker { end_timespan: timespan_end, }) } - - pub fn register_block_header( - &mut self, - block_id: BlockId, - header: WrappedHeader, - current_slot: 
Option, - ) -> GraphResult<()> { - let mut write_shared_state = self.shared_state.write(); - // ignore genesis blocks - if write_shared_state.genesis_hashes.contains(&block_id) { - return Ok(()); - } - - debug!( - "received header {} for slot {}", - block_id, header.content.slot - ); - massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); - let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); - match write_shared_state.block_statuses.entry(block_id) { - // if absent => add as Incoming, call rec_ack on it - hash_map::Entry::Vacant(vac) => { - to_ack.insert((header.content.slot, block_id)); - vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); - write_shared_state.incoming_index.insert(block_id); - } - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - BlockStatus::Discarded { - sequence_number, .. - } => { - // promote if discarded - //TODO: Readd this - //*sequence_number = write_shared_state.new_sequence_number(); - } - BlockStatus::WaitingForDependencies { .. } => { - // promote in dependencies - write_shared_state.promote_dep_tree(block_id)?; - } - _ => {} - }, - } - - // process - write_shared_state.rec_process(to_ack, current_slot)?; - - Ok(()) - } } diff --git a/massa-graph-2-worker/src/worker/tick.rs b/massa-graph-2-worker/src/worker/tick.rs index 6c56d8a5b05..43b17bbccba 100644 --- a/massa-graph-2-worker/src/worker/tick.rs +++ b/massa-graph-2-worker/src/worker/tick.rs @@ -9,21 +9,8 @@ use tracing::{info, log::warn}; use super::GraphWorker; impl GraphWorker { - pub fn slot_tick(&mut self, slot: Slot) -> GraphResult<()> { + pub fn stats_tick(&mut self) -> GraphResult<()> { let now = MassaTime::now(self.config.clock_compensation_millis)?; - massa_trace!("consensus.consensus_worker.slot_tick", { "slot": slot }); - - let previous_cycle = self - .previous_slot - .map(|s| s.get_cycle(self.config.periods_per_cycle)); - let observed_cycle = slot.get_cycle(self.config.periods_per_cycle); - if previous_cycle.is_none() { - // first cycle observed - info!("Massa network has started ! 🎉") - } - if previous_cycle < Some(observed_cycle) { - info!("Started cycle {}", observed_cycle); - } // check if there are any final blocks is coming from protocol // if none => we are probably desync @@ -42,6 +29,25 @@ impl GraphWorker { warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); let _ = self.channels.controller_event_tx.send(GraphEvent::NeedSync); } + // prune stats + self.prune_stats()?; + Ok(()) + } + + pub fn slot_tick(&mut self, slot: Slot) -> GraphResult<()> { + massa_trace!("consensus.consensus_worker.slot_tick", { "slot": slot }); + + let previous_cycle = self + .previous_slot + .map(|s| s.get_cycle(self.config.periods_per_cycle)); + let observed_cycle = slot.get_cycle(self.config.periods_per_cycle); + if previous_cycle.is_none() { + // first cycle observed + info!("Massa network has started ! 🎉") + } + if previous_cycle < Some(observed_cycle) { + info!("Started cycle {}", observed_cycle); + } // signal tick to block graph self.shared_state.write().slot_tick(Some(slot))?; @@ -49,8 +55,6 @@ impl GraphWorker { // take care of block db changes self.block_db_changed()?; - // prune stats - self.prune_stats()?; Ok(()) } } From 10c51308eca94ed4b2de9fadca231dd236e8a77e Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 12 Oct 2022 11:40:51 +0200 Subject: [PATCH 15/40] Add fetch get stats. 
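This commit moves the stats bookkeeping (`final_block_stats`, `stale_block_stats`, `protocol_blocks`) into the shared `GraphState`, so `get_stats` can be answered by the controller directly from a read lock instead of round-tripping through the worker thread. The underlying technique is a time-windowed count over `VecDeque`s of timestamps: entries are pushed in order and pruned from the front. A self-contained sketch of that bookkeeping, using std `Instant` in place of `MassaTime` and illustrative names:

    use std::collections::VecDeque;
    use std::time::{Duration, Instant};

    struct WindowStats {
        events: VecDeque<Instant>,
        history: Duration,
    }

    impl WindowStats {
        fn record(&mut self, t: Instant) {
            self.events.push_back(t);
        }

        /// Drop entries older than the retained history, from the front
        /// only: timestamps are pushed in order, so the deque stays sorted.
        fn prune(&mut self, now: Instant) {
            while let Some(t) = self.events.front() {
                if now.duration_since(*t) > self.history {
                    self.events.pop_front();
                } else {
                    break;
                }
            }
        }

        /// Count events inside [start, end), mirroring the filters in `get_stats`.
        fn count_in(&self, start: Instant, end: Instant) -> u64 {
            self.events.iter().filter(|t| **t >= start && **t < end).count() as u64
        }
    }

    fn main() {
        let now = Instant::now();
        let mut s = WindowStats { events: VecDeque::new(), history: Duration::from_secs(60) };
        s.record(now);
        s.prune(now);
        assert_eq!(s.count_in(now, now + Duration::from_secs(1)), 1);
    }

Because timestamps are appended monotonically, pruning from the front costs only as much as the number of expired entries rather than a full scan.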
--- massa-graph-2-exports/src/channels.rs | 2 - massa-graph-2-worker/src/commands.rs | 1 - massa-graph-2-worker/src/controller.rs | 3 +- massa-graph-2-worker/src/manager.rs | 3 +- massa-graph-2-worker/src/state/mod.rs | 16 +++- massa-graph-2-worker/src/state/process.rs | 31 ++++++- massa-graph-2-worker/src/state/stats.rs | 92 +++++++++++++++++++ massa-graph-2-worker/src/worker/init.rs | 15 +-- massa-graph-2-worker/src/worker/main_loop.rs | 27 ++++-- massa-graph-2-worker/src/worker/mod.rs | 26 +++--- .../src/worker/process_commands.rs | 37 -------- massa-graph-2-worker/src/worker/stats.rs | 33 ------- massa-graph-2-worker/src/worker/tick.rs | 31 +------ 13 files changed, 177 insertions(+), 140 deletions(-) create mode 100644 massa-graph-2-worker/src/state/stats.rs delete mode 100644 massa-graph-2-worker/src/worker/process_commands.rs delete mode 100644 massa-graph-2-worker/src/worker/stats.rs diff --git a/massa-graph-2-exports/src/channels.rs b/massa-graph-2-exports/src/channels.rs index 792e14984fc..28e564ac350 100644 --- a/massa-graph-2-exports/src/channels.rs +++ b/massa-graph-2-exports/src/channels.rs @@ -1,5 +1,3 @@ -use std::sync::mpsc::Receiver; - use crossbeam_channel::Sender; use massa_execution_exports::ExecutionController; use massa_pool_exports::PoolController; diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs index 584f4b553d3..2f690a0cbf6 100644 --- a/massa-graph-2-worker/src/commands.rs +++ b/massa-graph-2-worker/src/commands.rs @@ -10,5 +10,4 @@ pub enum GraphCommand { RegisterBlock(BlockId, Slot, Storage), RegisterBlockHeader(BlockId, Wrapped), MarkInvalidBlock(BlockId, Wrapped), - Stop, } diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index 8704b973803..69aa24776c6 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -95,8 +95,7 @@ impl GraphController for GraphControllerImpl { } fn get_stats(&self) -> GraphResult { - //TODO: Get the stats here - todo!() + self.shared_state.read().get_stats() } fn get_best_parents(&self) -> Vec<(BlockId, u64)> { diff --git a/massa-graph-2-worker/src/manager.rs b/massa-graph-2-worker/src/manager.rs index 03e7ca12dae..52da5097114 100644 --- a/massa-graph-2-worker/src/manager.rs +++ b/massa-graph-2-worker/src/manager.rs @@ -12,7 +12,8 @@ pub struct GraphManagerImpl { impl GraphManager for GraphManagerImpl { fn stop(&mut self) { info!("stopping graph worker..."); - let _ = self.graph_command_sender.send(GraphCommand::Stop); + //TODO: Stop graph command sender + //drop(self.graph_command_sender); // join the graph thread if let Some(join_handle) = self.thread_graph.take() { join_handle diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-graph-2-worker/src/state/mod.rs index c37210a335e..26b75894235 100644 --- a/massa-graph-2-worker/src/state/mod.rs +++ b/massa-graph-2-worker/src/state/mod.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use massa_graph::error::{GraphError, GraphResult}; use massa_graph_2_exports::{ @@ -16,9 +16,11 @@ use massa_models::{ slot::Slot, }; use massa_storage::Storage; +use massa_time::MassaTime; mod process; mod process_commands; +mod stats; mod verifications; #[derive(Clone)] @@ -62,6 +64,18 @@ pub struct GraphState { pub new_final_blocks: PreHashSet, /// Newly stale block mapped to creator and slot pub new_stale_blocks: PreHashMap, + /// time at which the node was launched (used for desynchronization detection) + pub launch_time: 
MassaTime, + /// Final block stats `(time, creator, is_from_protocol)` + pub final_block_stats: VecDeque<(MassaTime, Address, bool)>, + /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` + pub protocol_blocks: VecDeque<(MassaTime, BlockId)>, + /// Stale block timestamp + pub stale_block_stats: VecDeque, + /// the time span considered for stats + pub stats_history_timespan: MassaTime, + /// the time span considered for desynchronization detection + pub stats_desync_detection_timespan: MassaTime, } impl GraphState { diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index 5c9fdc3aa0a..5a850e55a58 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -6,7 +6,7 @@ use massa_logging::massa_trace; use massa_models::{ active_block::ActiveBlock, address::Address, - block::BlockId, + block::{BlockId, WrappedHeader}, clique::Clique, prehash::{PreHashMap, PreHashSet}, slot::Slot, @@ -938,6 +938,35 @@ impl GraphState { Ok(()) } + /// Mark a block as invalid + pub fn mark_invalid_block( + &mut self, + block_id: &BlockId, + header: WrappedHeader, + ) -> Result<(), GraphError> { + let reason = DiscardReason::Invalid("invalid".to_string()); + self.maybe_note_attack_attempt(&reason, block_id); + massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); + + // add to discard + self.block_statuses.insert( + *block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents, + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(*block_id); + + Ok(()) + } + /// Note an attack attempt if the discard reason indicates one. 
    fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) {
        massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason});
diff --git a/massa-graph-2-worker/src/state/stats.rs b/massa-graph-2-worker/src/state/stats.rs
new file mode 100644
index 00000000000..905d298f532
--- /dev/null
+++ b/massa-graph-2-worker/src/state/stats.rs
@@ -0,0 +1,92 @@
+use super::GraphState;
+use massa_graph::error::GraphResult;
+use massa_graph_2_exports::events::GraphEvent;
+use massa_models::stats::ConsensusStats;
+use massa_time::MassaTime;
+use std::cmp::max;
+use tracing::log::warn;
+
+impl GraphState {
+    /// retrieve stats
+    /// Used in response to an API request
+    pub fn get_stats(&self) -> GraphResult<ConsensusStats> {
+        let timespan_end = max(
+            self.launch_time,
+            MassaTime::now(self.config.clock_compensation_millis)?,
+        );
+        let timespan_start = max(
+            timespan_end.saturating_sub(self.config.stats_timespan),
+            self.launch_time,
+        );
+        let final_block_count = self
+            .final_block_stats
+            .iter()
+            .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end)
+            .count() as u64;
+        let stale_block_count = self
+            .stale_block_stats
+            .iter()
+            .filter(|t| **t >= timespan_start && **t < timespan_end)
+            .count() as u64;
+        let clique_count = self.get_clique_count() as u64;
+        Ok(ConsensusStats {
+            final_block_count,
+            stale_block_count,
+            clique_count,
+            start_timespan: timespan_start,
+            end_timespan: timespan_end,
+        })
+    }
+
+    pub fn stats_tick(&mut self) -> GraphResult<()> {
+        let now = MassaTime::now(self.config.clock_compensation_millis)?;
+
+        // check whether any recent final blocks came from protocol;
+        // if none, we are probably desynchronized
+        #[cfg(not(feature = "sandbox"))]
+        if now
+            > max(self.config.genesis_timestamp, self.launch_time)
+                .saturating_add(self.stats_desync_detection_timespan)
+            && !self
+                .final_block_stats
+                .iter()
+                .any(|(time, _, is_from_protocol)| {
+                    time > &now.saturating_sub(self.stats_desync_detection_timespan)
+                        && *is_from_protocol
+                })
+        {
+            warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node");
+            let _ = self.channels.controller_event_tx.send(GraphEvent::NeedSync);
+        }
+        // prune stats
+        self.prune_stats()?;
+        Ok(())
+    }
+
+    pub fn prune_stats(&mut self) -> GraphResult<()> {
+        let start_time = MassaTime::now(self.config.clock_compensation_millis)?
+ .saturating_sub(self.stats_history_timespan); + while let Some((t, _, _)) = self.final_block_stats.front() { + if t < &start_time { + self.final_block_stats.pop_front(); + } else { + break; + } + } + while let Some(t) = self.stale_block_stats.front() { + if t < &start_time { + self.stale_block_stats.pop_front(); + } else { + break; + } + } + while let Some((t, _)) = self.protocol_blocks.front() { + if t < &start_time { + self.protocol_blocks.pop_front(); + } else { + break; + } + } + Ok(()) + } +} diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 6956b319ec1..63e3dc337b3 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -168,10 +168,6 @@ impl GraphWorker { )) } - // desync detection timespan - let stats_desync_detection_timespan = - config.t0.checked_mul(config.periods_per_cycle * 2)?; - let mut res_graph = GraphWorker { config: config.clone(), command_receiver, @@ -181,15 +177,6 @@ impl GraphWorker { next_slot, next_instant, wishlist: Default::default(), - final_block_stats, - protocol_blocks: Default::default(), - stale_block_stats: VecDeque::new(), - stats_desync_detection_timespan, - stats_history_timespan: std::cmp::max( - stats_desync_detection_timespan, - config.stats_timespan, - ), - launch_time: MassaTime::now(config.clock_compensation_millis)?, latest_final_periods, prev_blockclique: Default::default(), storage: storage.clone(), @@ -232,6 +219,7 @@ impl GraphWorker { )) }) .collect::>()?; + write_shared_state.final_block_stats = final_block_stats; } res_graph.claim_parent_refs()?; @@ -245,6 +233,7 @@ impl GraphWorker { genesis_block_ids.iter().map(|v| (*v, 0)).collect(); write_shared_state.genesis_hashes = genesis_block_ids; write_shared_state.block_statuses = block_statuses; + write_shared_state.final_block_stats = final_block_stats; } } diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index fafea3a8a06..04bd48cbfc0 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -1,4 +1,9 @@ -use std::{collections::HashMap, mem, sync::mpsc, time::Instant}; +use std::{ + collections::{HashMap, VecDeque}, + mem, + sync::mpsc, + time::Instant, +}; use massa_graph::error::GraphResult; use massa_graph_2_exports::block_status::BlockStatus; @@ -49,9 +54,12 @@ impl GraphWorker { } self.block_db_changed() } - _ => { + GraphCommand::MarkInvalidBlock(block_id, header) => { + { + let mut write_shared_state = self.shared_state.write(); + write_shared_state.mark_invalid_block(&block_id, header)?; + } Ok(()) - // TODO } } } @@ -239,6 +247,7 @@ impl GraphWorker { let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; let finalized_blocks = mem::take(&mut write_shared_state.new_final_blocks); let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); + let mut final_block_stats = VecDeque::with_capacity(finalized_blocks.len()); for b_id in finalized_blocks { if let Some(BlockStatus::Active { a_block, @@ -249,24 +258,27 @@ impl GraphWorker { final_block_slots.insert(a_block.slot, b_id); // add to stats - let block_is_from_protocol = self + let block_is_from_protocol = write_shared_state .protocol_blocks .iter() .any(|(_, block_id)| block_id == &b_id); - self.final_block_stats.push_back(( + final_block_stats.push_back(( timestamp, a_block.creator_address, block_is_from_protocol, )); } } + write_shared_state + .final_block_stats + .extend(final_block_stats); 
// add stale blocks to stats let new_stale_block_ids_creators_slots = mem::take(&mut write_shared_state.new_stale_blocks); let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { - self.stale_block_stats.push_back(timestamp); + write_shared_state.stale_block_stats.push_back(timestamp); } final_block_slots }; @@ -345,9 +357,6 @@ impl GraphWorker { if let Err(err) = self.slot_tick(self.next_slot) { warn!("Error while processing block tick: {}", err); } - if let Err(err) = self.stats_tick() { - warn!("Error while processing stats tick: {}", err); - } (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); } WaitingStatus::Disconnected => { diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index c27346262d7..2914670b90c 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -31,20 +31,8 @@ pub struct GraphWorker { next_instant: Instant, /// blocks we want wishlist: PreHashMap>, - /// Final block stats `(time, creator, is_from_protocol)` - final_block_stats: VecDeque<(MassaTime, Address, bool)>, - /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` - protocol_blocks: VecDeque<(MassaTime, BlockId)>, - /// Stale block timestamp - stale_block_stats: VecDeque, - /// the time span considered for stats - stats_history_timespan: MassaTime, - /// the time span considered for desynchronization detection - stats_desync_detection_timespan: MassaTime, /// save latest final periods latest_final_periods: Vec, - /// time at which the node was launched (used for desynchronization detection) - launch_time: MassaTime, /// previous blockclique notified to Execution prev_blockclique: PreHashMap, /// Shared storage, @@ -53,8 +41,6 @@ pub struct GraphWorker { mod init; mod main_loop; -mod process_commands; -mod stats; mod tick; pub fn start_graph_worker( @@ -64,6 +50,9 @@ pub fn start_graph_worker( storage: Storage, ) -> (Box, Box) { let (tx, rx) = mpsc::sync_channel(10); + // desync detection timespan + let stats_desync_detection_timespan = + config.t0.checked_mul(config.periods_per_cycle * 2).unwrap(); let shared_state = Arc::new(RwLock::new(GraphState { storage: storage.clone(), config: config.clone(), @@ -88,6 +77,15 @@ pub fn start_graph_worker( block_statuses: Default::default(), genesis_hashes: Default::default(), gi_head: Default::default(), + final_block_stats: Default::default(), + stale_block_stats: Default::default(), + protocol_blocks: Default::default(), + launch_time: MassaTime::now(config.clock_compensation_millis).unwrap(), + stats_desync_detection_timespan, + stats_history_timespan: std::cmp::max( + stats_desync_detection_timespan, + config.stats_timespan, + ), })); let shared_state_cloned = shared_state.clone(); diff --git a/massa-graph-2-worker/src/worker/process_commands.rs b/massa-graph-2-worker/src/worker/process_commands.rs deleted file mode 100644 index 3aa001294ec..00000000000 --- a/massa-graph-2-worker/src/worker/process_commands.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::cmp::max; - -use super::GraphWorker; -use massa_graph::error::GraphResult; -use massa_models::stats::ConsensusStats; -use massa_time::MassaTime; - -impl GraphWorker { - pub fn get_stats(&self) -> GraphResult { - let timespan_end = max( - self.launch_time, - MassaTime::now(self.config.clock_compensation_millis)?, - ); - let timespan_start = max( - 
timespan_end.saturating_sub(self.config.stats_timespan), - self.launch_time, - ); - let final_block_count = self - .final_block_stats - .iter() - .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end) - .count() as u64; - let stale_block_count = self - .stale_block_stats - .iter() - .filter(|t| **t >= timespan_start && **t < timespan_end) - .count() as u64; - let clique_count = self.shared_state.read().get_clique_count() as u64; - Ok(ConsensusStats { - final_block_count, - stale_block_count, - clique_count, - start_timespan: timespan_start, - end_timespan: timespan_end, - }) - } -} diff --git a/massa-graph-2-worker/src/worker/stats.rs b/massa-graph-2-worker/src/worker/stats.rs deleted file mode 100644 index 31d7e6a40b3..00000000000 --- a/massa-graph-2-worker/src/worker/stats.rs +++ /dev/null @@ -1,33 +0,0 @@ -use massa_graph::error::GraphResult; -use massa_time::MassaTime; - -use super::GraphWorker; - -impl GraphWorker { - pub fn prune_stats(&mut self) -> GraphResult<()> { - let start_time = MassaTime::now(self.config.clock_compensation_millis)? - .saturating_sub(self.stats_history_timespan); - while let Some((t, _, _)) = self.final_block_stats.front() { - if t < &start_time { - self.final_block_stats.pop_front(); - } else { - break; - } - } - while let Some(t) = self.stale_block_stats.front() { - if t < &start_time { - self.stale_block_stats.pop_front(); - } else { - break; - } - } - while let Some((t, _)) = self.protocol_blocks.front() { - if t < &start_time { - self.protocol_blocks.pop_front(); - } else { - break; - } - } - Ok(()) - } -} diff --git a/massa-graph-2-worker/src/worker/tick.rs b/massa-graph-2-worker/src/worker/tick.rs index 43b17bbccba..b7dea2b691d 100644 --- a/massa-graph-2-worker/src/worker/tick.rs +++ b/massa-graph-2-worker/src/worker/tick.rs @@ -9,31 +9,6 @@ use tracing::{info, log::warn}; use super::GraphWorker; impl GraphWorker { - pub fn stats_tick(&mut self) -> GraphResult<()> { - let now = MassaTime::now(self.config.clock_compensation_millis)?; - - // check if there are any final blocks is coming from protocol - // if none => we are probably desync - #[cfg(not(feature = "sandbox"))] - if now - > max(self.config.genesis_timestamp, self.launch_time) - .saturating_add(self.stats_desync_detection_timespan) - && !self - .final_block_stats - .iter() - .any(|(time, _, is_from_protocol)| { - time > &now.saturating_sub(self.stats_desync_detection_timespan) - && *is_from_protocol - }) - { - warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); - let _ = self.channels.controller_event_tx.send(GraphEvent::NeedSync); - } - // prune stats - self.prune_stats()?; - Ok(()) - } - pub fn slot_tick(&mut self, slot: Slot) -> GraphResult<()> { massa_trace!("consensus.consensus_worker.slot_tick", { "slot": slot }); @@ -50,7 +25,11 @@ impl GraphWorker { } // signal tick to block graph - self.shared_state.write().slot_tick(Some(slot))?; + { + let mut write_shared_state = self.shared_state.write(); + write_shared_state.slot_tick(Some(slot))?; + write_shared_state.stats_tick()?; + } // take care of block db changes self.block_db_changed()?; From 54db8d332b41eaa6051e6858b945ecf1f93840f4 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 12 Oct 2022 23:14:16 +0200 Subject: [PATCH 16/40] Move code from worker to state. 
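This commit centralizes mutation in `GraphState`: `notify_execution`, `block_db_changed`, and `slot_tick` move off the worker so that everything they touch lives behind the shared lock, and the worker merely takes a short-lived write guard per command. The controller and worker share the state through `Arc<RwLock<...>>` (parking_lot, which the workspace already uses). A minimal sketch of that split, with a toy `Stats` struct standing in for `GraphState` and names that are illustrative only:

    use parking_lot::RwLock;
    use std::sync::Arc;
    use std::thread;

    #[derive(Default)]
    struct Stats {
        final_blocks: u64,
    }

    fn main() {
        let shared = Arc::new(RwLock::new(Stats::default()));

        // Worker side: take the write lock briefly for each mutation,
        // as `manage_command` does with `self.shared_state.write()`.
        let worker_state = Arc::clone(&shared);
        let worker = thread::spawn(move || {
            for _ in 0..10 {
                let mut guard = worker_state.write();
                guard.final_blocks += 1;
            } // guard dropped here, releasing the lock between commands
        });

        worker.join().unwrap();

        // Controller side: read without blocking other readers, which is
        // what lets `get_stats` answer API calls straight from shared state.
        assert_eq!(shared.read().final_blocks, 10);
    }

Keeping each write-guard scope to a single command avoids holding the lock across channel waits and lets API readers slip in between commands.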
--- massa-graph-2-worker/src/state/mod.rs | 3 + massa-graph-2-worker/src/state/process.rs | 243 ++++++++++++++-- massa-graph-2-worker/src/state/tick.rs | 55 ++++ massa-graph-2-worker/src/worker/init.rs | 9 +- massa-graph-2-worker/src/worker/main_loop.rs | 278 ++----------------- massa-graph-2-worker/src/worker/mod.rs | 10 +- massa-graph-2-worker/src/worker/tick.rs | 39 --- 7 files changed, 296 insertions(+), 341 deletions(-) create mode 100644 massa-graph-2-worker/src/state/tick.rs delete mode 100644 massa-graph-2-worker/src/worker/tick.rs diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-graph-2-worker/src/state/mod.rs index 26b75894235..2c83ab7bcc7 100644 --- a/massa-graph-2-worker/src/state/mod.rs +++ b/massa-graph-2-worker/src/state/mod.rs @@ -21,6 +21,7 @@ use massa_time::MassaTime; mod process; mod process_commands; mod stats; +mod tick; mod verifications; #[derive(Clone)] @@ -76,6 +77,8 @@ pub struct GraphState { pub stats_history_timespan: MassaTime, /// the time span considered for desynchronization detection pub stats_desync_detection_timespan: MassaTime, + /// blocks we want + pub wishlist: PreHashMap>, } impl GraphState { diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index 5a850e55a58..5ba460cf27a 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -1,4 +1,7 @@ -use std::collections::{BTreeSet, VecDeque}; +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + mem, +}; use massa_graph::error::{GraphError, GraphResult}; use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason, HeaderOrBlock}; @@ -13,6 +16,7 @@ use massa_models::{ }; use massa_signature::PublicKey; use massa_storage::Storage; +use massa_time::MassaTime; use tracing::log::{debug, info}; use crate::state::verifications::HeaderCheckOutcome; @@ -444,7 +448,7 @@ impl GraphState { } /// Computes max cliques of compatible blocks - pub fn compute_max_cliques(&self, read_shared_state: &GraphState) -> Vec> { + pub fn compute_max_cliques(&self) -> Vec> { let mut max_cliques: Vec> = Vec::new(); // algorithm adapted from IK_GPX as summarized in: @@ -459,7 +463,7 @@ impl GraphState { PreHashSet, )> = vec![( PreHashSet::::default(), - read_shared_state.gi_head.keys().cloned().collect(), + self.gi_head.keys().cloned().collect(), PreHashSet::::default(), )]; while let Some((r, mut p, mut x)) = stack.pop() { @@ -472,18 +476,18 @@ impl GraphState { let &u_p = p .union(&x) .max_by_key(|&u| { - p.difference(&(&read_shared_state.gi_head[u] | &vec![*u].into_iter().collect())) + p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) .count() }) .unwrap(); // p was checked to be non-empty before // iterate over u_set = (p /\ Neighbors(u_p, GI)) let u_set: PreHashSet = - &p & &(&read_shared_state.gi_head[&u_p] | &vec![u_p].into_iter().collect()); + &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); for u_i in u_set.into_iter() { p.remove(&u_i); let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); - let comp_n_u_i: PreHashSet = &read_shared_state.gi_head[&u_i] | &u_i_set; + let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); x.insert(u_i); } @@ -611,7 +615,7 @@ impl GraphState { ); let before = self.max_cliques.len(); self.max_cliques = self - .compute_max_cliques(self) + .compute_max_cliques() .into_iter() .map(|c| Clique { block_ids: c, @@ -987,7 +991,6 @@ impl GraphState { pub fn 
get_active_block_and_descendants( &self, block_id: &BlockId, - read_shared_state: &GraphState, ) -> GraphResult> { let mut to_visit = vec![*block_id]; let mut result = PreHashSet::::default(); @@ -995,7 +998,7 @@ impl GraphState { if !result.insert(visit_h) { continue; // already visited } - match read_shared_state.block_statuses.get(&visit_h) { + match self.block_statuses.get(&visit_h) { Some(BlockStatus::Active { a_block, .. }) => { a_block.as_ref() .children.iter() @@ -1007,28 +1010,216 @@ impl GraphState { Ok(result) } - /// signal new slot - pub fn slot_tick(&mut self, current_slot: Option) -> GraphResult<()> { - // list all elements for which the time has come - let to_process: BTreeSet<(Slot, BlockId)> = self - .waiting_for_slot_index + /// Notify execution about blockclique changes and finalized blocks. + /// + /// # Arguments: + /// * `finalized_blocks`: Block that became final and need to be send to execution + fn notify_execution(&mut self, finalized_blocks: HashMap) { + // List new block storage instances that Execution doesn't know about. + // That's blocks that have not been sent to execution before, ie. in the previous blockclique). + let mut new_blocks_storage: PreHashMap = finalized_blocks .iter() - .filter_map(|b_id| match self.block_statuses.get(b_id) { - Some(BlockStatus::WaitingForSlot(header_or_block)) => { - let slot = header_or_block.get_slot(); - if Some(slot) <= current_slot { - Some((slot, *b_id)) - } else { - None - } + .filter_map(|(_slot, b_id)| { + if self.prev_blockclique.contains_key(b_id) { + // was previously sent as a blockclique element + return None; + } + let storage = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { + a_block: _, + storage, + }) => storage, + _ => panic!("final block not found in active blocks"), + }; + Some((*b_id, storage.clone())) + }) + .collect(); + + // Get new blockclique block list with slots. + let mut blockclique_changed = false; + let new_blockclique: PreHashMap = self + .get_blockclique() + .iter() + .map(|b_id| { + if let Some(slot) = self.prev_blockclique.remove(b_id) { + // The block was already sent in the previous blockclique: + // the slot can be gathered from there without locking Storage. + // Note: the block is removed from self.prev_blockclique. + (*b_id, slot) + } else { + // The block was not present in the previous blockclique: + // the blockclique has changed => get the block's slot by querying Storage. + blockclique_changed = true; + let (slot, storage) = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, storage }) => (a_block.slot, storage), + _ => panic!("blockclique block not found in active blocks"), + }; + new_blocks_storage.insert(*b_id, storage.clone()); + (*b_id, slot) } - _ => None, }) .collect(); + if !self.prev_blockclique.is_empty() { + // All elements present in the new blockclique have been removed from `prev_blockclique` above. + // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore. + // In that case, we mark the blockclique as having changed. + blockclique_changed = true; + } + // Overwrite previous blockclique. + // Should still be done even if unchanged because elements were removed from it above. + self.prev_blockclique = new_blockclique.clone(); + + if finalized_blocks.is_empty() && !blockclique_changed { + // There are no changes (neither block finalizations not blockclique changes) to send to execution. 
+ return; + } + + // Notify execution of block finalizations and blockclique changes + self.channels + .execution_controller + .update_blockclique_status( + finalized_blocks, + if blockclique_changed { + Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect()) + } else { + None + }, + new_blocks_storage, + ); + } + + /// call me if the block database changed + /// Processing of final blocks, pruning. + /// + /// 1. propagate blocks + /// 2. Notify of attack attempts + /// 3. get new final blocks + /// 4. get blockclique + /// 5. notify Execution + /// 6. Process new final blocks + /// 7. Notify pool of new final ops + /// 8. Notify PoS of final blocks + /// 9. notify protocol of block wish list + /// 10. note new latest final periods (prune graph if changed) + /// 11. add stale blocks to stats + pub fn block_db_changed(&mut self) -> GraphResult<()> { + let final_block_slots = { + massa_trace!("consensus.consensus_worker.block_db_changed", {}); - massa_trace!("consensus.block_graph.slot_tick", {}); - // process those elements - self.rec_process(to_process, current_slot)?; + // Propagate new blocks + for (block_id, storage) in mem::take(&mut self.to_propagate).into_iter() { + massa_trace!("consensus.consensus_worker.block_db_changed.integrated", { + "block_id": block_id + }); + self.channels + .protocol_command_sender + .integrated_block(block_id, storage)?; + } + + // Notify protocol of attack attempts. + for hash in mem::take(&mut self.attack_attempts).into_iter() { + self.channels + .protocol_command_sender + .notify_block_attack(hash)?; + massa_trace!("consensus.consensus_worker.block_db_changed.attack", { + "hash": hash + }); + } + + // manage finalized blocks + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + let finalized_blocks = mem::take(&mut self.new_final_blocks); + let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); + let mut final_block_stats = VecDeque::with_capacity(finalized_blocks.len()); + for b_id in finalized_blocks { + if let Some(BlockStatus::Active { + a_block, + storage: _, + }) = self.block_statuses.get(&b_id) + { + // add to final blocks to notify execution + final_block_slots.insert(a_block.slot, b_id); + + // add to stats + let block_is_from_protocol = self + .protocol_blocks + .iter() + .any(|(_, block_id)| block_id == &b_id); + final_block_stats.push_back(( + timestamp, + a_block.creator_address, + block_is_from_protocol, + )); + } + } + self.final_block_stats.extend(final_block_stats); + + // add stale blocks to stats + let new_stale_block_ids_creators_slots = mem::take(&mut self.new_stale_blocks); + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { + self.stale_block_stats.push_back(timestamp); + } + final_block_slots + }; + + // notify execution + self.notify_execution(final_block_slots); + + // notify protocol of block wishlist + let new_wishlist = self.get_block_wishlist()?; + let new_blocks: PreHashMap> = new_wishlist + .iter() + .filter_map(|(id, header)| { + if !self.wishlist.contains_key(id) { + Some((*id, header.clone())) + } else { + None + } + }) + .collect(); + let remove_blocks: PreHashSet = self + .wishlist + .iter() + .filter_map(|(id, _)| { + if !new_wishlist.contains_key(id) { + Some(*id) + } else { + None + } + }) + .collect(); + if !new_blocks.is_empty() || !remove_blocks.is_empty() { + massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": 
new_wishlist, "remove": remove_blocks }); + self.channels + .protocol_command_sender + .send_wishlist_delta(new_blocks, remove_blocks)?; + self.wishlist = new_wishlist; + } + + // note new latest final periods + let latest_final_periods: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(_block_id, period)| *period) + .collect(); + // if changed... + if self.latest_final_periods != latest_final_periods { + // signal new last final periods to pool + self.channels + .pool_command_sender + .notify_final_cs_periods(&latest_final_periods); + // update final periods + self.latest_final_periods = latest_final_periods; + } + + /* + TODO add this again + let creator_addr = Address::from_public_key(&b_creator); + if self.staking_keys.contains_key(&creator_addr) { + warn!("block {} that was produced by our address {} at slot {} became stale. This is probably due to a temporary desynchronization.", b_id, creator_addr, b_slot); + } + */ Ok(()) } diff --git a/massa-graph-2-worker/src/state/tick.rs b/massa-graph-2-worker/src/state/tick.rs new file mode 100644 index 00000000000..cc8de6e199b --- /dev/null +++ b/massa-graph-2-worker/src/state/tick.rs @@ -0,0 +1,55 @@ +use std::collections::BTreeSet; + +use massa_graph::error::GraphResult; +use massa_graph_2_exports::block_status::BlockStatus; +use massa_logging::massa_trace; +use massa_models::{block::BlockId, slot::Slot}; +use tracing::info; + +use super::GraphState; + +impl GraphState { + pub fn slot_tick(&mut self, actual_slot: Slot) -> GraphResult<()> { + massa_trace!("consensus.consensus_worker.slot_tick", { + "slot": actual_slot + }); + + let previous_cycle = self + .previous_slot + .map(|s| s.get_cycle(self.config.periods_per_cycle)); + let observed_cycle = actual_slot.get_cycle(self.config.periods_per_cycle); + if previous_cycle.is_none() { + // first cycle observed + info!("Massa network has started ! 
🎉") + } + if previous_cycle < Some(observed_cycle) { + info!("Started cycle {}", observed_cycle); + } + // list all elements for which the time has come + let to_process: BTreeSet<(Slot, BlockId)> = self + .waiting_for_slot_index + .iter() + .filter_map(|b_id| match self.block_statuses.get(b_id) { + Some(BlockStatus::WaitingForSlot(header_or_block)) => { + let slot = header_or_block.get_slot(); + if slot <= actual_slot { + Some((slot, *b_id)) + } else { + None + } + } + _ => None, + }) + .collect(); + + massa_trace!("consensus.block_graph.slot_tick", {}); + // process those elements + self.rec_process(to_process, Some(actual_slot))?; + + self.stats_tick()?; + // take care of block db changes + self.block_db_changed()?; + + Ok(()) + } +} diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 63e3dc337b3..88bec82afbb 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -135,12 +135,7 @@ impl GraphWorker { next_slot.period, next_slot.thread, ); - let latest_final_periods: Vec = shared_state - .read() - .latest_final_blocks_periods - .iter() - .map(|(_block_id, period)| *period) - .collect(); + if config.genesis_timestamp > now { let (days, hours, mins, secs) = config .genesis_timestamp @@ -176,8 +171,6 @@ impl GraphWorker { previous_slot, next_slot, next_instant, - wishlist: Default::default(), - latest_final_periods, prev_blockclique: Default::default(), storage: storage.clone(), }; diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index 04bd48cbfc0..d87801be326 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -1,20 +1,10 @@ -use std::{ - collections::{HashMap, VecDeque}, - mem, - sync::mpsc, - time::Instant, -}; +use std::{sync::mpsc, time::Instant}; use massa_graph::error::GraphResult; -use massa_graph_2_exports::block_status::BlockStatus; -use massa_logging::massa_trace; use massa_models::{ - block::{BlockId, WrappedHeader}, - prehash::{PreHashMap, PreHashSet}, slot::Slot, timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, }; -use massa_storage::Storage; use massa_time::MassaTime; use tracing::log::warn; @@ -30,36 +20,23 @@ enum WaitingStatus { impl GraphWorker { fn manage_command(&mut self, command: GraphCommand) -> GraphResult<()> { + let mut write_shared_state = self.shared_state.write(); match command { GraphCommand::RegisterBlockHeader(block_id, header) => { - { - let mut write_shared_state = self.shared_state.write(); - write_shared_state.register_block_header( - block_id, - header, - self.previous_slot, - )?; - } - self.block_db_changed() + write_shared_state.register_block_header(block_id, header, self.previous_slot)?; + write_shared_state.block_db_changed() } GraphCommand::RegisterBlock(block_id, slot, block_storage) => { - { - let mut write_shared_state = self.shared_state.write(); - write_shared_state.register_block( - block_id, - slot, - self.previous_slot, - block_storage, - )?; - } - self.block_db_changed() + write_shared_state.register_block( + block_id, + slot, + self.previous_slot, + block_storage, + )?; + write_shared_state.block_db_changed() } GraphCommand::MarkInvalidBlock(block_id, header) => { - { - let mut write_shared_state = self.shared_state.write(); - write_shared_state.mark_invalid_block(&block_id, header)?; - } - Ok(()) + write_shared_state.mark_invalid_block(&block_id, header) } } } @@ -125,228 +102,6 @@ impl GraphWorker { 
(next_slot, next_instant) } - /// Notify execution about blockclique changes and finalized blocks. - /// - /// # Arguments: - /// * `finalized_blocks`: Block that became final and need to be send to execution - fn notify_execution(&mut self, finalized_blocks: HashMap) { - let read_shared_state = self.shared_state.read(); - // List new block storage instances that Execution doesn't know about. - // That's blocks that have not been sent to execution before, ie. in the previous blockclique). - let mut new_blocks_storage: PreHashMap = finalized_blocks - .iter() - .filter_map(|(_slot, b_id)| { - if self.prev_blockclique.contains_key(b_id) { - // was previously sent as a blockclique element - return None; - } - let storage = match read_shared_state.block_statuses.get(b_id) { - Some(BlockStatus::Active { - a_block: _, - storage, - }) => storage, - _ => panic!("final block not found in active blocks"), - }; - Some((*b_id, storage.clone())) - }) - .collect(); - - // Get new blockclique block list with slots. - let mut blockclique_changed = false; - let new_blockclique: PreHashMap = read_shared_state - .get_blockclique() - .iter() - .map(|b_id| { - if let Some(slot) = self.prev_blockclique.remove(b_id) { - // The block was already sent in the previous blockclique: - // the slot can be gathered from there without locking Storage. - // Note: the block is removed from self.prev_blockclique. - (*b_id, slot) - } else { - // The block was not present in the previous blockclique: - // the blockclique has changed => get the block's slot by querying Storage. - blockclique_changed = true; - let (slot, storage) = match read_shared_state.block_statuses.get(b_id) { - Some(BlockStatus::Active { a_block, storage }) => (a_block.slot, storage), - _ => panic!("blockclique block not found in active blocks"), - }; - new_blocks_storage.insert(*b_id, storage.clone()); - (*b_id, slot) - } - }) - .collect(); - if !self.prev_blockclique.is_empty() { - // All elements present in the new blockclique have been removed from `prev_blockclique` above. - // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore. - // In that case, we mark the blockclique as having changed. - blockclique_changed = true; - } - // Overwrite previous blockclique. - // Should still be done even if unchanged because elements were removed from it above. - self.prev_blockclique = new_blockclique.clone(); - - if finalized_blocks.is_empty() && !blockclique_changed { - // There are no changes (neither block finalizations not blockclique changes) to send to execution. - return; - } - - // Notify execution of block finalizations and blockclique changes - self.channels - .execution_controller - .update_blockclique_status( - finalized_blocks, - if blockclique_changed { - Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect()) - } else { - None - }, - new_blocks_storage, - ); - } - - /// call me if the block database changed - /// Processing of final blocks, pruning. - /// - /// 1. propagate blocks - /// 2. Notify of attack attempts - /// 3. get new final blocks - /// 4. get blockclique - /// 5. notify Execution - /// 6. Process new final blocks - /// 7. Notify pool of new final ops - /// 8. Notify PoS of final blocks - /// 9. notify protocol of block wish list - /// 10. note new latest final periods (prune graph if changed) - /// 11. 
add stale blocks to stats - pub fn block_db_changed(&mut self) -> GraphResult<()> { - let final_block_slots = { - let mut write_shared_state = self.shared_state.write(); - massa_trace!("consensus.consensus_worker.block_db_changed", {}); - - // Propagate new blocks - for (block_id, storage) in mem::take(&mut write_shared_state.to_propagate).into_iter() { - massa_trace!("consensus.consensus_worker.block_db_changed.integrated", { - "block_id": block_id - }); - self.channels - .protocol_command_sender - .integrated_block(block_id, storage)?; - } - - // Notify protocol of attack attempts. - for hash in mem::take(&mut write_shared_state.attack_attempts).into_iter() { - self.channels - .protocol_command_sender - .notify_block_attack(hash)?; - massa_trace!("consensus.consensus_worker.block_db_changed.attack", { - "hash": hash - }); - } - - // manage finalized blocks - let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; - let finalized_blocks = mem::take(&mut write_shared_state.new_final_blocks); - let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); - let mut final_block_stats = VecDeque::with_capacity(finalized_blocks.len()); - for b_id in finalized_blocks { - if let Some(BlockStatus::Active { - a_block, - storage: _, - }) = write_shared_state.block_statuses.get(&b_id) - { - // add to final blocks to notify execution - final_block_slots.insert(a_block.slot, b_id); - - // add to stats - let block_is_from_protocol = write_shared_state - .protocol_blocks - .iter() - .any(|(_, block_id)| block_id == &b_id); - final_block_stats.push_back(( - timestamp, - a_block.creator_address, - block_is_from_protocol, - )); - } - } - write_shared_state - .final_block_stats - .extend(final_block_stats); - - // add stale blocks to stats - let new_stale_block_ids_creators_slots = - mem::take(&mut write_shared_state.new_stale_blocks); - let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; - for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { - write_shared_state.stale_block_stats.push_back(timestamp); - } - final_block_slots - }; - - // notify execution - self.notify_execution(final_block_slots); - - // notify protocol of block wishlist - { - let read_shared_state = self.shared_state.read(); - let new_wishlist = read_shared_state.get_block_wishlist()?; - let new_blocks: PreHashMap> = new_wishlist - .iter() - .filter_map(|(id, header)| { - if !self.wishlist.contains_key(id) { - Some((*id, header.clone())) - } else { - None - } - }) - .collect(); - let remove_blocks: PreHashSet = self - .wishlist - .iter() - .filter_map(|(id, _)| { - if !new_wishlist.contains_key(id) { - Some(*id) - } else { - None - } - }) - .collect(); - if !new_blocks.is_empty() || !remove_blocks.is_empty() { - massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); - self.channels - .protocol_command_sender - .send_wishlist_delta(new_blocks, remove_blocks)?; - self.wishlist = new_wishlist; - } - - // note new latest final periods - let latest_final_periods: Vec = read_shared_state - .latest_final_blocks_periods - .iter() - .map(|(_block_id, period)| *period) - .collect(); - // if changed... 
- if self.latest_final_periods != latest_final_periods { - // signal new last final periods to pool - self.channels - .pool_command_sender - .notify_final_cs_periods(&latest_final_periods); - // update final periods - self.latest_final_periods = latest_final_periods; - } - }; - - /* - TODO add this again - let creator_addr = Address::from_public_key(&b_creator); - if self.staking_keys.contains_key(&creator_addr) { - warn!("block {} that was produced by our address {} at slot {} became stale. This is probably due to a temporary desynchronization.", b_id, creator_addr, b_slot); - } - */ - - Ok(()) - } - /// Runs in loop forever. This loop must stop every slot to perform operations on stats and graph /// but can be stopped anytime by a command received. pub fn run(&mut self) { @@ -354,9 +109,12 @@ impl GraphWorker { match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { self.previous_slot = Some(self.next_slot); - if let Err(err) = self.slot_tick(self.next_slot) { - warn!("Error while processing block tick: {}", err); - } + { + let mut write_shared_state = self.shared_state.write(); + if let Err(err) = write_shared_state.slot_tick(self.next_slot) { + warn!("Error while processing block tick: {}", err); + } + }; (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); } WaitingStatus::Disconnected => { diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 2914670b90c..e668357ac32 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -1,14 +1,12 @@ use massa_graph::BootstrapableGraph; use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager}; -use massa_models::address::Address; -use massa_models::block::{BlockId, WrappedHeader}; +use massa_models::block::BlockId; use massa_models::clique::Clique; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::slot::Slot; use massa_storage::Storage; use massa_time::MassaTime; use parking_lot::RwLock; -use std::collections::VecDeque; use std::sync::{mpsc, Arc}; use std::thread; use std::time::Instant; @@ -29,10 +27,6 @@ pub struct GraphWorker { next_slot: Slot, /// Next slot instant next_instant: Instant, - /// blocks we want - wishlist: PreHashMap>, - /// save latest final periods - latest_final_periods: Vec, /// previous blockclique notified to Execution prev_blockclique: PreHashMap, /// Shared storage, @@ -41,7 +35,6 @@ pub struct GraphWorker { mod init; mod main_loop; -mod tick; pub fn start_graph_worker( config: GraphConfig, @@ -80,6 +73,7 @@ pub fn start_graph_worker( final_block_stats: Default::default(), stale_block_stats: Default::default(), protocol_blocks: Default::default(), + wishlist: Default::default(), launch_time: MassaTime::now(config.clock_compensation_millis).unwrap(), stats_desync_detection_timespan, stats_history_timespan: std::cmp::max( diff --git a/massa-graph-2-worker/src/worker/tick.rs b/massa-graph-2-worker/src/worker/tick.rs deleted file mode 100644 index b7dea2b691d..00000000000 --- a/massa-graph-2-worker/src/worker/tick.rs +++ /dev/null @@ -1,39 +0,0 @@ -use massa_graph::error::GraphResult; -use massa_graph_2_exports::events::GraphEvent; -use massa_logging::massa_trace; -use massa_models::slot::Slot; -use massa_time::MassaTime; -use std::cmp::max; -use tracing::{info, log::warn}; - -use super::GraphWorker; - -impl GraphWorker { - pub fn slot_tick(&mut self, slot: Slot) -> GraphResult<()> { - massa_trace!("consensus.consensus_worker.slot_tick", { 
"slot": slot }); - - let previous_cycle = self - .previous_slot - .map(|s| s.get_cycle(self.config.periods_per_cycle)); - let observed_cycle = slot.get_cycle(self.config.periods_per_cycle); - if previous_cycle.is_none() { - // first cycle observed - info!("Massa network has started ! 🎉") - } - if previous_cycle < Some(observed_cycle) { - info!("Started cycle {}", observed_cycle); - } - - // signal tick to block graph - { - let mut write_shared_state = self.shared_state.write(); - write_shared_state.slot_tick(Some(slot))?; - write_shared_state.stats_tick()?; - } - - // take care of block db changes - self.block_db_changed()?; - - Ok(()) - } -} From bbd8d4380b0e9848dfeb383ca126b19d8bf0c9bd Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 13 Oct 2022 11:42:58 +0200 Subject: [PATCH 17/40] Move graph functions in a specific file. --- massa-graph-2-worker/src/state/graph.rs | 360 +++++++++++++++ massa-graph-2-worker/src/state/mod.rs | 52 ++- massa-graph-2-worker/src/state/process.rs | 411 +----------------- .../src/state/process_commands.rs | 33 +- massa-graph-2-worker/src/state/tick.rs | 12 - .../src/state/verifications.rs | 6 +- massa-graph-2-worker/src/worker/init.rs | 48 +- massa-graph-2-worker/src/worker/main_loop.rs | 16 +- massa-graph-2-worker/src/worker/mod.rs | 20 +- 9 files changed, 472 insertions(+), 486 deletions(-) create mode 100644 massa-graph-2-worker/src/state/graph.rs diff --git a/massa-graph-2-worker/src/state/graph.rs b/massa-graph-2-worker/src/state/graph.rs new file mode 100644 index 00000000000..952b0ce2aaa --- /dev/null +++ b/massa-graph-2-worker/src/state/graph.rs @@ -0,0 +1,360 @@ +use std::collections::VecDeque; + +use massa_graph::error::GraphError; +use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason}; +use massa_logging::massa_trace; +use massa_models::{block::BlockId, clique::Clique, prehash::PreHashSet, slot::Slot}; + +use super::GraphState; + +impl GraphState { + pub fn insert_parents_descendants( + &mut self, + add_block_id: BlockId, + add_block_slot: Slot, + parents_hash: &Vec, + ) { + // add as child to parents + for parent_h in parents_hash.iter() { + if let Some(BlockStatus::Active { + a_block: a_parent, .. + }) = self.block_statuses.get_mut(parent_h) + { + a_parent.children[add_block_slot.thread as usize] + .insert(add_block_id, add_block_slot.period); + } + } + + // add as descendant to ancestors. Note: descendants are never removed. + let mut ancestors: VecDeque = parents_hash.iter().map(|e| *e).collect(); + let mut visited = PreHashSet::::default(); + while let Some(ancestor_h) = ancestors.pop_back() { + if !visited.insert(ancestor_h) { + continue; + } + if let Some(BlockStatus::Active { a_block: ab, .. 
}) = + self.block_statuses.get_mut(&ancestor_h) + { + ab.descendants.insert(add_block_id); + for (ancestor_parent_h, _) in ab.parents.iter() { + ancestors.push_front(*ancestor_parent_h); + } + } + } + } + + pub fn compute_fitness_find_blockclique( + &mut self, + add_block_id: &BlockId, + ) -> Result { + let mut blockclique_i = 0usize; + let mut max_clique_fitness = (0u64, num::BigInt::default()); + for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { + clique.fitness = 0; + clique.is_blockclique = false; + let mut sum_hash = num::BigInt::default(); + for block_h in clique.block_ids.iter() { + let fitness = match self.block_statuses.get(block_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => a_block.fitness, + _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))), + }; + clique.fitness = clique + .fitness + .checked_add(fitness) + .ok_or(GraphError::FitnessOverflow)?; + sum_hash -= num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes()); + } + let cur_fit = (clique.fitness, sum_hash); + if cur_fit > max_clique_fitness { + blockclique_i = clique_i; + max_clique_fitness = cur_fit; + } + } + self.max_cliques[blockclique_i].is_blockclique = true; + Ok(blockclique_i) + } + + pub fn list_stale_blocks(&self, fitness_threshold: u64) -> PreHashSet { + // iterate from largest to smallest to minimize reallocations + let mut indices: Vec = (0..self.max_cliques.len()).collect(); + indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); + let mut high_set = PreHashSet::::default(); + let mut low_set = PreHashSet::::default(); + for clique_i in indices.into_iter() { + if self.max_cliques[clique_i].fitness >= fitness_threshold { + high_set.extend(&self.max_cliques[clique_i].block_ids); + } else { + low_set.extend(&self.max_cliques[clique_i].block_ids); + } + } + &low_set - &high_set + } + + pub fn remove_block( + &mut self, + add_block_id: &BlockId, + block_id: &BlockId, + ) -> Result<(), GraphError> { + if let Some(BlockStatus::Active { + a_block: active_block, + storage: _storage, + }) = self.block_statuses.remove(&block_id) + { + self.active_index.remove(&block_id); + if active_block.is_final { + return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, block_id))); + } + + // remove from gi_head + if let Some(other_incomps) = self.gi_head.remove(&block_id) { + for other_incomp in other_incomps.into_iter() { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { + other_incomp_lst.remove(&block_id); + } + } + } + + // remove from cliques + let stale_block_fitness = active_block.fitness; + self.max_cliques.iter_mut().for_each(|c| { + if c.block_ids.remove(&block_id) { + c.fitness -= stale_block_fitness; + } + }); + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { + // make sure at least one clique remains + self.max_cliques = vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }]; + } + + // remove from parent's children + for (parent_h, _parent_period) in active_block.parents.iter() { + if let Some(BlockStatus::Active { + a_block: parent_active_block, + .. 
+ }) = self.block_statuses.get_mut(parent_h) + { + parent_active_block.children[active_block.slot.thread as usize] + .remove(&block_id); + } + } + + massa_trace!("consensus.block_graph.add_block_to_graph.stale", { + "hash": block_id + }); + + // mark as stale + self.new_stale_blocks + .insert(*block_id, (active_block.creator_address, active_block.slot)); + self.block_statuses.insert( + *block_id, + BlockStatus::Discarded { + slot: active_block.slot, + creator: active_block.creator_address, + parents: active_block.parents.iter().map(|(h, _)| *h).collect(), + reason: DiscardReason::Stale, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(*block_id); + Ok(()) + } else { + return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, block_id))); + } + } + + pub fn list_final_blocks(&self) -> Result, GraphError> { + // short-circuiting intersection of cliques from smallest to largest + let mut indices: Vec = (0..self.max_cliques.len()).collect(); + indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); + let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); + for i in 1..indices.len() { + final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); + if final_candidates.is_empty() { + break; + } + } + + // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest) + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", + {} + ); + indices.retain(|&i| self.max_cliques[i].fitness > self.config.delta_f0); + indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); + + let mut final_blocks = PreHashSet::::default(); + for clique_i in indices.into_iter() { + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks.loop", + { "clique_i": clique_i } + ); + // check in cliques from highest to lowest fitness + if final_candidates.is_empty() { + // no more final candidates + break; + } + let clique = &self.max_cliques[clique_i]; + + // compute the total fitness of all the descendants of the candidate within the clique + let loc_candidates = final_candidates.clone(); + for candidate_h in loc_candidates.into_iter() { + let descendants = match self.block_statuses.get(&candidate_h) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => &a_block.descendants, + _ => { + return Err(GraphError::MissingBlock(format!( + "missing block when computing total fitness of descendants: {}", + candidate_h + ))) + } + }; + let desc_fit: u64 = descendants + .intersection(&clique.block_ids) + .map(|h| { + if let Some(BlockStatus::Active { a_block: ab, .. 
}) = + self.block_statuses.get(h) + { + return ab.fitness; + } + 0 + }) + .sum(); + if desc_fit > self.config.delta_f0 { + // candidate is final + final_candidates.remove(&candidate_h); + final_blocks.insert(candidate_h); + } + } + } + Ok(final_blocks) + } + + /// Computes max cliques of compatible blocks + pub fn compute_max_cliques(&self) -> Vec> { + let mut max_cliques: Vec> = Vec::new(); + + // algorithm adapted from IK_GPX as summarized in: + // Cazals et al., "A note on the problem of reporting maximal cliques" + // Theoretical Computer Science, 2008 + // https://doi.org/10.1016/j.tcs.2008.05.010 + + // stack: r, p, x + let mut stack: Vec<( + PreHashSet, + PreHashSet, + PreHashSet, + )> = vec![( + PreHashSet::::default(), + self.gi_head.keys().cloned().collect(), + PreHashSet::::default(), + )]; + while let Some((r, mut p, mut x)) = stack.pop() { + if p.is_empty() && x.is_empty() { + max_cliques.push(r); + continue; + } + // choose the pivot vertex following the GPX scheme: + // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) + let &u_p = p + .union(&x) + .max_by_key(|&u| { + p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) + .count() + }) + .unwrap(); // p was checked to be non-empty before + + // iterate over u_set = (p /\ Neighbors(u_p, GI)) + let u_set: PreHashSet = + &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); + for u_i in u_set.into_iter() { + p.remove(&u_i); + let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); + let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; + stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); + x.insert(u_i); + } + } + if max_cliques.is_empty() { + // make sure at least one clique remains + max_cliques = vec![PreHashSet::::default()]; + } + max_cliques + } + + /// get the clique of higher fitness + pub fn get_blockclique(&self) -> PreHashSet { + self.max_cliques + .iter() + .find(|c| c.is_blockclique) + .expect("blockclique missing") + .block_ids + .clone() + } + + pub fn mark_final_blocks( + &mut self, + add_block_id: &BlockId, + final_blocks: PreHashSet, + ) -> Result<(), GraphError> { + for block_id in final_blocks.into_iter() { + // remove from gi_head + if let Some(other_incomps) = self.gi_head.remove(&block_id) { + for other_incomp in other_incomps.into_iter() { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { + other_incomp_lst.remove(&block_id); + } + } + } + + // mark as final and update latest_final_blocks_periods + if let Some(BlockStatus::Active { + a_block: final_block, + .. 
+ }) = self.block_statuses.get_mut(&block_id) + { + massa_trace!("consensus.block_graph.add_block_to_graph.final", { + "hash": block_id + }); + final_block.is_final = true; + // remove from cliques + let final_block_fitness = final_block.fitness; + self.max_cliques.iter_mut().for_each(|c| { + if c.block_ids.remove(&block_id) { + c.fitness -= final_block_fitness; + } + }); + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { + // make sure at least one clique remains + self.max_cliques = vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }]; + } + // update latest final blocks + if final_block.slot.period + > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 + { + self.latest_final_blocks_periods[final_block.slot.thread as usize] = + (block_id, final_block.slot.period); + } + // update new final blocks list + self.new_final_blocks.insert(block_id); + } else { + return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, block_id))); + } + } + Ok(()) + } +} diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-graph-2-worker/src/state/mod.rs index 2c83ab7bcc7..daacc6bbdfc 100644 --- a/massa-graph-2-worker/src/state/mod.rs +++ b/massa-graph-2-worker/src/state/mod.rs @@ -18,6 +18,7 @@ use massa_models::{ use massa_storage::Storage; use massa_time::MassaTime; +mod graph; mod process; mod process_commands; mod stats; @@ -41,6 +42,8 @@ pub struct GraphState { pub max_cliques: Vec, /// ids of active blocks pub active_index: PreHashSet, + /// Save of latest periods + pub save_final_periods: Vec, /// One (block id, period) per thread pub latest_final_blocks_periods: Vec<(BlockId, u64)>, /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` @@ -79,14 +82,11 @@ pub struct GraphState { pub stats_desync_detection_timespan: MassaTime, /// blocks we want pub wishlist: PreHashMap>, + /// previous blockclique notified to Execution + pub prev_blockclique: PreHashMap, } impl GraphState { - pub fn new_sequence_number(&mut self) -> u64 { - self.sequence_counter += 1; - self.sequence_counter - } - pub fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { match self.block_statuses.get(block_id) { Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), @@ -142,11 +142,7 @@ impl GraphState { .get(slot.thread as usize) .unwrap_or_else(|| panic!("unexpected not found latest final block period")); - self.max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("expected one clique to be the blockclique") - .block_ids + self.get_blockclique() .iter() .for_each(|id| match self.block_statuses.get(id) { Some(BlockStatus::Active { @@ -417,16 +413,6 @@ impl GraphState { .collect() } - /// get the clique of higher fitness - pub fn get_blockclique(&self) -> &PreHashSet { - &self - .max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("blockclique missing") - .block_ids - } - /// get the current block wish list, including the operations hash. 
pub fn get_block_wishlist(&self) -> GraphResult>> { let mut wishlist = PreHashMap::>::default(); @@ -455,4 +441,30 @@ impl GraphState { Ok(wishlist) } + + /// Gets a block and all its descendants + /// + /// # Argument + /// * hash : hash of the given block + pub fn get_active_block_and_descendants( + &self, + block_id: &BlockId, + ) -> GraphResult> { + let mut to_visit = vec![*block_id]; + let mut result = PreHashSet::::default(); + while let Some(visit_h) = to_visit.pop() { + if !result.insert(visit_h) { + continue; // already visited + } + match self.block_statuses.get(&visit_h) { + Some(BlockStatus::Active { a_block, .. }) => { + a_block.as_ref() + .children.iter() + .for_each(|thread_children| to_visit.extend(thread_children.keys())) + }, + _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h))), + } + } + Ok(result) + } } diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index 5ba460cf27a..d99739fc8eb 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -447,58 +447,6 @@ impl GraphState { Ok(()) } - /// Computes max cliques of compatible blocks - pub fn compute_max_cliques(&self) -> Vec> { - let mut max_cliques: Vec> = Vec::new(); - - // algorithm adapted from IK_GPX as summarized in: - // Cazals et al., "A note on the problem of reporting maximal cliques" - // Theoretical Computer Science, 2008 - // https://doi.org/10.1016/j.tcs.2008.05.010 - - // stack: r, p, x - let mut stack: Vec<( - PreHashSet, - PreHashSet, - PreHashSet, - )> = vec![( - PreHashSet::::default(), - self.gi_head.keys().cloned().collect(), - PreHashSet::::default(), - )]; - while let Some((r, mut p, mut x)) = stack.pop() { - if p.is_empty() && x.is_empty() { - max_cliques.push(r); - continue; - } - // choose the pivot vertex following the GPX scheme: - // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) - let &u_p = p - .union(&x) - .max_by_key(|&u| { - p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) - .count() - }) - .unwrap(); // p was checked to be non-empty before - - // iterate over u_set = (p /\ Neighbors(u_p, GI)) - let u_set: PreHashSet = - &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); - for u_i in u_set.into_iter() { - p.remove(&u_i); - let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); - let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; - stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); - x.insert(u_i); - } - } - if max_cliques.is_empty() { - // make sure at least one clique remains - max_cliques = vec![PreHashSet::::default()]; - } - max_cliques - } - #[allow(clippy::too_many_arguments)] fn add_block_to_graph( &mut self, @@ -539,40 +487,12 @@ impl GraphState { self.active_index.insert(add_block_id); // add as child to parents - for (parent_h, _parent_period) in parents_hash_period.iter() { - if let Some(BlockStatus::Active { - a_block: a_parent, .. - }) = self.block_statuses.get_mut(parent_h) - { - a_parent.children[add_block_slot.thread as usize] - .insert(add_block_id, add_block_slot.period); - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses adding child {} of block {}", - add_block_id, parent_h - ))); - } - } - // add as descendant to ancestors. Note: descendants are never removed. 
- { - let mut ancestors: VecDeque = - parents_hash_period.iter().map(|(h, _)| *h).collect(); - let mut visited = PreHashSet::::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - self.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(add_block_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } - } + self.insert_parents_descendants( + add_block_id, + add_block_slot, + &parents_hash_period.iter().map(|(p_id, _)| *p_id).collect(), + ); // add incompatibilities to gi_head massa_trace!( @@ -640,33 +560,7 @@ impl GraphState { // compute clique fitnesses and find blockclique massa_trace!("consensus.block_graph.add_block_to_graph.compute_clique_fitnesses_and_find_blockclique", {}); // note: clique_fitnesses is pair (fitness, -hash_sum) where the second parameter is negative for sorting - { - let mut blockclique_i = 0usize; - let mut max_clique_fitness = (0u64, num::BigInt::default()); - for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { - clique.fitness = 0; - clique.is_blockclique = false; - let mut sum_hash = num::BigInt::default(); - for block_h in clique.block_ids.iter() { - let fitness = match self.block_statuses.get(block_h) { - Some(BlockStatus::Active { a_block, storage: _ }) => a_block.fitness, - _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))), - }; - clique.fitness = clique - .fitness - .checked_add(fitness) - .ok_or(GraphError::FitnessOverflow)?; - sum_hash -= - num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes()); - } - let cur_fit = (clique.fitness, sum_hash); - if cur_fit > max_clique_fitness { - blockclique_i = clique_i; - max_clique_fitness = cur_fit; - } - } - self.max_cliques[blockclique_i].is_blockclique = true; - } + let position_blockclique = self.compute_fitness_find_blockclique(&add_block_id)?; // update best parents massa_trace!( @@ -674,13 +568,7 @@ impl GraphState { {} ); { - // find blockclique - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let blockclique = &self.max_cliques[blockclique_i]; + let blockclique = &self.max_cliques[position_blockclique]; // init best parents as latest_final_blocks_periods self.best_parents = self.latest_final_blocks_periods.clone(); @@ -702,111 +590,18 @@ impl GraphState { "consensus.block_graph.add_block_to_graph.list_stale_blocks", {} ); - let stale_blocks = { - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let fitness_threshold = self.max_cliques[blockclique_i] - .fitness - .saturating_sub(self.config.delta_f0); - // iterate from largest to smallest to minimize reallocations - let mut indices: Vec = (0..self.max_cliques.len()).collect(); - indices - .sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); - let mut high_set = PreHashSet::::default(); - let mut low_set = PreHashSet::::default(); - for clique_i in indices.into_iter() { - if self.max_cliques[clique_i].fitness >= fitness_threshold { - high_set.extend(&self.max_cliques[clique_i].block_ids); - } else { - low_set.extend(&self.max_cliques[clique_i].block_ids); - } - } - self.max_cliques.retain(|c| c.fitness >= fitness_threshold); - &low_set - &high_set - }; + let 
fitness_threshold = self.max_cliques[position_blockclique] + .fitness + .saturating_sub(self.config.delta_f0); + let stale_blocks = self.list_stale_blocks(fitness_threshold); + self.max_cliques.retain(|c| c.fitness >= fitness_threshold); // mark stale blocks massa_trace!( "consensus.block_graph.add_block_to_graph.mark_stale_blocks", {} ); for stale_block_hash in stale_blocks.into_iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - storage: _storage, - }) = self.block_statuses.remove(&stale_block_hash) - { - self.active_index.remove(&stale_block_hash); - if active_block.is_final { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, stale_block_hash))); - } - - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&stale_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&stale_block_hash); - } - } - } - - // remove from cliques - let stale_block_fitness = active_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&stale_block_hash) { - c.fitness -= stale_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }]; - } - - // remove from parent's children - for (parent_h, _parent_period) in active_block.parents.iter() { - if let Some(BlockStatus::Active { - a_block: parent_active_block, - .. - }) = self.block_statuses.get_mut(parent_h) - { - parent_active_block.children[active_block.slot.thread as usize] - .remove(&stale_block_hash); - } - } - - massa_trace!("consensus.block_graph.add_block_to_graph.stale", { - "hash": stale_block_hash - }); - - // mark as stale - self.new_stale_blocks.insert( - stale_block_hash, - (active_block.creator_address, active_block.slot), - ); - self.block_statuses.insert( - stale_block_hash, - BlockStatus::Discarded { - slot: active_block.slot, - creator: active_block.creator_address, - parents: active_block.parents.iter().map(|(h, _)| *h).collect(), - reason: DiscardReason::Stale, - sequence_number: { - self.sequence_counter += 1; - self.sequence_counter - }, - }, - ); - self.discarded_index.insert(stale_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, stale_block_hash))); - } + self.remove_block(&add_block_id, &stale_block_hash)?; } // list final blocks @@ -814,165 +609,21 @@ impl GraphState { "consensus.block_graph.add_block_to_graph.list_final_blocks", {} ); - let final_blocks = { - // short-circuiting intersection of cliques from smallest to largest - let mut indices: Vec = (0..self.max_cliques.len()).collect(); - indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); - let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); - for i in 1..indices.len() { - final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); - if final_candidates.is_empty() { - break; - } - } - - // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest) - massa_trace!( - 
"consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", - {} - ); - indices.retain(|&i| self.max_cliques[i].fitness > self.config.delta_f0); - indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); - - let mut final_blocks = PreHashSet::::default(); - for clique_i in indices.into_iter() { - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks.loop", - { "clique_i": clique_i } - ); - // check in cliques from highest to lowest fitness - if final_candidates.is_empty() { - // no more final candidates - break; - } - let clique = &self.max_cliques[clique_i]; - - // compute the total fitness of all the descendants of the candidate within the clique - let loc_candidates = final_candidates.clone(); - for candidate_h in loc_candidates.into_iter() { - let descendants = match self.block_statuses.get(&candidate_h) { - Some(BlockStatus::Active { - a_block, - storage: _, - }) => &a_block.descendants, - _ => { - return Err(GraphError::MissingBlock(format!( - "missing block when computing total fitness of descendants: {}", - candidate_h - ))) - } - }; - let desc_fit: u64 = descendants - .intersection(&clique.block_ids) - .map(|h| { - if let Some(BlockStatus::Active { a_block: ab, .. }) = - self.block_statuses.get(h) - { - return ab.fitness; - } - 0 - }) - .sum(); - if desc_fit > self.config.delta_f0 { - // candidate is final - final_candidates.remove(&candidate_h); - final_blocks.insert(candidate_h); - } - } - } - final_blocks - }; + let final_blocks = self.list_final_blocks()?; // mark final blocks and update latest_final_blocks_periods massa_trace!( "consensus.block_graph.add_block_to_graph.mark_final_blocks", {} ); - for final_block_hash in final_blocks.into_iter() { - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&final_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&final_block_hash); - } - } - } - - // mark as final and update latest_final_blocks_periods - if let Some(BlockStatus::Active { - a_block: final_block, - .. 
- }) = self.block_statuses.get_mut(&final_block_hash) - { - massa_trace!("consensus.block_graph.add_block_to_graph.final", { - "hash": final_block_hash - }); - final_block.is_final = true; - // remove from cliques - let final_block_fitness = final_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&final_block_hash) { - c.fitness -= final_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }]; - } - // update latest final blocks - if final_block.slot.period - > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 - { - self.latest_final_blocks_periods[final_block.slot.thread as usize] = - (final_block_hash, final_block.slot.period); - } - // update new final blocks list - self.new_final_blocks.insert(final_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, final_block_hash))); - } - } + self.mark_final_blocks(&add_block_id, final_blocks)?; massa_trace!("consensus.block_graph.add_block_to_graph.end", {}); Ok(()) } - /// Mark a block as invalid - pub fn mark_invalid_block( - &mut self, - block_id: &BlockId, - header: WrappedHeader, - ) -> Result<(), GraphError> { - let reason = DiscardReason::Invalid("invalid".to_string()); - self.maybe_note_attack_attempt(&reason, block_id); - massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); - - // add to discard - self.block_statuses.insert( - *block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents, - reason, - sequence_number: { - self.sequence_counter += 1; - self.sequence_counter - }, - }, - ); - self.discarded_index.insert(*block_id); - - Ok(()) - } - /// Note an attack attempt if the discard reason indicates one. - fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { + pub fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason}); // If invalid, note the attack attempt. if let DiscardReason::Invalid(reason) = reason { @@ -984,32 +635,6 @@ impl GraphState { } } - /// Gets a block and all its descendants - /// - /// # Argument - /// * hash : hash of the given block - pub fn get_active_block_and_descendants( - &self, - block_id: &BlockId, - ) -> GraphResult> { - let mut to_visit = vec![*block_id]; - let mut result = PreHashSet::::default(); - while let Some(visit_h) = to_visit.pop() { - if !result.insert(visit_h) { - continue; // already visited - } - match self.block_statuses.get(&visit_h) { - Some(BlockStatus::Active { a_block, .. }) => { - a_block.as_ref() - .children.iter() - .for_each(|thread_children| to_visit.extend(thread_children.keys())) - }, - _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h))), - } - } - Ok(result) - } - /// Notify execution about blockclique changes and finalized blocks. /// /// # Arguments: @@ -1204,13 +829,13 @@ impl GraphState { .map(|(_block_id, period)| *period) .collect(); // if changed... 
- if self.latest_final_periods != latest_final_periods { + if self.save_final_periods != latest_final_periods { // signal new last final periods to pool self.channels .pool_command_sender .notify_final_cs_periods(&latest_final_periods); // update final periods - self.latest_final_periods = latest_final_periods; + self.save_final_periods = latest_final_periods; } /* diff --git a/massa-graph-2-worker/src/state/process_commands.rs b/massa-graph-2-worker/src/state/process_commands.rs index 5de43fa1797..e2d5f40e077 100644 --- a/massa-graph-2-worker/src/state/process_commands.rs +++ b/massa-graph-2-worker/src/state/process_commands.rs @@ -1,7 +1,7 @@ use std::collections::{hash_map::Entry, BTreeSet}; -use massa_graph::error::GraphResult; -use massa_graph_2_exports::block_status::{BlockStatus, HeaderOrBlock}; +use massa_graph::error::{GraphError, GraphResult}; +use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason, HeaderOrBlock}; use massa_logging::massa_trace; use massa_models::{ block::{BlockId, WrappedHeader}, @@ -133,4 +133,33 @@ impl GraphState { Ok(()) } + + /// Mark a block as invalid + pub fn mark_invalid_block( + &mut self, + block_id: &BlockId, + header: WrappedHeader, + ) -> Result<(), GraphError> { + let reason = DiscardReason::Invalid("invalid".to_string()); + self.maybe_note_attack_attempt(&reason, block_id); + massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); + + // add to discard + self.block_statuses.insert( + *block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents, + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(*block_id); + + Ok(()) + } } diff --git a/massa-graph-2-worker/src/state/tick.rs b/massa-graph-2-worker/src/state/tick.rs index cc8de6e199b..f5b8afe206f 100644 --- a/massa-graph-2-worker/src/state/tick.rs +++ b/massa-graph-2-worker/src/state/tick.rs @@ -4,7 +4,6 @@ use massa_graph::error::GraphResult; use massa_graph_2_exports::block_status::BlockStatus; use massa_logging::massa_trace; use massa_models::{block::BlockId, slot::Slot}; -use tracing::info; use super::GraphState; @@ -14,17 +13,6 @@ impl GraphState { "slot": actual_slot }); - let previous_cycle = self - .previous_slot - .map(|s| s.get_cycle(self.config.periods_per_cycle)); - let observed_cycle = actual_slot.get_cycle(self.config.periods_per_cycle); - if previous_cycle.is_none() { - // first cycle observed - info!("Massa network has started ! 
🎉") - } - if previous_cycle < Some(observed_cycle) { - info!("Started cycle {}", observed_cycle); - } // list all elements for which the time has come let to_process: BTreeSet<(Slot, BlockId)> = self .waiting_for_slot_index diff --git a/massa-graph-2-worker/src/state/verifications.rs b/massa-graph-2-worker/src/state/verifications.rs index 6a371f9b909..ffb4aef32a0 100644 --- a/massa-graph-2-worker/src/state/verifications.rs +++ b/massa-graph-2-worker/src/state/verifications.rs @@ -272,8 +272,7 @@ impl GraphState { .keys() .filter(|&sibling_h| sibling_h != block_id) .try_for_each(|&sibling_h| { - incomp - .extend(self.get_active_block_and_descendants(&sibling_h, read_shared_state)?); + incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); GraphResult::<()>::Ok(()) })?; @@ -327,8 +326,7 @@ impl GraphState { ))?.slot.period; if parent_period < parent_in_own_thread.slot.period { // GPI detected - incomp - .extend(self.get_active_block_and_descendants(&cur_h, read_shared_state)?); + incomp.extend(self.get_active_block_and_descendants(&cur_h)?); } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse } } diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 88bec82afbb..8dfbc5e438f 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -7,13 +7,13 @@ use massa_graph::{ error::{GraphError, GraphResult}, BootstrapableGraph, }; -use massa_graph_2_exports::{block_status::BlockStatus, GraphChannels, GraphConfig}; +use massa_graph_2_exports::{block_status::BlockStatus, GraphConfig}; use massa_hash::Hash; use massa_models::{ active_block::ActiveBlock, address::Address, block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, - prehash::{PreHashMap, PreHashSet}, + prehash::PreHashMap, slot::Slot, timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, wrapped::WrappedContent, @@ -74,7 +74,6 @@ impl GraphWorker { pub fn new( config: GraphConfig, command_receiver: mpsc::Receiver, - channels: GraphChannels, shared_state: Arc>, init_graph: Option, storage: Storage, @@ -166,13 +165,10 @@ impl GraphWorker { let mut res_graph = GraphWorker { config: config.clone(), command_receiver, - channels, shared_state, previous_slot, next_slot, next_instant, - prev_blockclique: Default::default(), - storage: storage.clone(), }; if let Some(BootstrapableGraph { final_blocks }) = init_graph { @@ -234,9 +230,9 @@ impl GraphWorker { // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync // because the two modules run concurrently and out of sync. 
{ - let read_shared_state = res_graph.shared_state.read(); + let mut write_shared_state = res_graph.shared_state.write(); let mut block_storage: PreHashMap = Default::default(); - let notify_finals: HashMap = read_shared_state + let notify_finals: HashMap = write_shared_state .get_all_final_blocks() .into_iter() .map(|(b_id, block_infos)| { @@ -244,11 +240,11 @@ impl GraphWorker { (block_infos.0, b_id) }) .collect(); - let notify_blockclique: HashMap = read_shared_state + let notify_blockclique: HashMap = write_shared_state .get_blockclique() .iter() .map(|b_id| { - let (a_block, storage) = read_shared_state + let (a_block, storage) = write_shared_state .get_full_active_block(b_id) .expect("active block missing from block_db"); let slot = a_block.slot; @@ -256,8 +252,9 @@ impl GraphWorker { (slot, *b_id) }) .collect(); - res_graph.prev_blockclique = notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect(); - res_graph + write_shared_state.prev_blockclique = + notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect(); + write_shared_state .channels .execution_controller .update_blockclique_status(notify_finals, Some(notify_blockclique), block_storage); @@ -304,32 +301,7 @@ impl GraphWorker { .collect(); for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { - // deduce children - for parent_id in &b_parents { - if let Some(BlockStatus::Active { - a_block: parent, .. - }) = write_shared_state.block_statuses.get_mut(parent_id) - { - parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); - } - } - - // deduce descendants - let mut ancestors: VecDeque = b_parents.into_iter().collect(); - let mut visited: PreHashSet = Default::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - write_shared_state.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(b_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } + write_shared_state.insert_parents_descendants(b_id, b_slot, &b_parents); } Ok(()) } diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index d87801be326..3261f83df31 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -6,7 +6,7 @@ use massa_models::{ timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, }; use massa_time::MassaTime; -use tracing::log::warn; +use tracing::{info, log::warn}; use crate::commands::GraphCommand; @@ -105,16 +105,28 @@ impl GraphWorker { /// Runs in loop forever. This loop must stop every slot to perform operations on stats and graph /// but can be stopped anytime by a command received. pub fn run(&mut self) { + //TODO: Add notify cs periods loop { match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { - self.previous_slot = Some(self.next_slot); + let previous_cycle = self + .previous_slot + .map(|s| s.get_cycle(self.config.periods_per_cycle)); + let observed_cycle = self.next_slot.get_cycle(self.config.periods_per_cycle); + if previous_cycle.is_none() { + // first cycle observed + info!("Massa network has started ! 
🎉") + } + if previous_cycle < Some(observed_cycle) { + info!("Started cycle {}", observed_cycle); + } { let mut write_shared_state = self.shared_state.write(); if let Err(err) = write_shared_state.slot_tick(self.next_slot) { warn!("Error while processing block tick: {}", err); } }; + self.previous_slot = Some(self.next_slot); (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); } WaitingStatus::Disconnected => { diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index e668357ac32..9522bd8adfe 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -2,7 +2,7 @@ use massa_graph::BootstrapableGraph; use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager}; use massa_models::block::BlockId; use massa_models::clique::Clique; -use massa_models::prehash::{PreHashMap, PreHashSet}; +use massa_models::prehash::PreHashSet; use massa_models::slot::Slot; use massa_storage::Storage; use massa_time::MassaTime; @@ -19,7 +19,6 @@ use crate::state::GraphState; pub struct GraphWorker { command_receiver: mpsc::Receiver, config: GraphConfig, - channels: GraphChannels, shared_state: Arc>, /// Previous slot. previous_slot: Option, @@ -27,10 +26,6 @@ pub struct GraphWorker { next_slot: Slot, /// Next slot instant next_instant: Instant, - /// previous blockclique notified to Execution - prev_blockclique: PreHashMap, - /// Shared storage, - storage: Storage, } mod init; @@ -65,6 +60,7 @@ pub fn start_graph_worker( new_stale_blocks: Default::default(), incoming_index: Default::default(), active_index: Default::default(), + save_final_periods: Default::default(), latest_final_blocks_periods: Default::default(), best_parents: Default::default(), block_statuses: Default::default(), @@ -80,21 +76,15 @@ pub fn start_graph_worker( stats_desync_detection_timespan, config.stats_timespan, ), + prev_blockclique: Default::default(), })); let shared_state_cloned = shared_state.clone(); let thread_graph = thread::Builder::new() .name("graph worker".into()) .spawn(move || { - let mut graph_worker = GraphWorker::new( - config, - rx, - channels, - shared_state_cloned, - init_graph, - storage, - ) - .unwrap(); + let mut graph_worker = + GraphWorker::new(config, rx, shared_state_cloned, init_graph, storage).unwrap(); graph_worker.run() }) .expect("Can't spawn thread graph."); From a41f5672e9e5f4484beaae6c3eae73821ee4c1ad Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 13 Oct 2022 19:46:43 +0200 Subject: [PATCH 18/40] Add docs and split functions of the graph. 
--- massa-graph-2-worker/src/state/mod.rs | 1 + massa-graph-2-worker/src/state/process.rs | 30 ++++++++++++++++- .../src/state/process_commands.rs | 32 ++++++++++++++----- massa-graph-2-worker/src/state/stats.rs | 7 ++-- massa-graph-2-worker/src/state/tick.rs | 15 ++++++--- massa-graph-2-worker/src/worker/main_loop.rs | 10 +++++- massa-graph-2-worker/src/worker/mod.rs | 15 +++++++++ 7 files changed, 93 insertions(+), 17 deletions(-) diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-graph-2-worker/src/state/mod.rs index daacc6bbdfc..206ba788070 100644 --- a/massa-graph-2-worker/src/state/mod.rs +++ b/massa-graph-2-worker/src/state/mod.rs @@ -87,6 +87,7 @@ pub struct GraphState { } impl GraphState { + /// Get a full active block pub fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { match self.block_statuses.get(block_id) { Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index d99739fc8eb..a6db18d2ac7 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -24,7 +24,14 @@ use crate::state::verifications::HeaderCheckOutcome; use super::GraphState; impl GraphState { - /// acknowledge a set of items recursively + /// Acknowledge a set of items recursively and process them + /// + /// # Arguments: + /// * `to_ack`: the set of items to acknowledge and process + /// * `current_slot`: the current slot when this function is called + /// + /// # Returns: + /// Success or error if an error happened during the processing of items pub fn rec_process( &mut self, mut to_ack: BTreeSet<(Slot, BlockId)>, @@ -38,6 +45,13 @@ impl GraphState { } /// Acknowledge a single item, return a set of items to re-ack + /// + /// # Arguments: + /// * `block_id`: the id of the block to acknowledge + /// * `current_slot`: the current slot when this function is called + /// + /// # Returns: + /// A list of items to re-ack and process or an error if the process of an item failed pub fn process( &mut self, block_id: BlockId, @@ -409,6 +423,7 @@ impl GraphState { Ok(reprocess) } + /// TODO: Doc pub fn promote_dep_tree(&mut self, hash: BlockId) -> GraphResult<()> { let mut to_explore = vec![hash]; let mut to_promote: PreHashMap = PreHashMap::default(); @@ -447,6 +462,19 @@ impl GraphState { Ok(()) } + /// Add a block to the graph and update the cliques, the graph dependencies and incompatibilities + /// + /// # Arguments: + /// * `add_block_id`: Block id of the block to add + /// * `parents_hash_period`: Ids and periods of the parents of the block to add + /// * `add_block_creator`: Creator of the block to add + /// * `add_block_slot`: Slot of the block to add + /// * `incomp`: Block ids of the blocks incompatible with the block to add + /// * `fitness`: Fitness of the block to add + /// * `storage`: Storage containing all the data of the block to add + /// + /// # Returns: + /// Success or error if any steps failed #[allow(clippy::too_many_arguments)] fn add_block_to_graph( &mut self, diff --git a/massa-graph-2-worker/src/state/process_commands.rs b/massa-graph-2-worker/src/state/process_commands.rs index e2d5f40e077..467f6ddba2c 100644 --- a/massa-graph-2-worker/src/state/process_commands.rs +++ b/massa-graph-2-worker/src/state/process_commands.rs @@ -13,6 +13,15 @@ use tracing::debug; use super::GraphState; impl GraphState { + /// Register a block header in the graph. Ignore genesis hashes. 
+ /// + /// # Arguments: + /// * `block_id`: the block id + /// * `header`: the header to register + /// * `current_slot`: the slot when this function is called + /// + /// # Returns: + /// Success or error if the header is invalid or too old pub fn register_block_header( &mut self, block_id: BlockId, @@ -59,11 +68,16 @@ impl GraphState { Ok(()) } - /// A new block has come + /// Register a new full block in the graph. Ignore genesis hashes. /// - /// Checks performed: - /// - Ignore genesis blocks. - /// - See `process`. + /// # Arguments: + /// * `block_id`: the block id + /// * `slot`: the slot of the block + /// * `current_slot`: the slot when this function is called + /// * `storage`: Storage containing the whole content of the block + /// + /// # Returns: + /// Success or error if the block is invalid or too old pub fn register_block( &mut self, block_id: BlockId, @@ -134,12 +148,16 @@ impl GraphState { Ok(()) } - /// Mark a block as invalid + /// Mark a block that is in the graph as invalid. + /// + /// # Arguments: + /// * `block_id`: Block id of the block to mark as invalid + /// * `header`: Header of the block to mark as invalid pub fn mark_invalid_block( &mut self, block_id: &BlockId, header: WrappedHeader, - ) -> Result<(), GraphError> { + ) { let reason = DiscardReason::Invalid("invalid".to_string()); self.maybe_note_attack_attempt(&reason, block_id); massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); @@ -159,7 +177,5 @@ impl GraphState { }, ); self.discarded_index.insert(*block_id); - - Ok(()) } } diff --git a/massa-graph-2-worker/src/state/stats.rs b/massa-graph-2-worker/src/state/stats.rs index 905d298f532..ee5b9f4efc0 100644 --- a/massa-graph-2-worker/src/state/stats.rs +++ b/massa-graph-2-worker/src/state/stats.rs @@ -7,8 +7,7 @@ use std::cmp::max; use tracing::log::warn; impl GraphState { - /// retrieve stats - /// Used in response to a API request + /// Calculate and return stats about graph pub fn get_stats(&self) -> GraphResult { let timespan_end = max( self.launch_time, @@ -37,7 +36,8 @@ impl GraphState { end_timespan: timespan_end, }) } - + + /// Must be called each tick to update stats. Will detect if a desynchronization happened pub fn stats_tick(&mut self) -> GraphResult<()> { let now = MassaTime::now(self.config.clock_compensation_millis)?; @@ -63,6 +63,7 @@ impl GraphState { Ok(()) } + /// Remove old stats from graph storage pub fn prune_stats(&mut self) -> GraphResult<()> { let start_time = MassaTime::now(self.config.clock_compensation_millis)? .saturating_sub(self.stats_history_timespan); diff --git a/massa-graph-2-worker/src/state/tick.rs b/massa-graph-2-worker/src/state/tick.rs index f5b8afe206f..b5d627e46a0 100644 --- a/massa-graph-2-worker/src/state/tick.rs +++ b/massa-graph-2-worker/src/state/tick.rs @@ -8,9 +8,16 @@ use massa_models::{block::BlockId, slot::Slot}; use super::GraphState; impl GraphState { - pub fn slot_tick(&mut self, actual_slot: Slot) -> GraphResult<()> { + /// This function should be called each tick and will check if there is a block in the graph that should be processed at this slot, and if so, process it. + /// + /// # Arguments: + /// * `current_slot`: the current slot + /// + /// # Returns: + /// Error if the process of a block returned an error. 
+    pub fn slot_tick(&mut self, current_slot: Slot) -> GraphResult<()> {
         massa_trace!("consensus.consensus_worker.slot_tick", {
-            "slot": actual_slot
+            "slot": current_slot
         });
 
         // list all elements for which the time has come
@@ -20,7 +27,7 @@ impl GraphState {
             .filter_map(|b_id| match self.block_statuses.get(b_id) {
                 Some(BlockStatus::WaitingForSlot(header_or_block)) => {
                     let slot = header_or_block.get_slot();
-                    if slot <= actual_slot {
+                    if slot <= current_slot {
                         Some((slot, *b_id))
                     } else {
                         None
@@ -32,7 +39,7 @@ impl GraphState {
         massa_trace!("consensus.block_graph.slot_tick", {});
 
         // process those elements
-        self.rec_process(to_process, Some(actual_slot))?;
+        self.rec_process(to_process, Some(current_slot))?;
         self.stats_tick()?;
 
         // take care of block db changes
diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs
index 3261f83df31..71efc65bb09 100644
--- a/massa-graph-2-worker/src/worker/main_loop.rs
+++ b/massa-graph-2-worker/src/worker/main_loop.rs
@@ -19,6 +19,13 @@ enum WaitingStatus {
 }
 
 impl GraphWorker {
+    /// Execute a command received from the controller and run an update of the graph after processing the command.
+    ///
+    /// # Arguments:
+    /// * `command`: the command to execute
+    ///
+    /// # Returns:
+    /// An error if the command failed
     fn manage_command(&mut self, command: GraphCommand) -> GraphResult<()> {
         let mut write_shared_state = self.shared_state.write();
         match command {
@@ -36,7 +43,8 @@ impl GraphWorker {
                 write_shared_state.block_db_changed()
             }
             GraphCommand::MarkInvalidBlock(block_id, header) => {
-                write_shared_state.mark_invalid_block(&block_id, header)
+                write_shared_state.mark_invalid_block(&block_id, header);
+                Ok(())
             }
         }
     }
diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs
index 9522bd8adfe..1cee623fbd2 100644
--- a/massa-graph-2-worker/src/worker/mod.rs
+++ b/massa-graph-2-worker/src/worker/mod.rs
@@ -16,9 +16,13 @@ use crate::controller::GraphControllerImpl;
 use crate::manager::GraphManagerImpl;
 use crate::state::GraphState;
 
+/// The graph worker structure that contains all the information and tools for the graph worker thread.
 pub struct GraphWorker {
+    /// Channel to receive commands from the controller
     command_receiver: mpsc::Receiver<GraphCommand>,
+    /// Configuration of the graph
     config: GraphConfig,
+    /// State shared with the controller
     shared_state: Arc<RwLock<GraphState>>,
     /// Previous slot.
     previous_slot: Option<Slot>,
@@ -31,6 +35,17 @@ pub struct GraphWorker {
 mod init;
 mod main_loop;
 
+/// Create a new graph worker thread.
+///
+/// # Arguments:
+/// * `config`: Configuration of the graph
+/// * `channels`: Channels to communicate with other modules
+/// * `init_graph`: Optional initial graph to bootstrap the graph. If None, the graph will have only genesis blocks.
+/// * `storage`: Storage to use for the graph
+///
+/// # Returns:
+/// * The graph controller to communicate with the graph worker thread
+/// * The graph manager to manage the graph worker thread
 pub fn start_graph_worker(
     config: GraphConfig,
     channels: GraphChannels,

From 1aea183b5bd020dc41c82879d72cbcb0fdafe06b Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Thu, 13 Oct 2022 19:52:19 +0200
Subject: [PATCH 19/40] Format and clippy.
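Mostly formatting (trailing whitespace in the new doc comments) plus
mechanical clippy fixes: `insert_parents_descendants` now takes the
parents vector by value instead of `&Vec`, and the explicit copying
closure is replaced with `.copied()`. As an illustration only, the
pattern behind that change looks like this (illustrative function and
element types, not the real ones):

    use std::collections::VecDeque;

    // Before: triggers clippy::ptr_arg (a `&Vec` parameter)
    // and clippy::map_clone (an explicit copying closure).
    fn ancestors_before(parents_hash: &Vec<u64>) -> VecDeque<u64> {
        parents_hash.iter().map(|e| *e).collect()
    }

    // After: take ownership and use the dedicated `.copied()` adapter.
    fn ancestors_after(parents_hash: Vec<u64>) -> VecDeque<u64> {
        parents_hash.iter().copied().collect()
    }

    fn main() {
        let parents = vec![1, 2, 3];
        assert_eq!(ancestors_before(&parents), ancestors_after(parents));
    }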
--- massa-graph-2-worker/src/state/graph.rs | 16 ++++++++-------- massa-graph-2-worker/src/state/process.rs | 14 +++++++------- .../src/state/process_commands.rs | 16 ++++++---------- massa-graph-2-worker/src/state/stats.rs | 2 +- massa-graph-2-worker/src/state/tick.rs | 4 ++-- massa-graph-2-worker/src/worker/init.rs | 2 +- massa-graph-2-worker/src/worker/main_loop.rs | 4 ++-- massa-graph-2-worker/src/worker/mod.rs | 8 ++++---- 8 files changed, 31 insertions(+), 35 deletions(-) diff --git a/massa-graph-2-worker/src/state/graph.rs b/massa-graph-2-worker/src/state/graph.rs index 952b0ce2aaa..2d0a4177e1e 100644 --- a/massa-graph-2-worker/src/state/graph.rs +++ b/massa-graph-2-worker/src/state/graph.rs @@ -12,7 +12,7 @@ impl GraphState { &mut self, add_block_id: BlockId, add_block_slot: Slot, - parents_hash: &Vec, + parents_hash: Vec, ) { // add as child to parents for parent_h in parents_hash.iter() { @@ -26,7 +26,7 @@ impl GraphState { } // add as descendant to ancestors. Note: descendants are never removed. - let mut ancestors: VecDeque = parents_hash.iter().map(|e| *e).collect(); + let mut ancestors: VecDeque = parents_hash.iter().copied().collect(); let mut visited = PreHashSet::::default(); while let Some(ancestor_h) = ancestors.pop_back() { if !visited.insert(ancestor_h) { @@ -98,18 +98,18 @@ impl GraphState { if let Some(BlockStatus::Active { a_block: active_block, storage: _storage, - }) = self.block_statuses.remove(&block_id) + }) = self.block_statuses.remove(block_id) { - self.active_index.remove(&block_id); + self.active_index.remove(block_id); if active_block.is_final { return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, block_id))); } // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&block_id) { + if let Some(other_incomps) = self.gi_head.remove(block_id) { for other_incomp in other_incomps.into_iter() { if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&block_id); + other_incomp_lst.remove(block_id); } } } @@ -117,7 +117,7 @@ impl GraphState { // remove from cliques let stale_block_fitness = active_block.fitness; self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&block_id) { + if c.block_ids.remove(block_id) { c.fitness -= stale_block_fitness; } }); @@ -139,7 +139,7 @@ impl GraphState { }) = self.block_statuses.get_mut(parent_h) { parent_active_block.children[active_block.slot.thread as usize] - .remove(&block_id); + .remove(block_id); } } diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index a6db18d2ac7..02d4d26d029 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -25,11 +25,11 @@ use super::GraphState; impl GraphState { /// Acknowledge a set of items recursively and process them - /// + /// /// # Arguments: /// * `to_ack`: the set of items to acknowledge and process /// * `current_slot`: the current slot when this function is called - /// + /// /// # Returns: /// Success or error if an error happened during the processing of items pub fn rec_process( @@ -45,11 +45,11 @@ impl GraphState { } /// Acknowledge a single item, return a set of items to re-ack - /// + /// /// # Arguments: /// * `block_id`: the id of the block to acknowledge /// * `current_slot`: the current slot when this function is called - /// + /// /// # Returns: /// A list of items to re-ack and process or 
an error if the process of an item failed pub fn process( @@ -463,7 +463,7 @@ impl GraphState { } /// Add a block to the graph and update the cliques, the graph dependencies and incompatibilities - /// + /// /// # Arguments: /// * `add_block_id`: Block id of the block to add /// * `parents_hash_period`: Ids and periods of the parents of the block to add @@ -472,7 +472,7 @@ impl GraphState { /// * `incomp`: Block ids of the blocks incompatible with the block to add /// * `fitness`: Fitness of the block to add /// * `storage`: Storage containing all the data of the block to add - /// + /// /// # Returns: /// Success or error if any steps failed #[allow(clippy::too_many_arguments)] @@ -519,7 +519,7 @@ impl GraphState { self.insert_parents_descendants( add_block_id, add_block_slot, - &parents_hash_period.iter().map(|(p_id, _)| *p_id).collect(), + parents_hash_period.iter().map(|(p_id, _)| *p_id).collect(), ); // add incompatibilities to gi_head diff --git a/massa-graph-2-worker/src/state/process_commands.rs b/massa-graph-2-worker/src/state/process_commands.rs index 467f6ddba2c..823eca00227 100644 --- a/massa-graph-2-worker/src/state/process_commands.rs +++ b/massa-graph-2-worker/src/state/process_commands.rs @@ -1,6 +1,6 @@ use std::collections::{hash_map::Entry, BTreeSet}; -use massa_graph::error::{GraphError, GraphResult}; +use massa_graph::error::GraphResult; use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason, HeaderOrBlock}; use massa_logging::massa_trace; use massa_models::{ @@ -14,12 +14,12 @@ use super::GraphState; impl GraphState { /// Register a block header in the graph. Ignore genesis hashes. - /// + /// /// # Arguments: /// * `block_id`: the block id /// * `header`: the header to register /// * `current_slot`: the slot when this function is called - /// + /// /// # Returns: /// Success or error if the header is invalid or too old pub fn register_block_header( @@ -75,7 +75,7 @@ impl GraphState { /// * `slot`: the slot of the block /// * `current_slot`: the slot when this function is called /// * `storage`: Storage containing the whole content of the block - /// + /// /// # Returns: /// Success or error if the block is invalid or too old pub fn register_block( @@ -149,15 +149,11 @@ impl GraphState { } /// Mark a block that is in the graph as invalid. - /// + /// /// # Arguments: /// * `block_id`: Block id of the block to mark as invalid /// * `header`: Header of the block to mark as invalid - pub fn mark_invalid_block( - &mut self, - block_id: &BlockId, - header: WrappedHeader, - ) { + pub fn mark_invalid_block(&mut self, block_id: &BlockId, header: WrappedHeader) { let reason = DiscardReason::Invalid("invalid".to_string()); self.maybe_note_attack_attempt(&reason, block_id); massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); diff --git a/massa-graph-2-worker/src/state/stats.rs b/massa-graph-2-worker/src/state/stats.rs index ee5b9f4efc0..2e1be5a53ac 100644 --- a/massa-graph-2-worker/src/state/stats.rs +++ b/massa-graph-2-worker/src/state/stats.rs @@ -36,7 +36,7 @@ impl GraphState { end_timespan: timespan_end, }) } - + /// Must be called each tick to update stats. 
Will detect if a desynchronization happened pub fn stats_tick(&mut self) -> GraphResult<()> { let now = MassaTime::now(self.config.clock_compensation_millis)?; diff --git a/massa-graph-2-worker/src/state/tick.rs b/massa-graph-2-worker/src/state/tick.rs index b5d627e46a0..b1ea02f9fcd 100644 --- a/massa-graph-2-worker/src/state/tick.rs +++ b/massa-graph-2-worker/src/state/tick.rs @@ -9,10 +9,10 @@ use super::GraphState; impl GraphState { /// This function should be called each tick and will check if there is a block in the graph that should be processed at this slot, and if so, process it. - /// + /// /// # Arguments: /// * `current_slot`: the current slot - /// + /// /// # Returns: /// Error if the process of a block returned an error. pub fn slot_tick(&mut self, current_slot: Slot) -> GraphResult<()> { diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 8dfbc5e438f..eb3fdddbb4b 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -301,7 +301,7 @@ impl GraphWorker { .collect(); for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { - write_shared_state.insert_parents_descendants(b_id, b_slot, &b_parents); + write_shared_state.insert_parents_descendants(b_id, b_slot, b_parents); } Ok(()) } diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index 71efc65bb09..20bf62fadd6 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -20,10 +20,10 @@ impl GraphWorker { /// Execute a command received from the controller and run an update of the graph after processing the command. - /// + /// /// # Arguments: /// * `command`: the command to execute - /// + /// /// # Returns: /// An error if the command failed fn manage_command(&mut self, command: GraphCommand) -> GraphResult<()> { diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 1cee623fbd2..9a53aa63c41 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -36,15 +36,15 @@ mod init; mod main_loop; /// Create a new graph worker thread. -/// +/// /// # Arguments: /// * `config`: Configuration of the graph /// * `channels`: Channels to communicate with other modules /// * `init_graph`: Optional initial graph to bootstrap the graph. If None, the graph will have only genesis blocks.
/// * `storage`: Storage to use for the graph -/// +/// /// # Returns: -/// * The graph controller to communicate with the graph worker thread +/// * The graph controller to communicate with the graph worker thread /// * The graph manager to manage the graph worker thread pub fn start_graph_worker( config: GraphConfig, channels: GraphChannels, @@ -59,7 +59,7 @@ pub fn start_graph_worker( let shared_state = Arc::new(RwLock::new(GraphState { storage: storage.clone(), config: config.clone(), - channels: channels.clone(), + channels, max_cliques: vec![Clique { block_ids: PreHashSet::<BlockId>::default(), fitness: 0, From 5e0c96f69460727aa6c4aeabba03e53c312a9a8d Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 18 Oct 2022 09:38:55 +0200 Subject: [PATCH 20/40] Add documentation about state and channels communication in graph --- massa-graph-2-worker/src/controller.rs | 50 ++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index 69aa24776c6..6b8216dba09 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -21,6 +21,13 @@ use std::sync::{mpsc::SyncSender, Arc}; use crate::{commands::GraphCommand, state::GraphState}; +/// Data is retrieved through a shared state, while modifications are requested by sending messages to a channel. +/// This is done mostly to be able to: +/// +/// - send commands through the channel without waiting for them to be processed from the point of view of the sending thread, which channels are ideal for (much faster than locks) +/// - still be able to read the current state of the graph as processed so far (for this we need a shared state) +/// +/// Note that sending commands and reading the state is done from different, mutually-asynchronous tasks, so a read may not yet reflect commands that were just sent. #[derive(Clone)] pub struct GraphControllerImpl { command_sender: SyncSender<GraphCommand>, @@ -40,6 +47,14 @@ impl GraphControllerImpl { } impl GraphController for GraphControllerImpl { + /// Get a block graph export in a given period. + /// + /// # Arguments: + /// * `start_slot`: the start slot + /// * `end_slot`: the end slot + /// + /// # Returns: + /// An export of the block graph in this period fn get_block_graph_status( &self, start_slot: Option<Slot>, @@ -50,6 +65,13 @@ impl GraphController for GraphControllerImpl { .extract_block_graph_part(start_slot, end_slot) } + /// Get statuses of blocks present in the graph + /// + /// # Arguments: + /// * `ids`: the block ids to get the status of + /// + /// # Returns: + /// A vector of statuses in the same order as the given block ids fn get_block_statuses(&self, ids: Vec<BlockId>) -> Vec<BlockGraphStatus> { let read_shared_state = self.shared_state.read(); ids.iter() @@ -57,10 +79,19 @@ impl GraphController for GraphControllerImpl { .collect() } + /// Get all the cliques possible in the block graph. + /// + /// # Returns: + /// A vector of cliques fn get_cliques(&self) -> Vec<Clique> { self.shared_state.read().max_cliques.clone() } + /// Get a part of the graph to send to a node so that it can set up its own graph. + /// Used for bootstrap.
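The module-level comment introduced in this patch is the key design note for the new controller: reads and writes take different paths. A hypothetical reduction of those two paths (plain `u64` commands standing in for `GraphCommand`, a `Vec` standing in for `GraphState`) makes the caveat concrete:

```rust
use parking_lot::RwLock;
use std::sync::{
    mpsc::{SyncSender, TrySendError},
    Arc,
};

struct Controller {
    command_sender: SyncSender<u64>,
    shared_state: Arc<RwLock<Vec<u64>>>,
}

impl Controller {
    // Read path: a short read lock, returning whatever the worker
    // has applied so far.
    fn count(&self) -> usize {
        self.shared_state.read().len()
    }

    // Write path: fire-and-forget; a full queue or a stopped worker
    // surfaces as an error instead of blocking the caller.
    fn register(&self, id: u64) -> Result<(), TrySendError<u64>> {
        self.command_sender.try_send(id)
    }
}

fn main() {
    let (tx, _rx) = std::sync::mpsc::sync_channel(8);
    let ctl = Controller {
        command_sender: tx,
        shared_state: Arc::new(RwLock::new(Vec::new())),
    };
    ctl.register(1).unwrap(); // queued, but no worker has applied it yet
    assert_eq!(ctl.count(), 0); // the read does not see the queued command
}
```

A `register` immediately followed by a read may not observe the new element, because the command can still be sitting in the queue: exactly the mutually-asynchronous behaviour the comment warns about.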
+ /// + /// # Returns: + /// A portion of the graph fn get_bootstrap_graph(&self) -> GraphResult<BootstrapableGraph> { let read_shared_state = self.shared_state.read(); let mut required_final_blocks: PreHashSet<_> = @@ -94,20 +125,39 @@ impl GraphController for GraphControllerImpl { Ok(BootstrapableGraph { final_blocks }) } + /// Get the stats of the consensus fn get_stats(&self) -> GraphResult<ConsensusStats> { self.shared_state.read().get_stats() } + /// Get the current best parents for block creation + /// + /// # Returns: + /// A block id and a period for each thread of the graph fn get_best_parents(&self) -> Vec<(BlockId, u64)> { self.shared_state.read().best_parents.clone() } + /// Get the block that is in the blockclique at a given slot. + /// + /// # Arguments: + /// * `slot`: the slot to get the block at + /// + /// # Returns: + /// The block id of the block at the given slot, if it exists fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option<BlockId> { self.shared_state .read() .get_blockclique_block_at_slot(&slot) } + /// Get the latest block that is in the blockclique, in the thread of the given slot and before this `slot`. + /// + /// # Arguments: + /// * `slot`: the slot that will give us the thread and the upper bound + /// + /// # Returns: + /// The block id of the latest block in the thread of the given slot and before this slot, if it exists fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { self.shared_state .read() From 387d5ef5347c9f2b70227ee25762b3486fa2afb7 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 18 Oct 2022 11:51:45 +0200 Subject: [PATCH 21/40] Link new graph to the main. --- Cargo.lock | 27 +++ massa-api/Cargo.toml | 1 + massa-api/src/config.rs | 9 + massa-api/src/error.rs | 3 + massa-api/src/lib.rs | 12 +- massa-api/src/private.rs | 5 - massa-api/src/public.rs | 89 ++++----- massa-bootstrap/Cargo.toml | 2 + massa-bootstrap/src/error.rs | 3 + massa-bootstrap/src/lib.rs | 3 + massa-bootstrap/src/server.rs | 17 +- massa-factory-exports/Cargo.toml | 1 + massa-factory-exports/src/types.rs | 6 +- massa-factory-worker/src/block_factory.rs | 19 +- .../src/endorsement_factory.rs | 19 +- massa-graph-2-exports/src/controller_trait.rs | 14 +- massa-graph-2-worker/src/controller.rs | 6 +- massa-graph-2-worker/src/lib.rs | 3 +- massa-node/Cargo.toml | 4 +- massa-node/src/main.rs | 188 +++++++++++------- .../src/protocol_controller.rs | 5 +- 21 files changed, 259 insertions(+), 177 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9867625a1bf..adcbc06f378 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -908,6 +908,26 @@ dependencies = [ "instant", ] +[[package]] +name = "fix-hidden-lifetime-bug" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4ae9c2016a663983d4e40a9ff967d6dcac59819672f0b47f2b17574e99c33c8" +dependencies = [ + "fix-hidden-lifetime-bug-proc_macros", +] + +[[package]] +name = "fix-hidden-lifetime-bug-proc_macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1575,6 +1595,7 @@ name = "massa-node" version = "0.1.0" dependencies = [ "anyhow", + "crossbeam-channel", "dialoguer", "enum-map", "lazy_static", @@ -1588,6 +1609,8 @@ dependencies = [ "massa_factory_exports", "massa_factory_worker", "massa_final_state", + "massa_graph_2_exports", + "massa_graph_2_worker", "massa_ledger_exports",
"massa_ledger_worker", "massa_logging", @@ -1648,6 +1671,7 @@ dependencies = [ "massa_consensus_exports", "massa_execution_exports", "massa_graph", + "massa_graph_2_exports", "massa_hash", "massa_models", "massa_network_exports", @@ -1697,12 +1721,14 @@ dependencies = [ "async-speed-limit", "bitvec", "displaydoc", + "fix-hidden-lifetime-bug", "futures 0.3.24", "lazy_static", "massa_async_pool", "massa_consensus_exports", "massa_final_state", "massa_graph", + "massa_graph_2_exports", "massa_hash", "massa_ledger_exports", "massa_ledger_worker", @@ -1841,6 +1867,7 @@ dependencies = [ "displaydoc", "massa_consensus_exports", "massa_execution_exports", + "massa_graph_2_exports", "massa_hash", "massa_ledger_exports", "massa_models", diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index faca8e0bef0..867f24291ad 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -19,6 +19,7 @@ parking_lot = { version = "0.12", features = ["deadlock_detection"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } massa_graph = { path = "../massa-graph" } +massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_network_exports = { path = "../massa-network-exports" } diff --git a/massa-api/src/config.rs b/massa-api/src/config.rs index 81b3a094f2a..830db341187 100644 --- a/massa-api/src/config.rs +++ b/massa-api/src/config.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS use jsonrpc_core::serde::Deserialize; +use massa_time::MassaTime; use std::net::SocketAddr; /// API settings. @@ -27,4 +28,12 @@ pub struct APIConfig { pub max_function_name_length: u16, /// max parameter size pub max_parameter_size: u32, + /// thread count + pub thread_count: u8, + /// genesis_timestamp + pub genesis_timestamp: MassaTime, + /// t0 + pub t0: MassaTime, + /// periods per cycle + pub periods_per_cycle: u64, } diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index ef55848cc9d..996125d97ea 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -3,6 +3,7 @@ use displaydoc::Display; use massa_consensus_exports::error::ConsensusError; use massa_execution_exports::ExecutionError; +use massa_graph::error::GraphError; use massa_hash::MassaHashError; use massa_models::error::ModelsError; use massa_network_exports::NetworkError; @@ -24,6 +25,8 @@ pub enum ApiError { MassaHashError(#[from] MassaHashError), /// consensus error: {0} ConsensusError(#[from] Box), + /// graph error: {0} + GraphError(#[from] GraphError), /// execution error: {0} ExecutionError(#[from] ExecutionError), /// network error: {0} diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index 5bac0d04820..0ed983b2020 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -8,8 +8,8 @@ use error::ApiError; use jsonrpc_core::{BoxFuture, IoHandler, Value}; use jsonrpc_derive::rpc; use jsonrpc_http_server::{CloseHandle, ServerBuilder}; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; use massa_execution_exports::ExecutionController; +use massa_graph_2_exports::GraphController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, EndorsementInfo, EventFilter, NodeStatus, OperationInfo, OperationInput, @@ -52,8 +52,8 @@ pub use config::APIConfig; /// Public API component pub struct Public { - /// link to the consensus component - pub consensus_command_sender: ConsensusCommandSender, + /// link to the graph component + pub 
graph_controller: Box, /// link to the execution component pub execution_controller: Box, /// link to the selector component @@ -64,8 +64,6 @@ pub struct Public { pub protocol_command_sender: ProtocolCommandSender, /// Massa storage pub storage: Storage, - /// consensus configuration (TODO: remove it, can be retrieved via an endpoint) - pub consensus_config: ConsensusConfig, /// API settings pub api_settings: APIConfig, /// network setting @@ -82,14 +80,10 @@ pub struct Public { /// Private API content pub struct Private { - /// link to the consensus component - pub consensus_command_sender: ConsensusCommandSender, /// link to the network component pub network_command_sender: NetworkCommandSender, /// link to the execution component pub execution_controller: Box, - /// consensus configuration (TODO: remove it, can be retrieved via an endpoint) - pub consensus_config: ConsensusConfig, /// API settings pub api_settings: APIConfig, /// stop channel diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index 583f643001e..2ab13e204e0 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -7,7 +7,6 @@ use crate::{Endpoints, Private, RpcServer, StopHandle, API}; use jsonrpc_core::BoxFuture; use jsonrpc_http_server::tokio::sync::mpsc; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; use massa_execution_exports::ExecutionController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, @@ -38,20 +37,16 @@ use std::sync::Arc; impl API { /// generate a new private API pub fn new( - consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, execution_controller: Box, api_settings: APIConfig, - consensus_settings: ConsensusConfig, node_wallet: Arc>, ) -> (Self, mpsc::Receiver<()>) { let (stop_node_channel, rx) = mpsc::channel(1); ( API(Private { - consensus_command_sender, network_command_sender, execution_controller, - consensus_config: consensus_settings, api_settings, stop_node_channel, node_wallet, diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 8193d8252fc..51c38680933 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -5,11 +5,11 @@ use crate::config::APIConfig; use crate::error::ApiError; use crate::{Endpoints, Public, RpcServer, StopHandle, API}; use jsonrpc_core::BoxFuture; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; use massa_execution_exports::{ ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; -use massa_graph::DiscardReason; +use massa_graph_2_exports::block_status::DiscardReason; +use massa_graph_2_exports::GraphController; use massa_models::api::{ BlockGraphStatus, DatastoreEntryInput, DatastoreEntryOutput, OperationInput, ReadOnlyBytecodeExecution, ReadOnlyCall, SlotAmount, @@ -57,11 +57,10 @@ use std::net::{IpAddr, SocketAddr}; impl API { /// generate a new public API pub fn new( - consensus_command_sender: ConsensusCommandSender, + graph_controller: Box, execution_controller: Box, api_settings: APIConfig, selector_controller: Box, - consensus_settings: ConsensusConfig, pool_command_sender: Box, protocol_command_sender: ProtocolCommandSender, network_settings: NetworkConfig, @@ -72,8 +71,7 @@ impl API { storage: Storage, ) -> Self { API(Public { - consensus_command_sender, - consensus_config: consensus_settings, + graph_controller, api_settings, pool_command_sender, network_settings, @@ -293,28 +291,28 @@ impl Endpoints for API { fn 
get_status(&self) -> BoxFuture> { let execution_controller = self.0.execution_controller.clone(); - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let graph_controller = self.0.graph_controller.clone(); let network_command_sender = self.0.network_command_sender.clone(); let network_config = self.0.network_settings.clone(); let version = self.0.version; - let consensus_settings = self.0.consensus_config.clone(); let compensation_millis = self.0.compensation_millis; let pool_command_sender = self.0.pool_command_sender.clone(); let node_id = self.0.node_id; let config = CompactConfig::default(); + let api_config = self.0.api_settings.clone(); let closure = async move || { let now = MassaTime::now(compensation_millis)?; let last_slot = get_latest_block_slot_at_timestamp( - consensus_settings.thread_count, - consensus_settings.t0, - consensus_settings.genesis_timestamp, + api_config.thread_count, + api_config.t0, + api_config.genesis_timestamp, now, )?; let execution_stats = execution_controller.get_stats(); + let consensus_stats = graph_controller.get_stats()?; - let (consensus_stats, network_stats, peers) = tokio::join!( - consensus_command_sender.get_stats(), + let (network_stats, peers) = tokio::join!( network_command_sender.get_network_stats(), network_command_sender.get_peers() ); @@ -341,40 +339,40 @@ impl Endpoints for API { last_slot, next_slot: last_slot .unwrap_or_else(|| Slot::new(0, 0)) - .get_next_slot(consensus_settings.thread_count)?, + .get_next_slot(api_config.thread_count)?, execution_stats, - consensus_stats: consensus_stats?, + consensus_stats: consensus_stats, network_stats: network_stats?, pool_stats, config, current_cycle: last_slot .unwrap_or_else(|| Slot::new(0, 0)) - .get_cycle(consensus_settings.periods_per_cycle), + .get_cycle(api_config.periods_per_cycle), }) }; Box::pin(closure()) } fn get_cliques(&self) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); - let closure = async move || Ok(consensus_command_sender.get_cliques().await?); + let graph_controller = self.0.graph_controller.clone(); + let closure = async move || Ok(graph_controller.get_cliques()); Box::pin(closure()) } fn get_stakers(&self) -> BoxFuture, ApiError>> { let execution_controller = self.0.execution_controller.clone(); - let cfg = self.0.consensus_config.clone(); + let api_config = self.0.api_settings.clone(); let compensation_millis = self.0.compensation_millis; let closure = async move || { let curr_cycle = get_latest_block_slot_at_timestamp( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, + api_config.thread_count, + api_config.t0, + api_config.genesis_timestamp, MassaTime::now(compensation_millis)?, )? 
.unwrap_or_else(|| Slot::new(0, 0)) - .get_cycle(cfg.periods_per_cycle); + .get_cycle(api_config.periods_per_cycle); let mut staker_vec = execution_controller .get_cycle_active_rolls(curr_cycle) .into_iter() @@ -415,7 +413,7 @@ impl Endpoints for API { let in_pool = self.0.pool_command_sender.contains_operations(&ops); let api_cfg = self.0.api_settings; - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let graph_controller = self.0.graph_controller.clone(); let closure = async move || { if ops.len() as u64 > api_cfg.max_arguments { return Err(ApiError::TooManyArguments("too many arguments".into())); @@ -429,9 +427,7 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = consensus_command_sender - .get_block_statuses(&involved_blocks) - .await?; + let involved_block_statuses = graph_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() .zip(involved_block_statuses.into_iter()) @@ -498,7 +494,7 @@ impl Endpoints for API { // ask pool whether it carries the operations let in_pool = self.0.pool_command_sender.contains_endorsements(&eds); - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let graph_controller = self.0.graph_controller.clone(); let api_cfg = self.0.api_settings; let closure = async move || { if eds.len() as u64 > api_cfg.max_arguments { @@ -513,9 +509,7 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = consensus_command_sender - .get_block_statuses(&involved_blocks) - .await?; + let involved_block_statuses = graph_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() .zip(involved_block_statuses.into_iter()) @@ -556,7 +550,7 @@ impl Endpoints for API { /// gets a block. Returns None if not found /// only active blocks are returned fn get_block(&self, id: BlockId) -> BoxFuture> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let graph_controller = self.0.graph_controller.clone(); let storage = self.0.storage.clone_without_refs(); let closure = async move || { let block = match storage.read_blocks().get(&id).cloned() { @@ -566,9 +560,8 @@ impl Endpoints for API { } }; - let graph_status = consensus_command_sender - .get_block_statuses(&[id]) - .await? + let graph_status = graph_controller + .get_block_statuses(&vec![id]) .into_iter() .next() .expect("expected get_block_statuses to return one element"); @@ -597,10 +590,10 @@ impl Endpoints for API { &self, slot: Slot, ) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let graph_controller = self.0.graph_controller.clone(); let storage = self.0.storage.clone_without_refs(); let closure = async move || { - let block_id = match consensus_command_sender.get_blockclique_block_at_slot(slot)? 
{ + let block_id = match graph_controller.get_blockclique_block_at_slot(slot) { Some(id) => id, None => return Ok(None), }; @@ -619,20 +612,18 @@ impl Endpoints for API { &self, time: TimeInterval, ) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); - let consensus_settings = self.0.consensus_config.clone(); + let graph_controller = self.0.graph_controller.clone(); + let api_config = self.0.api_settings.clone(); let closure = async move || { // filter blocks from graph_export let (start_slot, end_slot) = time_range_to_slot_range( - consensus_settings.thread_count, - consensus_settings.t0, - consensus_settings.genesis_timestamp, + api_config.thread_count, + api_config.t0, + api_config.genesis_timestamp, time.start, time.end, )?; - let graph = consensus_command_sender - .get_block_graph_status(start_slot, end_slot) - .await?; + let graph = graph_controller.get_block_graph_status(start_slot, end_slot)?; let mut res = Vec::with_capacity(graph.active_blocks.len()); let blockclique = graph .max_cliques @@ -740,9 +731,9 @@ impl Endpoints for API { // get future draws from selector let selection_draws = { let cur_slot = timeslots::get_current_latest_block_slot( - self.0.consensus_config.thread_count, - self.0.consensus_config.t0, - self.0.consensus_config.genesis_timestamp, + self.0.api_settings.thread_count, + self.0.api_settings.t0, + self.0.api_settings.genesis_timestamp, self.0.compensation_millis, ) .expect("could not get latest current slot") @@ -786,7 +777,7 @@ impl Endpoints for API { res.push(AddressInfo { // general address info address, - thread: address.get_thread(self.0.consensus_config.thread_count), + thread: address.get_thread(self.0.api_settings.thread_count), // final execution info final_balance: execution_infos.final_balance, diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index a52c28ec6ad..a26e8c40724 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -11,6 +11,7 @@ async-speed-limit = { git = "https://github.com/adrien-zinger/async-speed-limit" "default", "tokio", ] } +fix-hidden-lifetime-bug = "0.2.5" displaydoc = "0.2" futures = "0.3" num_enum = "0.5" @@ -28,6 +29,7 @@ massa_async_pool = { path = "../massa-async-pool" } massa_consensus_exports = { path = "../massa-consensus-exports" } massa_final_state = { path = "../massa-final-state" } massa_graph = { path = "../massa-graph" } +massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_hash = { path = "../massa-hash" } massa_ledger_exports = { path = "../massa-ledger-exports" } massa_logging = { path = "../massa-logging" } diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index d072dccb0f6..b1a9554d250 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -4,6 +4,7 @@ use crate::messages::{BootstrapClientMessage, BootstrapServerMessage}; use displaydoc::Display; use massa_consensus_exports::error::ConsensusError; use massa_final_state::FinalStateError; +use massa_graph::error::GraphError; use massa_hash::MassaHashError; use massa_network_exports::NetworkError; use massa_pos_exports::PosError; @@ -30,6 +31,8 @@ pub enum BootstrapError { UnexpectedConnectionDrop, /// `massa_hash` error: {0} MassaHashError(#[from] MassaHashError), + /// `massa_graph` error: {0} + MassaGraphError(#[from] GraphError), /// `massa_signature` error {0} MassaSignatureError(#[from] massa_signature::MassaSignatureError), /// time error: {0} diff --git a/massa-bootstrap/src/lib.rs 
b/massa-bootstrap/src/lib.rs index d2bd466cffe..97d8003d51e 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -14,6 +14,9 @@ #![feature(map_first_last)] #![feature(let_chains)] +#[macro_use] +extern crate fix_hidden_lifetime_bug; + pub use establisher::types::Establisher; use massa_final_state::FinalState; use massa_graph::BootstrapableGraph; diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index d59fff81145..59b2df44c6d 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -8,8 +8,8 @@ use std::{ use futures::stream::FuturesUnordered; use futures::StreamExt; use massa_async_pool::AsyncMessageId; -use massa_consensus_exports::ConsensusCommandSender; use massa_final_state::{ExecutedOpsStreamingStep, FinalState}; +use massa_graph_2_exports::GraphController; use massa_ledger_exports::get_address_from_key; use massa_logging::massa_trace; use massa_models::{slot::Slot, version::Version}; @@ -52,7 +52,7 @@ impl BootstrapManager { /// start a bootstrap server. /// Once your node will be ready, you may want other to bootstrap from you. pub async fn start_bootstrap_server( - consensus_command_sender: ConsensusCommandSender, + graph_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, bootstrap_config: BootstrapConfig, @@ -66,7 +66,7 @@ pub async fn start_bootstrap_server( let (manager_tx, manager_rx) = mpsc::channel::<()>(1); let join_handle = tokio::spawn(async move { BootstrapServer { - consensus_command_sender, + graph_controller, network_command_sender, final_state, establisher, @@ -91,7 +91,7 @@ pub async fn start_bootstrap_server( } struct BootstrapServer { - consensus_command_sender: ConsensusCommandSender, + graph_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, establisher: Establisher, @@ -208,14 +208,14 @@ impl BootstrapServer { let compensation_millis = self.compensation_millis; let version = self.version; let data_execution = self.final_state.clone(); - let consensus_command_sender = self.consensus_command_sender.clone(); + let graph_controller = self.graph_controller.clone(); let network_command_sender = self.network_command_sender.clone(); let keypair = self.keypair.clone(); let config = self.bootstrap_config.clone(); bootstrap_sessions.push(async move { let mut server = BootstrapServerBinder::new(dplx, keypair, config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes); - match manage_bootstrap(&config, &mut server, data_execution, compensation_millis, version, consensus_command_sender, network_command_sender).await { + match manage_bootstrap(&config, &mut server, data_execution, compensation_millis, version, graph_controller, network_command_sender).await { Ok(_) => { info!("bootstrapped peer {}", remote_addr) }, @@ -410,13 +410,14 @@ pub async fn send_final_state_stream( } #[allow(clippy::too_many_arguments)] +#[fix_hidden_lifetime_bug] async fn manage_bootstrap( bootstrap_config: &BootstrapConfig, server: &mut BootstrapServerBinder, final_state: Arc>, compensation_millis: i64, version: Version, - consensus_command_sender: ConsensusCommandSender, + graph_controller: Box, network_command_sender: NetworkCommandSender, ) -> Result<(), BootstrapError> { massa_trace!("bootstrap.lib.manage_bootstrap", {}); @@ -519,7 +520,7 @@ async fn manage_bootstrap( match tokio::time::timeout( write_timeout, server.send(BootstrapServerMessage::ConsensusState { - graph: 
consensus_command_sender.get_bootstrap_state().await?, + graph: graph_controller.get_bootstrap_graph()?, }), ) .await diff --git a/massa-factory-exports/Cargo.toml b/massa-factory-exports/Cargo.toml index 58648d00fa4..17dbe926558 100644 --- a/massa-factory-exports/Cargo.toml +++ b/massa-factory-exports/Cargo.toml @@ -24,6 +24,7 @@ massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_pos_exports = { path = "../massa-pos-exports" } massa_consensus_exports = { path = "../massa-consensus-exports" } +massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } massa_execution_exports = { path = "../massa-execution-exports" } diff --git a/massa-factory-exports/src/types.rs b/massa-factory-exports/src/types.rs index 591b7ac4ea9..56f4fd0a16a 100644 --- a/massa-factory-exports/src/types.rs +++ b/massa-factory-exports/src/types.rs @@ -1,4 +1,4 @@ -use massa_consensus_exports::ConsensusCommandSender; +use massa_graph_2_exports::GraphController; use massa_models::block::Block; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; @@ -14,8 +14,8 @@ pub type ProductionHistory = Vec; pub struct FactoryChannels { /// selector controller to get draws pub selector: Box, - /// consensus controller - pub consensus: ConsensusCommandSender, + /// graph controller + pub graph: Box, /// pool controller pub pool: Box, /// protocol controller diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index a0a094d0f7f..8055d3a94e5 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -143,12 +143,8 @@ impl BlockFactoryWorker { return; }; // get best parents and their periods - let parents: Vec<(BlockId, u64)> = self - .channels - .consensus - .get_best_parents() - .expect("Couldn't get best parents"); // Vec<(parent_id, parent_period)> - // generate the local storage object + let parents: Vec<(BlockId, u64)> = self.channels.graph.get_best_parents(); // Vec<(parent_id, parent_period)> + // generate the local storage object let mut block_storage = self.channels.storage.clone_without_refs(); // claim block parents in local storage @@ -235,14 +231,9 @@ impl BlockFactoryWorker { ); // send full block to consensus - if self - .channels - .consensus - .send_block(block_id, slot, block_storage) - .is_err() - { - warn!("could not send produced block to consensus: channel error"); - } + self.channels + .graph + .register_block(block_id, slot, block_storage); } /// main run loop of the block creator thread diff --git a/massa-factory-worker/src/endorsement_factory.rs b/massa-factory-worker/src/endorsement_factory.rs index 0c3205eb0a9..40b8414f2fb 100644 --- a/massa-factory-worker/src/endorsement_factory.rs +++ b/massa-factory-worker/src/endorsement_factory.rs @@ -162,23 +162,10 @@ impl EndorsementFactoryWorker { } // get consensus block ID for that slot - let endorsed_block: BlockId = match self + let endorsed_block: BlockId = self .channels - .consensus - .get_latest_blockclique_block_at_slot(slot) - { - // error getting block ID at target slot - Err(_) => { - warn!( - "could not get latest blockclique block to create endorsement to be included at slot {}", - slot - ); - return; - } - - // latest block found - Ok(b_id) => b_id, - }; + .graph + .get_latest_blockclique_block_at_slot(slot); // produce endorsements let mut 
endorsements: Vec = Vec::with_capacity(producers_indices.len()); diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs index 9265980ead9..b20acc3934f 100644 --- a/massa-graph-2-exports/src/controller_trait.rs +++ b/massa-graph-2-exports/src/controller_trait.rs @@ -18,7 +18,7 @@ pub trait GraphController: Send + Sync { end_slot: Option, ) -> GraphResult; - fn get_block_statuses(&self, ids: Vec) -> Vec; + fn get_block_statuses(&self, ids: &Vec) -> Vec; fn get_cliques(&self) -> Vec; @@ -37,6 +37,18 @@ pub trait GraphController: Send + Sync { fn register_block_header(&self, block_id: BlockId, header: Wrapped); fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped); + + /// Returns a boxed clone of self. + /// Useful to allow cloning `Box`. + fn clone_box(&self) -> Box; +} + +/// Allow cloning `Box` +/// Uses `GraphController::clone_box` internally +impl Clone for Box { + fn clone(&self) -> Box { + self.clone_box() + } } /// Graph manager used to stop the graph thread diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index 6b8216dba09..e1c23016004 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -72,7 +72,7 @@ impl GraphController for GraphControllerImpl { /// /// # Returns: /// A vector of statuses sorted by the order of the block ids - fn get_block_statuses(&self, ids: Vec) -> Vec { + fn get_block_statuses(&self, ids: &Vec) -> Vec { let read_shared_state = self.shared_state.read(); ids.iter() .map(|id| read_shared_state.get_block_status(id)) @@ -183,4 +183,8 @@ impl GraphController for GraphControllerImpl { .command_sender .try_send(GraphCommand::MarkInvalidBlock(block_id, header)); } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } } diff --git a/massa-graph-2-worker/src/lib.rs b/massa-graph-2-worker/src/lib.rs index fd4291bf512..589f3b77c2d 100644 --- a/massa-graph-2-worker/src/lib.rs +++ b/massa-graph-2-worker/src/lib.rs @@ -1,8 +1,9 @@ #![feature(deadline_api)] -#![feature(map_first_last)] mod commands; mod controller; mod manager; mod state; mod worker; + +pub use worker::start_graph_worker; diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index c99e5b93e67..02cc64e6902 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +crossbeam-channel = "0.5.6" anyhow = "1.0" enum-map = { version = "2.4", features = ["serde"] } lazy_static = "1.4" @@ -49,7 +50,8 @@ massa_time = { path = "../massa-time" } massa_wallet = { path = "../massa-wallet" } massa_factory_exports = { path = "../massa-factory-exports" } massa_factory_worker = { path = "../massa-factory-worker" } - +massa_graph_2_exports = { path = "../massa-graph-2-exports" } +massa_graph_2_worker = { path = "../massa-graph-2-worker" } # for more information on what are the following features used for, see the cargo.toml at workspace level [features] diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index f70928b809d..d63f76fb238 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -6,20 +6,19 @@ extern crate massa_logging; use crate::settings::SETTINGS; +use crossbeam_channel::{Receiver, TryRecvError}; use dialoguer::Password; use massa_api::{APIConfig, Private, Public, RpcServer, StopHandle, API}; use massa_async_pool::AsyncPoolConfig; use massa_bootstrap::{get_state, 
start_bootstrap_server, BootstrapConfig, BootstrapManager}; -use massa_consensus_exports::ConsensusManager; -use massa_consensus_exports::{ - events::ConsensusEvent, settings::ConsensusChannels, ConsensusConfig, ConsensusEventReceiver, -}; -use massa_consensus_worker::start_consensus_controller; use massa_execution_exports::{ExecutionConfig, ExecutionManager, StorageCostsConstants}; use massa_execution_worker::start_execution_worker; use massa_factory_exports::{FactoryChannels, FactoryConfig, FactoryManager}; use massa_factory_worker::start_factory; use massa_final_state::{FinalState, FinalStateConfig}; +use massa_graph_2_exports::events::GraphEvent; +use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphManager}; +use massa_graph_2_worker::start_graph_worker; use massa_ledger_exports::LedgerConfig; use massa_ledger_worker::FinalLedger; use massa_logging::massa_trace; @@ -57,21 +56,22 @@ use massa_wallet::Wallet; use parking_lot::RwLock; use std::path::PathBuf; use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; use std::{path::Path, process, sync::Arc}; use structopt::StructOpt; use tokio::signal; use tokio::sync::mpsc; use tracing::{error, info, warn}; use tracing_subscriber::filter::{filter_fn, LevelFilter}; - mod settings; async fn launch( node_wallet: Arc>, ) -> ( - ConsensusEventReceiver, + Receiver, Option, - ConsensusManager, + Box, Box, Box, Box, @@ -355,7 +355,7 @@ async fn launch( max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, }; - let (protocol_command_sender, protocol_event_receiver, protocol_manager) = + let (protocol_command_sender, _protocol_event_receiver, protocol_manager) = start_protocol_controller( protocol_config, network_command_sender.clone(), @@ -366,8 +366,48 @@ async fn launch( .await .expect("could not start protocol controller"); - // init consensus configuration - let consensus_config = ConsensusConfig { + // // init consensus configuration + // let consensus_config = ConsensusConfig { + // genesis_timestamp: *GENESIS_TIMESTAMP, + // end_timestamp: *END_TIMESTAMP, + // thread_count: THREAD_COUNT, + // t0: T0, + // genesis_key: GENESIS_KEY.clone(), + // max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, + // future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, + // max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, + // max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, + // delta_f0: DELTA_F0, + // operation_validity_periods: OPERATION_VALIDITY_PERIODS, + // periods_per_cycle: PERIODS_PER_CYCLE, + // stats_timespan: SETTINGS.consensus.stats_timespan, + // max_send_wait: SETTINGS.consensus.max_send_wait, + // force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, + // endorsement_count: ENDORSEMENT_COUNT, + // block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, + // max_item_return_count: SETTINGS.consensus.max_item_return_count, + // max_gas_per_block: MAX_GAS_PER_BLOCK, + // channel_size: CHANNEL_SIZE, + // }; + // // launch consensus controller + // let (consensus_command_sender, consensus_event_receiver, consensus_manager) = + // start_consensus_controller( + // consensus_config.clone(), + // ConsensusChannels { + // execution_controller: execution_controller.clone(), + // protocol_command_sender: protocol_command_sender.clone(), + // 
protocol_event_receiver, + // pool_command_sender: pool_controller.clone(), + // selector_controller: selector_controller.clone(), + // }, + // bootstrap_state.graph, + // shared_storage.clone(), + // bootstrap_state.compensation_millis, + // ) + // .await + // .expect("could not start consensus controller"); + + let graph_config = GraphConfig { genesis_timestamp: *GENESIS_TIMESTAMP, end_timestamp: *END_TIMESTAMP, thread_count: THREAD_COUNT, @@ -388,24 +428,24 @@ async fn launch( max_item_return_count: SETTINGS.consensus.max_item_return_count, max_gas_per_block: MAX_GAS_PER_BLOCK, channel_size: CHANNEL_SIZE, + clock_compensation_millis: bootstrap_state.compensation_millis, }; - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - consensus_config.clone(), - ConsensusChannels { - execution_controller: execution_controller.clone(), - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller: selector_controller.clone(), - }, - bootstrap_state.graph, - shared_storage.clone(), - bootstrap_state.compensation_millis, - ) - .await - .expect("could not start consensus controller"); + + let (graph_event_sender, graph_event_receiver) = crossbeam_channel::bounded(CHANNEL_SIZE); + let graph_channels = GraphChannels { + execution_controller: execution_controller.clone(), + selector_controller: selector_controller.clone(), + pool_command_sender: pool_controller.clone(), + controller_event_tx: graph_event_sender, + protocol_command_sender: protocol_command_sender.clone(), + }; + + let (graph_controller, graph_manager) = start_graph_worker( + graph_config, + graph_channels, + bootstrap_state.graph, + shared_storage.clone(), + ); // launch factory let factory_config = FactoryConfig { @@ -419,7 +459,7 @@ async fn launch( }; let factory_channels = FactoryChannels { selector: selector_controller.clone(), - consensus: consensus_command_sender.clone(), + graph: graph_controller.clone(), pool: pool_controller.clone(), protocol: protocol_command_sender.clone(), storage: shared_storage.clone(), @@ -428,7 +468,7 @@ async fn launch( // launch bootstrap server let bootstrap_manager = start_bootstrap_server( - consensus_command_sender.clone(), + graph_controller.clone(), network_command_sender.clone(), final_state.clone(), bootstrap_config, @@ -451,25 +491,26 @@ async fn launch( max_op_datastore_value_length: MAX_OPERATION_DATASTORE_VALUE_LENGTH, max_function_name_length: MAX_FUNCTION_NAME_LENGTH, max_parameter_size: MAX_PARAMETERS_SIZE, + thread_count: THREAD_COUNT, + genesis_timestamp: *GENESIS_TIMESTAMP, + t0: T0, + periods_per_cycle: PERIODS_PER_CYCLE, }; // spawn private API let (api_private, api_private_stop_rx) = API::::new( - consensus_command_sender.clone(), network_command_sender.clone(), execution_controller.clone(), api_config, - consensus_config.clone(), node_wallet, ); let api_private_handle = api_private.serve(&SETTINGS.api.bind_private); // spawn public API let api_public = API::::new( - consensus_command_sender.clone(), + graph_controller.clone(), execution_controller.clone(), api_config, selector_controller.clone(), - consensus_config, pool_controller.clone(), protocol_command_sender.clone(), network_config, @@ -511,9 +552,9 @@ async fn launch( .expect("failed to spawn thread : deadlock-detection"); } ( - consensus_event_receiver, + graph_event_receiver, bootstrap_manager, - consensus_manager, + graph_manager, 
execution_manager, selector_manager, pool_manager, @@ -528,7 +569,7 @@ async fn launch( struct Managers { bootstrap_manager: Option, - consensus_manager: ConsensusManager, + graph_manager: Box, execution_manager: Box, selector_manager: Box, pool_manager: Box, @@ -538,11 +579,11 @@ struct Managers { } async fn stop( - consensus_event_receiver: ConsensusEventReceiver, + _graph_event_receiver: Receiver, Managers { bootstrap_manager, mut execution_manager, - consensus_manager, + mut graph_manager, mut selector_manager, mut pool_manager, protocol_manager, @@ -569,10 +610,7 @@ async fn stop( // stop factory factory_manager.stop(); - let protocol_event_receiver = consensus_manager - .stop(consensus_event_receiver) - .await - .expect("consensus shutdown failed"); + graph_manager.stop(); // stop pool pool_manager.stop(); @@ -589,7 +627,7 @@ async fn stop( // stop protocol controller let network_event_receiver = protocol_manager - .stop(protocol_event_receiver) + .stop() .await .expect("protocol shutdown failed"); @@ -683,9 +721,9 @@ async fn run(args: Args) -> anyhow::Result<()> { loop { let ( - mut consensus_event_receiver, + graph_event_receiver, bootstrap_manager, - consensus_manager, + graph_manager, execution_manager, selector_manager, pool_manager, @@ -698,43 +736,58 @@ async fn run(args: Args) -> anyhow::Result<()> { ) = launch(node_wallet.clone()).await; // interrupt signal listener - let stop_signal = signal::ctrl_c(); - tokio::pin!(stop_signal); + let (tx, rx) = crossbeam_channel::bounded(1); + let interrupt_signal_listener = tokio::spawn(async move { + signal::ctrl_c().await.unwrap(); + tx.send(()).unwrap(); + }); + // loop over messages let restart = loop { massa_trace!("massa-node.main.run.select", {}); - tokio::select! { - evt = consensus_event_receiver.wait_event() => { - massa_trace!("massa-node.main.run.select.consensus_event", {}); - match evt { - Ok(ConsensusEvent::NeedSync) => { - warn!("in response to a desynchronization, the node is going to bootstrap again"); - break true; - }, - Err(err) => { - error!("consensus_event_receiver.wait_event error: {}", err); - break false; - } + match graph_event_receiver.try_recv() { + Ok(evt) => match evt { + GraphEvent::NeedSync => { + warn!("in response to a desynchronization, the node is going to bootstrap again"); + break true; } }, - - _ = &mut stop_signal => { - massa_trace!("massa-node.main.run.select.stop", {}); - info!("interrupt signal received"); + Err(TryRecvError::Disconnected) => { + error!("consensus_event_receiver.wait_event disconnected"); break false; } + _ => {} + }; - _ = api_private_stop_rx.recv() => { + match api_private_stop_rx.try_recv() { + Ok(_) => { info!("stop command received from private API"); break false; } + Err(tokio::sync::mpsc::error::TryRecvError::Disconnected) => { + error!("api_private_stop_rx disconnected"); + break false; + } + _ => {} + } + match rx.try_recv() { + Ok(_) => { + info!("interrupt signal received"); + break false; + } + Err(crossbeam_channel::TryRecvError::Disconnected) => { + error!("interrupt_signal_listener disconnected"); + break false; + } + _ => {} } + sleep(Duration::from_millis(100)); }; stop( - consensus_event_receiver, + graph_event_receiver, Managers { bootstrap_manager, - consensus_manager, + graph_manager, execution_manager, selector_manager, pool_manager, @@ -750,6 +803,7 @@ async fn run(args: Args) -> anyhow::Result<()> { if !restart { break; } + interrupt_signal_listener.abort(); } Ok(()) } diff --git a/massa-protocol-exports/src/protocol_controller.rs 
b/massa-protocol-exports/src/protocol_controller.rs index e9e854781ec..67fb69a1463 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -217,12 +217,13 @@ impl ProtocolManager { /// Stop the protocol controller pub async fn stop( self, - protocol_event_receiver: ProtocolEventReceiver, + //TODO: FIX + //protocol_event_receiver: ProtocolEventReceiver, //protocol_pool_event_receiver: ProtocolPoolEventReceiver, ) -> Result { info!("stopping protocol controller..."); drop(self.manager_tx); - let _remaining_events = protocol_event_receiver.drain().await; + //let _remaining_events = protocol_event_receiver.drain().await; let network_event_receiver = self.join_handle.await??; info!("protocol controller stopped"); Ok(network_event_receiver) From 1bf138395fe6e6bce108bb4a0fc84d69c5c66f79 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 18 Oct 2022 15:26:08 +0200 Subject: [PATCH 22/40] Fix stopping consensus. --- massa-client/base_config/config.toml | 4 ++-- massa-graph-2-worker/src/commands.rs | 2 +- massa-graph-2-worker/src/manager.rs | 8 +++----- massa-graph-2-worker/src/worker/mod.rs | 3 +-- 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/massa-client/base_config/config.toml b/massa-client/base_config/config.toml index 089474105d7..6f558ecbe2c 100644 --- a/massa-client/base_config/config.toml +++ b/massa-client/base_config/config.toml @@ -3,7 +3,7 @@ history_file_path = "config/.massa_history" timeout = 1000 [default_node] -#ip = "145.239.66.206" -ip = "127.0.0.1" +ip = "158.69.23.120" +#ip = "127.0.0.1" private_port = 33034 public_port = 33035 \ No newline at end of file diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs index 2f690a0cbf6..ba0809f5243 100644 --- a/massa-graph-2-worker/src/commands.rs +++ b/massa-graph-2-worker/src/commands.rs @@ -9,5 +9,5 @@ use massa_storage::Storage; pub enum GraphCommand { RegisterBlock(BlockId, Slot, Storage), RegisterBlockHeader(BlockId, Wrapped), - MarkInvalidBlock(BlockId, Wrapped), + MarkInvalidBlock(BlockId, Wrapped) } diff --git a/massa-graph-2-worker/src/manager.rs b/massa-graph-2-worker/src/manager.rs index 52da5097114..b850e3c50f8 100644 --- a/massa-graph-2-worker/src/manager.rs +++ b/massa-graph-2-worker/src/manager.rs @@ -5,17 +5,15 @@ use tracing::log::info; use crate::commands::GraphCommand; pub struct GraphManagerImpl { - pub thread_graph: Option>, - pub graph_command_sender: SyncSender, + pub thread_graph: Option<(SyncSender, JoinHandle<()>)>, } impl GraphManager for GraphManagerImpl { fn stop(&mut self) { info!("stopping graph worker..."); - //TODO: Stop graph command sender - //drop(self.graph_command_sender); // join the graph thread - if let Some(join_handle) = self.thread_graph.take() { + if let Some((tx, join_handle)) = self.thread_graph.take() { + drop(tx); join_handle .join() .expect("graph thread panicked on try to join"); diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 9a53aa63c41..0839a29b0f2 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -105,8 +105,7 @@ pub fn start_graph_worker( .expect("Can't spawn thread graph."); let manager = GraphManagerImpl { - thread_graph: Some(thread_graph), - graph_command_sender: tx.clone(), + thread_graph: Some((tx.clone(), thread_graph)), }; let controller = GraphControllerImpl::new(tx, shared_state); From 5eac870df3402c4bfd6c84d63fde3834b5959229 Mon Sep 17 00:00:00 2001 
From: AurelienFT Date: Wed, 19 Oct 2022 15:32:30 +0200 Subject: [PATCH 23/40] Start replacing module everywhere. --- Cargo.lock | 83 +- Cargo.toml | 3 - massa-api/Cargo.toml | 2 - massa-api/src/error.rs | 9 - massa-api/src/public.rs | 4 +- massa-bootstrap/Cargo.toml | 3 - massa-bootstrap/src/error.rs | 3 - massa-bootstrap/src/messages.rs | 2 +- massa-consensus-exports/Cargo.toml | 40 - massa-consensus-exports/src/commands.rs | 70 - .../src/consensus_controller.rs | 268 -- massa-consensus-exports/src/error.rs | 68 - massa-consensus-exports/src/events.rs | 6 - massa-consensus-exports/src/lib.rs | 29 - massa-consensus-exports/src/settings.rs | 218 -- .../src/test_exports/mock.rs | 55 - .../src/test_exports/mod.rs | 8 - .../src/test_exports/tools.rs | 70 - massa-consensus-worker/Cargo.toml | 55 - .../src/consensus_worker.rs | 785 ----- massa-consensus-worker/src/lib.rs | 16 - .../src/tests/block_factory.rs | 139 - .../tests/inter_cycle_batch_finalization.rs | 199 -- massa-consensus-worker/src/tests/mod.rs | 25 - .../src/tests/scenario_block_creation.rs | 849 ----- .../src/tests/scenario_roll.rs | 974 ------ .../src/tests/scenarios106.rs | 869 ------ .../src/tests/scenarios91_1.rs | 446 --- .../src/tests/scenarios91_2.rs | 514 --- .../src/tests/scenarios_basic.rs | 261 -- .../src/tests/scenarios_endorsements.rs | 196 -- .../src/tests/scenarios_get_operations.rs | 201 -- .../tests/scenarios_get_selection_draws.rs | 65 - .../src/tests/scenarios_header_check.rs | 123 - .../src/tests/scenarios_incompatibilities.rs | 311 -- .../tests/scenarios_note_attack_attempt.rs | 156 - .../src/tests/scenarios_operations_check.rs | 203 -- .../src/tests/scenarios_parents.rs | 206 -- .../src/tests/scenarios_pool_commands.rs | 460 --- .../src/tests/scenarios_pruning.rs | 196 -- .../src/tests/scenarios_reward_split.rs | 295 -- .../src/tests/scenarios_send_block.rs | 133 - .../src/tests/scenarios_wishlist.rs | 140 - .../src/tests/test_block_graph.rs | 174 -- massa-consensus-worker/src/tests/tools.rs | 1056 ------- massa-consensus-worker/src/tools.rs | 107 - massa-factory-exports/Cargo.toml | 1 - massa-factory-worker/Cargo.toml | 4 +- massa-graph-2-exports/Cargo.toml | 5 +- .../src/bootstrapable_graph.rs | 0 massa-graph-2-exports/src/controller_trait.rs | 2 +- .../src/error.rs | 0 .../src/export_active_block.rs | 0 massa-graph-2-exports/src/lib.rs | 3 + massa-graph-2-worker/Cargo.toml | 5 +- massa-graph-2-worker/src/commands.rs | 2 +- massa-graph-2-worker/src/controller.rs | 10 +- .../src/state/verifications.rs | 12 +- massa-graph-2-worker/src/worker/init.rs | 16 +- massa-graph-2-worker/src/worker/main_loop.rs | 2 +- massa-graph-2-worker/src/worker/mod.rs | 6 +- massa-graph/Cargo.toml | 28 - massa-graph/src/block_graph.rs | 2742 ----------------- massa-graph/src/lib.rs | 26 - massa-graph/src/settings.rs | 34 - massa-node/Cargo.toml | 5 +- massa-node/src/main.rs | 20 +- massa-protocol-exports/src/error.rs | 3 - massa-protocol-exports/src/lib.rs | 4 +- .../src/protocol_controller.rs | 68 +- massa-protocol-exports/src/tests/tools.rs | 58 +- massa-protocol-worker/Cargo.toml | 1 + massa-protocol-worker/src/protocol_network.rs | 24 +- massa-protocol-worker/src/protocol_worker.rs | 49 +- 74 files changed, 99 insertions(+), 13126 deletions(-) delete mode 100644 massa-consensus-exports/Cargo.toml delete mode 100644 massa-consensus-exports/src/commands.rs delete mode 100644 massa-consensus-exports/src/consensus_controller.rs delete mode 100644 massa-consensus-exports/src/error.rs delete mode 100644 
massa-consensus-exports/src/events.rs delete mode 100644 massa-consensus-exports/src/lib.rs delete mode 100644 massa-consensus-exports/src/settings.rs delete mode 100644 massa-consensus-exports/src/test_exports/mock.rs delete mode 100644 massa-consensus-exports/src/test_exports/mod.rs delete mode 100644 massa-consensus-exports/src/test_exports/tools.rs delete mode 100644 massa-consensus-worker/Cargo.toml delete mode 100644 massa-consensus-worker/src/consensus_worker.rs delete mode 100644 massa-consensus-worker/src/lib.rs delete mode 100644 massa-consensus-worker/src/tests/block_factory.rs delete mode 100644 massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs delete mode 100644 massa-consensus-worker/src/tests/mod.rs delete mode 100644 massa-consensus-worker/src/tests/scenario_block_creation.rs delete mode 100644 massa-consensus-worker/src/tests/scenario_roll.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios106.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios91_1.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios91_2.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_basic.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_endorsements.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_get_operations.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_header_check.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_incompatibilities.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_operations_check.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_parents.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_pool_commands.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_pruning.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_reward_split.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_send_block.rs delete mode 100644 massa-consensus-worker/src/tests/scenarios_wishlist.rs delete mode 100644 massa-consensus-worker/src/tests/test_block_graph.rs delete mode 100644 massa-consensus-worker/src/tests/tools.rs delete mode 100644 massa-consensus-worker/src/tools.rs rename {massa-graph => massa-graph-2-exports}/src/bootstrapable_graph.rs (100%) rename {massa-graph => massa-graph-2-exports}/src/error.rs (100%) rename {massa-graph => massa-graph-2-exports}/src/export_active_block.rs (100%) delete mode 100644 massa-graph/Cargo.toml delete mode 100644 massa-graph/src/block_graph.rs delete mode 100644 massa-graph/src/lib.rs delete mode 100644 massa-graph/src/settings.rs diff --git a/Cargo.lock b/Cargo.lock index adcbc06f378..756647dd29e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1602,8 +1602,6 @@ dependencies = [ "massa_api", "massa_async_pool", "massa_bootstrap", - "massa_consensus_exports", - "massa_consensus_worker", "massa_execution_exports", "massa_execution_worker", "massa_factory_exports", @@ -1668,9 +1666,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-derive", "jsonrpc-http-server", - "massa_consensus_exports", "massa_execution_exports", - "massa_graph", "massa_graph_2_exports", "massa_hash", "massa_models", @@ -1725,9 +1721,7 @@ dependencies = [ "futures 0.3.24", "lazy_static", "massa_async_pool", - "massa_consensus_exports", "massa_final_state", - "massa_graph", "massa_graph_2_exports", "massa_hash", 
"massa_ledger_exports", @@ -1768,53 +1762,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "massa_consensus_exports" -version = "0.1.0" -dependencies = [ - "displaydoc", - "massa_cipher", - "massa_execution_exports", - "massa_graph", - "massa_models", - "massa_pool_exports", - "massa_pos_exports", - "massa_protocol_exports", - "massa_signature", - "massa_storage", - "massa_time", - "serde_json", - "tempfile", - "thiserror", - "tokio", -] - -[[package]] -name = "massa_consensus_worker" -version = "0.1.0" -dependencies = [ - "massa_cipher", - "massa_consensus_exports", - "massa_execution_exports", - "massa_graph", - "massa_hash", - "massa_logging", - "massa_models", - "massa_pool_exports", - "massa_pos_exports", - "massa_pos_worker", - "massa_protocol_exports", - "massa_serialization", - "massa_signature", - "massa_storage", - "massa_time", - "parking_lot", - "serde_json", - "serial_test 0.9.0", - "tokio", - "tracing", -] - [[package]] name = "massa_execution_exports" version = "0.1.0" @@ -1865,7 +1812,6 @@ version = "0.1.0" dependencies = [ "anyhow", "displaydoc", - "massa_consensus_exports", "massa_execution_exports", "massa_graph_2_exports", "massa_hash", @@ -1891,7 +1837,6 @@ name = "massa_factory_worker" version = "0.1.0" dependencies = [ "anyhow", - "massa_consensus_exports", "massa_factory_exports", "massa_hash", "massa_models", @@ -1929,14 +1874,15 @@ dependencies = [ ] [[package]] -name = "massa_graph" +name = "massa_graph_2_exports" version = "0.1.0" dependencies = [ + "crossbeam-channel", "displaydoc", "massa_execution_exports", "massa_hash", - "massa_logging", "massa_models", + "massa_pool_exports", "massa_pos_exports", "massa_protocol_exports", "massa_serialization", @@ -1944,30 +1890,9 @@ dependencies = [ "massa_storage", "massa_time", "nom 7.1.1", - "num", "serde 1.0.145", "serde_json", "thiserror", - "tracing", -] - -[[package]] -name = "massa_graph_2_exports" -version = "0.1.0" -dependencies = [ - "crossbeam-channel", - "displaydoc", - "massa_execution_exports", - "massa_graph", - "massa_models", - "massa_pool_exports", - "massa_pos_exports", - "massa_protocol_exports", - "massa_signature", - "massa_storage", - "massa_time", - "serde 1.0.145", - "serde_json", ] [[package]] @@ -1975,7 +1900,6 @@ name = "massa_graph_2_worker" version = "0.1.0" dependencies = [ "displaydoc", - "massa_graph", "massa_graph_2_exports", "massa_hash", "massa_logging", @@ -2209,6 +2133,7 @@ version = "0.1.0" dependencies = [ "futures 0.3.24", "lazy_static", + "massa_graph_2_exports", "massa_hash", "massa_logging", "massa_models", diff --git a/Cargo.toml b/Cargo.toml index ce5c152f2ea..55a6de4e949 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,15 +5,12 @@ members = [ "massa-bootstrap", "massa-client", "massa-cipher", - "massa-consensus-exports", - "massa-consensus-worker", "massa-execution-exports", "massa-execution-worker", "massa-factory-exports", "massa-factory-worker", "massa-graph-2-exports", "massa-graph-2-worker", - "massa-graph", "massa-hash", "massa-logging", "massa-models", diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index 867f24291ad..0b80cacffc3 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -17,8 +17,6 @@ tracing = "0.1" itertools = "0.10" parking_lot = { version = "0.12", features = ["deadlock_detection"] } # custom modules -massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_graph = { path = "../massa-graph" } massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_hash = { path = "../massa-hash" } massa_models = { 
path = "../massa-models" } diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index 996125d97ea..abdb186220e 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -1,7 +1,6 @@ // Copyright (c) 2022 MASSA LABS use displaydoc::Display; -use massa_consensus_exports::error::ConsensusError; use massa_execution_exports::ExecutionError; use massa_graph::error::GraphError; use massa_hash::MassaHashError; @@ -23,8 +22,6 @@ pub enum ApiError { ReceiveChannelError(String), /// `massa_hash` error: {0} MassaHashError(#[from] MassaHashError), - /// consensus error: {0} - ConsensusError(#[from] Box), /// graph error: {0} GraphError(#[from] GraphError), /// execution error: {0} @@ -60,9 +57,3 @@ impl From for jsonrpc_core::Error { } } } - -impl std::convert::From for ApiError { - fn from(err: ConsensusError) -> Self { - ApiError::ConsensusError(Box::new(err)) - } -} diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 51c38680933..aa80e8535b8 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -310,7 +310,7 @@ impl Endpoints for API { )?; let execution_stats = execution_controller.get_stats(); - let consensus_stats = graph_controller.get_stats()?; + let graph_stats = graph_controller.get_stats()?; let (network_stats, peers) = tokio::join!( network_command_sender.get_network_stats(), @@ -341,7 +341,7 @@ impl Endpoints for API { .unwrap_or_else(|| Slot::new(0, 0)) .get_next_slot(api_config.thread_count)?, execution_stats, - consensus_stats: consensus_stats, + consensus_stats: graph_stats, network_stats: network_stats?, pool_stats, config, diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index a26e8c40724..ce25981cf08 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -26,9 +26,7 @@ tracing = "0.1" # custom modules massa_async_pool = { path = "../massa-async-pool" } -massa_consensus_exports = { path = "../massa-consensus-exports" } massa_final_state = { path = "../massa-final-state" } -massa_graph = { path = "../massa-graph" } massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_hash = { path = "../massa-hash" } massa_ledger_exports = { path = "../massa-ledger-exports" } @@ -62,7 +60,6 @@ testing = [ ] sandbox = [ "massa_async_pool/sandbox", - "massa_consensus_exports/sandbox", "massa_final_state/sandbox", "massa_models/sandbox", ] diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index b1a9554d250..a421fdfd059 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -2,7 +2,6 @@ use crate::messages::{BootstrapClientMessage, BootstrapServerMessage}; use displaydoc::Display; -use massa_consensus_exports::error::ConsensusError; use massa_final_state::FinalStateError; use massa_graph::error::GraphError; use massa_hash::MassaHashError; @@ -37,8 +36,6 @@ pub enum BootstrapError { MassaSignatureError(#[from] massa_signature::MassaSignatureError), /// time error: {0} TimeError(#[from] TimeError), - /// consensus error: {0} - ConsensusError(#[from] ConsensusError), /// network error: {0} NetworkError(#[from] NetworkError), /// final state error: {0} diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index 596ecc03a8d..f2b71b5b753 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -6,7 +6,7 @@ use massa_final_state::{ ExecutedOpsStreamingStepSerializer, StateChanges, StateChangesDeserializer, StateChangesSerializer, }; -use massa_graph::{ +use massa_graph_2_exports::{ BootstrapableGraph, 
BootstrapableGraphDeserializer, BootstrapableGraphSerializer, }; use massa_ledger_exports::{KeyDeserializer, KeySerializer}; diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml deleted file mode 100644 index abb6f5e3d83..00000000000 --- a/massa-consensus-exports/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -name = "massa_consensus_exports" -version = "0.1.0" -authors = ["Massa Labs "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -displaydoc = "0.2" -thiserror = "1.0" -tokio = { version = "1.21", features = ["full"] } -tempfile = { version = "3.3", optional = true } # use with testing feature -serde_json = { version = "1.0", optional = true } # use with testing feature -# custom modules -massa_cipher = { path = "../massa-cipher" } -massa_execution_exports = { path = "../massa-execution-exports" } -massa_graph = { path = "../massa-graph" } -massa_models = { path = "../massa-models" } -massa_pool_exports = { path = "../massa-pool-exports" } -massa_pos_exports = { path = "../massa-pos-exports" } -massa_protocol_exports = { path = "../massa-protocol-exports" } -massa_signature = { path = "../massa-signature" } -massa_time = { path = "../massa-time" } -massa_storage = { path = "../massa-storage" } - -[dev-dependencies] -massa_models = { path = "../massa-models", features = ["testing"] } - -# for more information on what are the following features used for, see the cargo.toml at workspace level -[features] -sandbox = [ "massa_protocol_exports/sandbox" ] -testing = [ - "massa_models/testing", - "massa_execution_exports/testing", - "massa_pool_exports/testing", - "massa_protocol_exports/testing", - "tempfile", - "serde_json" -] diff --git a/massa-consensus-exports/src/commands.rs b/massa-consensus-exports/src/commands.rs deleted file mode 100644 index f43c900b9ed..00000000000 --- a/massa-consensus-exports/src/commands.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! Contains definitions of commands used by the controller -use massa_graph::{BlockGraphExport, BootstrapableGraph}; -use massa_models::api::BlockGraphStatus; -use massa_models::{block::BlockId, slot::Slot}; -use massa_models::{clique::Clique, stats::ConsensusStats}; -use massa_storage::Storage; -use tokio::sync::{mpsc, oneshot}; - -/// Commands that can be processed by consensus. -#[derive(Debug)] -pub enum ConsensusCommand { - /// Returns through a channel current blockgraph without block operations. 
- GetBlockGraphStatus { - /// optional start slot - slot_start: Option, - /// optional end slot - slot_end: Option, - /// response channel - response_tx: oneshot::Sender, - }, - /// Returns through a channel the graph statuses of a batch of blocks - GetBlockStatuses { - /// wanted block IDs - ids: Vec, - /// response channel - response_tx: oneshot::Sender>, - }, - /// Returns the bootstrap state - GetBootstrapState(mpsc::Sender>), - /// get current stats on consensus - GetStats(oneshot::Sender), - /// Get a block at a given slot in a blockclique - GetBlockcliqueBlockAtSlot { - /// wanted slot - slot: Slot, - /// response channel - response_tx: oneshot::Sender>, - }, - /// Get a block at a given slot in a blockclique - GetLatestBlockcliqueBlockAtSlot { - /// wanted slot - slot: Slot, - /// response channel - response_tx: oneshot::Sender, - }, - /// Get the best parents and their period - GetBestParents { - /// response channel - response_tx: oneshot::Sender>, - }, - /// Send a block - SendBlock { - /// block id - block_id: BlockId, - /// block slot - slot: Slot, - /// All the objects for the block - block_storage: Storage, - /// response channel - response_tx: oneshot::Sender<()>, - }, - /// Get cliques - GetCliques(oneshot::Sender>), -} - -/// Events that are emitted by consensus. -#[derive(Debug, Clone)] -pub enum ConsensusManagementCommand {} diff --git a/massa-consensus-exports/src/consensus_controller.rs b/massa-consensus-exports/src/consensus_controller.rs deleted file mode 100644 index 1a4397c495c..00000000000 --- a/massa-consensus-exports/src/consensus_controller.rs +++ /dev/null @@ -1,268 +0,0 @@ -//! Copyright (c) 2022 MASSA LABS - -use massa_graph::{BlockGraphExport, BootstrapableGraph}; -use massa_models::api::BlockGraphStatus; -use massa_models::{block::BlockId, slot::Slot}; -use massa_models::{clique::Clique, stats::ConsensusStats}; -use massa_protocol_exports::ProtocolEventReceiver; -use massa_storage::Storage; -use std::collections::VecDeque; - -use tokio::{ - sync::{mpsc, oneshot}, - task::JoinHandle, -}; - -use crate::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - error::ConsensusResult as Result, - events::ConsensusEvent, - ConsensusError, -}; - -/// Consensus commands sender -/// TODO Make private -#[derive(Clone)] -pub struct ConsensusCommandSender(pub mpsc::Sender); - -impl ConsensusCommandSender { - /// Gets all the available information on the block graph returning a `BlockGraphExport`. - /// - /// # Arguments - /// * `slot_start`: optional slot start for slot-based filtering (included). - /// * `slot_end`: optional slot end for slot-based filtering (excluded). - pub async fn get_block_graph_status( - &self, - slot_start: Option, - slot_end: Option, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel::(); - self.0 - .send(ConsensusCommand::GetBlockGraphStatus { - slot_start, - slot_end, - response_tx, - }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_block_graph_status".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_block_graph_status response read error".to_string(), - ) - }) - } - - /// Gets all cliques. 
- /// - pub async fn get_cliques(&self) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .send(ConsensusCommand::GetCliques(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_cliques".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_cliques response read error".to_string(), - ) - }) - } - - /// Gets the graph statuses of a batch of blocks. - /// - /// # Arguments - /// * ids: array of block IDs - pub async fn get_block_statuses( - &self, - ids: &[BlockId], - ) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .send(ConsensusCommand::GetBlockStatuses { - ids: ids.to_vec(), - response_tx, - }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_block_statuses".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_block_statuses response read error".to_string(), - ) - }) - } - - /// get bootstrap snapshot - pub async fn get_bootstrap_state(&self) -> Result { - let (response_tx, mut response_rx) = mpsc::channel::>(10); - self.0 - .send(ConsensusCommand::GetBootstrapState(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_bootstrap_state".into(), - ) - })?; - Ok(*response_rx.recv().await.ok_or_else(|| { - ConsensusError::ReceiveChannelError( - "consensus command get_bootstrap_state response read error".to_string(), - ) - })?) - } - - /// get best parents - pub fn get_best_parents(&self) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .blocking_send(ConsensusCommand::GetBestParents { response_tx }) - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_best_parents".into(), - ) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_best_parents response read error".to_string(), - ) - }) - } - - /// get block id of a slot in a blockclique - pub fn get_blockclique_block_at_slot( - &self, - slot: Slot, - ) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .blocking_send(ConsensusCommand::GetBlockcliqueBlockAtSlot { slot, response_tx }) - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_blockclique_block_at_slot".into(), - ) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_blockclique_block_at_slot response read error".to_string(), - ) - }) - } - - /// get latest block id of a slot in a blockclique - pub fn get_latest_blockclique_block_at_slot( - &self, - slot: Slot, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .blocking_send(ConsensusCommand::GetLatestBlockcliqueBlockAtSlot { slot, response_tx }) - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_blockclique_block_at_slot".into(), - ) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_blockclique_block_at_slot response read error".to_string(), - ) - }) - } - - /// get current consensus stats - pub async fn get_stats(&self) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - 
.send(ConsensusCommand::GetStats(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_stats".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_stats response read error".to_string(), - ) - }) - } - - ///send block - pub fn send_block( - &self, - block_id: BlockId, - slot: Slot, - block_storage: Storage, - ) -> Result<(), ConsensusError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .blocking_send(ConsensusCommand::SendBlock { - block_id, - slot, - block_storage, - response_tx, - }) - .map_err(|_| { - ConsensusError::SendChannelError("send error consensus command send_block".into()) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command send_block response read error".to_string(), - ) - }) - } -} - -/// channel to receive consensus events -pub struct ConsensusEventReceiver(pub mpsc::Receiver); - -impl ConsensusEventReceiver { - /// wait for the next event - pub async fn wait_event(&mut self) -> Result { - self.0 - .recv() - .await - .ok_or(ConsensusError::ControllerEventError) - } - - /// drains remaining events and returns them in a `VecDeque` - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque { - let mut remaining_events: VecDeque = VecDeque::new(); - - while let Some(evt) = self.0.recv().await { - remaining_events.push_back(evt); - } - remaining_events - } -} - -/// Consensus manager -pub struct ConsensusManager { - /// protocol handler - pub join_handle: JoinHandle>, - /// consensus management sender - pub manager_tx: mpsc::Sender, -} - -impl ConsensusManager { - /// stop consensus - pub async fn stop( - self, - consensus_event_receiver: ConsensusEventReceiver, - ) -> Result { - drop(self.manager_tx); - let _remaining_events = consensus_event_receiver.drain().await; - let protocol_event_receiver = self.join_handle.await??; - - Ok(protocol_event_receiver) - } -} diff --git a/massa-consensus-exports/src/error.rs b/massa-consensus-exports/src/error.rs deleted file mode 100644 index f74321ae503..00000000000 --- a/massa-consensus-exports/src/error.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use displaydoc::Display; -use massa_execution_exports::ExecutionError; -use massa_graph::error::GraphError; -use massa_models::error::ModelsError; -use massa_protocol_exports::ProtocolError; -use thiserror::Error; - -use crate::events::ConsensusEvent; - -/// Consensus -pub type ConsensusResult = core::result::Result; - -/// Internal error -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum InternalError { - /// transaction error {0} - TransactionError(String), -} - -/// Consensus errors -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum ConsensusError { - /// execution error: {0} - ExecutionError(#[from] ExecutionError), - /// models error: {0} - ModelsError(#[from] ModelsError), - /// configuration error: {0} - ConfigError(String), - /// Protocol error {0} - ProtocolError(#[from] Box), - /// failed retrieving consensus controller event - ControllerEventError, - /// Join error {0} - JoinError(#[from] tokio::task::JoinError), - /// Time error {0} - TimeError(#[from] massa_time::TimeError), - /// there was an inconsistency between containers {0} - ContainerInconsistency(String), - /// Send channel error : {0} - SendChannelError(String), - /// Receive channel error : {0} - ReceiveChannelError(String), - /// io 
error {0} - IOError(#[from] std::io::Error), - /// missing block {0} - MissingBlock(String), - /// block creation error {0} - BlockCreationError(String), - /// error sending consensus event: {0} - TokioSendError(#[from] tokio::sync::mpsc::error::SendError), - /// channel error: {0} - ChannelError(String), - /// Graph error: {0} - GraphError(#[from] GraphError), - /// slot overflow - SlotOverflowError, - /// `MassaCipher` error: {0} - MassaCipherError(#[from] massa_cipher::CipherError), -} - -impl std::convert::From for ConsensusError { - fn from(err: massa_protocol_exports::ProtocolError) -> Self { - ConsensusError::ProtocolError(Box::new(err)) - } -} diff --git a/massa-consensus-exports/src/events.rs b/massa-consensus-exports/src/events.rs deleted file mode 100644 index e48b4803379..00000000000 --- a/massa-consensus-exports/src/events.rs +++ /dev/null @@ -1,6 +0,0 @@ -/// Events that are emitted by consensus. -#[derive(Debug, Clone)] -pub enum ConsensusEvent { - /// probable desynchronization detected, need re-synchronization - NeedSync, -} diff --git a/massa-consensus-exports/src/lib.rs b/massa-consensus-exports/src/lib.rs deleted file mode 100644 index c25fef460ae..00000000000 --- a/massa-consensus-exports/src/lib.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -//! Consensus exports -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] - -pub use consensus_controller::{ConsensusCommandSender, ConsensusEventReceiver, ConsensusManager}; -pub use error::ConsensusError; -pub use settings::ConsensusConfig; - -mod consensus_controller; - -/// consensus errors -pub mod error; - -/// consensus settings -pub mod settings; - -/// consensus commands -pub mod commands; - -/// consensus events -pub mod events; - -/// consensus test tools -#[cfg(feature = "testing")] -pub mod test_exports; diff --git a/massa-consensus-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs deleted file mode 100644 index f581026dfc3..00000000000 --- a/massa-consensus-exports/src/settings.rs +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -#![allow(clippy::assertions_on_constants)] -//! Definition & Implementation of the consensus settings -//! ----------------------------------------------------- -//! -//! # Configurations -//! -//! * `setting`: read from user settings file -//! * `config`: merge of settings and hard-coded configuration that shouldn't be -//! modified by user. -//! -//! This file is allowed to use a lot of constants from `massa-models` as all -//! other files named `settings.rs` or `config.rs`. -//! -//! The `ConsensusSettings` is the most basic and complete configuration in the -//! node. You can get almost every configuration from that one. -//! -//! `From impl *`: -//! - `ConsensusConfig`: Create a configuration merging user settings and hard-coded values -//! (see `/massa-models/node_configuration/*`) -//! -//! `From<&ConsensusConfig> impl *`: -//! - `GraphConfig` -//! - `LedgerConfig` -//! - `ProofOfStakeConfig` -//! -//! > Development note: We clone the values on getting a configuration from another. -//! -//! # Usage of constants -//! -//! The default configuration is loaded from the `massa-models` crate. You shouldn't -//! write an hard-coded value in the following file but create a new value in -//! `default.rs` and the testing default equivalent value in `default_testing.rs`. See -//! 
`/node_configuration/mod.rs` documentation in `massa-models` sources for more -//! information. -//! -//! # Channels -//! -//! The following file contains the definition of the Channels structures used in -//! the current module. -//! -//! # Testing feature -//! -//! In unit test your allowed to use the `testing` feature flag that will -//! use the default values from `/node_configuration/default_testing.rs` in the -//! `massa-models` crate sources. -use massa_execution_exports::ExecutionController; -use massa_graph::settings::GraphConfig; -use massa_pool_exports::PoolController; -use massa_pos_exports::SelectorController; -use massa_protocol_exports::{ProtocolCommandSender, ProtocolEventReceiver}; -use massa_signature::KeyPair; -use massa_time::MassaTime; -use tokio::sync::mpsc; - -use crate::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - events::ConsensusEvent, -}; - -/// Consensus full configuration (static + user defined) -#[derive(Debug, Clone)] -pub struct ConsensusConfig { - /// Time in milliseconds when the blockclique started. - pub genesis_timestamp: MassaTime, - /// TESTNET: time when the blockclique is ended. - pub end_timestamp: Option, - /// Number of threads - pub thread_count: u8, - /// Time between the periods in the same thread. - pub t0: MassaTime, - /// `KeyPair` to sign genesis blocks. - pub genesis_key: KeyPair, - /// Maximum number of blocks allowed in discarded blocks. - pub max_discarded_blocks: usize, - /// If a block is `future_block_processing_max_periods` periods in the future, it is just discarded. - pub future_block_processing_max_periods: u64, - /// Maximum number of blocks allowed in `FutureIncomingBlocks`. - pub max_future_processing_blocks: usize, - /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. - pub max_dependency_blocks: usize, - /// Threshold for fitness. 
- pub delta_f0: u64, - /// Maximum operation validity period count - pub operation_validity_periods: u64, - /// cycle duration in periods - pub periods_per_cycle: u64, - /// stats time span - pub stats_timespan: MassaTime, - /// max event send wait - pub max_send_wait: MassaTime, - /// force keep at least this number of final periods in RAM for each thread - pub force_keep_final_periods: u64, - /// target number of endorsement per block - pub endorsement_count: u32, - /// old blocks are pruned every `block_db_prune_interval` - pub block_db_prune_interval: MassaTime, - /// max number of items returned while querying - pub max_item_return_count: usize, - /// Max gas per block for the execution configuration - pub max_gas_per_block: u64, - /// channel size - pub channel_size: usize, -} - -impl From<&ConsensusConfig> for GraphConfig { - fn from(cfg: &ConsensusConfig) -> Self { - GraphConfig { - thread_count: cfg.thread_count, - genesis_key: cfg.genesis_key.clone(), - max_discarded_blocks: cfg.max_discarded_blocks, - future_block_processing_max_periods: cfg.future_block_processing_max_periods, - max_future_processing_blocks: cfg.max_future_processing_blocks, - max_dependency_blocks: cfg.max_dependency_blocks, - delta_f0: cfg.delta_f0, - operation_validity_periods: cfg.operation_validity_periods, - periods_per_cycle: cfg.periods_per_cycle, - force_keep_final_periods: cfg.force_keep_final_periods, - endorsement_count: cfg.endorsement_count, - max_item_return_count: cfg.max_item_return_count, - } - } -} - -/// Communication asynchronous channels for the consensus worker -/// Contains consensus channels associated (protocol & execution) -/// Contains also controller asynchronous channels (command, manager receivers and event sender) -/// Contains a sender to the pool worker commands -pub struct ConsensusWorkerChannels { - /// Associated protocol command sender. - pub protocol_command_sender: ProtocolCommandSender, - /// Associated protocol event listener. - pub protocol_event_receiver: ProtocolEventReceiver, - /// Execution command sender. - pub execution_controller: Box, - /// Associated Pool command sender. - pub pool_command_sender: Box, - /// Selector controller - pub selector_controller: Box, - /// Channel receiving consensus commands. - pub controller_command_rx: mpsc::Receiver, - /// Channel sending out consensus events. - pub controller_event_tx: mpsc::Sender, - /// Channel receiving consensus management commands. - pub controller_manager_rx: mpsc::Receiver, -} - -/// Public channels associated to the consensus module. -/// Execution & Protocol Sender/Receiver -pub struct ConsensusChannels { - /// outgoing link to execution component - pub execution_controller: Box, - /// outgoing link to protocol component - pub protocol_command_sender: ProtocolCommandSender, - /// incoming link to protocol component - pub protocol_event_receiver: ProtocolEventReceiver, - /// outgoing link to pool component - pub pool_command_sender: Box, - /// selector controller - pub selector_controller: Box, -} - -#[cfg(feature = "testing")] -/// -/// Create the default value of `ConsensusConfig`. -/// -/// Configuration has default values described in crate `massa-models`. -/// The most of `ConsensusConfig` values have in test mode a default value. 
-/// -/// You can create a `ConsensusConfig` with classic default values and redefining -/// dynamically the values of desired parameters: -/// -/// ```ignore -/// let cfg = ConsensusConfig { -/// max_discarded_blocks: 25, -/// ..Default::default() -/// }; -/// ``` -/// -/// You can also look at the divers `default()` implementation bellow. For example that -/// one is used to initialize the _default paths_ : -/// -/// ```ignore -/// let cfg = ConsensusConfig { -/// max_discarded_blocks: 21, -/// ..ConsensusConfig::default_with_paths(), -/// }; -/// ``` -/// -impl Default for ConsensusConfig { - fn default() -> Self { - use massa_models::config::*; - Self { - // reset genesis timestamp because we are in test mode that can take a while to process - genesis_timestamp: MassaTime::now(0) - .expect("Impossible to reset the timestamp in test"), - end_timestamp: *END_TIMESTAMP, - thread_count: THREAD_COUNT, - t0: T0, - genesis_key: GENESIS_KEY.clone(), - max_discarded_blocks: 100, - future_block_processing_max_periods: 2, - max_future_processing_blocks: 10, - max_dependency_blocks: 100, - delta_f0: DELTA_F0, - operation_validity_periods: OPERATION_VALIDITY_PERIODS, - periods_per_cycle: PERIODS_PER_CYCLE, - stats_timespan: MassaTime::from_millis(1000), - max_send_wait: MassaTime::from_millis(1000), - force_keep_final_periods: 20, - endorsement_count: ENDORSEMENT_COUNT, - block_db_prune_interval: MassaTime::from_millis(1000), - max_item_return_count: 100, - max_gas_per_block: MAX_GAS_PER_BLOCK, - channel_size: CHANNEL_SIZE, - } - } -} diff --git a/massa-consensus-exports/src/test_exports/mock.rs b/massa-consensus-exports/src/test_exports/mock.rs deleted file mode 100644 index 8be461ec5cc..00000000000 --- a/massa-consensus-exports/src/test_exports/mock.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use massa_models::config::CHANNEL_SIZE; -use massa_time::MassaTime; -use tokio::{sync::mpsc, time::sleep}; - -use crate::{ - commands::ConsensusCommand, events::ConsensusEvent, ConsensusCommandSender, - ConsensusEventReceiver, -}; - -/// Mock for the consensus controller. -/// We will receive the commands in this mock and accept callback functions depending of the command in `wait_command`. -/// We will also send the events that can be received by listening to the `ConsensusEventReceiver`. -pub struct MockConsensusController { - /// Command receiver - pub consensus_command_rx: mpsc::Receiver, - _consensus_event_tx: mpsc::Sender, -} - -impl MockConsensusController { - /// Create a new mock consensus controller. - pub fn new_with_receiver() -> (Self, ConsensusCommandSender, ConsensusEventReceiver) { - let (consensus_command_tx, consensus_command_rx) = - mpsc::channel::(CHANNEL_SIZE); - let (consensus_event_tx, consensus_event_rx) = - mpsc::channel::(CHANNEL_SIZE); - ( - MockConsensusController { - consensus_command_rx, - _consensus_event_tx: consensus_event_tx, - }, - ConsensusCommandSender(consensus_command_tx), - ConsensusEventReceiver(consensus_event_rx), - ) - } - - /// wait command - pub async fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option - where - F: Fn(ConsensusCommand) -> Option, - { - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! 
{ - cmd_opt = self.consensus_command_rx.recv() => match cmd_opt { - Some(orig_cmd) => if let Some(res_cmd) = filter_map(orig_cmd) { return Some(res_cmd); }, - None => panic!("Unexpected closure of network command channel."), - }, - _ = &mut timer => return None - } - } - } -} diff --git a/massa-consensus-exports/src/test_exports/mod.rs b/massa-consensus-exports/src/test_exports/mod.rs deleted file mode 100644 index a2f80855a21..00000000000 --- a/massa-consensus-exports/src/test_exports/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Copyright (c) 2022 MASSA LABS - -/// Mock of the execution module -mod mock; -/// Tooling to make test using a consensus -mod tools; -pub use mock::*; -pub use tools::*; diff --git a/massa-consensus-exports/src/test_exports/tools.rs b/massa-consensus-exports/src/test_exports/tools.rs deleted file mode 100644 index d7d6861ced3..00000000000 --- a/massa-consensus-exports/src/test_exports/tools.rs +++ /dev/null @@ -1,70 +0,0 @@ -use std::collections::HashMap; - -use massa_cipher::encrypt; -use massa_models::{ - address::Address, - ledger_models::LedgerData, - rolls::{RollCounts, RollUpdate, RollUpdates}, -}; -use massa_signature::KeyPair; -use tempfile::NamedTempFile; - -/// Password used for encryption in tests -pub const TEST_PASSWORD: &str = "PASSWORD"; - -/// generate a named temporary JSON ledger file -pub fn generate_ledger_file(ledger_vec: &HashMap) -> NamedTempFile { - use std::io::prelude::*; - let ledger_file_named = NamedTempFile::new().expect("cannot create temp file"); - serde_json::to_writer_pretty(ledger_file_named.as_file(), &ledger_vec) - .expect("unable to write ledger file"); - ledger_file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - ledger_file_named -} - -/// generate staking key temp file from array of keypair -pub fn generate_staking_keys_file(staking_keys: &[KeyPair]) -> NamedTempFile { - use std::io::prelude::*; - let file_named = NamedTempFile::new().expect("cannot create temp file"); - let json = serde_json::to_string(&staking_keys).expect("json serialization failed"); - let encrypted_data = encrypt(TEST_PASSWORD, json.as_bytes()).expect("encryption failed"); - std::fs::write(file_named.as_ref(), encrypted_data).expect("data writing failed"); - file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - file_named -} - -/// generate a named temporary JSON initial rolls file -pub fn generate_roll_counts_file(roll_counts: &RollCounts) -> NamedTempFile { - use std::io::prelude::*; - let roll_counts_file_named = NamedTempFile::new().expect("cannot create temp file"); - serde_json::to_writer_pretty(roll_counts_file_named.as_file(), &roll_counts.0) - .expect("unable to write ledger file"); - roll_counts_file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - roll_counts_file_named -} - -/// generate a default named temporary JSON initial rolls file, -/// assuming two threads. 
-pub fn generate_default_roll_counts_file(stakers: Vec) -> NamedTempFile { - let mut roll_counts = RollCounts::default(); - for key in stakers.iter() { - let address = Address::from_public_key(&key.get_public_key()); - let update = RollUpdate { - roll_purchases: 1, - roll_sales: 0, - }; - let mut updates = RollUpdates::default(); - updates.apply(&address, &update).unwrap(); - roll_counts.apply_updates(&updates).unwrap(); - } - generate_roll_counts_file(&roll_counts) -} diff --git a/massa-consensus-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml deleted file mode 100644 index 679d1ae75b8..00000000000 --- a/massa-consensus-worker/Cargo.toml +++ /dev/null @@ -1,55 +0,0 @@ -[package] -name = "massa_consensus_worker" -version = "0.1.0" -authors = ["Massa Labs "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -serde_json = "1.0" -tokio = { version = "1.21", features = ["full"] } -tracing = "0.1" -# custom modules -massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_graph = { path = "../massa-graph" } -massa_logging = { path = "../massa-logging" } -massa_models = { path = "../massa-models" } -massa_storage = { path = "../massa-storage" } -massa_protocol_exports = { path = "../massa-protocol-exports" } -massa_time = { path = "../massa-time" } - -[dev-dependencies] -serial_test = "0.9" -#tempfile = "3.3" -parking_lot = { version = "0.12", features = ["deadlock_detection"] } -massa_models = { path = "../massa-models", features = ["testing"] } -massa_execution_exports = { path = "../massa-execution-exports", features = [ - "testing", -] } -massa_consensus_exports = { path = "../massa-consensus-exports", features = [ - "testing", -] } -massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"]} -massa_pos_worker = { path = "../massa-pos-worker" } -massa_pool_exports = { path = "../massa-pool-exports" } -massa_serialization = { path = "../massa-serialization"} -massa_hash = { path = "../massa-hash" } -massa_signature = { path = "../massa-signature" } -massa_cipher = { path = "../massa-cipher" } -massa_storage = { path = "../massa-storage" } -#num = { version = "0.4", features = ["serde"] } -#rand = "0.8" -#futures = "0.3" - - -# for more information on what are the following features used for, see the cargo.toml at workspace level -[features] -sandbox = ["massa_consensus_exports/sandbox", "massa_protocol_exports/sandbox" ] -testing = [ - "massa_consensus_exports/testing", - "massa_execution_exports/testing", - "massa_models/testing", - "massa_pool_exports/testing", - "massa_protocol_exports/testing" -] \ No newline at end of file diff --git a/massa-consensus-worker/src/consensus_worker.rs b/massa-consensus-worker/src/consensus_worker.rs deleted file mode 100644 index 7d65a106b2f..00000000000 --- a/massa-consensus-worker/src/consensus_worker.rs +++ /dev/null @@ -1,785 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use massa_consensus_exports::{ - commands::ConsensusCommand, - error::{ConsensusError, ConsensusResult as Result}, - settings::ConsensusWorkerChannels, - ConsensusConfig, -}; -use massa_graph::{BlockGraph, BlockGraphExport}; -use massa_models::timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}; -use massa_models::{address::Address, block::BlockId, slot::Slot}; -use massa_models::{block::WrappedHeader, prehash::PreHashMap}; -use massa_models::{prehash::PreHashSet, stats::ConsensusStats}; -use massa_protocol_exports::{ProtocolEvent, 
ProtocolEventReceiver}; -use massa_storage::Storage; -use massa_time::MassaTime; -use std::{ - cmp::max, - collections::{HashMap, VecDeque}, -}; -use tokio::time::{sleep, sleep_until, Sleep}; -use tracing::{info, warn}; - -#[cfg(not(feature = "sandbox"))] -use massa_consensus_exports::events::ConsensusEvent; -#[cfg(not(feature = "sandbox"))] -use tokio::sync::mpsc::error::SendTimeoutError; -#[cfg(not(feature = "sandbox"))] -use tracing::debug; - -/// Manages consensus. -pub struct ConsensusWorker { - /// Consensus Configuration - cfg: ConsensusConfig, - /// Associated channels, sender and receivers - channels: ConsensusWorkerChannels, - /// Database containing all information about blocks, the `BlockGraph` and cliques. - block_db: BlockGraph, - /// Previous slot. - previous_slot: Option, - /// Next slot - next_slot: Slot, - /// blocks we want - wishlist: PreHashMap>, - /// latest final periods - latest_final_periods: Vec, - /// clock compensation - clock_compensation: i64, - /// Final block stats `(time, creator, is_from_protocol)` - final_block_stats: VecDeque<(MassaTime, Address, bool)>, - /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` - protocol_blocks: VecDeque<(MassaTime, BlockId)>, - /// Stale block timestamp - stale_block_stats: VecDeque, - /// the time span considered for stats - stats_history_timespan: MassaTime, - /// the time span considered for desynchronization detection - #[allow(dead_code)] - stats_desync_detection_timespan: MassaTime, - /// time at which the node was launched (used for desynchronization detection) - launch_time: MassaTime, - /// previous blockclique notified to Execution - prev_blockclique: PreHashMap, -} - -impl ConsensusWorker { - /// Creates a new consensus controller. - /// Initiates the random selector. - /// - /// # Arguments - /// * `cfg`: consensus configuration. - /// * `protocol_command_sender`: associated protocol controller - /// * `block_db`: Database containing all information about blocks, the blockgraph and cliques. - /// * `controller_command_rx`: Channel receiving consensus commands. - /// * `controller_event_tx`: Channel sending out consensus events. - /// * `controller_manager_rx`: Channel receiving consensus management commands. 
- pub(crate) async fn new( - cfg: ConsensusConfig, - channels: ConsensusWorkerChannels, - block_db: BlockGraph, - clock_compensation: i64, - ) -> Result { - let now = MassaTime::now(clock_compensation)?; - let previous_slot = get_latest_block_slot_at_timestamp( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - now, - )?; - let next_slot = previous_slot.map_or(Ok(Slot::new(0u64, 0u8)), |s| { - s.get_next_slot(cfg.thread_count) - })?; - let latest_final_periods: Vec = block_db - .get_latest_final_blocks_periods() - .iter() - .map(|(_block_id, period)| *period) - .collect(); - info!( - "Started node at time {}, cycle {}, period {}, thread {}", - now.to_utc_string(), - next_slot.get_cycle(cfg.periods_per_cycle), - next_slot.period, - next_slot.thread, - ); - if cfg.genesis_timestamp > now { - let (days, hours, mins, secs) = cfg - .genesis_timestamp - .saturating_sub(now) - .days_hours_mins_secs()?; - info!( - "{} days, {} hours, {} minutes, {} seconds remaining to genesis", - days, hours, mins, secs, - ) - } - massa_trace!("consensus.consensus_worker.new", {}); - - // desync detection timespan - let stats_desync_detection_timespan = cfg.t0.checked_mul(cfg.periods_per_cycle * 2)?; - - // Notify execution module of current blockclique and all final blocks. - // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync - // because the two modules run concurrently and out of sync. - let mut block_storage: PreHashMap = Default::default(); - let notify_finals: HashMap = block_db - .get_all_final_blocks() - .into_iter() - .map(|(b_id, slot)| { - let (_a_block, storage) = block_db - .get_active_block(&b_id) - .expect("active block missing from block_db"); - block_storage.insert(b_id, storage.clone()); - (slot, b_id) - }) - .collect(); - let notify_blockclique: HashMap = block_db - .get_blockclique() - .iter() - .map(|b_id| { - let (a_block, storage) = block_db - .get_active_block(b_id) - .expect("active block missing from block_db"); - let slot = a_block.slot; - block_storage.insert(*b_id, storage.clone()); - (slot, *b_id) - }) - .collect(); - let prev_blockclique: PreHashMap = - notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect(); - channels.execution_controller.update_blockclique_status( - notify_finals, - Some(notify_blockclique), - block_storage, - ); - - Ok(ConsensusWorker { - block_db, - previous_slot, - next_slot, - wishlist: Default::default(), - latest_final_periods, - clock_compensation, - channels, - final_block_stats: Default::default(), - protocol_blocks: Default::default(), - stale_block_stats: VecDeque::new(), - stats_desync_detection_timespan, - stats_history_timespan: max(stats_desync_detection_timespan, cfg.stats_timespan), - cfg, - launch_time: MassaTime::now(clock_compensation)?, - prev_blockclique, - }) - } - - /// Consensus work is managed here. - /// It's mostly a tokio::select within a loop. - pub async fn run_loop(mut self) -> Result { - // signal initial state to pool - self.channels - .pool_command_sender - .notify_final_cs_periods(&self.latest_final_periods); - - // set slot timer - let slot_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? 
- .estimate_instant(self.clock_compensation)?; - let next_slot_timer = sleep_until(tokio::time::Instant::from(slot_deadline)); - - tokio::pin!(next_slot_timer); - - // set prune timer - let prune_timer = sleep(self.cfg.block_db_prune_interval.to_duration()); - tokio::pin!(prune_timer); - - loop { - massa_trace!("consensus.consensus_worker.run_loop.select", {}); - /* - select! without the "biased" modifier will randomly select the 1st branch to check, - then will check the next ones in the order they are written. - We choose this order: - * manager commands: low freq, avoid having to wait to stop - * consensus commands (low to medium freq): respond quickly - * slot timer (low freq, timing is important but does not have to be perfect either) - * prune timer: low freq, timing not important but should not wait too long - * receive protocol events (high freq) - */ - tokio::select! { - // listen to manager commands - cmd = self.channels.controller_manager_rx.recv() => { - massa_trace!("consensus.consensus_worker.run_loop.select.manager", {}); - match cmd { - None => break, - Some(_) => {} - }} - - // listen consensus commands - Some(cmd) = self.channels.controller_command_rx.recv() => { - massa_trace!("consensus.consensus_worker.run_loop.consensus_command", {}); - self.process_consensus_command(cmd).await? - }, - - // slot timer - _ = &mut next_slot_timer => { - massa_trace!("consensus.consensus_worker.run_loop.select.slot_tick", {}); - if let Some(end) = self.cfg.end_timestamp { - if MassaTime::now(self.clock_compensation)? > end { - info!("This episode has come to an end, please get the latest testnet node version to continue"); - break; - } - } - self.slot_tick(&mut next_slot_timer).await?; - }, - - // prune timer - _ = &mut prune_timer=> { - massa_trace!("consensus.consensus_worker.run_loop.prune_timer", {}); - // prune block db - let _discarded_final_blocks = self.block_db.prune()?; - - // reset timer - prune_timer.set(sleep( self.cfg.block_db_prune_interval.to_duration())) - } - - // receive protocol controller events - evt = self.channels.protocol_event_receiver.wait_event() =>{ - massa_trace!("consensus.consensus_worker.run_loop.select.protocol_event", {}); - match evt { - Ok(event) => self.process_protocol_event(event).await?, - Err(err) => return Err(ConsensusError::ProtocolError(Box::new(err))) - } - }, - } - } - // after this curly brace you can find the end of the loop - Ok(self.channels.protocol_event_receiver) - } - - /// this function is called around every slot tick - /// it checks for cycle increment - /// detects desynchronization - /// produce quite more logs than actual stuff - async fn slot_tick(&mut self, next_slot_timer: &mut std::pin::Pin<&mut Sleep>) -> Result<()> { - let now = MassaTime::now(self.clock_compensation)?; - let observed_slot = get_latest_block_slot_at_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - now, - )?; - - if observed_slot < Some(self.next_slot) { - // reset timer for next slot - let sleep_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? 
- .estimate_instant(self.clock_compensation)?; - next_slot_timer.set(sleep_until(tokio::time::Instant::from(sleep_deadline))); - return Ok(()); - } - - let observed_slot = observed_slot.unwrap(); // does not panic, checked above - - massa_trace!("consensus.consensus_worker.slot_tick", { - "slot": observed_slot - }); - - let previous_cycle = self - .previous_slot - .map(|s| s.get_cycle(self.cfg.periods_per_cycle)); - let observed_cycle = observed_slot.get_cycle(self.cfg.periods_per_cycle); - if previous_cycle.is_none() { - // first cycle observed - info!("Massa network has started ! 🎉") - } - if previous_cycle < Some(observed_cycle) { - info!("Started cycle {}", observed_cycle); - } - - // check if there are any final blocks is coming from protocol - // if none => we are probably desync - #[cfg(not(feature = "sandbox"))] - if now - > max(self.cfg.genesis_timestamp, self.launch_time) - .saturating_add(self.stats_desync_detection_timespan) - && !self - .final_block_stats - .iter() - .any(|(time, _, is_from_protocol)| { - time > &now.saturating_sub(self.stats_desync_detection_timespan) - && *is_from_protocol - }) - { - warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); - let _ = self.send_consensus_event(ConsensusEvent::NeedSync).await; - } - - self.previous_slot = Some(observed_slot); - self.next_slot = observed_slot.get_next_slot(self.cfg.thread_count)?; - - // signal tick to block graph - self.block_db.slot_tick(Some(observed_slot))?; - - // take care of block db changes - self.block_db_changed().await?; - - // reset timer for next slot - let sleep_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? - .estimate_instant(self.clock_compensation)?; - next_slot_timer.set(sleep_until(tokio::time::Instant::from(sleep_deadline))); - - // prune stats - self.prune_stats()?; - - Ok(()) - } - - /// Manages given consensus command. - /// They can come from the API or the bootstrap server - /// Please refactor me - /// - /// # Argument - /// * `cmd`: consensus command to process - async fn process_consensus_command(&mut self, cmd: ConsensusCommand) -> Result<()> { - match cmd { - ConsensusCommand::GetBlockGraphStatus { - slot_start, - slot_end, - response_tx, - } => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_block_graph_status", - {} - ); - if response_tx - .send(BlockGraphExport::extract_from( - &self.block_db, - slot_start, - slot_end, - )?) 
- .is_err() - { - warn!("consensus: could not send GetBlockGraphStatus answer"); - } - Ok(()) - } - // gets the graph status of a batch of blocks - ConsensusCommand::GetBlockStatuses { ids, response_tx } => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_block_statuses", - {} - ); - let res: Vec<_> = ids - .iter() - .map(|id| self.block_db.get_block_status(id)) - .collect(); - if response_tx.send(res).is_err() { - warn!("consensus: could not send get_block_statuses answer"); - } - Ok(()) - } - ConsensusCommand::GetCliques(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_cliques", - {} - ); - if response_tx.send(self.block_db.get_cliques()).is_err() { - warn!("consensus: could not send GetSelectionDraws response"); - } - Ok(()) - } - ConsensusCommand::GetBootstrapState(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_bootstrap_state", - {} - ); - let resp = self.block_db.export_bootstrap_graph()?; - if response_tx.send(Box::new(resp)).await.is_err() { - warn!("consensus: could not send GetBootstrapState answer"); - } - Ok(()) - } - ConsensusCommand::GetStats(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_stats", - {} - ); - let res = self.get_stats()?; - if response_tx.send(res).is_err() { - warn!("consensus: could not send get_stats response"); - } - Ok(()) - } - ConsensusCommand::GetBestParents { response_tx } => { - if response_tx - .send(self.block_db.get_best_parents().clone()) - .is_err() - { - warn!("consensus: could not send get best parents response"); - } - Ok(()) - } - ConsensusCommand::GetBlockcliqueBlockAtSlot { slot, response_tx } => { - let res = self.block_db.get_blockclique_block_at_slot(&slot); - if response_tx.send(res).is_err() { - warn!("consensus: could not send get block clique block at slot response"); - } - Ok(()) - } - ConsensusCommand::GetLatestBlockcliqueBlockAtSlot { slot, response_tx } => { - let res = self.block_db.get_latest_blockclique_block_at_slot(&slot); - if response_tx.send(res).is_err() { - warn!( - "consensus: could not send get latest block clique block at slot response" - ); - } - Ok(()) - } - ConsensusCommand::SendBlock { - block_id, - slot, - block_storage, - response_tx, - } => { - self.block_db - .incoming_block(block_id, slot, self.previous_slot, block_storage)?; - - if response_tx.send(()).is_err() { - warn!("consensus: could not send get block clique block at slot response"); - } - Ok(()) - } - } - } - - /// retrieve stats - /// Used in response to a API request - fn get_stats(&mut self) -> Result { - let timespan_end = max(self.launch_time, MassaTime::now(self.clock_compensation)?); - let timespan_start = max( - timespan_end.saturating_sub(self.cfg.stats_timespan), - self.launch_time, - ); - let final_block_count = self - .final_block_stats - .iter() - .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end) - .count() as u64; - let stale_block_count = self - .stale_block_stats - .iter() - .filter(|t| **t >= timespan_start && **t < timespan_end) - .count() as u64; - let clique_count = self.block_db.get_clique_count() as u64; - Ok(ConsensusStats { - final_block_count, - stale_block_count, - clique_count, - start_timespan: timespan_start, - end_timespan: timespan_end, - }) - } - - /// Manages received protocol events. - /// - /// # Arguments - /// * `event`: event type to process. 
- async fn process_protocol_event(&mut self, event: ProtocolEvent) -> Result<()> { - match event { - ProtocolEvent::ReceivedBlock { - block_id, - slot, - storage, - } => { - massa_trace!( - "consensus.consensus_worker.process_protocol_event.received_block", - { "block_id": block_id } - ); - self.block_db - .incoming_block(block_id, slot, self.previous_slot, storage)?; - let now = MassaTime::now(self.clock_compensation)?; - self.protocol_blocks.push_back((now, block_id)); - self.block_db_changed().await?; - } - ProtocolEvent::ReceivedBlockHeader { block_id, header } => { - massa_trace!("consensus.consensus_worker.process_protocol_event.received_header", { "block_id": block_id, "header": header }); - self.block_db - .incoming_header(block_id, header, self.previous_slot)?; - self.block_db_changed().await?; - } - ProtocolEvent::InvalidBlock { block_id, header } => { - massa_trace!( - "consensus.consensus_worker.process_protocol_event.invalid_block", - { "block_id": block_id } - ); - self.block_db.invalid_block(&block_id, header)?; - // Say it to consensus - } - } - Ok(()) - } - - /// prune statistics according to the stats span - fn prune_stats(&mut self) -> Result<()> { - let start_time = - MassaTime::now(self.clock_compensation)?.saturating_sub(self.stats_history_timespan); - while let Some((t, _, _)) = self.final_block_stats.front() { - if t < &start_time { - self.final_block_stats.pop_front(); - } else { - break; - } - } - while let Some(t) = self.stale_block_stats.front() { - if t < &start_time { - self.stale_block_stats.pop_front(); - } else { - break; - } - } - while let Some((t, _)) = self.protocol_blocks.front() { - if t < &start_time { - self.protocol_blocks.pop_front(); - } else { - break; - } - } - Ok(()) - } - - /// Notify execution about blockclique changes and finalized blocks. - fn notify_execution(&mut self, finalized_blocks: HashMap) { - // List new block storage instances that Execution doesn't know about. - // That's blocks that have not been sent to execution before, ie. in the previous blockclique). - let mut new_blocks_storage: PreHashMap = finalized_blocks - .iter() - .filter_map(|(_slot, b_id)| { - if self.prev_blockclique.contains_key(b_id) { - // was previously sent as a blockclique element - return None; - } - let (_a_block, storage) = self - .block_db - .get_active_block(b_id) - .expect("final block not found in active blocks"); - Some((*b_id, storage.clone())) - }) - .collect(); - - // Get new blockclique block list with slots. - let mut blockclique_changed = false; - let new_blockclique: PreHashMap = self - .block_db - .get_blockclique() - .iter() - .map(|b_id| { - if let Some(slot) = self.prev_blockclique.remove(b_id) { - // The block was already sent in the previous blockclique: - // the slot can be gathered from there without locking Storage. - // Note: the block is removed from self.prev_blockclique. - (*b_id, slot) - } else { - // The block was not present in the previous blockclique: - // the blockclique has changed => get the block's slot by querying Storage. - blockclique_changed = true; - let (a_block, storage) = self - .block_db - .get_active_block(b_id) - .expect("blockclique block not found in active blocks"); - new_blocks_storage.insert(*b_id, storage.clone()); - (*b_id, a_block.slot) - } - }) - .collect(); - if !self.prev_blockclique.is_empty() { - // All elements present in the new blockclique have been removed from `prev_blockclique` above. 
-            // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore.
-            // In that case, we mark the blockclique as having changed.
-            blockclique_changed = true;
-        }
-        // Overwrite the previous blockclique.
-        // This should still be done even if unchanged, because elements were removed from it above.
-        self.prev_blockclique = new_blockclique.clone();
-
-        if finalized_blocks.is_empty() && !blockclique_changed {
-            // There are no changes (neither block finalizations nor blockclique changes) to send to execution.
-            return;
-        }
-
-        // Notify execution of block finalizations and blockclique changes.
-        self.channels
-            .execution_controller
-            .update_blockclique_status(
-                finalized_blocks,
-                if blockclique_changed {
-                    Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect())
-                } else {
-                    None
-                },
-                new_blocks_storage,
-            );
-    }
-
-    /// Called whenever the block database changes.
-    /// Handles the processing of final blocks and pruning:
-    ///
-    /// 1. Propagate blocks.
-    /// 2. Notify of attack attempts.
-    /// 3. Get new final blocks.
-    /// 4. Get the blockclique.
-    /// 5. Notify Execution.
-    /// 6. Process new final blocks.
-    /// 7. Notify pool of new final operations.
-    /// 8. Notify PoS of final blocks.
-    /// 9. Notify protocol of the block wishlist.
-    /// 10. Note new latest final periods (prune the graph if changed).
-    /// 11. Add stale blocks to stats.
-    async fn block_db_changed(&mut self) -> Result<()> {
-        massa_trace!("consensus.consensus_worker.block_db_changed", {});
-
-        // Propagate new blocks.
-        for (block_id, storage) in self.block_db.get_blocks_to_propagate().into_iter() {
-            massa_trace!("consensus.consensus_worker.block_db_changed.integrated", {
-                "block_id": block_id
-            });
-            self.channels
-                .protocol_command_sender
-                .integrated_block(block_id, storage)?;
-        }
-
-        // Notify protocol of attack attempts.
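The blockclique hand-off in `notify_execution` above is, at its core, a map diff between the previous and the new blockclique: a block absent from the previous clique marks the clique as changed, and the leftover entries of `prev_blockclique` are exactly the blocks that dropped out. A minimal sketch of that diff under simplified types (`u64` in place of `BlockId`/`Slot`, storage handling omitted; names are illustrative only):

```rust
use std::collections::HashMap;

/// Diffs the previous blockclique against the new one, mirroring the change
/// detection of `notify_execution`: returns the new clique map and whether
/// the clique changed.
fn diff_blockclique(
    prev: &mut HashMap<u64, u64>, // block id -> slot, previous clique
    new_ids: &[(u64, u64)],       // (block id, slot) of the new clique
) -> (HashMap<u64, u64>, bool) {
    let mut changed = false;
    let mut new_clique = HashMap::new();
    for &(id, slot) in new_ids {
        // A block that was not in the previous clique means the clique changed.
        if prev.remove(&id).is_none() {
            changed = true;
        }
        new_clique.insert(id, slot);
    }
    // Whatever is left in `prev` disappeared from the clique.
    if !prev.is_empty() {
        changed = true;
    }
    (new_clique, changed)
}

fn main() {
    let mut prev = HashMap::from([(1u64, 10u64), (2, 11)]);
    // Block 2 dropped out and block 3 appeared: the clique changed.
    let (new_clique, changed) = diff_blockclique(&mut prev, &[(1, 10), (3, 12)]);
    assert!(changed);
    assert_eq!(new_clique.len(), 2);
}
```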
- for hash in self.block_db.get_attack_attempts().into_iter() { - self.channels - .protocol_command_sender - .notify_block_attack(hash)?; - massa_trace!("consensus.consensus_worker.block_db_changed.attack", { - "hash": hash - }); - } - - // manage finalized blocks - let timestamp = MassaTime::now(self.clock_compensation)?; - let finalized_blocks = self.block_db.get_new_final_blocks(); - let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); - for b_id in finalized_blocks { - if let Some((a_block, _block_store)) = self.block_db.get_active_block(&b_id) { - // add to final blocks to notify execution - final_block_slots.insert(a_block.slot, b_id); - - // add to stats - let block_is_from_protocol = self - .protocol_blocks - .iter() - .any(|(_, block_id)| block_id == &b_id); - self.final_block_stats.push_back(( - timestamp, - a_block.creator_address, - block_is_from_protocol, - )); - } - } - - // notify execution - self.notify_execution(final_block_slots); - - // notify protocol of block wishlist - let new_wishlist = self.block_db.get_block_wishlist()?; - let new_blocks: PreHashMap> = new_wishlist - .iter() - .filter_map(|(id, header)| { - if !self.wishlist.contains_key(id) { - Some((*id, header.clone())) - } else { - None - } - }) - .collect(); - let remove_blocks: PreHashSet = self - .wishlist - .iter() - .filter_map(|(id, _)| { - if !new_wishlist.contains_key(id) { - Some(*id) - } else { - None - } - }) - .collect(); - if !new_blocks.is_empty() || !remove_blocks.is_empty() { - massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); - self.channels - .protocol_command_sender - .send_wishlist_delta(new_blocks, remove_blocks)?; - self.wishlist = new_wishlist; - } - - // note new latest final periods - let latest_final_periods: Vec = self - .block_db - .get_latest_final_blocks_periods() - .iter() - .map(|(_block_id, period)| *period) - .collect(); - // if changed... - if self.latest_final_periods != latest_final_periods { - // signal new last final periods to pool - self.channels - .pool_command_sender - .notify_final_cs_periods(&latest_final_periods); - // update final periods - self.latest_final_periods = latest_final_periods; - } - - // add stale blocks to stats - let new_stale_block_ids_creators_slots = self.block_db.get_new_stale_blocks(); - let timestamp = MassaTime::now(self.clock_compensation)?; - for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { - self.stale_block_stats.push_back(timestamp); - - /* - TODO add this again - let creator_addr = Address::from_public_key(&b_creator); - if self.staking_keys.contains_key(&creator_addr) { - warn!("block {} that was produced by our address {} at slot {} became stale. 
This is probably due to a temporary desynchronization.", b_id, creator_addr, b_slot);
-            }
-            */
-        }
-
-        Ok(())
-    }
-
-    /// Channel management helper.
-    /// TODO: delete this, or at least introduce some generic abstraction.
-    #[cfg(not(feature = "sandbox"))]
-    async fn send_consensus_event(&self, event: ConsensusEvent) -> Result<()> {
-        let result = self
-            .channels
-            .controller_event_tx
-            .send_timeout(event, self.cfg.max_send_wait.to_duration())
-            .await;
-        match result {
-            Ok(()) => return Ok(()),
-            Err(SendTimeoutError::Closed(event)) => {
-                debug!(
-                    "failed to send ConsensusEvent due to channel closure: {:?}",
-                    event
-                );
-            }
-            Err(SendTimeoutError::Timeout(event)) => {
-                debug!("failed to send ConsensusEvent due to timeout: {:?}", event);
-            }
-        }
-        Err(ConsensusError::ChannelError("failed to send event".into()))
-    }
-}
diff --git a/massa-consensus-worker/src/lib.rs b/massa-consensus-worker/src/lib.rs
deleted file mode 100644
index 5d590c1d394..00000000000
--- a/massa-consensus-worker/src/lib.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-//! Copyright (c) 2022 MASSA LABS
-
-#![feature(async_closure)]
-#![feature(hash_drain_filter)]
-#![feature(int_roundings)]
-#![warn(missing_docs)]
-#![warn(unused_crate_dependencies)]
-#[macro_use]
-extern crate massa_logging;
-
-mod consensus_worker;
-mod tools;
-pub use tools::start_consensus_controller;
-
-#[cfg(test)]
-mod tests;
diff --git a/massa-consensus-worker/src/tests/block_factory.rs b/massa-consensus-worker/src/tests/block_factory.rs
deleted file mode 100644
index 0a415dde219..00000000000
--- a/massa-consensus-worker/src/tests/block_factory.rs
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (c) 2022 MASSA LABS
-
-//! This is a factory that can be used in consensus tests,
-//! but as it was introduced quite late in the development process
-//! it has only been used in `scenarios_basic`.
-
-use super::tools::{validate_notpropagate_block, validate_propagate_block};
-use massa_hash::Hash;
-use massa_models::{
-    block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock},
-    endorsement::WrappedEndorsement,
-    operation::WrappedOperation,
-    slot::Slot,
-    wrapped::{Id, WrappedContent},
-};
-use massa_protocol_exports::test_exports::MockProtocolController;
-use massa_signature::KeyPair;
-use massa_storage::Storage;
-
-pub struct BlockFactory {
-    pub best_parents: Vec<BlockId>,
-    pub creator_keypair: KeyPair,
-    pub slot: Slot,
-    pub endorsements: Vec<WrappedEndorsement>,
-    pub operations: Vec<WrappedOperation>,
-    pub protocol_controller: MockProtocolController,
-}
-
-impl BlockFactory {
-    pub fn start_block_factory(
-        genesis: Vec<BlockId>,
-        protocol_controller: MockProtocolController,
-    ) -> BlockFactory {
-        BlockFactory {
-            best_parents: genesis,
-            creator_keypair: KeyPair::generate(),
-            slot: Slot::new(1, 0),
-            endorsements: Vec::new(),
-            operations: Vec::new(),
-            protocol_controller,
-        }
-    }
-
-    pub async fn create_and_receive_block(&mut self, valid: bool) -> WrappedBlock {
-        let header = BlockHeader::new_wrapped(
-            BlockHeader {
-                slot: self.slot,
-                parents: self.best_parents.clone(),
-                operation_merkle_root: Hash::compute_from(
-                    &self
-                        .operations
-                        .iter()
-                        .flat_map(|op| op.id.get_hash().into_bytes())
-                        .collect::<Vec<_>>()[..],
-                ),
-                endorsements: self.endorsements.clone(),
-            },
-            BlockHeaderSerializer::new(),
-            &self.creator_keypair,
-        )
-        .unwrap();
-
-        let block = Block::new_wrapped(
-            Block {
-                header,
-                operations: self
-                    .operations
-                    .clone()
-                    .into_iter()
-                    .map(|op| op.id)
-                    .collect(),
-            },
-            BlockSerializer::new(),
-            &self.creator_keypair,
-        )
-        .unwrap();
-
-        let mut storage = Storage::create_root();
-        let id = block.id;
-        let slot = block.content.header.content.slot;
-        storage.store_block(block.clone());
-
-        self.protocol_controller
-            .receive_block(id, slot, storage)
-            .await;
-        if valid {
-            // Assert that the block is propagated.
-            validate_propagate_block(&mut self.protocol_controller, id, 2000).await;
-        } else {
-            // Assert that the block is not propagated.
-            validate_notpropagate_block(&mut self.protocol_controller, id, 500).await;
-        }
-        block
-    }
-
-    pub fn sign_header(&self, header: BlockHeader) -> WrappedBlock {
-        let header =
-            BlockHeader::new_wrapped(header, BlockHeaderSerializer::new(), &self.creator_keypair)
-                .unwrap();
-
-        Block::new_wrapped(
-            Block {
-                header,
-                operations: self
-                    .operations
-                    .clone()
-                    .into_iter()
-                    .map(|op| op.id)
-                    .collect(),
-            },
-            BlockSerializer::new(),
-            &self.creator_keypair,
-        )
-        .unwrap()
-    }
-
-    pub async fn receive_block(
-        &mut self,
-        valid: bool,
-        block_id: BlockId,
-        slot: Slot,
-        storage: Storage,
-    ) {
-        self.protocol_controller
-            .receive_block(block_id, slot, storage)
-            .await;
-        if valid {
-            // Assert that the block is propagated.
-            validate_propagate_block(&mut self.protocol_controller, block_id, 2000).await;
-        } else {
-            // Assert that the block is not propagated.
-            validate_notpropagate_block(&mut self.protocol_controller, block_id, 500).await;
-        }
-    }
-
-    pub fn take_protocol_controller(self) -> MockProtocolController {
-        self.protocol_controller
-    }
-}
diff --git a/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs b/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs
deleted file mode 100644
index d9b7da9b817..00000000000
--- a/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs
+++ /dev/null
@@ -1,199 +0,0 @@
-//!
Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_time::MassaTime; -use serial_test::serial; -use std::{collections::HashSet, str::FromStr}; - -/// # Context -/// -/// Regression test for `https://github.com/massalabs/massa/pull/2433` -/// -/// When we have the following block sequence -/// ``` -/// 1 thread, periods_per_cycle = 2, delta_f0 = 1, 1 endorsement per block -/// -/// cycle 0 | cycle 1 | cycle 2 -/// G - B1 - B2 - B3 - B4 -/// where G is the genesis block -/// and B4 contains a roll sell operation -/// ``` -/// -/// And the block `B1` is received AFTER `B4`, blocks will be processed recursively: -/// ``` -/// * B1 is received and included -/// * B2 is processed -/// * B1 becomes final in the graph -/// * B3 is processed -/// * B2 becomes final in the graph -/// * B4 is processed -/// * B3 becomes final in the graph -/// * PoS is told about all finalized blocks -/// ``` -/// -/// The problem we had is that in order to check rolls to verify `B4`'s roll sell, -/// the final roll registry was assumed to be attached to the last final block known by the graph, -/// but that was inaccurate because PoS was the one holding the final roll registry, -/// and PoS was not yet aware of the blocks that finalized during recursion, -/// so it was actually still attached to G when `B4` was checked. -/// -/// The correction involved taking the point of view of PoS on where the final roll registry is attached. -/// This test ensures non-regression by making sure `B4` is propagated when `B1` is received. -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -#[serial] -async fn test_inter_cycle_batch_finalization() { - let t0: MassaTime = 1000.into(); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let warmup_time: MassaTime = 1000.into(); - let margin_time: MassaTime = 300.into(); - let cfg = ConsensusConfig { - periods_per_cycle: 2, - delta_f0: 1, - thread_count: 1, - endorsement_count: 1, - max_future_processing_blocks: 10, - max_dependency_blocks: 10, - future_block_processing_max_periods: 10, - t0, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(warmup_time), - ..ConsensusConfig::default() - }; - - consensus_pool_test_with_storage( - cfg.clone(), - None, - async move |pool_controller, - mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - mut storage, - selector_controller| { - // wait for consensus warmup time - tokio::time::sleep(warmup_time.to_duration()).await; - - let genesis_blocks: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // create B1 but DO NOT SEND IT - tokio::time::sleep(t0.to_duration()).await; - let b1_block = - create_block(&cfg, Slot::new(1, 0), genesis_blocks.clone(), &staking_key); - - // create and send B2 - tokio::time::sleep(t0.to_duration()).await; - let b2_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(2, 0), - &vec![b1_block.id], - &staking_key, - vec![], - vec![create_endorsement( - &staking_key, - Slot::new(1, 0), - b1_block.id, - 0, - )], - ); - let b2_block_id = b2_block.id; - let b2_block_slot = b2_block.content.header.content.slot; - storage.store_block(b2_block); - protocol_controller - .receive_block(b2_block_id, b2_block_slot, storage.clone()) 
- .await; - - // create and send B3 - tokio::time::sleep(t0.to_duration()).await; - let b3_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(3, 0), - &vec![b2_block_id], - &staking_key, - vec![], - vec![create_endorsement( - &staking_key, - Slot::new(2, 0), - b2_block_id, - 0, - )], - ); - let b3_block_id = b3_block.id; - let b3_block_slot = b3_block.content.header.content.slot; - storage.store_block(b3_block); - protocol_controller - .receive_block(b3_block_id, b3_block_slot, storage.clone()) - .await; - - // create and send B4 - tokio::time::sleep(t0.to_duration()).await; - let roll_sell = create_roll_sell(&staking_key, 1, 4, 0); - storage.store_operations(vec![roll_sell.clone()]); - let b4_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(4, 0), - &vec![b3_block_id], - &staking_key, - vec![roll_sell], - vec![create_endorsement( - &staking_key, - Slot::new(3, 0), - b3_block_id, - 0, - )], - ); - let b4_block_id = b4_block.id; - let b4_block_slot = b4_block.content.header.content.slot; - storage.store_block(b4_block); - protocol_controller - .receive_block(b4_block_id, b4_block_slot, storage.clone()) - .await; - - // wait for the slot after B4 - tokio::time::sleep(t0.saturating_mul(5).to_duration()).await; - - // send B1 - let b1_block_id = b1_block.id; - let b1_block_slot = b1_block.content.header.content.slot; - storage.store_block(b1_block); - protocol_controller - .receive_block(b1_block_id, b1_block_slot, storage.clone()) - .await; - - approve_producer_and_selector_for_staker(&staking_key, &selector_controller); - - // wait for the propagation of B1, B2, B3 and B4 (unordered) - let mut to_propagate: HashSet<_> = - vec![b1_block_id, b2_block_id, b3_block_id, b4_block_id] - .into_iter() - .collect(); - for _ in 0u8..4 { - to_propagate.remove( - &validate_propagate_block_in_list( - &mut protocol_controller, - &to_propagate.clone().into_iter().collect(), - margin_time.to_millis(), - ) - .await, - ); - } - - ( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/mod.rs b/massa-consensus-worker/src/tests/mod.rs deleted file mode 100644 index 5d62262b27a..00000000000 --- a/massa-consensus-worker/src/tests/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -mod block_factory; -// mod inter_cycle_batch_finalization; /* TODO repair this test https://github.com/massalabs/massa/issues/3099 -mod scenario_block_creation; -mod scenario_roll; -mod scenarios106; -mod scenarios91_1; -mod scenarios91_2; -mod scenarios_basic; -mod scenarios_endorsements; -mod scenarios_get_operations; -mod scenarios_get_selection_draws; -mod scenarios_header_check; -mod scenarios_incompatibilities; -mod scenarios_note_attack_attempt; -mod scenarios_operations_check; -mod scenarios_parents; -mod scenarios_pool_commands; -mod scenarios_pruning; -mod scenarios_reward_split; -mod scenarios_send_block; -mod scenarios_wishlist; -mod test_block_graph; -pub mod tools; diff --git a/massa-consensus-worker/src/tests/scenario_block_creation.rs b/massa-consensus-worker/src/tests/scenario_block_creation.rs deleted file mode 100644 index 709a78a78a6..00000000000 --- a/massa-consensus-worker/src/tests/scenario_block_creation.rs +++ /dev/null @@ -1,849 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::random_address_on_thread; -use crate::tests::tools; -use massa_consensus_exports::ConsensusConfig; -use 
massa_models::ledger_models::LedgerData; -use massa_models::rolls::{RollCounts, RollUpdate, RollUpdates}; -use massa_models::{amount::Amount, slot::Slot}; -use massa_protocol_exports::ProtocolCommand; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashMap; -use tokio::time::sleep_until; - -// #[tokio::test] -// #[serial] -// async fn test_genesis_block_creation() { -// // define addresses use for the test -// // addresses a and b both in thread 0 -// // addr 1 has 1 roll and 0 coins -// // addr 2 is in consensus and has 0 roll and 1000 coins -// let thread_count = 2; -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("1000").unwrap()), -// ); -// let mut cfg = ConsensusConfig { -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .saturating_sub(MassaTime::from(30000)), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&[keypair_1, keypair_2], &ledger) -// }; -// // init roll count -// let mut roll_counts = RollCounts::default(); -// let update = RollUpdate { -// roll_purchases: 1, -// roll_sales: 0, -// }; -// let mut updates = RollUpdates::default(); -// updates.apply(&address_1, &update).unwrap(); -// roll_counts.apply_updates(&updates).unwrap(); - -// let initial_rolls_file = generate_roll_counts_file(&roll_counts); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// tools::consensus_without_pool_test( -// cfg.clone(), -// async move |protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let _genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// /// /// See the test removed at https://gitlab.com/massalabs/massa-network/-/merge_requests/381/diffs#a5bee3b1b5cc9d8157b6feee0ac3e775aa457a33_544_539 -// /// -// /// **NOTE: that test is expected to fail 1 / 1000 times** -// /// -// /// -// /// ### Context -// /// ``` -// /// * price per roll = 1000 -// /// * periods per cycle = 30 000 -// /// * t0 = 500ms -// /// * look-back = 2 -// /// * thread count = 2 -// /// * delta f0 = 3 -// /// * genesis timestamp = now - t0 * periods per cycle * 3 - 1000 -// /// * block reward = 0 -// /// * fee = 0 for every operation -// /// * address 1 has 1 roll and 0 coins -// /// * address 2 is in consensus and has 0 roll and 1000 coins -// /// ``` -// /// ### Initialization -// /// Following blocks are sent through a protocol event to consensus right at the beginning. They all have best parents as parents. 
-// /// * block at slot(1,0) with operation address 2 buys 1 roll -// /// * block at slot( period per cycle, 0) -// /// * block at slot( period per cycle, 1) -// /// * block at slot( period per cycle + 1, 0) -// /// * block at slot( period per cycle + 1, 1) -// /// * block at slot( period per cycle + 2, 0) -// /// * block at slot( period per cycle + 2, 0) -// /// -// /// ### Scenario -// /// -// /// * start consensus -// /// * blocks previously described are sent to consensus through a protocol event -// /// * assert they are propagated -// /// * ```let draws = get_selection_draws( (3*periods_per cycle, 0), (4*periods_per cycle, 0)``` -// /// * assert -// /// ```math -// /// abs(1/2 - \frac{TimesAddr1WasDrawn}{ThreadCount * PeriodsPerCycle}) < 0.01 -// /// ``` -// /// (see [the math](https://en.wikipedia.org/wiki/Checking_whether_a_coin_is_fair)) -// /// * wait for cycle 3 beginning -// /// * for the 10 first slots of cycle 3 -// /// * if address 2 was selected assert consensus created and propagated a block -// /// * if address 1 was selected assert nothing is propagated -// #[tokio::test] -// #[serial] -// //#[ignore] -// async fn test_block_creation_with_draw() { -// let thread_count = 2; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// // addr 1 has 1 roll and 0 coins -// // addr 2 is in consensus and has 0 roll and 1000 coins -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); - -// let staking_keys = vec![keypair_1.clone(), keypair_2.clone()]; - -// // init address_2 with 1000 coins -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("1000").unwrap()), -// ); - -// // finally create the configuration -// let t0 = MassaTime::from(1000); -// let periods_per_cycle = 1000; -// let mut cfg = ConsensusConfig { -// block_reward: Amount::default(), -// delta_f0: 3, -// max_operations_per_block: 50, -// operation_validity_periods: 100, -// periods_per_cycle, -// roll_price: Amount::from_str("1000").unwrap(), -// t0, -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .checked_sub((t0.to_millis() * periods_per_cycle * 3).into()) -// .unwrap() -// .checked_add(2000.into()) -// .unwrap(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// // init roll count -// let mut roll_counts = RollCounts::default(); -// let update = RollUpdate { -// roll_purchases: 1, -// roll_sales: 0, -// }; -// let mut updates = RollUpdates::default(); -// updates.apply(&address_1, &update).unwrap(); -// roll_counts.apply_updates(&updates).unwrap(); -// let initial_rolls_file = generate_roll_counts_file(&roll_counts); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// let operation_fee = 0; -// tools::consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // initial block: addr2 buys 1 roll -// let op1 = create_roll_transaction(&keypair_2, 1, true, 10, operation_fee); -// storage.store_operations(vec![op1.clone()]); -// let block = tools::create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// 
&staking_keys[0], -// vec![op1], -// ); - -// tools::propagate_block(&mut protocol_controller, block.clone(), true, 1000).await; - -// // make cycle 0 final/finished by sending enough blocks in each thread in cycle 1 -// // note that blocks in cycle 3 may be created during this, so make sure that their clique is overrun by sending a large amount of blocks -// let mut cur_parents = vec![block.id, genesis_ids[1]]; -// for delta_period in 0u64..10 { -// for thread in 0..cfg.thread_count { -// let res_block_id = tools::create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(cfg.periods_per_cycle + delta_period, thread), -// cur_parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// cur_parents[thread as usize] = res_block_id; -// } -// } - -// // get draws for cycle 3 (lookback = cycle 0) -// let mut draws: HashMap = HashMap::default(); -// for i in (3 * cfg.periods_per_cycle)..(4 * cfg.periods_per_cycle) { -// let slot = Slot::new(i, 0); -// draws.insert( -// slot, -// selector_controller.get_selection(slot).unwrap().producer, -// ); -// } -// let nb_address1_draws = draws.iter().filter(|(_, addr)| **addr == address_1).count(); -// // fair coin test. See https://en.wikipedia.org/wiki/Checking_whether_a_coin_is_fair -// // note: this is a statistical test. It may fail in rare occasions. -// assert!( -// (0.5 - ((nb_address1_draws as f32) -// / ((cfg.thread_count as u64 * cfg.periods_per_cycle) as f32))) -// .abs() -// < 0.15 -// ); - -// // check 10 draws -// let draws: HashMap = draws.into_iter().collect(); -// let mut cur_slot = Slot::new(cfg.periods_per_cycle * 3, 0); -// for _ in 0..10 { -// // wait block propagation -// let block_creator = protocol_controller -// .wait_command(3500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// if stored_block.content.header.content.slot == cur_slot { -// Some(stored_block.creator_public_key) -// } else { -// None -// } -// } -// _ => None, -// }) -// .await -// .expect("block did not propagate in time"); -// assert_eq!( -// draws[&cur_slot], -// Address::from_public_key(&block_creator), -// "wrong block creator" -// ); -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// } - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -/// https://gitlab.com/massalabs/massa/-/issues/301 -/// -/// Block creation reception mix test -/// -/// see https://gitlab.com/massalabs/massa/-/issues/295#note_693561778 -/// -/// -/// two staking keys. Only key a is registered in consensus -/// start before genesis timestamp -/// retrieve next draws -/// for a few slots: -/// if it's key b time to create a block create it and send it to consensus -/// if key a created a block, assert it has chosen as parents expected blocks (no misses), and that it was sent to protocol around the time it was expected. 
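The commented-out draw test above hinges on a statistical fairness check: with each address holding one roll, address 1 should be selected for roughly half of the `thread_count * periods_per_cycle` draws, within a 0.15 tolerance (as the comments note, such a test can still fail on rare occasions). A standalone sketch of that check, with illustrative names and numbers:

```rust
/// Returns true when the observed selection frequency of address 1 stays
/// within `tolerance` of the expected 1/2, as in the fair-coin test above.
fn looks_fair(address1_draws: u32, total_draws: u32, tolerance: f32) -> bool {
    (0.5 - address1_draws as f32 / total_draws as f32).abs() < tolerance
}

fn main() {
    // 520 wins out of 1000 draws is well within the 0.15 tolerance.
    assert!(looks_fair(520, 1000, 0.15));
    // 900 out of 1000 would indicate a biased selector.
    assert!(!looks_fair(900, 1000, 0.15));
}
```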
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_interleaving_block_creation_with_reception() {
-    let thread_count = 1;
-    // define the addresses used for the test
-    // addresses a and b are both in thread 0
-    let (address_1, _) = random_address_on_thread(0, thread_count).into();
-    let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into();
-
-    let mut ledger = HashMap::new();
-    ledger.insert(
-        address_2,
-        LedgerData::new(Amount::from_mantissa_scale(1000, 0)),
-    );
-    let cfg = ConsensusConfig {
-        thread_count,
-        t0: 1000.into(),
-        genesis_timestamp: MassaTime::now(0).unwrap().checked_add(1000.into()).unwrap(),
-        ..ConsensusConfig::default()
-    };
-    // init roll count
-    let mut roll_counts = RollCounts::default();
-    let update = RollUpdate {
-        roll_purchases: 1,
-        roll_sales: 0,
-    };
-    let mut updates = RollUpdates::default();
-    updates.apply(&address_1, &update).unwrap();
-    updates.apply(&address_2, &update).unwrap();
-    roll_counts.apply_updates(&updates).unwrap();
-
-    tools::consensus_without_pool_with_storage_test(
-        cfg.clone(),
-        async move |mut storage,
-                    mut protocol_controller,
-                    consensus_command_sender,
-                    consensus_event_receiver,
-                    selector_controller| {
-            let mut parents = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .expect("could not get block graph status")
-                .genesis_blocks;
-
-            sleep_until(tokio::time::Instant::from_std(
-                cfg.genesis_timestamp
-                    .saturating_add(cfg.t0)
-                    .saturating_sub(150.into())
-                    .estimate_instant(0)
-                    .expect("could not estimate instant for genesis timestamps"),
-            ))
-            .await;
-
-            // check 10 draws
-            // key1 and key2 can both be drawn to produce blocks,
-            // but the local node only has key1,
-            // so when key2 is selected a block must be produced remotely
-            // and sent to the local node through protocol
-            for i in 1..11 {
-                let cur_slot = Slot::new(i, 0);
-                let creator = &selector_controller
-                    .get_selection(cur_slot)
-                    .expect("missing slot in draws")
-                    .producer;
-
-                let block_id = if *creator == address_1 {
-                    // wait for block propagation
-                    let (header, id) = protocol_controller
-                        .wait_command(cfg.t0.saturating_add(300.into()), |cmd| match cmd {
-                            ProtocolCommand::IntegratedBlock { block_id, storage } => {
-                                let block = storage
-                                    .read_blocks()
-                                    .get(&block_id)
-                                    .unwrap_or_else(|| {
-                                        panic!("Block id : {} not found in storage", block_id)
-                                    })
-                                    .clone();
-                                if block.content.header.content.slot == cur_slot {
-                                    Some((block.content.header, block_id))
-                                } else {
-                                    None
-                                }
-                            }
-                            _ => None,
-                        })
-                        .await
-                        .expect("block did not propagate in time");
-                    assert_eq!(*creator, header.creator_address, "wrong block creator");
-                    id
-                } else if *creator == address_2 {
-                    // create the block and propagate it
-                    let block = tools::create_block_with_operations(
-                        &cfg,
-                        cur_slot,
-                        &parents,
-                        &keypair_2,
-                        vec![],
-                    );
-                    storage.store_block(block.clone());
-                    tools::propagate_block(
-                        &mut protocol_controller,
-                        block.id,
-                        block.content.header.content.slot,
-                        storage.clone(),
-                        true,
-                        cfg.t0.to_millis() + 300,
-                    )
-                    .await;
-                    block.id
-                } else {
-                    panic!("unexpected block creator");
-                };
-                parents[0] = block_id;
-            }
-
-            (
-                protocol_controller,
-                consensus_command_sender,
-                consensus_event_receiver,
-                selector_controller,
-            )
-        },
-    )
-    .await;
-}
-
-// /// https://gitlab.com/massalabs/massa-network-archive/-/issues/343
-// /// Test block creation with operations
-// ///
-// /// Test consensus block creation with an initial graph and simulated pool
-// ///
-// /// In all tests, once it has started there is only one block creator, so
we expect consensus to create blocks at every slots after initialization. -// /// -// /// context -// /// -// /// ``` -// /// initial ledger: A:100 -// /// op1 : A -> B : 5, fee 1 -// /// op2 : A -> B : 50, fee 10 -// /// op3 : B -> A : 10, fee 15 -// /// ``` -// /// -// /// --- -// /// -// /// ``` -// /// create block at (0,1) -// /// operations should be [op2, op1] -// /// ``` -// #[tokio::test] -// #[serial] -// async fn test_order_of_inclusion() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// // Increase timestamp a bit to avoid missing the first slot. -// let init_time: MassaTime = 1000.into(); -// let mut cfg = ConsensusConfig { -// genesis_timestamp: MassaTime::now(0).unwrap().checked_add(init_time).unwrap(), -// max_operations_per_block: 50, -// operation_validity_periods: 10, -// t0: 1000.into(), -// ..ConsensusConfig::default() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_a, LedgerData::new(Amount::from_str("100").unwrap())); -// let initial_ledger_file = generate_ledger_file(&ledger); // don't drop the `NamedTempFile` -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); - -// let op1 = create_transaction(&keypair_a, address_b, 5, 10, 1); -// let op2 = create_transaction(&keypair_a, address_b, 50, 10, 10); -// let op3 = create_transaction(&keypair_b, address_a, 10, 10, 15); - -// // there is only one node so it should be drawn at every slot - -// tools::consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// storage, -// selector_controller| { -// //TODO: Replace -// // wait for first slot -// // pool_controller -// // .wait_command( -// // cfg.t0.saturating_mul(2).saturating_add(init_time), -// // |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(1, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }, -// // ) -// // .await -// // .expect("timeout while waiting for slot"); -// // -// // respond to first pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx -// // .send(vec![ -// // (op3.clone(), 50), -// // (op2.clone(), 50), -// // (op1.clone(), 50), -// // ]) -// // .unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // // respond to second pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // exclude, -// // .. -// // } => { -// // assert!(!exclude.is_empty()); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 2nd operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(300.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 0)); -// let expected = vec![op2.clone(), op1.clone()]; -// let res = block.content.operations.clone(); -// assert_eq!(block.content.operations.len(), 2); -// for i in 0..2 { -// assert!(res.contains(&expected[i].id)); -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// /// https://gitlab.com/massalabs/massa-network-archive/-/issues/343 -// /// Test block creation with operations -// /// -// /// Test consensus block creation with an initial graph and simulated pool -// /// -// /// In all tests, once it has started there is only one block creator, so we expect consensus to create blocks at every slots after initialization. -// /// -// /// context -// /// -// /// ```` -// /// initial ledger A = 1 000 000 -// /// max_block_size = 500 -// /// max_operations_per_block = 10 000 -// /// op_i = A -> B : 10, 1, signed for the i-th time -// /// ``` -// /// -// /// --- -// /// ``` -// /// let block_size = size of dummy block at (1,0) without any operation -// /// let op_size = size of an operation -// /// while consensus is asking for operations send next ops -// /// assert created_block_size is max_block_size +/- one op_size -// /// assert created_block_size = block_size +`op_size * op_count -// /// ``` -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_block_filling() { -// let thread_count = 2; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, thread_count).into(); -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_a, -// LedgerData::new(Amount::from_str("1000000000").unwrap()), -// ); -// let cfg = ConsensusConfig { -// endorsement_count: 10, -// max_block_size: 2000, -// max_operations_per_block: 5000, -// operation_validity_periods: 10, -// periods_per_cycle: 3, -// t0: 1000.into(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger( -// &[keypair_a.clone(), keypair_b.clone()], -// &ledger, -// ) -// }; - -// let mut ops = vec![create_executesc( -// &keypair_a, -// 10, -// 10, -// vec![1; 200], // dummy bytes as here we do not test the content -// 1_000, -// 0, -// 1, -// )]; // this operation has an higher rentability than any other - -// for _ in 0..500 { -// ops.push(create_transaction(&keypair_a, address_a, 5, 10, 1)) -// } - -// tools::consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// storage, -// selector_controller| { -// let op_size = 10; - -// // wait for slot -// //let mut prev_blocks 
= Vec::new(); -// for cur_slot in [Slot::new(1, 0), Slot::new(1, 1)] { -// //TODO: Replace -// // pool_controller -// // .wait_command(cfg.t0.checked_mul(2).unwrap(), |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == cur_slot { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); -// // // respond to pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx.send(Default::default()).unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for operation batch request"); -// // // wait for block -// // let block = protocol_controller -// // .wait_command(500.into(), |cmd| match cmd { -// // ProtocolCommand::IntegratedBlock { block_id } => { -// // let block = storage -// // .retrieve_block(&block_id) -// // .expect(&format!("Block id : {} not found in storage", block_id)); -// // let stored_block = block.read(); -// // Some(stored_block.clone()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for block"); -// // assert_eq!(block.content.header.content.slot, cur_slot); -// // prev_blocks.push(block.id); -// // } -// } - -// // // wait for slot p2t0 -// // pool_controller -// // .wait_command(cfg.t0, |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(2, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); - -// // // respond to endorsement command -// // let eds = pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetEndorsements { -// // target_slot, -// // parent, -// // creators, -// // response_tx, -// // .. -// // } => { -// // assert_eq!(Slot::new(1, 0), target_slot); -// // assert_eq!(parent, prev_blocks[0]); -// // let mut eds: Vec = Vec::new(); -// // for (index, creator) in creators.iter().enumerate() { -// // let ed = if *creator == address_a { -// // create_endorsement(&keypair_a, target_slot, parent, index as u32) -// // } else if *creator == address_b { -// // create_endorsement(&keypair_b, target_slot, parent, index as u32) -// // } else { -// // panic!("invalid endorser choice"); -// // }; -// // eds.push(ed); -// // } -// // response_tx.send(eds.clone()).unwrap(); -// // Some(eds) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for endorsement request"); -// // assert_eq!(eds.len() as u32, cfg.endorsement_count); - -// // respond to first pool batch command -// //TODO: Replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx -// // .send(ops.iter().map(|op| (op.clone(), op_size)).collect()) -// // .unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // respond to second pool batch command -// //TODO: Replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // exclude, -// // .. 
-// // } => { -// // assert!(!exclude.is_empty()); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 2nd operation batch request"); - -// let eds: Vec = Vec::new(); -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(2, 0)); - -// // assert it includes the sent endorsements -// assert_eq!(block.content.header.content.endorsements.len(), eds.len()); -// for (e_found, e_expected) in block -// .content -// .header -// .content -// .endorsements -// .iter() -// .zip(eds.iter()) -// { -// assert_eq!(e_found.id, e_expected.id); -// assert_eq!(e_expected.id, e_expected.id); -// } - -// // create empty block -// let header = BlockHeader::new_wrapped( -// BlockHeader { -// slot: block.content.header.content.slot, -// parents: block.content.header.content.parents.clone(), -// operation_merkle_root: Hash::compute_from(&Vec::new()[..]), -// endorsements: eds, -// }, -// BlockHeaderSerializer::new(), -// &keypair_a, -// ) -// .unwrap(); -// let empty: WrappedBlock = Block::new_wrapped( -// Block { -// header, -// operations: Default::default(), -// }, -// BlockSerializer::new(), -// &keypair_a, -// ) -// .unwrap(); -// let remaining_block_space = (cfg.max_block_size as usize) -// .checked_sub(empty.serialized_data.len() as usize) -// .unwrap(); - -// let nb = remaining_block_space / (op_size as usize); -// assert_eq!(block.content.operations.len(), nb); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenario_roll.rs b/massa-consensus-worker/src/tests/scenario_roll.rs deleted file mode 100644 index 20c30933c3d..00000000000 --- a/massa-consensus-worker/src/tests/scenario_roll.rs +++ /dev/null @@ -1,974 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use massa_consensus_exports::{ -// settings::ConsensusChannels, -// test_exports::{ -// generate_default_roll_counts_file, generate_ledger_file, generate_staking_keys_file, -// }, -// ConsensusConfig, -// }; -// use massa_execution_exports::test_exports::MockExecutionController; -// use massa_models::{Address, Amount, BlockId, Slot}; -// use massa_pos_exports::SelectorConfig; -// use massa_pos_worker::start_selector_worker; -// use massa_protocol_exports::ProtocolCommand; -// use massa_storage::Storage; -// use massa_time::MassaTime; -// use num::rational::Ratio; -// use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; -// use serial_test::serial; -// use std::collections::{HashMap, VecDeque}; -// use std::str::FromStr; - -// use crate::{ -// start_consensus_controller, -// tests::{ -// mock_pool_controller::MockPoolController, -// mock_protocol_controller::MockProtocolController, -// tools::{ -// consensus_pool_test_with_storage, create_block, create_block_with_operations, -// create_roll_buy, create_roll_sell, get_creator_for_draw, propagate_block, -// random_address_on_thread, wait_pool_slot, -// 
}, -// }, -// }; -// use massa_models::ledger_models::LedgerData; -// use massa_models::prehash::Set; - -// #[tokio::test] -// #[serial] -// async fn test_roll() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(2) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let init_time: MassaTime = 1000.into(); -// let mut cfg = ConsensusConfig { -// t0: 500.into(), -// periods_per_cycle: 2, -// delta_f0: 3, -// block_reward: Amount::default(), -// roll_price: Amount::from_str("1000").unwrap(), -// operation_validity_periods: 100, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(init_time), -// ..Default::default() -// }; -// // define addresses use for the test -// // addresses 1 and 2 both in thread 0 -// let (address_1, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("10000").unwrap()), -// ); -// let initial_ledger_file = generate_ledger_file(&ledger); -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); - -// let staking_keys_file = generate_staking_keys_file(&[keypair_2.clone()]); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); - -// let initial_rolls_file = generate_default_roll_counts_file(vec![keypair_1.clone()]); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |mut pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// mut storage, -// selector_controller| { -// let mut parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// // operations -// let rb_a1_r1_err = create_roll_buy(&keypair_1, 1, 90, 0); -// let rs_a2_r1_err = create_roll_sell(&keypair_2, 1, 90, 0); -// let rb_a2_r1 = create_roll_buy(&keypair_2, 1, 90, 0); -// let rs_a2_r1 = create_roll_sell(&keypair_2, 1, 90, 0); -// let rb_a2_r2 = create_roll_buy(&keypair_2, 2, 90, 0); -// let rs_a2_r2 = create_roll_sell(&keypair_2, 2, 90, 0); - -// // Store operations to make them accessible to the consensus worker. -// storage.store_operations(vec![ -// rb_a1_r1_err.clone(), -// rs_a2_r1_err.clone(), -// rb_a2_r1.clone(), -// rs_a2_r1.clone(), -// rb_a2_r2.clone(), -// rs_a2_r2.clone(), -// ]); - -// let mut addresses = Set::
::default(); -// addresses.insert(address_2); -// let addresses = addresses; - -// // cycle 0 -// let block1_err1 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rb_a1_r1_err], -// ); -// tokio::time::sleep(init_time.to_duration()).await; -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 0).await; -// // invalid because a1 has not enough coins to buy a roll -// propagate_block(&mut protocol_controller, block1_err1, false, 150).await; - -// let block1_err2 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r1_err], -// ); -// // invalid because a2 does not have enough rolls to sell -// propagate_block(&mut protocol_controller, block1_err2, false, 150).await; - -// let block1 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rb_a2_r1], -// ); - -// // valid -// propagate_block(&mut protocol_controller, block1.clone(), true, 150).await; -// parents[0] = block1.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 1); -// assert_eq!( -// addr_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("9000").unwrap() -// ); - -// let block1t1 = -// create_block_with_operations(&cfg, Slot::new(1, 1), &parents, &keypair_1, vec![]); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block1t1.clone(), true, 150).await; -// parents[1] = block1t1.id; - -// // cycle 1 - -// let block2 = create_block_with_operations( -// &cfg, -// Slot::new(2, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r1], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 2, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block2.clone(), true, 150).await; -// parents[0] = block2.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// let block2t2 = -// create_block_with_operations(&cfg, Slot::new(2, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 2, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block2t2.clone(), true, 150).await; -// parents[1] = block2t2.id; - -// // miss block 3 in thread 0 - -// // block 3 in thread 1 -// let block3t1 = -// create_block_with_operations(&cfg, Slot::new(3, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 3, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block3t1.clone(), true, 150).await; -// parents[1] = block3t1.id; - -// // cycle 2 - -// // miss block 4 - -// let block4t1 = -// create_block_with_operations(&cfg, Slot::new(4, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 4, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block4t1.clone(), true, 150).await; -// parents[1] = block4t1.id; - -// let 
block5 = -// create_block_with_operations(&cfg, Slot::new(5, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 5, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block5.clone(), true, 150).await; -// parents[0] = block5.id; - -// let block5t1 = -// create_block_with_operations(&cfg, Slot::new(5, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 5, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block5t1.clone(), true, 150).await; -// parents[1] = block5t1.id; - -// let other_addr = -// if selector_controller.get_producer(Slot::new(6, 0)).unwrap() == address_1 { -// address_2 -// } else { -// address_1 -// }; - -// let block6_err = create_block_with_operations( -// &cfg, -// Slot::new(6, 0), -// &parents, -// &get_creator_for_draw(&other_addr, &vec![keypair_1.clone(), keypair_2.clone()]), -// vec![], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 6, 0).await; -// // invalid: other_addr wasn't drawn for that block creation -// propagate_block(&mut protocol_controller, block6_err, false, 150).await; - -// let block6 = create_block_with_operations( -// &cfg, -// Slot::new(6, 0), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// // valid -// propagate_block(&mut protocol_controller, block6.clone(), true, 150).await; -// parents[0] = block6.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 1); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let block6t1 = create_block_with_operations( -// &cfg, -// Slot::new(6, 1), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 6, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block6t1.clone(), true, 150).await; -// parents[1] = block6t1.id; - -// let block7 = create_block_with_operations( -// &cfg, -// Slot::new(7, 0), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 7, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block7.clone(), true, 150).await; -// parents[0] = block7.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 1); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let block7t1 = create_block_with_operations( -// &cfg, -// Slot::new(7, 1), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 7, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block7t1.clone(), true, 150).await; -// parents[1] = block7t1.id; - -// // cycle 4 - -// let block8 = 
create_block_with_operations( -// &cfg, -// Slot::new(8, 0), -// &parents, -// &keypair_1, -// vec![rb_a2_r2], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 8, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block8.clone(), true, 150).await; -// parents[0] = block8.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 2); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("7000").unwrap()); - -// let block8t1 = -// create_block_with_operations(&cfg, Slot::new(8, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 8, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block8t1.clone(), true, 150).await; -// parents[1] = block8t1.id; - -// let block9 = create_block_with_operations( -// &cfg, -// Slot::new(9, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r2], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 9, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block9.clone(), true, 150).await; -// parents[0] = block9.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// let block9t1 = -// create_block_with_operations(&cfg, Slot::new(9, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 9, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block9t1.clone(), true, 150).await; -// parents[1] = block9t1.id; - -// // cycle 5 - -// let block10 = -// create_block_with_operations(&cfg, Slot::new(10, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 10, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block10.clone(), true, 150).await; -// parents[0] = block10.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 2); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let balance = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info -// .balance; -// assert_eq!(balance, Amount::from_str("10000").unwrap()); - -// let block10t1 = -// create_block_with_operations(&cfg, Slot::new(10, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 10, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block10t1.clone(), true, 150).await; -// parents[1] = block10t1.id; - -// let block11 = -// create_block_with_operations(&cfg, Slot::new(11, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 11, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block11.clone(), 
true, 150).await; -// parents[0] = block11.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_roll_block_creation() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(4) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let mut cfg = ConsensusConfig { -// block_reward: Amount::default(), -// delta_f0: 3, -// operation_validity_periods: 10, -// max_block_size: 500, -// max_operations_per_block: 5000, -// periods_per_cycle: 2, -// roll_price: Amount::from_str("1000").unwrap(), -// t0: 500.into(), -// ..Default::default() -// }; -// // define addresses use for the test -// // addresses 1 and 2 both in thread 0 -// let (_, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("10000").unwrap()), -// ); -// let initial_ledger_file = generate_ledger_file(&ledger); -// let staking_keys_file = generate_staking_keys_file(&[keypair_1.clone()]); -// let initial_rolls_file = generate_default_roll_counts_file(vec![keypair_1.clone()]); -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); -// // mock protocol & pool -// let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = -// MockProtocolController::new(); -// let mut pool_controller = MockPoolController::new(); -// let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - -// let init_time: MassaTime = 1000.into(); -// cfg.genesis_timestamp = MassaTime::now(0).unwrap().saturating_add(init_time); -// let storage: Storage = Storage::create_root(); -// // launch consensus controller -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// ..Default::default() -// }; -// let (_selector_manager, selector_controller) = -// start_selector_worker(selector_config, VecDeque::new()).unwrap(); -// let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = -// start_consensus_controller( -// cfg.clone(), -// ConsensusChannels { -// execution_controller, -// protocol_command_sender: protocol_command_sender.clone(), -// protocol_event_receiver, -// pool_command_sender: Box::new(pool_controller.clone()), -// selector_controller, -// }, -// None, -// storage.clone(), -// 0, -// ) -// .await -// .expect("could not start consensus controller"); - -// // operations -// let rb_a2_r1 = create_roll_buy(&keypair_2, 1, 90, 0); -// let rs_a2_r1 = create_roll_sell(&keypair_2, 1, 90, 0); - -// let mut addresses = Set::
::default(); -// addresses.insert(address_2); -// let addresses = addresses; - -// // wait for first slot -// // TODO: Replace ?? -// // pool_controller -// // .wait_command( -// // cfg.t0.saturating_mul(2).saturating_add(init_time), -// // |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(1, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }, -// // ) -// // .await -// // .expect("timeout while waiting for slot"); - -// // // cycle 0 -// // println!("Test"); -// // // respond to first pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(1, 0)); -// // response_tx.send(vec![(rb_a2_r1.clone(), 10)]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// println!("Integrated block"); -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 0)); -// assert_eq!(block.content.operations.len(), 1); -// assert!(block.content.operations.contains(&rb_a2_r1.id)); - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 1); - -// let balance = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info -// .balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 1).await; -// // TODO: Replace ?? -// // slot 1,1 -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(1, 1)); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 1)); -// assert!(block.content.operations.is_empty()); - -// // cycle 1 - -// //TODO: replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(2, 0)); -// // response_tx.send(vec![(rs_a2_r1.clone(), 10)]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(2, 0)); -// assert_eq!(block.content.operations.len(), 1); -// assert!(block.content.operations.contains(&rs_a2_r1.id)); - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); -// } - -// #[tokio::test] -// #[serial] -// async fn test_roll_deactivation() { -// /* -// Scenario: -// * deactivation threshold at 50% -// * thread_count = 10 -// * lookback_cycles = 2 -// * periods_per_cycle = 10 -// * delta_f0 = 2 -// * all addresses have 1 roll initially -// * in cycle 0: -// * an address A0 in thread 0 produces 20% of its blocks -// * an address B0 in thread 0 produces 80% of its blocks -// * an address A1 in thread 1 produces 20% of its blocks -// * an address B1 in thread 1 produces 80% of its blocks -// * at the next cycles, all addresses produce all their blocks -// * at the 1st block of thread 0 in cycle 2: -// * address A0 has (0 candidate, 1 final, 1 active) rolls -// * address B0 has (1 candidate, 1 final, 1 active) rolls -// * address A1 has (1 candidate, 1 final, 1 active) rolls -// * address B1 has (1 candidate, 1 final, 1 active) rolls -// * at the 1st block of thread 1 in cycle 2: -// * address A0 has (0 candidate, 1 final, 1 active) rolls -// * address B0 has (1 candidate, 1 final, 1 active) rolls -// 
* address A1 has (0 candidate, 1 final, 1 active) rolls -// * address B1 has (1 candidate, 1 final, 1 active) rolls -// */ -// let mut cfg = ConsensusConfig { -// delta_f0: 2, -// thread_count: 4, -// periods_per_cycle: 5, -// pos_lookback_cycles: 1, -// t0: 400.into(), -// roll_price: Amount::from_mantissa_scale(10, 0), -// pos_miss_rate_deactivation_threshold: Ratio::new(50, 100), -// ..Default::default() -// }; -// let storage: Storage = Storage::create_root(); - -// // setup addresses -// let (address_a0, keypair_a0) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b0, keypair_b0) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_a1, keypair_a1) = random_address_on_thread(1, cfg.thread_count).into(); -// let (address_b1, keypair_b1) = random_address_on_thread(1, cfg.thread_count).into(); - -// let initial_ledger_file = generate_ledger_file(&HashMap::new()); -// let staking_keys_file = generate_staking_keys_file(&[]); -// let initial_rolls_file = generate_default_roll_counts_file(vec![ -// keypair_a0.clone(), -// keypair_a1.clone(), -// keypair_b0.clone(), -// keypair_b1.clone(), -// ]); - -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// // mock protocol & pool -// let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = -// MockProtocolController::new(); -// let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); -// let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// ..Default::default() -// }; -// let (_selector_manager, selector_controller) = -// start_selector_worker(selector_config, VecDeque::new()).unwrap(); -// cfg.genesis_timestamp = MassaTime::now(0).unwrap().saturating_add(300.into()); - -// // launch consensus controller -// let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = -// start_consensus_controller( -// cfg.clone(), -// ConsensusChannels { -// execution_controller, -// protocol_command_sender: protocol_command_sender.clone(), -// protocol_event_receiver, -// pool_command_sender: pool_controller, -// selector_controller: selector_controller.clone(), -// }, -// None, -// storage, -// 0, -// ) -// .await -// .expect("could not start consensus controller"); - -// let mut cur_slot = Slot::new(0, 0); -// let mut best_parents = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .genesis_blocks; -// let mut cycle_draws = HashMap::new(); -// let mut draws_cycle = None; -// 'outer: loop { -// // wait for slot info -// // let latest_slot = pool_controller -// // .wait_command(cfg.t0.checked_mul(2).unwrap(), |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => Some(s), -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); -// let latest_slot = Slot::new(0, 0); -// // apply all slots in-between -// while cur_slot <= latest_slot { -// // skip genesis -// if cur_slot.period == 0 { -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// continue; -// } -// let cur_cycle = cur_slot.get_cycle(cfg.periods_per_cycle); - -// // get draws -// if draws_cycle != Some(cur_cycle) { -// for i in std::cmp::max(cur_cycle * cfg.periods_per_cycle, 1)..(cur_cycle + 
1) { -// let slot = Slot::new(i, 0); -// cycle_draws.insert( -// slot, -// Some(selector_controller.get_selection(slot).unwrap().producer), -// ); -// } -// if cur_cycle == 0 { -// // controlled misses in cycle 0 -// for address in [address_a0, address_a1, address_b0, address_b1] { -// let mut address_draws: Vec = cycle_draws -// .iter() -// .filter_map(|(s, opt_a)| { -// if let Some(a) = opt_a { -// if *a == address { -// return Some(*s); -// } -// } -// None -// }) -// .collect(); -// assert!( -// !address_draws.is_empty(), -// "unlucky seed: address has no draws in cycle 0, cannot perform test" -// ); -// address_draws.shuffle(&mut StdRng::from_entropy()); -// let produce_count: usize = if address == address_a0 || address == address_a1 -// { -// // produce less than 20% -// 20 * address_draws.len() / 100 -// } else { -// // produce more than 80% -// std::cmp::min(address_draws.len(), (80 * address_draws.len() / 100) + 1) -// }; -// address_draws.truncate(produce_count); -// for (slt, opt_addr) in cycle_draws.iter_mut() { -// if *opt_addr == Some(address) && !address_draws.contains(slt) { -// *opt_addr = None; -// } -// } -// } -// } -// draws_cycle = Some(cur_cycle); -// } -// let cur_draw = cycle_draws[&cur_slot]; - -// // create and propagate block -// if let Some(addr) = cur_draw { -// let creator_privkey = if addr == address_a0 { -// keypair_a0.clone() -// } else if addr == address_a1 { -// keypair_a1.clone() -// } else if addr == address_b0 { -// keypair_b0.clone() -// } else if addr == address_b1 { -// keypair_b1.clone() -// } else { -// panic!("invalid address selected"); -// }; -// let block_id = propagate_block( -// &mut protocol_controller, -// create_block(&cfg, cur_slot, best_parents.clone(), &creator_privkey), -// true, -// 500, -// ) -// .await; - -// // update best parents -// best_parents[cur_slot.thread as usize] = block_id; -// } - -// // check candidate rolls -// let addrs_info = consensus_command_sender -// .get_addresses_info( -// vec![address_a0, address_a1, address_b0, address_b1] -// .into_iter() -// .collect(), -// ) -// .await -// .unwrap() -// .clone(); -// if cur_slot.period == (1 + cfg.pos_lookback_cycles) * cfg.periods_per_cycle { -// if cur_slot.thread == 0 { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } else if cur_slot.thread == 1 { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } else { -// break 'outer; -// } -// } else { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } - -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// } -// } -// } diff --git a/massa-consensus-worker/src/tests/scenarios106.rs b/massa-consensus-worker/src/tests/scenarios106.rs deleted file mode 100644 index 38ccc690cf9..00000000000 --- a/massa-consensus-worker/src/tests/scenarios106.rs +++ /dev/null @@ -1,869 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture 
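Note on the roll tests deleted above: they pin down the proof-of-stake deactivation rule, in which an address that misses more than pos_miss_rate_deactivation_threshold of its block draws over the lookback cycle has its candidate rolls implicitly sold. A minimal sketch of that check, assuming the num crate's Ratio (matching the Ratio::new(50, 100) above) and hypothetical names:

use num::rational::Ratio;

/// Hypothetical helper, not the node's actual API: returns true when the
/// address's miss rate over the cycle strictly exceeds the threshold.
/// Assumes produced <= drawn.
fn should_deactivate(produced: u64, drawn: u64, threshold: Ratio<u64>) -> bool {
    if drawn == 0 {
        return false; // the address was never selected, nothing to measure
    }
    Ratio::new(drawn - produced, drawn) > threshold
}

Under the scenario's numbers, producing 20% of the draws gives a miss rate of 4/5 > 1/2 and triggers deactivation, while producing 80% gives 1/5 and keeps the rolls, which is exactly the candidate-roll split the deleted test asserts at cycle 2.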
- -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::prehash::PreHashSet; -use massa_models::timeslots; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashSet; -use std::time::Duration; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_unsorted_block() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 1000.into(), - future_block_processing_max_periods: 50, - max_future_processing_blocks: 10, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_period = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - // create test blocks - - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_period, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1 + start_period, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2 + start_period, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2 + start_period, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3 + start_period, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3 + start_period, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - let t0s4 = create_block( - &cfg, - Slot::new(4 + start_period, 0), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - let t1s4 = create_block( - &cfg, - Slot::new(4 + start_period, 1), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - - // send blocks t0s1, t1s1, - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; - // send blocks t0s3, t1s4, t0s4, t0s2, t1s3, t1s2 - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s4.clone()); - protocol_controller - .receive_block(t1s4.id, t1s4.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s4.clone()); - protocol_controller - .receive_block(t0s4.id, t0s4.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - - // block t0s1 and t1s1 are propagated - let hash_list = vec![t0s1.id, t1s1.id]; - validate_propagate_block_in_list( - &mut 
protocol_controller, - &hash_list, - 3000 + start_period * 1000, - ) - .await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s2 and t1s2 are propagated - let hash_list = vec![t0s2.id, t1s2.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s3 and t1s3 are propagated - let hash_list = vec![t0s3.id, t1s3.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s4 and t1s4 are propagated - let hash_list = vec![t0s4.id, t1s4.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 4000).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -//test future_incoming_blocks block in the future with max_future_processing_blocks. -#[tokio::test] -#[serial] -#[ignore] -async fn test_unsorted_block_with_to_much_in_the_future() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 1000.into(), - // slot 1 is in the past - genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(2000.into()), - future_block_processing_max_periods: 3, - max_future_processing_blocks: 5, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - // create test blocks - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // a block in the past must be propagated - let block1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block1.clone()); - protocol_controller - .receive_block( - block1.id, - block1.content.header.content.slot, - storage.clone(), - ) - .await; - validate_propagate_block(&mut protocol_controller, block1.id, 2500).await; - - // this block is slightly in the future: will wait for it - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - let block2 = create_block( - &cfg, - Slot::new(slot.period + 2, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block2.clone()); - protocol_controller - .receive_block( - block2.id, - block2.content.header.content.slot, - storage.clone(), - ) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block2.id, 500).await); - validate_propagate_block(&mut protocol_controller, block2.id, 2500).await; - - // this block is too much in the future: do not process - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - let block3 = create_block( - &cfg, - Slot::new(slot.period + 1000, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block3.clone()); - protocol_controller - 
.receive_block( - block3.id, - block3.content.header.content.slot, - storage.clone(), - ) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block3.id, 2500).await); - - // Check that the block has been silently dropped and not discarded for being too much in the future. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - assert!(!block_graph.active_blocks.contains_key(&block3.id)); - assert!(!block_graph.discarded_blocks.contains_key(&block3.id)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_too_many_blocks_in_the_future() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - delta_f0: 1000, - future_block_processing_max_periods: 100, - // slot 1 is in the past - genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(2000.into()), - max_future_processing_blocks: 2, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - // get genesis block hashes - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // generate 5 blocks but there is only space for 2 in the waiting line - let mut expected_block_hashes: HashSet = HashSet::new(); - let mut max_period = 0; - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - for period in 0..5 { - max_period = slot.period + 2 + period; - let block = create_block( - &cfg, - Slot::new(max_period, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block.clone()); - protocol_controller - .receive_block(block.id, block.content.header.content.slot, storage.clone()) - .await; - if period < 2 { - expected_block_hashes.insert(block.id); - } - } - // wait for the 2 waiting blocks to propagate - let mut expected_clone = expected_block_hashes.clone(); - while !expected_block_hashes.is_empty() { - assert!( - expected_block_hashes.remove( - &validate_propagate_block_in_list( - &mut protocol_controller, - &expected_block_hashes.iter().copied().collect(), - 2500 - ) - .await - ), - "unexpected block propagated" - ); - } - // wait until we reach the slot of the last block - while timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap() - < Slot::new(max_period + 1, 0) - {} - // ensure that the graph contains only what we expect - let graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - expected_clone.extend(graph.genesis_blocks); - assert_eq!( - expected_clone, - graph - .active_blocks - .keys() - .copied() - .collect::>(), - "unexpected block graph" - ); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_dep_in_back_order() { - /*stderrlog::new() - .verbosity(4) - 
.timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - let t0s4 = create_block( - &cfg, - Slot::new(4, 0), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - let t1s4 = create_block( - &cfg, - Slot::new(4, 1), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - - // send blocks t0s2, t1s3, t0s1, t0s4, t1s4, t1s1, t0s3, t1s2 - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; // not propagated and update wishlist - validate_wishlist( - &mut protocol_controller, - vec![t0s1.id, t1s1.id].into_iter().collect(), - PreHashSet::::default(), - 500, - ) - .await; - validate_notpropagate_block(&mut protocol_controller, t0s2.id, 500).await; - - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t1s3.id, 500).await; - - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; // we have its parents so it should be integrated right now and update wishlist - - validate_propagate_block(&mut protocol_controller, t0s1.id, 500).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t0s1.id].into_iter().collect(), - 500, - ) - .await; - - storage.store_block(t0s4.clone()); - protocol_controller - .receive_block(t0s4.id, t0s4.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t0s4.id, 500).await; - - storage.store_block(t1s4.clone()); - protocol_controller - .receive_block(t1s4.id, t1s4.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t1s4.id, 500).await; - - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; // assert t1s1 is integrated and t0s2 is integrated and wishlist updated - 
validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t0s2.id], - 500, - ) - .await; - - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t0s2.id], - 500, - ) - .await; - validate_wishlist( - &mut protocol_controller, - vec![].into_iter().collect(), - vec![t1s1.id].into_iter().collect(), - 500, - ) - .await; - - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t0s3.id, 500).await; - - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - - // All remaining blocks are propagated - let integrated = vec![t1s2.id, t0s3.id, t1s3.id, t0s4.id, t1s4.id]; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t1s2.id].into_iter().collect(), - 500, - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_dep_in_back_order_with_max_dependency_blocks() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - max_dependency_blocks: 2, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - tokio::time::sleep(Duration::from_millis(1000)).await; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - // send blocks t0s2, t1s3, t0s1, t0s4, t1s4, t1s1, t0s3, t1s2 - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; - validate_wishlist( - &mut protocol_controller, - vec![t0s1.id, t1s1.id].into_iter().collect(), - PreHashSet::::default(), - 
500,
-            )
-            .await;
-            validate_notpropagate_block(&mut protocol_controller, t0s2.id, 500).await;
-
-            storage.store_block(t1s3.clone());
-            protocol_controller
-                .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone())
-                .await;
-            validate_notpropagate_block(&mut protocol_controller, t1s3.id, 500).await;
-
-            storage.store_block(t0s1.clone());
-            protocol_controller
-                .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone())
-                .await;
-            validate_propagate_block(&mut protocol_controller, t0s1.id, 500).await;
-            validate_wishlist(
-                &mut protocol_controller,
-                PreHashSet::<BlockId>::default(),
-                vec![t0s1.id].into_iter().collect(),
-                500,
-            )
-            .await;
-            storage.store_block(t0s3.clone());
-            protocol_controller
-                .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone())
-                .await;
-            validate_notpropagate_block(&mut protocol_controller, t0s3.id, 500).await;
-
-            storage.store_block(t1s2.clone());
-            protocol_controller
-                .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone())
-                .await;
-            validate_notpropagate_block(&mut protocol_controller, t1s2.id, 500).await;
-
-            storage.store_block(t1s1.clone());
-            protocol_controller
-                .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone())
-                .await;
-            validate_propagate_block_in_list(
-                &mut protocol_controller,
-                &vec![t1s1.id, t1s2.id],
-                500,
-            )
-            .await;
-            validate_propagate_block_in_list(
-                &mut protocol_controller,
-                &vec![t1s1.id, t1s2.id],
-                500,
-            )
-            .await;
-            validate_wishlist(
-                &mut protocol_controller,
-                PreHashSet::<BlockId>::default(),
-                vec![t1s1.id].into_iter().collect(),
-                500,
-            )
-            .await;
-
-            (
-                protocol_controller,
-                consensus_command_sender,
-                consensus_event_receiver,
-                selector_controller,
-            )
-        },
-    )
-    .await;
-}
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_add_block_that_depends_on_invalid_block() {
-    /*stderrlog::new()
-    .verbosity(4)
-    .timestamp(stderrlog::Timestamp::Millisecond)
-    .init()
-    .unwrap();*/
-    let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
-    let cfg = ConsensusConfig {
-        genesis_timestamp: MassaTime::now(0)
-            .unwrap()
-            .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()),
-        max_dependency_blocks: 7,
-        t0: 1000.into(),
-        ..ConsensusConfig::default()
-    };
-    let mut storage = Storage::create_root();
-
-    consensus_without_pool_test(
-        cfg.clone(),
-        async move |mut protocol_controller,
-                    consensus_command_sender,
-                    consensus_event_receiver,
-                    selector_controller| {
-            let genesis_hashes = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .expect("could not get block graph status")
-                .genesis_blocks;
-
-            // create test blocks
-            let t0s1 = create_block(
-                &cfg,
-                Slot::new(1, 0),
-                genesis_hashes.clone(),
-                &staking_keys[0],
-            );
-
-            let t1s1 = create_block(
-                &cfg,
-                Slot::new(1, 1),
-                genesis_hashes.clone(),
-                &staking_keys[0],
-            );
-
-            // block t3s2 with an out-of-range thread and (t0s1, t1s1) parents.
-            let t3s2 = create_block(
-                &cfg,
-                Slot::new(2, 3),
-                vec![t0s1.id, t1s1.id],
-                &staking_keys[0],
-            );
-
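The test below feeds the graph two blocks whose parent t3s2 sits in a non-existent thread, and expects the invalidity to cascade to every descendant. A rough sketch of that cascade under simplified ids (hypothetical helper, not the worker's real code):

use std::collections::{HashMap, HashSet};

/// Starting from blocks already known to be invalid, repeatedly mark any block
/// that lists an invalid block among its parents, until a fixed point is reached.
fn propagate_invalidity(
    parents: &HashMap<u32, Vec<u32>>, // block id -> parent ids
    mut invalid: HashSet<u32>,
) -> HashSet<u32> {
    loop {
        let newly: Vec<u32> = parents
            .iter()
            .filter(|&(b, ps)| !invalid.contains(b) && ps.iter().any(|p| invalid.contains(p)))
            .map(|(b, _)| *b)
            .collect();
        if newly.is_empty() {
            return invalid;
        }
        invalid.extend(newly);
    }
}

Seeding the set with t3s2 marks t0s3 and t1s3 on the first pass, which is why neither may be propagated.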
-            // blocks t0s3 and t1s3 with (t3s2, t1s1) parents.
-            let t0s3 = create_block(
-                &cfg,
-                Slot::new(3, 0),
-                vec![t3s2.id, t1s1.id],
-                &staking_keys[0],
-            );
-            let t1s3 = create_block(
-                &cfg,
-                Slot::new(3, 1),
-                vec![t3s2.id, t1s1.id],
-                &staking_keys[0],
-            );
-
-            // add blocks in this order: t0s1, t1s1, t0s3, t1s3, t3s2
-            storage.store_block(t0s1.clone());
-            protocol_controller
-                .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone())
-                .await;
-            storage.store_block(t1s1.clone());
-            protocol_controller
-                .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone())
-                .await;
-            storage.store_block(t0s3.clone());
-            protocol_controller
-                .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone())
-                .await;
-            storage.store_block(t1s3.clone());
-            protocol_controller
-                .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone())
-                .await;
-            storage.store_block(t3s2.clone());
-            protocol_controller
-                .receive_block(t3s2.id, t3s2.content.header.content.slot, storage.clone())
-                .await;
-
-            // blocks t0s1 and t1s1 are propagated
-            let hash_list = vec![t0s1.id, t1s1.id];
-            validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await;
-            validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await;
-
-            // blocks t0s3 and t1s3 are not propagated
-            let hash_list = vec![t0s3.id, t1s3.id];
-            assert!(
-                !validate_notpropagate_block_in_list(&mut protocol_controller, &hash_list, 2000)
-                    .await
-            );
-            assert!(
-                !validate_notpropagate_block_in_list(&mut protocol_controller, &hash_list, 2000)
-                    .await
-            );
-            (
-                protocol_controller,
-                consensus_command_sender,
-                consensus_event_receiver,
-                selector_controller,
-            )
-        },
-    )
-    .await;
-}
diff --git a/massa-consensus-worker/src/tests/scenarios91_1.rs b/massa-consensus-worker/src/tests/scenarios91_1.rs
deleted file mode 100644
index 4d3c0d4a858..00000000000
--- a/massa-consensus-worker/src/tests/scenarios91_1.rs
+++ /dev/null
@@ -1,446 +0,0 @@
-// Copyright (c) 2022 MASSA LABS <info@massa.net>
-
-// RUST_BACKTRACE=1 cargo test test_block_validity -- --nocapture
-
-use super::tools::*;
-use massa_consensus_exports::ConsensusConfig;
-
-use massa_hash::Hash;
-use massa_models::{block::BlockId, slot::Slot};
-use massa_signature::KeyPair;
-use massa_storage::Storage;
-use massa_time::MassaTime;
-use serial_test::serial;
-
-//use time::MassaTime;
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_ti() {
-    /* stderrlog::new()
-    .verbosity(4)
-    .timestamp(stderrlog::Timestamp::Millisecond)
-    .init()
-    .unwrap(); */
-
-    let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
-    let cfg = ConsensusConfig {
-        future_block_processing_max_periods: 50,
-        // to avoid timing problems for blocks in the future
-        genesis_timestamp: MassaTime::now(0)
-            .unwrap()
-            .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()),
-        ..ConsensusConfig::default()
-    };
-    let mut storage = Storage::create_root();
-
-    // to avoid timing problems for blocks in the future
-
-    consensus_without_pool_test(
-        cfg.clone(),
-        async move |mut protocol_controller,
-                    consensus_command_sender,
-                    consensus_event_receiver,
-                    selector_controller| {
-            let genesis_hashes = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .expect("could not get block graph status")
-                .genesis_blocks;
-
-            // create a valid block for thread 0
-            let valid_hasht0s1 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(1, 0),
-                genesis_hashes.clone(),
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // create a valid block on the other thread.
-            let valid_hasht1s1 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(1, 1),
-                genesis_hashes.clone(),
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // one clique containing the 2 compatible blocks
-            let block_graph = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .unwrap();
-            let block1_clic = get_cliques(&block_graph, valid_hasht0s1);
-            let block2_clic = get_cliques(&block_graph, valid_hasht1s1);
-            assert_eq!(1, block1_clic.len());
-            assert_eq!(1, block2_clic.len());
-            assert_eq!(block1_clic, block2_clic);
-
-            // create another clique with block T0S2
-            let fork_block = create_block_with_merkle_root(
-                &cfg,
-                Hash::compute_from("Other hash!".as_bytes()),
-                Slot::new(2, 0),
-                genesis_hashes.clone(),
-                &staking_keys[0],
-            );
-
-            storage.store_block(fork_block.clone());
-            protocol_controller
-                .receive_block(
-                    fork_block.id,
-                    fork_block.content.header.content.slot,
-                    storage.clone(),
-                )
-                .await;
-            validate_propagate_block(&mut protocol_controller, fork_block.id, 1000).await;
-            // two cliques: valid_hasht0s1 and valid_hasht1s1 in one; fork_block and valid_hasht1s1 in the other
-            // test that the first clique hasn't changed.
-            let block_graph = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .unwrap();
-            let block1_clic = get_cliques(&block_graph, valid_hasht0s1);
-            let block2_clic = get_cliques(&block_graph, valid_hasht1s1);
-            assert_eq!(1, block1_clic.len());
-            assert_eq!(2, block2_clic.len());
-            assert!(block2_clic.intersection(&block1_clic).next().is_some());
-            // test the new clique
-            let fork_clic = get_cliques(&block_graph, fork_block.id);
-            assert_eq!(1, fork_clic.len());
-            assert!(fork_clic.intersection(&block1_clic).next().is_none());
-            assert!(fork_clic.intersection(&block2_clic).next().is_some());
-
-            // extend the first clique
-            let mut parentt0sn_hash = valid_hasht0s1;
-            for period in 3..=35 {
-                let block_hash = create_and_test_block(
-                    &mut protocol_controller,
-                    &cfg,
-                    Slot::new(period, 0),
-                    vec![parentt0sn_hash, valid_hasht1s1],
-                    true,
-                    false,
-                    &staking_keys[0],
-                )
-                .await;
-                // validate that the added block isn't in the forked block's clique.
-                let block_graph = consensus_command_sender
-                    .get_block_graph_status(None, None)
-                    .await
-                    .unwrap();
-                let block_clic = get_cliques(&block_graph, block_hash);
-                let fork_clic = get_cliques(&block_graph, fork_block.id);
-                assert!(fork_clic.intersection(&block_clic).next().is_none());
-
-                parentt0sn_hash = block_hash;
-            }
-
-            // create a new block in the other clique
-            let block = create_block(
-                &cfg,
-                Slot::new(2, 1),
-                vec![fork_block.id, valid_hasht1s1],
-                &staking_keys[0],
-            );
-            storage.store_block(block.clone());
-            protocol_controller
-                .receive_block(block.id, block.content.header.content.slot, storage.clone())
-                .await;
-            assert!(!validate_notpropagate_block(&mut protocol_controller, block.id, 1000,).await);
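In these tests get_cliques is assumed to return the indices of the maximal cliques a block belongs to, so two blocks are incompatible exactly when their index sets are disjoint. A toy version under that assumption (simplified ids, hypothetical helper):

use std::collections::HashSet;

/// A clique is a set of mutually compatible blocks; a block can sit in several
/// maximal cliques at once, so membership is a set of clique indices.
fn cliques_containing(block: u64, cliques: &[HashSet<u64>]) -> HashSet<usize> {
    cliques
        .iter()
        .enumerate()
        .filter_map(|(i, c)| c.contains(&block).then_some(i))
        .collect()
}

This is why the assertions here can rely on intersection(): an empty intersection between two blocks' clique sets means no clique contains both, i.e. the fork really did split the graph.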
-            // verify that the clique has been pruned.
-            let block_graph = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .unwrap();
-            let fork_clic = get_cliques(&block_graph, fork_block.id);
-            assert_eq!(0, fork_clic.len());
-            (
-                protocol_controller,
-                consensus_command_sender,
-                consensus_event_receiver,
-                selector_controller,
-            )
-        },
-    )
-    .await;
-}
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_gpi() {
-    // // setup logging
-    /*stderrlog::new()
-    .verbosity(4)
-    .timestamp(stderrlog::Timestamp::Millisecond)
-    .init()
-    .unwrap();*/
-
-    let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
-    let cfg = ConsensusConfig {
-        future_block_processing_max_periods: 50,
-        // to avoid timing problems for blocks in the future
-        genesis_timestamp: MassaTime::now(0)
-            .unwrap()
-            .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()),
-        ..ConsensusConfig::default()
-    };
-
-    consensus_without_pool_test(
-        cfg.clone(),
-        async move |mut protocol_controller,
-                    consensus_command_sender,
-                    consensus_event_receiver,
-                    selector_controller| {
-            let genesis_hashes = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .expect("could not get block graph status")
-                .genesis_blocks;
-
-            // * create 1 normal block in each thread (t0s1 and t1s1) with genesis parents
-            // create a valid block for thread 0
-            let valid_hasht0s1 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(1, 0),
-                genesis_hashes.clone(),
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // create a valid block on the other thread.
-            let valid_hasht1s1 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(1, 1),
-                genesis_hashes.clone(),
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // one clique containing the 2 compatible blocks
-            let block_graph = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .unwrap();
-            let block1_clic = get_cliques(&block_graph, valid_hasht0s1);
-            let block2_clic = get_cliques(&block_graph, valid_hasht1s1);
-            assert_eq!(1, block1_clic.len());
-            assert_eq!(1, block2_clic.len());
-            assert_eq!(block1_clic, block2_clic);
-
-            // create 2 cliques
-            // * create 1 block in t0s2 with parents of slots (t0s1, t1s0)
-            let valid_hasht0s2 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(2, 0),
-                vec![valid_hasht0s1, genesis_hashes[1]],
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-            // * create 1 block in t1s2 with parents of slots (t0s0, t1s1)
-            let valid_hasht1s2 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(2, 1),
-                vec![genesis_hashes[0], valid_hasht1s1],
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // * after processing the block in t1s2, the block of t0s2 is incompatible with the block of t1s2 (link in gi)
-            let block_graph = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .unwrap();
-            let blockt1s2_clic = get_cliques(&block_graph, valid_hasht1s2);
-            let blockt0s2_clic = get_cliques(&block_graph, valid_hasht0s2);
-            assert!(blockt1s2_clic
-                .intersection(&blockt0s2_clic)
-                .next()
-                .is_none());
-            // * after processing the block in t1s2 there are 2 cliques, one with the block of t0s2 and one with the block of t1s2;
-            //   the parent vector uses the clique of minimum hash sum, i.e. whichever of t0s2 and t1s2 has the smaller hash
-            assert_eq!(1, blockt1s2_clic.len());
-            assert_eq!(1, blockt0s2_clic.len());
-            let parents: Vec<BlockId> = block_graph.best_parents.iter().map(|(b, _p)| *b).collect();
-            if valid_hasht1s2 > valid_hasht0s2 {
-                assert_eq!(parents[0], 
valid_hasht0s2) - } else { - assert_eq!(parents[1], valid_hasht1s2) - } - - // * continue with 33 additional blocks in thread 0, that extend the clique of the block in t0s2: - // - a block in slot t0sX has parents (t0sX-1, t1s1), for X from 3 to 35 - let mut parentt0sn_hash = valid_hasht0s2; - for period in 3..=35 { - let block_hash = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(period, 0), - vec![parentt0sn_hash, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - parentt0sn_hash = block_hash; - } - // * create 1 block in t1s2 with the genesis blocks as parents - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 1), - vec![valid_hasht0s1, valid_hasht1s2], - false, - false, - &staking_keys[0], - ) - .await; - - // * after processing the 33 blocks, one clique is removed (too late), - // the block of minimum hash becomes final, the one of maximum hash becomes stale - // verify that the clique has been pruned. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let fork_clic = get_cliques(&block_graph, valid_hasht1s2); - assert_eq!(0, fork_clic.len()); - assert!(block_graph.discarded_blocks.contains_key(&valid_hasht1s2)); - assert!(block_graph.active_blocks.contains_key(&valid_hasht0s2)); - assert!(!block_graph.active_blocks.contains_key(&valid_hasht1s2)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_old_stale() { - // // setup logging - // stderrlog::new() - // .verbosity(4) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // create 1 block in thread 0 slot 1 with genesis parents - let _valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios91_2.rs b/massa-consensus-worker/src/tests/scenarios91_2.rs deleted file mode 100644 index 2ba975e4f3d..00000000000 --- a/massa-consensus-worker/src/tests/scenarios91_2.rs +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_hash::Hash; -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_queueing() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 30 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 29 other blocks - for i in 0..29 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
-                valid_hasht1 = create_and_test_block(
-                    &mut protocol_controller,
-                    &cfg,
-                    Slot::new(i + 2, 1),
-                    vec![valid_hasht0, valid_hasht1],
-                    true,
-                    false,
-                    &staking_keys[0],
-                )
-                .await;
-            }
-
-            let missed_block = create_block(
-                &cfg,
-                Slot::new(32, 0),
-                vec![valid_hasht0, valid_hasht1],
-                &staking_keys[0],
-            );
-
-            // create 1 block in thread 0 slot 33 with the missed block as parent
-            valid_hasht0 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(33, 0),
-                vec![missed_block.id, valid_hasht1],
-                false,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // and loop again for the 30 other blocks
-            for i in 0..30 {
-                valid_hasht0 = create_and_test_block(
-                    &mut protocol_controller,
-                    &cfg,
-                    Slot::new(i + 34, 0),
-                    vec![valid_hasht0, valid_hasht1],
-                    false,
-                    false,
-                    &staking_keys[0],
-                )
-                .await;
-
-                // create a valid block on the other thread.
-                valid_hasht1 = create_and_test_block(
-                    &mut protocol_controller,
-                    &cfg,
-                    Slot::new(i + 34, 1),
-                    vec![valid_hasht0, valid_hasht1],
-                    false,
-                    false,
-                    &staking_keys[0],
-                )
-                .await;
-            }
-            (
-                protocol_controller,
-                consensus_command_sender,
-                consensus_event_receiver,
-                selector_controller,
-            )
-        },
-    )
-    .await;
-}
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_doubles() {
-    // setup logging
-    // stderrlog::new()
-    //     .verbosity(3)
-    //     .timestamp(stderrlog::Timestamp::Millisecond)
-    //     .init()
-    //     .unwrap();
-    let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
-    let cfg = ConsensusConfig {
-        future_block_processing_max_periods: 50,
-        // to avoid timing problems for blocks in the future
-        genesis_timestamp: MassaTime::now(0)
-            .unwrap()
-            .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()),
-        ..ConsensusConfig::default()
-    };
-
-    consensus_without_pool_test(
-        cfg.clone(),
-        async move |mut protocol_controller,
-                    consensus_command_sender,
-                    consensus_event_receiver,
-                    selector_controller| {
-            let genesis_hashes = consensus_command_sender
-                .get_block_graph_status(None, None)
-                .await
-                .expect("could not get block graph status")
-                .genesis_blocks;
-
-            // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents
-            // create a valid block for slot 1
-            let mut valid_hasht0 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(1, 0),
-                genesis_hashes.clone(),
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // create a valid block on the other thread.
-            let mut valid_hasht1 = create_and_test_block(
-                &mut protocol_controller,
-                &cfg,
-                Slot::new(1, 1),
-                genesis_hashes.clone(),
-                true,
-                false,
-                &staking_keys[0],
-            )
-            .await;
-
-            // and loop for the 39 other blocks
-            for i in 0..39 {
-                valid_hasht0 = create_and_test_block(
-                    &mut protocol_controller,
-                    &cfg,
-                    Slot::new(i + 2, 0),
-                    vec![valid_hasht0, valid_hasht1],
-                    true,
-                    false,
-                    &staking_keys[0],
-                )
-                .await;
-
-                // create a valid block on the other thread.
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // create 1 block in thread 0 slot 41 with missed block as parent - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_double_staking() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // same creator same slot, different block - let operation_merkle_root = Hash::compute_from("42".as_bytes()); - let block_1 = create_block_with_merkle_root( - &cfg, - operation_merkle_root, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - storage.store_block(block_1.clone()); - propagate_block( - &mut protocol_controller, - block_1.id, - block_1.content.header.content.slot, - storage.clone(), - true, - 150, - ) - .await; - - let operation_merkle_root = - Hash::compute_from("so long and thanks for all the fish".as_bytes()); - let block_2 = create_block_with_merkle_root( - &cfg, - operation_merkle_root, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - storage.store_block(block_2.clone()); - propagate_block( - &mut protocol_controller, - block_2.id, - block_2.content.header.content.slot, - storage.clone(), - true, - 150, - ) - .await; - - let graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let cliques_1 = get_cliques(&graph, block_1.id); - let cliques_2 = get_cliques(&graph, block_2.id); - assert!(cliques_1.is_disjoint(&cliques_2)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_test_parents() { - // // setup logging - // stderrlog::new() - // .verbosity(4) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 2 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let valid_hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let valid_hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block for slot 2 - let valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - vec![valid_hasht0s1, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- create_and_test_block(
- &mut protocol_controller,
- &cfg,
- Slot::new(2, 1),
- vec![valid_hasht0s1, valid_hasht1s1],
- true,
- false,
- &staking_keys[0],
- )
- .await;
-
- // * create 1 block in t0s3 with parents (t0s2, t1s0)
- // create a valid block for slot 2
- create_and_test_block(
- &mut protocol_controller,
- &cfg,
- Slot::new(3, 0),
- vec![valid_hasht0s2, genesis_hashes[1usize]],
- false,
- false,
- &staking_keys[0],
- )
- .await;
-
- // * create 1 block in t1s3 with parents (t0s0, t0s0)
- create_and_test_block(
- &mut protocol_controller,
- &cfg,
- Slot::new(3, 1),
- vec![genesis_hashes[0usize], genesis_hashes[0usize]],
- false,
- false,
- &staking_keys[0],
- )
- .await;
- (
- protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller,
- )
- },
- )
- .await;
-}
diff --git a/massa-consensus-worker/src/tests/scenarios_basic.rs b/massa-consensus-worker/src/tests/scenarios_basic.rs
deleted file mode 100644
index a0e773f60af..00000000000
--- a/massa-consensus-worker/src/tests/scenarios_basic.rs
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright (c) 2022 MASSA LABS
-
-use super::tools;
-use crate::tests::block_factory::BlockFactory;
-use massa_consensus_exports::ConsensusConfig;
-use massa_hash::Hash;
-use massa_models::{block::BlockId, slot::Slot};
-use massa_signature::KeyPair;
-use massa_storage::Storage;
-use serial_test::serial;
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_old_stale_not_propagated_and_discarded() {
- let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
- let cfg = ConsensusConfig {
- t0: 32.into(),
- future_block_processing_max_periods: 50,
- ..ConsensusConfig::default()
- };
-
- tools::consensus_without_pool_test(
- cfg.clone(),
- async move |protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller| {
- let parents: Vec<BlockId> = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status")
- .best_parents
- .iter()
- .map(|(b, _p)| *b)
- .collect();
-
- let mut block_factory =
- BlockFactory::start_block_factory(parents.clone(), protocol_controller);
- block_factory.creator_keypair = staking_keys[0].clone();
- block_factory.slot = Slot::new(1, 0);
-
- let block_1 = block_factory.create_and_receive_block(true).await;
-
- block_factory.slot = Slot::new(1, 1);
- block_factory.create_and_receive_block(true).await;
-
- block_factory.slot = Slot::new(1, 0);
- block_factory.best_parents = vec![block_1.id, parents[0]];
- let block_3 = block_factory.create_and_receive_block(false).await;
-
- // Old stale block was discarded.
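// For context, a minimal sketch of the staleness rule this scenario exercises
// (illustrative names only, not the crate's API): a block whose period is not
// beyond the latest final period already reached in its thread can never be
// included, so consensus discards it instead of propagating it.
fn is_stale(block_period: u64, latest_final_period_in_thread: u64) -> bool {
    block_period <= latest_final_period_in_thread
}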
- let status = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status");
- assert_eq!(status.discarded_blocks.len(), 1);
- assert!(status.discarded_blocks.get(&block_3.id).is_some());
- (
- block_factory.take_protocol_controller(),
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller,
- )
- },
- )
- .await;
-}
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_block_not_processed_multiple_times() {
- let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
- let cfg = ConsensusConfig {
- t0: 500.into(),
- future_block_processing_max_periods: 50,
- ..ConsensusConfig::default()
- };
- let mut storage = Storage::create_root();
-
- tools::consensus_without_pool_test(
- cfg.clone(),
- async move |protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller| {
- let parents: Vec<BlockId> = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status")
- .best_parents
- .iter()
- .map(|(b, _p)| *b)
- .collect();
-
- let mut block_factory =
- BlockFactory::start_block_factory(parents.clone(), protocol_controller);
- block_factory.creator_keypair = staking_keys[0].clone();
- block_factory.slot = Slot::new(1, 0);
- let block_1 = block_factory.create_and_receive_block(true).await;
-
- // Send it again, it should not be propagated.
- storage.store_block(block_1.clone());
- block_factory
- .receive_block(
- false,
- block_1.id,
- block_1.content.header.content.slot,
- storage.clone(),
- )
- .await;
-
- // Send it again, it should not be propagated.
- block_factory
- .receive_block(
- false,
- block_1.id,
- block_1.content.header.content.slot,
- storage.clone(),
- )
- .await;
-
- // Block was not discarded.
- let status = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status");
- assert_eq!(status.discarded_blocks.len(), 0);
- (
- block_factory.take_protocol_controller(),
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller,
- )
- },
- )
- .await;
-}
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_queuing() {
- let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
- let cfg = ConsensusConfig {
- future_block_processing_max_periods: 50,
- t0: 1000.into(),
- ..ConsensusConfig::default()
- };
-
- tools::consensus_without_pool_test(
- cfg.clone(),
- async move |protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller| {
- let parents: Vec<BlockId> = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status")
- .best_parents
- .iter()
- .map(|(b, _p)| *b)
- .collect();
-
- let mut block_factory =
- BlockFactory::start_block_factory(parents.clone(), protocol_controller);
- block_factory.creator_keypair = staking_keys[0].clone();
- block_factory.slot = Slot::new(3, 0);
-
- let block_1 = block_factory.create_and_receive_block(false).await;
-
- block_factory.slot = Slot::new(4, 0);
- block_factory.best_parents = vec![block_1.id, parents[1]];
-
- block_factory.create_and_receive_block(false).await;
-
- // Blocks were queued, not discarded.
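// A minimal sketch of the queueing behaviour asserted here, assuming a
// simplified dependency map (names are illustrative, not the worker's real
// types): a block whose parent is not yet in the graph is parked, keyed by
// the missing parent, and retried once that parent arrives.
use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct PendingBlocks {
    // missing parent id -> ids of blocks waiting on it
    waiting_on: HashMap<String, HashSet<String>>,
}

impl PendingBlocks {
    fn park(&mut self, block_id: String, missing_parent: String) {
        self.waiting_on.entry(missing_parent).or_default().insert(block_id);
    }

    // once `parent` is received, its waiters become retryable
    fn unpark(&mut self, parent: &str) -> HashSet<String> {
        self.waiting_on.remove(parent).unwrap_or_default()
    }
}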
- let status = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status");
- assert_eq!(status.discarded_blocks.len(), 0);
- (
- block_factory.take_protocol_controller(),
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller,
- )
- },
- )
- .await;
-}
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_double_staking_does_not_propagate() {
- let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
- let cfg = ConsensusConfig {
- future_block_processing_max_periods: 50,
- t0: 1000.into(),
- ..ConsensusConfig::default()
- };
-
- let mut storage = Storage::create_root();
-
- tools::consensus_without_pool_test(
- cfg.clone(),
- async move |protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller| {
- let parents: Vec<BlockId> = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status")
- .best_parents
- .iter()
- .map(|(b, _p)| *b)
- .collect();
-
- let mut block_factory =
- BlockFactory::start_block_factory(parents.clone(), protocol_controller);
- block_factory.creator_keypair = staking_keys[0].clone();
- block_factory.slot = Slot::new(1, 0);
- let mut block_1 = block_factory.create_and_receive_block(true).await;
-
- // Same creator, same slot, different block
- block_1.content.header.content.operation_merkle_root =
- Hash::compute_from("hello world".as_bytes());
- let block = block_factory.sign_header(block_1.content.header.content);
-
- // Note: currently does propagate, see #190.
- storage.store_block(block.clone());
- block_factory
- .receive_block(
- true,
- block.id,
- block.content.header.content.slot,
- storage.clone(),
- )
- .await;
-
- // Block was not discarded.
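// Sketch of the conflict this scenario constructs (simplified: creators and
// block ids are shown as plain strings rather than keys/hashes): double
// staking is two distinct blocks from the same creator at the same slot.
fn is_double_staking(
    creator_a: &str,
    slot_a: (u64, u8),
    id_a: &str,
    creator_b: &str,
    slot_b: (u64, u8),
    id_b: &str,
) -> bool {
    creator_a == creator_b && slot_a == slot_b && id_a != id_b
}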
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 0); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_endorsements.rs b/massa-consensus-worker/src/tests/scenarios_endorsements.rs deleted file mode 100644 index 0900906ba9e..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_endorsements.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use massa_models::{ - block::BlockId, - endorsement::{Endorsement, EndorsementSerializer}, - slot::Slot, - wrapped::WrappedContent, -}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_endorsement_check() { - // setup logging - /* - stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); - */ - let cfg = ConsensusConfig { - delta_f0: 3, - endorsement_count: 1, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(300.into()), - operation_validity_periods: 100, - periods_per_cycle: 2, - t0: 500.into(), - ..ConsensusConfig::default() - }; - // define addresses use for the test - // addresses 1 and 2 both in thread 0 - - let (address_1, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); - let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - assert_eq!(0, address_2.get_thread(cfg.thread_count)); - - let mut storage = Storage::create_root(); - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let address_a = selector_controller - .get_selection(Slot::new(1, 0)) - .unwrap() - .producer; - let address_b = selector_controller - .get_selection(Slot::new(1, 0)) - .unwrap() - .endorsements[0]; - let address_c = selector_controller - .get_selection(Slot::new(1, 1)) - .unwrap() - .endorsements[1]; - - let keypair_a = if address_a == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - let keypair_b = if address_b == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - let keypair_c = if address_c == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap() - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - - // create an otherwise valid endorsement with another address, include it in valid block(1,0), assert it is not propagated - let sender_keypair = KeyPair::generate(); - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[0], - }; - let ed = Endorsement::new_wrapped( - content.clone(), - EndorsementSerializer::new(), - &sender_keypair, - ) - .unwrap(); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create an otherwise valid endorsement at slot (1,1), include it in valid 
block(1,0), assert it is not propagated - let content = Endorsement { - slot: Slot::new(1, 1), - index: 0, - endorsed_block: parents[1], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_c) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create an otherwise valid endorsement with genesis 1 as endorsed block, include it in valid block(1,0), assert it is not propagated - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[1], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_b) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create a valid endorsement, include it in valid block(1,1), assert it is propagated - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[0], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_b) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_get_operations.rs b/massa-consensus-worker/src/tests/scenarios_get_operations.rs deleted file mode 100644 index a2b53535c8c..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_get_operations.rs +++ /dev/null @@ -1,201 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_graph::BootstrapableGraph; -// use massa_models::WrappedOperation; -// use massa_models::{ -// clique::Clique, BlockId, OperationSearchResult, OperationSearchResultStatus, Slot, -// }; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_get_operation() { -// // // setup logging -// // stderrlog::new() -// // .verbosity(4) -// // .timestamp(stderrlog::Timestamp::Millisecond) -// // .init() -// // .unwrap(); -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// operation_validity_periods: 10, -// max_operations_per_block: 50, -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .saturating_sub(MassaTime::from(32000).checked_mul(4).unwrap()) -// .saturating_add(300.into()), -// ..ConsensusConfig::default() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (_address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = 
random_address_on_thread(0, cfg.thread_count).into(); -// // to avoid timing pb for block in the future - -// let op1 = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let op2 = create_transaction(&keypair_a, address_b, 2, 10, 1); -// let op3 = create_transaction(&keypair_a, address_b, 3, 10, 1); -// let op4 = create_transaction(&keypair_a, address_b, 4, 10, 1); -// let op5 = create_transaction(&keypair_a, address_b, 5, 10, 1); - -// let ops = [ -// op1.clone(), -// op2.clone(), -// op3.clone(), -// op4.clone(), -// op5.clone(), -// ]; - -// let (boot_graph, b1, b2) = get_bootgraph(vec![op2.clone(), op3.clone()]); -// // there is only one node so it should be drawn at every slot - -// consensus_pool_test( -// cfg.clone(), -// Some(boot_graph), -// async move |pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// let (ops, _storage) = consensus_command_sender -// .get_operations(ops.iter().map(|op| op.id).collect()) -// .await -// .unwrap(); - -// let mut expected = HashMap::new(); - -// expected.insert( -// op2.id, -// OperationSearchResult { -// status: OperationSearchResultStatus::Pending, -// op: op2, -// in_pool: false, -// in_blocks: vec![(b1, (0, true))].into_iter().collect(), -// }, -// ); -// expected.insert( -// op3.id, -// OperationSearchResult { -// status: OperationSearchResultStatus::Pending, -// op: op3, -// in_pool: false, -// in_blocks: vec![(b2, (0, false))].into_iter().collect(), -// }, -// ); - -// assert_eq!(ops.len(), expected.len()); - -// for ( -// id, -// OperationSearchResult { -// op, -// in_blocks, -// in_pool, -// .. -// }, -// ) in ops.iter() -// { -// assert!(expected.contains_key(id)); -// let OperationSearchResult { -// op: ex_op, -// in_pool: ex_pool, -// in_blocks: ex_blocks, -// .. -// } = expected.get(id).unwrap(); -// assert_eq!(op.id, ex_op.id); -// assert_eq!(in_pool, ex_pool); -// assert_eq!(in_blocks.len(), ex_blocks.len()); -// for (b_id, val) in in_blocks.iter() { -// assert!(ex_blocks.contains_key(b_id)); -// assert_eq!(ex_blocks.get(b_id).unwrap(), val); -// } -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// fn get_bootgraph(operations: Vec) -> (BootstrapableGraph, BlockId, BlockId) { -// let genesis_0 = get_export_active_test_block(vec![], vec![], Slot::new(0, 0), true); -// let genesis_1 = get_export_active_test_block(vec![], vec![], Slot::new(0, 1), true); -// let p1t0 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![operations[0].clone()], -// Slot::new(1, 0), -// true, -// ); -// let p1t1 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![], -// Slot::new(1, 1), -// false, -// ); -// let p2t0 = get_export_active_test_block( -// vec![(p1t0.block_id, 1), (p1t1.block_id, 1)], -// vec![operations[1].clone()], -// Slot::new(2, 0), -// false, -// ); -// ( -// BootstrapableGraph { -// /// Map of active blocks, where blocks are in their exported version. -// final_blocks: vec![ -// (genesis_0.block_id, genesis_0.clone()), -// (genesis_1.block_id, genesis_1.clone()), -// (p1t0.block_id, p1t0.clone()), -// (p1t1.block_id, p1t1.clone()), -// (p2t0.block_id, p2t0.clone()), -// ] -// .into_iter() -// .collect(), -// /// Best parents hashes in each thread. -// best_parents: vec![(p2t0.block_id, 2), (p1t1.block_id, 1)], -// /// Latest final period and block hash in each thread. 
-// latest_final_blocks_periods: vec![ -// (genesis_0.block_id, 0u64), -// (genesis_1.block_id, 0u64), -// ], -// /// Head of the incompatibility graph. -// gi_head: vec![ -// (genesis_0.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (p2t0.block_id, Default::default()), -// (genesis_1.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (p2t0.block_id, Default::default()), -// ] -// .into_iter() -// .collect(), - -// /// List of maximal cliques of compatible blocks. -// max_cliques: vec![Clique { -// block_ids: vec![ -// genesis_0.block_id, -// p1t0.block_id, -// genesis_1.block_id, -// p1t1.block_id, -// p2t0.block_id, -// ] -// .into_iter() -// .collect(), -// fitness: 123, -// is_blockclique: true, -// }], -// }, -// p1t0.block_id, -// p2t0.block_id, -// ) -// } diff --git a/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs b/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs deleted file mode 100644 index dba75bcde76..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::ledger_models::LedgerData; -use massa_models::{amount::Amount, slot::Slot}; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashMap; -use std::str::FromStr; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_get_selection_draws_high_end_slot() { - // setup logging - /* - stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); - */ - let cfg = ConsensusConfig { - periods_per_cycle: 2, - t0: 500.into(), - delta_f0: 3, - operation_validity_periods: 100, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(300.into()), - ..Default::default() - }; - // define addresses use for the test - // addresses 1 and 2 both in thread 0 - //let addr_1 = random_address_on_thread(0, cfg.thread_count); - let addr_2 = random_address_on_thread(0, cfg.thread_count); - - let mut ledger = HashMap::new(); - ledger.insert( - addr_2.address, - LedgerData::new(Amount::from_str("10000").unwrap()), - ); - - consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let draws = selector_controller.get_selection(Slot::new(1, 0)); - assert!(draws.is_ok()); - - // Too high end selection should return an error. 
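// Illustrative sketch of why the call below must fail, under the assumption
// that the selector has only computed draws up to some horizon (`drawn_up_to`
// is a made-up name): queries far past that horizon return an error rather
// than a draw.
fn get_selection_sketch(period: u64, drawn_up_to: u64) -> Result<u64, String> {
    if period > drawn_up_to {
        return Err(format!("draws for period {} not computed yet", period));
    }
    Ok(period) // placeholder for the actual draw
}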
- let too_high_draws = selector_controller.get_selection(Slot::new(200, 0));
- assert!(too_high_draws.is_err());
- (
- protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller,
- )
- },
- )
- .await;
-}
diff --git a/massa-consensus-worker/src/tests/scenarios_header_check.rs b/massa-consensus-worker/src/tests/scenarios_header_check.rs
deleted file mode 100644
index 98554102664..00000000000
--- a/massa-consensus-worker/src/tests/scenarios_header_check.rs
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (c) 2022 MASSA LABS
-
-// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture
-
-use super::tools::*;
-use massa_consensus_exports::ConsensusConfig;
-
-use massa_models::slot::Slot;
-use massa_signature::KeyPair;
-use massa_storage::Storage;
-use serial_test::serial;
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_consensus_asks_for_block() {
- let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
- let cfg = ConsensusConfig {
- t0: 500.into(),
- future_block_processing_max_periods: 50,
- ..ConsensusConfig::default()
- };
-
- consensus_without_pool_test(
- cfg.clone(),
- async move |mut protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller| {
- let genesis_hashes = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status")
- .genesis_blocks;
-
- // create test blocks
- let t0s1 = create_block(
- &cfg,
- Slot::new(1, 0),
- genesis_hashes.clone(),
- &staking_keys[0],
- );
- // send header for block t0s1
- protocol_controller
- .receive_header(t0s1.content.header.clone())
- .await;
-
- validate_ask_for_block(&mut protocol_controller, t0s1.id, 1000).await;
- (
- protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller,
- )
- },
- )
- .await;
-}
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_consensus_does_not_ask_for_block() {
- let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
- let cfg = ConsensusConfig {
- t0: 32.into(),
- future_block_processing_max_periods: 50,
- ..ConsensusConfig::default()
- };
- let mut storage = Storage::create_root();
-
- consensus_without_pool_test(
- cfg.clone(),
- async move |mut protocol_controller,
- consensus_command_sender,
- consensus_event_receiver,
- selector_controller| {
- let start_slot = 3;
- let genesis_hashes = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status")
- .genesis_blocks;
-
- // create test blocks
- let t0s1 = create_block(
- &cfg,
- Slot::new(1 + start_slot, 0),
- genesis_hashes.clone(),
- &staking_keys[0],
- );
- let header = t0s1.content.header.clone();
- let id = t0s1.id;
- // Send the actual block.
- storage.store_block(t0s1);
- protocol_controller
- .receive_block(header.id, header.content.slot, storage.clone())
- .await;
-
- // block t0s1 is propagated
- let hash_list = vec![id];
- validate_propagate_block_in_list(
- &mut protocol_controller,
- &hash_list,
- 3000 + start_slot * 1000,
- )
- .await;
-
- // Send the hash
- protocol_controller.receive_header(header).await;
-
- // Consensus should not ask for the block, so the time-out should be hit.
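// A minimal sketch of the de-duplication being verified, assuming a simple
// seen-set (the real worker tracks richer per-block statuses): a header for
// an already-known block must not trigger a new block request.
use std::collections::HashSet;

fn should_ask_for(block_id: &str, known_blocks: &HashSet<String>) -> bool {
    !known_blocks.contains(block_id)
}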
- validate_does_not_ask_for_block(&mut protocol_controller, &id, 10).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs b/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs deleted file mode 100644 index 422557c0a68..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_models::{BlockId, Slot}; -// use massa_signature::KeyPair; -// use serial_test::serial; -// use std::collections::{HashSet, VecDeque}; - -// #[tokio::test] -// #[serial] -// async fn test_thread_incompatibility() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 200.into(), -// future_block_processing_max_periods: 50, -// ..ConsensusConfig::default() -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// let hash_1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_2 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_3 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// if hash_1 > hash_3 { -// assert_eq!(status.best_parents[0].0, hash_3); -// } else { -// assert_eq!(status.best_parents[0].0, hash_1); -// } -// assert_eq!(status.best_parents[1].0, hash_2); - -// assert!(if let Some(h) = status.gi_head.get(&hash_3) { -// h.contains(&hash_1) -// } else { -// panic!("missing hash in gi_head") -// }); - -// assert_eq!(status.max_cliques.len(), 2); - -// for clique in status.max_cliques.clone() { -// if clique.block_ids.contains(&hash_1) && clique.block_ids.contains(&hash_3) { -// panic!("incompatible blocks in the same clique") -// } -// } - -// let mut current_period = 3; -// let mut parents = vec![hash_1, hash_2]; -// for _ in 0..3 { -// let hash = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(current_period, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// current_period += 1; -// parents[0] = hash; -// } - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert!(if let Some(h) = status.gi_head.get(&hash_3) { -// h.contains(&status.best_parents[0].0) -// } else { -// panic!("missing block in clique") -// }); - -// let mut parents = vec![status.best_parents[0].0, hash_2]; -// let mut current_period = 8; -// for _ in 0..30 { -// let b = create_block( -// &cfg, -// 
Slot::new(current_period, 0), -// parents.clone(), -// &staking_keys[0], -// ); -// current_period += 1; -// parents[0] = b.id; -// protocol_controller.receive_block(b.clone()).await; - -// // Note: higher timeout required. -// validate_propagate_block_in_list(&mut protocol_controller, &vec![b.id], 5000).await; -// } - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert_eq!(status.max_cliques.len(), 1); - -// // clique should have been deleted by now -// let parents = vec![hash_3, hash_2]; -// let _ = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(40, 0), -// parents.clone(), -// false, -// false, -// &staking_keys[0], -// ) -// .await; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_grandpa_incompatibility() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 200.into(), -// future_block_processing_max_periods: 50, -// ..ConsensusConfig::default() -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// let hash_1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 0), -// vec![genesis[0], genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_2 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// vec![genesis[0], genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_3 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// vec![hash_1, genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_4 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 1), -// vec![genesis[0], hash_2], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert!(if let Some(h) = status.gi_head.get(&hash_4) { -// h.contains(&hash_3) -// } else { -// panic!("missing block in gi_head") -// }); - -// assert_eq!(status.max_cliques.len(), 2); - -// for clique in status.max_cliques.clone() { -// if clique.block_ids.contains(&hash_3) && clique.block_ids.contains(&hash_4) { -// panic!("incompatible blocks in the same clique") -// } -// } - -// let parents: Vec = status.best_parents.iter().map(|(b, _p)| *b).collect(); -// if hash_4 > hash_3 { -// assert_eq!(parents[0], hash_3) -// } else { -// assert_eq!(parents[1], hash_4) -// } - -// let mut latest_extra_blocks = VecDeque::new(); -// for extend_i in 0..33 { -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); -// let hash = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(3 + extend_i, 0), -// status.best_parents.iter().map(|(b, _p)| *b).collect(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// 
latest_extra_blocks.push_back(hash);
-// while latest_extra_blocks.len() > cfg.delta_f0 as usize + 1 {
-// latest_extra_blocks.pop_front();
-// }
-// }
-
-// let latest_extra_blocks: HashSet<BlockId> = latest_extra_blocks.into_iter().collect();
-// let status = consensus_command_sender
-// .get_block_graph_status(None, None)
-// .await
-// .expect("could not get block graph status");
-// assert_eq!(status.max_cliques.len(), 1, "wrong cliques (len)");
-// assert_eq!(
-// status.max_cliques[0]
-// .block_ids
-// .iter()
-// .cloned()
-// .collect::<HashSet<BlockId>>(),
-// latest_extra_blocks,
-// "wrong cliques"
-// );

-// (
-// protocol_controller,
-// consensus_command_sender,
-// consensus_event_receiver,
-// selector_controller,
-// )
-// },
-// )
-// .await;
-// }
diff --git a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs
deleted file mode 100644
index ccabf914f68..00000000000
--- a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright (c) 2022 MASSA LABS
-
-use super::tools::*;
-use crate::start_consensus_controller;
-use massa_pool_exports::test_exports::MockPoolController;
-
-use massa_consensus_exports::settings::ConsensusChannels;
-use massa_consensus_exports::ConsensusConfig;
-use massa_execution_exports::test_exports::MockExecutionController;
-use massa_hash::Hash;
-use massa_models::{address::Address, block::BlockId, slot::Slot};
-use massa_pos_exports::SelectorConfig;
-use massa_pos_worker::start_selector_worker;
-use massa_protocol_exports::test_exports::MockProtocolController;
-use massa_signature::KeyPair;
-use massa_storage::Storage;
-use serial_test::serial;
-
-#[tokio::test]
-#[serial]
-#[ignore]
-async fn test_invalid_block_notified_as_attack_attempt() {
- let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect();
- let cfg = ConsensusConfig {
- t0: 32.into(),
- future_block_processing_max_periods: 50,
- ..ConsensusConfig::default()
- };
-
- let storage: Storage = Storage::create_root();
-
- // mock protocol & pool
- let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) =
- MockProtocolController::new();
- let selector_config = SelectorConfig {
- thread_count: 2,
- periods_per_cycle: 100,
- genesis_address: Address::from_public_key(&staking_keys[0].get_public_key()),
- endorsement_count: 0,
- max_draw_cache: 10,
- channel_size: 256,
- };
- let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap();
- let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver();
- let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver();
- // launch consensus controller
- let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) =
- start_consensus_controller(
- cfg.clone(),
- ConsensusChannels {
- execution_controller,
- protocol_command_sender: protocol_command_sender.clone(),
- protocol_event_receiver,
- pool_command_sender: pool_controller,
- selector_controller,
- },
- None,
- storage.clone(),
- 0,
- )
- .await
- .expect("could not start consensus controller");
-
- let parents: Vec<BlockId> = consensus_command_sender
- .get_block_graph_status(None, None)
- .await
- .expect("could not get block graph status")
- .best_parents
- .iter()
- .map(|(b, _p)| *b)
- .collect();
-
- // Block for a non-existent thread.
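// Sketch of the header sanity check this scenario triggers (simplified error
// type; `thread_count` comes from the config): a slot thread index outside
// `0..thread_count` can never be valid, so the sender is reported as a
// possible attacker instead of the block being processed.
fn check_slot_thread(thread: u8, thread_count: u8) -> Result<(), &'static str> {
    if thread >= thread_count {
        return Err("invalid thread index: flag sender as attack attempt");
    }
    Ok(())
}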
- let block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("different".as_bytes()), - Slot::new(1, cfg.thread_count + 1), - parents.clone(), - &staking_keys[0], - ); - let block_id = block.id; - let slot = block.content.header.content.slot; - protocol_controller - .receive_block(block_id, slot, storage.clone()) - .await; - - validate_notify_block_attack_attempt(&mut protocol_controller, block_id, 1000).await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_invalid_header_notified_as_attack_attempt() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - // mock protocol & pool - let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let selector_config = SelectorConfig { - thread_count: 2, - periods_per_cycle: 100, - genesis_address: Address::from_public_key(&staking_keys[0].get_public_key()), - endorsement_count: 0, - max_draw_cache: 10, - channel_size: 256, - }; - let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - let storage: Storage = Storage::create_root(); - // launch consensus controller - let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller, - }, - None, - storage, - 0, - ) - .await - .expect("could not start consensus controller"); - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Block for a non-existent thread. 
- let block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("different".as_bytes()), - Slot::new(1, cfg.thread_count + 1), - parents.clone(), - &staking_keys[0], - ); - protocol_controller - .receive_header(block.content.header) - .await; - - validate_notify_block_attack_attempt(&mut protocol_controller, block.id, 1000).await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_operations_check.rs b/massa-consensus-worker/src/tests/scenarios_operations_check.rs deleted file mode 100644 index 8d41645e1b7..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_operations_check.rs +++ /dev/null @@ -1,203 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_models::ledger_models::LedgerData; -// use massa_models::prehash::Set; -// use massa_models::{Address, Amount, Slot}; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_operations_check() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(4) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let thread_count = 2; - -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(1, thread_count).into(); - -// assert_eq!(1, address_2.get_thread(thread_count)); -// let mut ledger = HashMap::new(); -// ledger.insert(address_1, LedgerData::new(Amount::from_str("5").unwrap())); - -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// future_block_processing_max_periods: 50, -// operation_validity_periods: 10, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(10000.into()), -// endorsement_count: 0, -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&[keypair_1.clone()], &ledger) -// }; - -// consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // Valid block A sending 5 from addr1 to addr2 + reward 1 to addr1 -// let operation_1 = create_transaction(&keypair_1, address_2, 5, 5, 1); -// storage.store_operations(vec![operation_1.clone()]); -// let block_a = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_a.clone(), true, 150).await; - -// // assert address 1 has 1 coin at blocks (A, genesis_ids[1]) (see #269) -// let mut set = Set::
::default(); -// set.insert(address_1); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_1) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("1").unwrap()); - -// // receive block b with invalid operation (not enough coins) -// let operation_2 = create_transaction(&keypair_2, address_1, 10, 8, 1); -// storage.store_operations(vec![operation_2.clone()]); -// let block_2b = create_block_with_operations( -// &cfg, -// Slot::new(1, 1), -// &vec![block_a.id, genesis_ids[1]], -// &keypair_2, -// vec![operation_2], -// ); -// propagate_block(&mut protocol_controller, block_2b, false, 1000).await; - -// // receive empty block b -// let block_b = create_block_with_operations( -// &cfg, -// Slot::new(1, 1), -// &vec![block_a.id, genesis_ids[1]], -// &keypair_1, -// vec![], -// ); -// propagate_block(&mut protocol_controller, block_b.clone(), true, 150).await; - -// // assert address 2 has 5 coins at block B -// let mut set = Set::
::default(); -// set.insert(address_2); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("5").unwrap()); - -// // receive block with reused operation -// let block_1c = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &vec![block_a.id, block_b.id], -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_1c.clone(), false, 1000).await; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_execution_check() { -// let (address_1, keypair_1) = random_address().into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_1, LedgerData::new(Amount::from_str("5").unwrap())); - -// let staking_keys: Vec = vec![keypair_1.clone()]; -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// future_block_processing_max_periods: 50, -// operation_validity_periods: 10, -// genesis_key: keypair_1.clone(), -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(10000.into()), -// endorsement_count: 0, -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // Valid block A executing some bytecode and spending 2 coins. -// let operation_1 = create_executesc(&keypair_1, 5, 5, Default::default(), 1, 2, 1); -// storage.store_operations(vec![operation_1.clone()]); -// let block_a = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_a, true, 150).await; - -// // assert the `coins` argument as been deducted from the balance of address 1. -// let mut set = Set::
::default(); -// set.insert(address_1); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_1) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("3").unwrap()); - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_parents.rs b/massa-consensus-worker/src/tests/scenarios_parents.rs deleted file mode 100644 index 1dd8edfcabb..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_parents.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parent_in_the_future() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // Parent, in the future. - let t0s1 = create_block( - &cfg, - Slot::new(4, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(5, 0), - vec![t0s1.id], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parents() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // generate two normal blocks in each thread - let hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 0), - vec![hasht1s1, genesis_hashes[0]], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parents_in_incompatible_cliques() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - 
cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - let hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // from that point we have two incompatible clique - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - vec![hasht0s1, genesis_hashes[1]], - true, - false, - &staking_keys[0], - ) - .await; - - // Block with incompatible parents. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 1), - vec![hasht0s1, hasht0s2], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_pool_commands.rs b/massa-consensus-worker/src/tests/scenarios_pool_commands.rs deleted file mode 100644 index 46b08bd7d85..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_pool_commands.rs +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//TODO: Still needed ? -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_graph::BootstrapableGraph; -// use massa_models::clique::Clique; -// use massa_models::ledger_models::LedgerData; -// use massa_models::{Amount, BlockId, Slot, WrappedOperation}; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_update_current_slot_cmd_notification() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// thread_count: 1, -// genesis_timestamp: MassaTime::now(0).unwrap().checked_add(1000.into()).unwrap(), -// ..ConsensusConfig::default_with_paths() -// }; - -// let timeout = 150; - -// consensus_pool_test( -// cfg.clone(), -// None, -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// let slot_notification_filter = |cmd| match cmd { -// massa_pool::PoolCommand::UpdateCurrentSlot(slot) => { -// Some((slot, MassaTime::now(0).unwrap())) -// } -// _ => None, -// }; - -// // wait for UpdateCurrentSlot pool command -// if let Some((slot_cmd, rec_time)) = pool_controller -// .wait_command(1500.into(), slot_notification_filter) -// .await -// { -// assert_eq!(slot_cmd, Slot::new(0, 0)); -// if rec_time > cfg.genesis_timestamp { -// assert!( -// rec_time.saturating_sub(cfg.genesis_timestamp) < MassaTime::from(timeout) -// ) -// } else { -// assert!( -// cfg.genesis_timestamp.saturating_sub(rec_time) < MassaTime::from(timeout) -// ) -// } -// } - -// // wait for next UpdateCurrentSlot pool command -// if let Some((slot_cmd, rec_time)) = pool_controller -// .wait_command(500.into(), slot_notification_filter) -// .await -// { -// assert_eq!(slot_cmd, Slot::new(0, 1)); -// if rec_time > cfg.genesis_timestamp { -// assert!( -// rec_time.saturating_sub(cfg.genesis_timestamp.saturating_add(cfg.t0)) -// < MassaTime::from(timeout) -// ); -// } else { -// assert!( -// 
cfg.genesis_timestamp -// .saturating_add(cfg.t0) -// .saturating_sub(rec_time) -// < MassaTime::from(timeout) -// ); -// } -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_update_latest_final_block_cmd_notification() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; - -// consensus_pool_test( -// cfg.clone(), -// None, -// None, -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // UpdateLatestFinalPeriods pool command filter -// let update_final_notification_filter = |cmd| match cmd { -// massa_pool::PoolCommand::UpdateLatestFinalPeriods(periods) => Some(periods), -// PoolCommand::GetOperationBatch { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; -// // wait for initial final periods notification -// let final_periods = pool_controller -// .wait_command(1000.into(), update_final_notification_filter) -// .await; -// assert_eq!(final_periods, Some(vec![0, 0])); - -// // wait for next final periods notification -// let final_periods = pool_controller -// .wait_command( -// (cfg.t0.to_millis() * 3).into(), -// update_final_notification_filter, -// ) -// .await; -// assert_eq!(final_periods, Some(vec![1, 0])); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_new_final_ops() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// delta_f0: 2, -// genesis_timestamp: MassaTime::now(0).unwrap(), -// ..ConsensusConfig::default() -// }; - -// // define addresses use for the test -// // addresses a and b both in thread 0 - -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, mut p0, mut p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// p1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// p0 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 1), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// // UpdateLatestFinalPeriods pool command filter -// let new_final_ops_filter = |cmd| match cmd { -// PoolCommand::FinalOperations(ops) => Some(ops), -// _ => None, -// }; - -// // wait for initial 
final periods notification -// let final_ops = pool_controller -// .wait_command(300.into(), new_final_ops_filter) -// .await; -// if let Some(finals) = final_ops { -// assert!(finals.contains_key(&op.id)); -// assert_eq!(finals.get(&op.id), Some(&(10, 0))) -// } else { -// panic!("no final ops") -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_max_attempts_get_operations() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// genesis_timestamp: MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, _p0, _p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // Test that consensus keeps trying to fill the block, -// // until the max number of attempts has been reached. -// let mut attempts = 0; -// let mut slot = None; -// while attempts != cfg.max_operations_fill_attempts { -// let get_operations_batch_filter = |cmd| match cmd { -// PoolCommand::GetOperationBatch { -// response_tx, -// target_slot, -// .. -// } => Some((response_tx, target_slot)), -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// let (response_tx, target_slot) = pool_controller -// .wait_command(1000.into(), get_operations_batch_filter) -// .await -// .expect("No response chan and target slot."); - -// // Test that the batch requests are still for the same slot. -// if let Some(slot) = slot { -// assert_eq!(slot, target_slot); -// } else { -// slot = Some(target_slot); -// } - -// // Send a full batch back. -// response_tx -// .send(vec![(op.clone(), 10)]) -// .expect("Failed to send empty batch."); -// attempts += 1; -// } - -// // The next command should be a slot update. -// let slot_filter = |cmd| match cmd { -// PoolCommand::UpdateCurrentSlot(slot) => Some(slot), -// PoolCommand::GetEndorsements { response_tx, .. 
} => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// pool_controller.wait_command(3000.into(), slot_filter).await; -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_max_batch_size_get_operations() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// genesis_timestamp: MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; -// // define addresses used for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, _p0, _p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // Test that consensus stops trying to fill the block, -// // once a non-full batch has been received. -// let get_operations_batch_filter = |cmd| match cmd { -// PoolCommand::GetOperationBatch { -// response_tx, -// target_slot, -// .. -// } => Some((response_tx, target_slot)), -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// let (response_tx, target_slot) = pool_controller -// .wait_command(1000.into(), get_operations_batch_filter) -// .await -// .expect("No response chan and target slot."); - -// // Send a non-full batch back. -// response_tx -// .send(vec![(op.clone(), 10)]) -// .expect("Failed to send non-full batch."); - -// // The next command should be a slot update. -// let slot_filter = |cmd| match cmd { -// PoolCommand::UpdateCurrentSlot(slot) => Some(slot), -// _ => None, -// }; - -// let slot_update = pool_controller -// .wait_command(3000.into(), slot_filter) -// .await -// .expect("No slot update received."); - -// // Test that the update is for the slot -// // after the one for the just created block. -// assert_eq!(slot_update.period, target_slot.period + 1); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// fn get_bootgraph( -// operation: WrappedOperation, -// ledger: ConsensusLedgerSubset, -// ) -> (BootstrapableGraph, BlockId, BlockId) { -// let genesis_0 = get_export_active_test_block(vec![], vec![], Slot::new(0, 0), true); -// let genesis_1 = get_export_active_test_block(vec![], vec![], Slot::new(0, 1), true); -// let p1t0 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![operation], -// Slot::new(1, 0), -// false, -// ); -// ( -// BootstrapableGraph { -// /// Map of active blocks, where blocks are in their exported version. -// active_blocks: vec![ -// (genesis_0.block_id, genesis_0.clone()), -// (genesis_1.block_id, genesis_1.clone()), -// (p1t0.block_id, p1t0.clone()), -// ] -// .into_iter() -// .collect(), -// /// Best parents hash in each thread.
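// Editor's note (annotation, not part of the original patch): each `best_parents`
// entry below is a `(BlockId, period)` pair, one per thread. In this fixture, thread 0's
// best parent is the period-1 block `p1t0`, while thread 1 still points at its genesis block.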
-// best_parents: vec![(p1t0.block_id, 1), (genesis_1.block_id, 0)], -// /// Latest final period and block hash in each thread. -// latest_final_blocks_periods: vec![ -// (genesis_0.block_id, 0u64), -// (genesis_1.block_id, 0u64), -// ], -// /// Head of the incompatibility graph. -// gi_head: vec![ -// (genesis_0.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (genesis_1.block_id, Default::default()), -// ] -// .into_iter() -// .collect(), - -// /// List of maximal cliques of compatible blocks. -// max_cliques: vec![Clique { -// block_ids: vec![genesis_0.block_id, p1t0.block_id, genesis_1.block_id] -// .into_iter() -// .collect(), -// fitness: 1111, -// is_blockclique: true, -// }], -// ledger, -// }, -// p1t0.block_id, -// genesis_1.block_id, -// ) -// } diff --git a/massa-consensus-worker/src/tests/scenarios_pruning.rs b/massa-consensus-worker/src/tests/scenarios_pruning.rs deleted file mode 100644 index 943d35d387a..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_pruning.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_discarded_blocks() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Send more bad blocks than the max number of cached discarded. - for i in 0..(cfg.max_discarded_blocks + 5) as u64 { - // Too far into the future. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(100000000 + i, 0), - parents.clone(), - false, - false, - &staking_keys[0], - ) - .await; - } - - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert!(status.discarded_blocks.len() <= cfg.max_discarded_blocks); - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_awaiting_slot_blocks() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Send more blocks in the future than the max number of future processing blocks. - for i in 0..(cfg.max_future_processing_blocks + 5) as u64 { - // Too far into the future. 
- let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(10 + i, 0), - parents.clone(), - false, - false, - &staking_keys[0], - ) - .await; - } - - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert!(status.discarded_blocks.len() <= cfg.max_future_processing_blocks); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_awaiting_dependencies_blocks_with_discarded_dependency() { - let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 200.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec<BlockId> = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Too far into the future. - let bad_block = - create_block(&cfg, Slot::new(10000, 0), parents.clone(), &staking_keys[0]); - - for i in 1..4 { - // Send several headers with the bad parent as a dependency. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i, 0), - vec![bad_block.id, parents.clone()[0]], - false, - false, - &staking_keys[0], - ) - .await; - } - - // Now, send the bad parent. - protocol_controller - .receive_header(bad_block.content.header) - .await; - validate_notpropagate_block_in_list(&mut protocol_controller, &vec![bad_block.id], 10) - .await; - - // Eventually, all blocks will be discarded due to their bad parent. - // Note: the parent too far into the future will not be discarded, but ignored.
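// Editor's sketch (hypothetical, not part of the original patch): the polling loop that
// follows has no deadline, so a regression would hang the test rather than fail it.
// A bounded variant, assuming the same `consensus_command_sender` API, could look like:
//
//     let deadline = std::time::Instant::now() + std::time::Duration::from_secs(10);
//     loop {
//         let status = consensus_command_sender
//             .get_block_graph_status(None, None)
//             .await
//             .expect("could not get block graph status");
//         if status.discarded_blocks.len() == 3 {
//             break;
//         }
//         assert!(
//             std::time::Instant::now() < deadline,
//             "dependent blocks were not discarded in time"
//         );
//     }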
- loop { - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - if status.discarded_blocks.len() == 3 { - break; - } - } - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_reward_split.rs b/massa-consensus-worker/src/tests/scenarios_reward_split.rs deleted file mode 100644 index 8fe0f8313e0..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_reward_split.rs +++ /dev/null @@ -1,295 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; -// use massa_models::ledger_models::LedgerData; -// use massa_models::wrapped::WrappedContent; -// use massa_models::{Address, Amount, BlockId, Endorsement, EndorsementSerializer, Slot}; -// use massa_pos_exports::Selection; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_reward_split() { -// // setup logging -// // stderrlog::new() -// // .verbosity(2) -// // .timestamp(stderrlog::Timestamp::Millisecond) -// // .init() -// // .unwrap(); -// let thread_count = 2; - -// // Create addresses -// let (address_a, keypair_a) = random_address_on_thread(0, thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_a, LedgerData::new(Amount::from_str("10").unwrap())); -// ledger.insert(address_b, LedgerData::new(Amount::from_str("10").unwrap())); -// let staking_keys = vec![keypair_a.clone(), keypair_b.clone()]; -// let init_time: MassaTime = 1000.into(); -// let cfg = ConsensusConfig { -// endorsement_count: 5, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(init_time), -// max_block_size: 2000, -// max_operations_per_block: 5000, -// operation_validity_periods: 10, -// periods_per_cycle: 3, -// t0: 500.into(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// // Check initial balances. -// let addresses_state = consensus_command_sender -// .get_addresses_info(vec![address_a, address_b].into_iter().collect()) -// .await -// .unwrap(); - -// let addresse_a_state = addresses_state.get(&address_a).unwrap(); -// assert_eq!( -// addresse_a_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("10").unwrap() -// ); - -// let addresse_b_state = addresses_state.get(&address_b).unwrap(); -// assert_eq!( -// addresse_b_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("10").unwrap() -// ); - -// let draws: Selection = selector_controller.get_selection(Slot::new(1, 0)).unwrap(); - -// let slot_one_block_addr = draws.producer; -// let slot_one_endorsements_addrs = draws.endorsements; - -// let slot_one_keypair = if slot_one_block_addr == address_a { -// keypair_a.clone() -// } else { -// keypair_b.clone() -// }; - -// // Create, and propagate, block 1. 
-// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// let b1 = create_block(&cfg, Slot::new(1, 0), parents, &slot_one_keypair); - -// propagate_block( -// &mut protocol_controller, -// b1.clone(), -// true, -// init_time -// .saturating_add(cfg.t0.saturating_mul(2)) -// .to_millis(), -// ) -// .await; - -// let slot_two_block_addr = selector_controller.get_producer(Slot::new(2, 0)).unwrap(); - -// let slot_two_keypair = if slot_two_block_addr == address_a { -// keypair_a.clone() -// } else { -// keypair_b.clone() -// }; - -// // Create, and propagate, block 2. -// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); -// assert!(parents.contains(&b1.id)); - -// let mut b2 = create_block(&cfg, Slot::new(2, 0), parents, &slot_two_keypair); - -// // Endorsements in block 2. - -// // Creator of second block endorses the first. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_two_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_1 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_two_keypair, -// ) -// .unwrap(); - -// // Creator of first block endorses the first. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_one_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_2 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_one_keypair, -// ) -// .unwrap(); - -// // Creator of second block endorses the first, again. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_two_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_3 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_two_keypair, -// ) -// .unwrap(); - -// // Add endorsements to block. -// b2.content.header.content.endorsements = vec![ed_1, ed_2, ed_3]; - -// // Propagate block. -// tokio::time::sleep(cfg.t0.to_duration()).await; -// propagate_block(&mut protocol_controller, b2, true, 300).await; - -// // Check balances after second block. 
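// Editor's note (worked numbers, not part of the original patch): with
// `endorsement_count == 5` and writing `R` for `cfg.block_reward`, the arithmetic below
// reduces to:
//     third          = R / (3 * (1 + 5))                    = R/18
//     slot-1 creator = R * 1 / (1 + 5) + 4 * third          = R/6 + 2R/9       = 7R/18
//     slot-2 creator = R * 4 / (1 + 5) - 6*third + 2*third  = 2R/3 - R/3 + R/9 = 4R/9
// so `expected_a` and `expected_b` are the initial 10 coins plus whichever of those
// totals the corresponding address earned.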
-// let addresses_state = consensus_command_sender -// .get_addresses_info(vec![address_a, address_b].into_iter().collect()) -// .await -// .unwrap(); - -// let third = cfg -// .block_reward -// .checked_div_u64((3 * (1 + cfg.endorsement_count)).into()) -// .unwrap(); - -// let expected_a = Amount::from_str("10") -// .unwrap() // initial ledger -// .saturating_add(if keypair_a.to_bytes() == slot_one_keypair.to_bytes() { -// // block 1 reward -// cfg.block_reward -// .checked_mul_u64(1) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(0).unwrap()) -// // endorsements reward -// .saturating_add( -// third // parent in ed 1 -// .saturating_add(third) // creator of ed 2 -// .saturating_add(third) // parent in ed 2 -// .saturating_add(third), // parent in ed 3 -// ) -// } else { -// Default::default() -// }) -// .saturating_add(if keypair_a.to_bytes() == slot_two_keypair.to_bytes() { -// // block 2 creation reward -// cfg.block_reward -// .checked_mul_u64(1 + 3) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(2 * 3).unwrap()) -// // endorsement rewards -// .saturating_add( -// third // creator of ed 1 -// .saturating_add(third), // creator of ed 3 -// ) -// } else { -// Default::default() -// }); - -// let expected_b = Amount::from_str("10") -// .unwrap() // initial ledger -// .saturating_add(if keypair_b.to_bytes() == slot_one_keypair.to_bytes() { -// // block 1 reward -// cfg.block_reward -// .checked_mul_u64(1) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(0).unwrap()) -// // endorsements reward -// .saturating_add( -// third // parent in ed 1 -// .saturating_add(third) // creator of ed 2 -// .saturating_add(third) // parent in ed 2 -// .saturating_add(third), // parent in ed 3 -// ) -// } else { -// Default::default() -// }) -// .saturating_add(if keypair_b.to_bytes() == slot_two_keypair.to_bytes() { -// // block 2 creation reward -// cfg.block_reward -// .checked_mul_u64(1 + 3) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(2 * 3).unwrap()) -// // endorsement rewards -// .saturating_add( -// third // creator of ed 1 -// .saturating_add(third), // creator of ed 3 -// ) -// } else { -// Default::default() -// }); - -// let state_a = addresses_state.get(&address_a).unwrap(); -// assert_eq!( -// state_a.ledger_info.candidate_ledger_info.balance, -// expected_a -// ); - -// let state_b = addresses_state.get(&address_b).unwrap(); -// assert_eq!( -// state_b.ledger_info.candidate_ledger_info.balance, -// expected_b -// ); - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_send_block.rs b/massa-consensus-worker/src/tests/scenarios_send_block.rs deleted file mode 100644 index c2201b0eb1a..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_send_block.rs +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn 
test_consensus_sends_block_to_peer_who_asked_for_it() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let slot = Slot::new(1 + start_slot, 0); - let draw = selector_controller.get_selection(slot).unwrap().producer; - let creator = get_creator_for_draw(&draw, &staking_keys.clone()); - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &creator, - ); - - let t0s1_id = t0s1.id; - let t0s1_slot = t0s1.content.header.content.slot; - storage.store_block(t0s1); - - // Send the actual block. - protocol_controller - .receive_block(t0s1_id, t0s1_slot, storage.clone()) - .await; - - // block t0s1 is propagated - let hash_list = vec![t0s1_id]; - validate_propagate_block_in_list( - &mut protocol_controller, - &hash_list, - 3000 + start_slot * 1000, - ) - .await; - - // Ask for the block to consensus. - protocol_controller - .receive_get_active_blocks(vec![t0s1_id]) - .await; - - // Consensus should respond with results including the block. - validate_block_found(&mut protocol_controller, &t0s1_id, 100).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_block_not_found() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - // Ask for the block to consensus. - protocol_controller - .receive_get_active_blocks(vec![t0s1.id]) - .await; - - // Consensus should not have the block. 
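// Editor's note (not part of the original patch): `validate_block_not_found` (like its
// positive twin `validate_block_found`) is an empty stub in the test `tools` module
// removed later in this patch, so the check below currently asserts nothing.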
- validate_block_not_found(&mut protocol_controller, &t0s1.id, 100).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_wishlist.rs b/massa-consensus-worker/src/tests/scenarios_wishlist.rs deleted file mode 100644 index b1b29dc19ea..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_wishlist.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; -use std::collections::HashSet; -use std::iter::FromIterator; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_wishlist_delta_with_empty_remove() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - // create test blocks - let slot = Slot::new(1, 0); - let draw = selector_controller - .get_selection(slot) - .expect("could not get selection draws.") - .producer; - let creator = get_creator_for_draw(&draw, &staking_keys.clone()); - let t0s1 = create_block(&cfg, Slot::new(1, 0), genesis_hashes.clone(), &creator); - - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - let expected_new = HashSet::from_iter(vec![t0s1.id].into_iter()); - let expected_remove = HashSet::from_iter(vec![].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - cfg.t0.saturating_add(1000.into()).to_millis(), // leave 1sec extra for init and margin - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_wishlist_delta_remove() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - let expected_new = HashSet::from_iter(vec![t0s1.id].into_iter()); - let expected_remove = HashSet::from_iter(vec![].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - cfg.t0.saturating_add(1000.into()).to_millis(), // leave 1sec extra for init and margin, - ) - .await; - 
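// Editor's note (not part of the original patch): the sequence below exercises the other
// direction of the wishlist protocol: once the full block body is delivered via
// `receive_block`, consensus should ask protocol to drop the id from its wishlist,
// moving it from `expected_new` to `expected_remove`.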
- storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - let expected_new = HashSet::from_iter(vec![].into_iter()); - let expected_remove = HashSet::from_iter(vec![t0s1.id].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - 1000, - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/test_block_graph.rs b/massa-consensus-worker/src/tests/test_block_graph.rs deleted file mode 100644 index 13f9a086eb8..00000000000 --- a/massa-consensus-worker/src/tests/test_block_graph.rs +++ /dev/null @@ -1,174 +0,0 @@ -use crate::tests::tools::get_dummy_block_id; -use massa_graph::{ - export_active_block::ExportActiveBlock, BootstrapableGraph, BootstrapableGraphDeserializer, - BootstrapableGraphSerializer, -}; -use massa_hash::Hash; -use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer, WrappedBlock}, - endorsement::{Endorsement, EndorsementSerializerLW}, - slot::Slot, - wrapped::WrappedContent, -}; - -use massa_serialization::{DeserializeError, Deserializer, Serializer}; -use massa_signature::KeyPair; -use serial_test::serial; - -/// the data input to create the public keys was generated using the `secp256k1` curve -/// a test using this function is a regression test not an implementation test -fn get_export_active_test_block() -> (WrappedBlock, ExportActiveBlock) { - let keypair = KeyPair::generate(); - let block = Block::new_wrapped( - Block { - header: BlockHeader::new_wrapped( - BlockHeader { - operation_merkle_root: Hash::compute_from(&Vec::new()), - parents: vec![get_dummy_block_id("parent1"), get_dummy_block_id("parent2")], - slot: Slot::new(1, 0), - endorsements: vec![Endorsement::new_wrapped( - Endorsement { - endorsed_block: get_dummy_block_id("parent1"), - index: 0, - slot: Slot::new(1, 0), - }, - EndorsementSerializerLW::new(), - &keypair, - ) - .unwrap()], - }, - BlockHeaderSerializer::new(), - &keypair, - ) - .unwrap(), - operations: Default::default(), - }, - BlockSerializer::new(), - &keypair, - ) - .unwrap(); - - ( - block.clone(), - ExportActiveBlock { - parents: vec![ - (get_dummy_block_id("parent11"), 23), - (get_dummy_block_id("parent12"), 24), - ], - block, - operations: vec![], - is_final: true, - }, - ) -} - -#[test] -#[serial] -fn test_bootstrapable_graph_serialized() { - //let storage: Storage = Storage::create_root(); - - let (_, active_block) = get_export_active_test_block(); - - //storage.store_block(block.header.content.compute_id().expect("Fail to calculate block id."), block, block.to_bytes_compact().expect("Fail to serialize block")); - - let graph = BootstrapableGraph { - /// Map of active blocks, were blocks are in their exported version. 
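// Editor's note (not part of the original patch): the doc comment above appears stale
// (and reads "were" for "where"): the bootstrap fixture stores a flat `final_blocks`
// list rather than a map of active blocks.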
- final_blocks: vec![active_block].into_iter().collect(), - }; - - let bootstrapable_graph_serializer = BootstrapableGraphSerializer::new(); - let bootstrapable_graph_deserializer = BootstrapableGraphDeserializer::new( - 2, 8, 10000, 10000, 10000, 10000, 10000, 10, 255, 10_000, - ); - let mut bytes = Vec::new(); - - bootstrapable_graph_serializer - .serialize(&graph, &mut bytes) - .unwrap(); - let (_, new_graph) = bootstrapable_graph_deserializer - .deserialize::(&bytes) - .unwrap(); - - assert_eq!( - graph.final_blocks[0].block.serialized_data, - new_graph.final_blocks[0].block.serialized_data - ); -} - -// #[tokio::test] -// #[serial] -// async fn test_clique_calculation() { -// let ledger_file = generate_ledger_file(&Map::default()); -// let cfg = ConsensusConfig::from(ledger_file.path()); -// let storage: Storage = Storage::create_root(); -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// thread_count: 2, -// periods_per_cycle: 100, -// genesis_address: Address::from_str("A12hgh5ULW9o8fJE9muLNXhQENaUUswQbxPyDSq8ridnDGu5gRiJ") -// .unwrap(), -// endorsement_count: 0, -// max_draw_cache: 10, -// initial_draw_seed: "".to_string(), -// }; -// let (mut selector_manager, selector_controller) = -// start_selector_worker(selector_config).unwrap(); -// let mut block_graph = -// BlockGraph::new(GraphConfig::from(&cfg), None, storage, selector_controller) -// .await -// .unwrap(); -// let hashes: Vec = vec![ -// "VzCRpnoZVYY1yQZTXtVQbbxwzdu6hYtdCUZB5BXWSabsiXyfP", -// "JnWwNHRR1tUD7UJfnEFgDB4S4gfDTX2ezLadr7pcwuZnxTvn1", -// "xtvLedxC7CigAPytS5qh9nbTuYyLbQKCfbX8finiHsKMWH6SG", -// "2Qs9sSbc5sGpVv5GnTeDkTKdDpKhp4AgCVT4XFcMaf55msdvJN", -// "2VNc8pR4tNnZpEPudJr97iNHxXbHiubNDmuaSzrxaBVwKXxV6w", -// "2bsrYpfLdvVWAJkwXoJz1kn4LWshdJ6QjwTrA7suKg8AY3ecH1", -// "kfUeGj3ZgBprqFRiAQpE47dW5tcKTAueVaWXZquJW6SaPBd4G", -// ] -// .into_iter() -// .map(|h| BlockId::from_bs58_check(h).unwrap()) -// .collect(); -// block_graph.gi_head = vec![ -// (0, vec![1, 2, 3, 4]), -// (1, vec![0]), -// (2, vec![0]), -// (3, vec![0]), -// (4, vec![0]), -// (5, vec![6]), -// (6, vec![5]), -// ] -// .into_iter() -// .map(|(idx, lst)| (hashes[idx], lst.into_iter().map(|i| hashes[i]).collect())) -// .collect(); -// let computed_sets = block_graph.compute_max_cliques(); - -// let expected_sets: Vec> = vec![ -// vec![1, 2, 3, 4, 5], -// vec![1, 2, 3, 4, 6], -// vec![0, 5], -// vec![0, 6], -// ] -// .into_iter() -// .map(|lst| lst.into_iter().map(|i| hashes[i]).collect()) -// .collect(); - -// assert_eq!(computed_sets.len(), expected_sets.len()); -// for expected in expected_sets.into_iter() { -// assert!(computed_sets.iter().any(|v| v == &expected)); -// } -// selector_manager.stop(); -// } - -// /// generate a named temporary JSON ledger file -// fn generate_ledger_file(ledger_vec: &Map) -> NamedTempFile { -// use std::io::prelude::*; -// let ledger_file_named = NamedTempFile::new().expect("cannot create temp file"); -// serde_json::to_writer_pretty(ledger_file_named.as_file(), &ledger_vec) -// .expect("unable to write ledger file"); -// ledger_file_named -// .as_file() -// .seek(std::io::SeekFrom::Start(0)) -// .expect("could not seek file"); -// ledger_file_named -// } diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs deleted file mode 100644 index c29c1d3093e..00000000000 --- a/massa-consensus-worker/src/tests/tools.rs +++ /dev/null @@ -1,1056 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -#![allow(clippy::ptr_arg)] // this allow 
&Vec<..> as function argument type - -use crate::start_consensus_controller; -use massa_cipher::decrypt; -use massa_consensus_exports::error::ConsensusResult; -use massa_consensus_exports::{ - settings::ConsensusChannels, ConsensusCommandSender, ConsensusConfig, ConsensusEventReceiver, -}; -use massa_execution_exports::test_exports::MockExecutionController; -use massa_graph::{export_active_block::ExportActiveBlock, BlockGraphExport, BootstrapableGraph}; -use massa_hash::Hash; -use massa_models::prehash::PreHashMap; -use massa_models::{ - address::Address, - amount::Amount, - block::{ - Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock, - WrappedHeader, - }, - operation::{Operation, OperationSerializer, OperationType, WrappedOperation}, - prehash::PreHashSet, - slot::Slot, - wrapped::{Id, WrappedContent}, -}; -use massa_pool_exports::test_exports::MockPoolController; -use massa_pool_exports::PoolController; -use massa_pos_exports::{SelectorConfig, SelectorController}; -use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::test_exports::MockProtocolController; -use massa_protocol_exports::ProtocolCommand; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use parking_lot::Mutex; -use std::{collections::BTreeMap, collections::HashSet, future::Future, path::Path}; -use std::{str::FromStr, sync::Arc, time::Duration}; - -use tracing::info; - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Handle the expected selector messages, always approving the address. -pub fn approve_producer_and_selector_for_staker( - staking_key: &KeyPair, - selector_controller: &Receiver, -) { - let addr = Address::from_public_key(&staking_key.get_public_key()); - // Drain all messages, assuming there can be a slight delay between sending some. 
- loop { - let timeout = Duration::from_millis(100); - match selector_controller.recv_timeout(timeout) { - Ok(MockSelectorControllerMessage::GetSelection { - slot: _, - response_tx, - }) => { - let selection = Selection { - producer: addr.clone(), - endorsements: vec![addr.clone(); ENDORSEMENT_COUNT as usize], - }; - response_tx.send(Ok(selection)).unwrap(); - } - Ok(MockSelectorControllerMessage::GetProducer { - slot: _, - response_tx, - }) => { - response_tx.send(Ok(addr.clone())).unwrap(); - } - Ok(msg) => panic!("Unexpected selector message {:?}", msg), - Err(RecvTimeoutError::Timeout) => break, - _ => panic!("Unexpected error from selector receiver"), - } - } -} -*/ - -pub fn get_dummy_block_id(s: &str) -> BlockId { - BlockId(Hash::compute_from(s.as_bytes())) -} - -pub struct AddressTest { - pub address: Address, - pub keypair: KeyPair, -} - -impl From for (Address, KeyPair) { - fn from(addr: AddressTest) -> Self { - (addr.address, addr.keypair) - } -} - -/// Same as `random_address()` but force a specific thread -pub fn random_address_on_thread(thread: u8, thread_count: u8) -> AddressTest { - loop { - let keypair = KeyPair::generate(); - let address = Address::from_public_key(&keypair.get_public_key()); - if thread == address.get_thread(thread_count) { - return AddressTest { address, keypair }; - } - } -} - -/// Generate a random address -pub fn _random_address() -> AddressTest { - let keypair = KeyPair::generate(); - AddressTest { - address: Address::from_public_key(&keypair.get_public_key()), - keypair, - } -} - -/// return true if another block has been seen -pub async fn validate_notpropagate_block( - protocol_controller: &mut MockProtocolController, - not_propagated: BlockId, - timeout_ms: u64, -) -> bool { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => not_propagated != block_id, - None => false, - } -} - -/// return true if another block has been seen -pub async fn validate_notpropagate_block_in_list( - protocol_controller: &mut MockProtocolController, - not_propagated: &Vec, - timeout_ms: u64, -) -> bool { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => !not_propagated.contains(&block_id), - None => false, - } -} - -pub async fn validate_propagate_block_in_list( - protocol_controller: &mut MockProtocolController, - valid: &Vec, - timeout_ms: u64, -) -> BlockId { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => { - assert!( - valid.contains(&block_id), - "not the valid hash propagated, it can be a genesis_timestamp problem" - ); - block_id - } - None => panic!("Hash not propagated."), - } -} - -pub async fn validate_ask_for_block( - protocol_controller: &mut MockProtocolController, - valid: BlockId, - timeout_ms: u64, -) -> BlockId { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, .. 
} => Some(new), - _ => None, - }) - .await; - match param { - Some(new) => { - assert!(new.contains_key(&valid), "not the valid hash asked for"); - assert_eq!(new.len(), 1); - valid - } - None => panic!("Block not asked for before timeout."), - } -} - -pub async fn validate_wishlist( - protocol_controller: &mut MockProtocolController, - new: PreHashSet<BlockId>, - remove: PreHashSet<BlockId>, - timeout_ms: u64, -) { - let new: PreHashMap<BlockId, Option<WrappedHeader>> = - new.into_iter().map(|id| (id, None)).collect(); - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, remove } => Some((new, remove)), - _ => None, - }) - .await; - match param { - Some((got_new, got_remove)) => { - for key in got_new.keys() { - assert!(new.contains_key(key)); - } - assert_eq!(remove, got_remove); - } - None => panic!("Wishlist delta not sent before timeout."), - } -} - -pub async fn validate_does_not_ask_for_block( - protocol_controller: &mut MockProtocolController, - hash: &BlockId, - timeout_ms: u64, -) { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, .. } => Some(new), - _ => None, - }) - .await; - if let Some(new) = param { - if new.contains_key(hash) { - panic!("unexpected ask for block {}", hash); - } - } -} - -pub async fn validate_propagate_block( - protocol_controller: &mut MockProtocolController, - valid_hash: BlockId, - timeout_ms: u64, -) { - protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => { - if block_id == valid_hash { - return Some(()); - } - None - } - _ => None, - }) - .await - .expect("Block not propagated before timeout.") -} - -pub async fn validate_notify_block_attack_attempt( - protocol_controller: &mut MockProtocolController, - valid_hash: BlockId, - timeout_ms: u64, -) { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::AttackBlockDetected(hash) => Some(hash), - _ => None, - }) - .await; - match param { - Some(hash) => assert_eq!(valid_hash, hash, "Attack attempt notified for wrong hash."), - None => panic!("Attack attempt not notified before timeout."), - } -} - -pub async fn validate_block_found( - _protocol_controller: &mut MockProtocolController, - _valid_hash: &BlockId, - _timeout_ms: u64, -) { -} - -pub async fn validate_block_not_found( - _protocol_controller: &mut MockProtocolController, - _valid_hash: &BlockId, - _timeout_ms: u64, -) { -} - -pub async fn create_and_test_block( - protocol_controller: &mut MockProtocolController, - cfg: &ConsensusConfig, - slot: Slot, - best_parents: Vec<BlockId>, - valid: bool, - trace: bool, - creator: &KeyPair, -) -> BlockId { - let block = create_block(cfg, slot, best_parents, creator); - let block_id = block.id; - let slot = block.content.header.content.slot; - let mut storage = Storage::create_root(); - if trace { - info!("create block:{}", block.id); - } - - storage.store_block(block); - protocol_controller - .receive_block(block_id, slot, storage.clone()) - .await; - if valid { - // Assert that the block is propagated. - validate_propagate_block(protocol_controller, block_id, 2000).await; - } else { - // Assert that the block is not propagated.
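// Editor's note (not part of the original patch): the negative check below can only
// watch the command stream for a bounded 500 ms window, so "not propagated" is a
// best-effort observation rather than a proof; compare the 2000 ms allowance given to
// the positive check above.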
- validate_notpropagate_block(protocol_controller, block_id, 500).await; - } - block_id -} - -pub async fn propagate_block( - protocol_controller: &mut MockProtocolController, - block_id: BlockId, - slot: Slot, - storage: Storage, - valid: bool, - timeout_ms: u64, -) -> BlockId { - let block_hash = block_id; - protocol_controller - .receive_block(block_id, slot, storage) - .await; - if valid { - // see if the block is propagated. - validate_propagate_block(protocol_controller, block_hash, timeout_ms).await; - } else { - // see if the block is propagated. - validate_notpropagate_block(protocol_controller, block_hash, timeout_ms).await; - } - block_hash -} - -pub fn _create_roll_transaction( - keypair: &KeyPair, - roll_count: u64, - buy: bool, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = if buy { - OperationType::RollBuy { roll_count } - } else { - OperationType::RollSell { roll_count } - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -pub async fn _wait_pool_slot( - _pool_controller: &mut MockPoolController, - _t0: MassaTime, - period: u64, - thread: u8, -) -> Slot { - // TODO: Replace ?? - // pool_controller - // .wait_command(t0.checked_mul(2).unwrap(), |cmd| match cmd { - // PoolCommand::UpdateCurrentSlot(s) => { - // if s >= Slot::new(period, thread) { - // Some(s) - // } else { - // None - // } - // } - // _ => None, - // }) - // .await - // .expect("timeout while waiting for slot") - Slot::new(period, thread) -} - -pub fn _create_transaction( - keypair: &KeyPair, - recipient_address: Address, - amount: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::Transaction { - recipient_address, - amount: Amount::from_str(&amount.to_string()).unwrap(), - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -#[allow(clippy::too_many_arguments)] -pub fn _create_executesc( - keypair: &KeyPair, - expire_period: u64, - fee: u64, - data: Vec, - max_gas: u64, - gas_price: u64, -) -> WrappedOperation { - let op = OperationType::ExecuteSC { - data, - max_gas, - gas_price: Amount::from_str(&gas_price.to_string()).unwrap(), - datastore: BTreeMap::new(), - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -pub fn _create_roll_buy( - keypair: &KeyPair, - roll_count: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::RollBuy { roll_count }; - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -pub fn create_roll_sell( - keypair: &KeyPair, - roll_count: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::RollSell { roll_count }; - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} -*/ - -// returns hash and resulting discarded blocks -pub fn create_block( - cfg: &ConsensusConfig, - slot: Slot, - 
best_parents: Vec, - creator: &KeyPair, -) -> WrappedBlock { - create_block_with_merkle_root( - cfg, - Hash::compute_from("default_val".as_bytes()), - slot, - best_parents, - creator, - ) -} - -// returns hash and resulting discarded blocks -pub fn create_block_with_merkle_root( - _cfg: &ConsensusConfig, - operation_merkle_root: Hash, - slot: Slot, - best_parents: Vec, - creator: &KeyPair, -) -> WrappedBlock { - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents, - operation_merkle_root, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: Default::default(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Creates an endorsement for use in consensus tests. -pub fn create_endorsement( - sender_keypair: &KeyPair, - slot: Slot, - endorsed_block: BlockId, - index: u32, -) -> WrappedEndorsement { - let content = Endorsement { - slot, - index, - endorsed_block, - }; - Endorsement::new_wrapped(content, EndorsementSerializer::new(), sender_keypair).unwrap() -} -*/ - -pub fn _get_export_active_test_block( - parents: Vec<(BlockId, u64)>, - operations: Vec, - slot: Slot, - is_final: bool, -) -> ExportActiveBlock { - let keypair = KeyPair::generate(); - let block = Block::new_wrapped( - Block { - header: BlockHeader::new_wrapped( - BlockHeader { - operation_merkle_root: Hash::compute_from( - &operations - .iter() - .flat_map(|op| op.id.into_bytes()) - .collect::>()[..], - ), - parents: parents.iter().map(|(id, _)| *id).collect(), - slot, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - &keypair, - ) - .unwrap(), - operations: operations.iter().cloned().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - &keypair, - ) - .unwrap(); - - ExportActiveBlock { - parents, - block, - operations, - is_final, - } -} - -pub fn create_block_with_operations( - _cfg: &ConsensusConfig, - slot: Slot, - best_parents: &Vec, - creator: &KeyPair, - operations: Vec, -) -> WrappedBlock { - let operation_merkle_root = Hash::compute_from( - &operations.iter().fold(Vec::new(), |acc, v| { - [acc, v.id.get_hash().to_bytes().to_vec()].concat() - })[..], - ); - - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents.clone(), - operation_merkle_root, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: operations.into_iter().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -pub fn create_block_with_operations_and_endorsements( - _cfg: &ConsensusConfig, - slot: Slot, - best_parents: &Vec, - creator: &KeyPair, - operations: Vec, - endorsements: Vec, -) -> WrappedBlock { - let operation_merkle_root = Hash::compute_from( - &operations.iter().fold(Vec::new(), |acc, v| { - [acc, v.id.get_hash().to_bytes().to_vec()].concat() - })[..], - ); - - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents.clone(), - operation_merkle_root, - endorsements, - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: operations.into_iter().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} -*/ - -pub fn get_creator_for_draw(draw: &Address, nodes: &Vec) -> KeyPair { 
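// Editor's note (not part of the original patch): a linear scan is fine here since test
// key sets are tiny, and the panic on a missing match surfaces selector/staking-key
// mismatches immediately.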
- for key in nodes.iter() { - let address = Address::from_public_key(&key.get_public_key()); - if address == *draw { - return key.clone(); - } - } - panic!("Matching key for draw not found."); -} - -/// Load staking keys from file and derive public keys and addresses -pub async fn _load_initial_staking_keys( - path: &Path, - password: &str, -) -> ConsensusResult> { - if !std::path::Path::is_file(path) { - return Ok(PreHashMap::default()); - } - let (_version, data) = decrypt(password, &tokio::fs::read(path).await?)?; - serde_json::from_slice::>(&data) - .unwrap() - .into_iter() - .map(|key| Ok((Address::from_public_key(&key.get_public_key()), key))) - .collect() -} - -/// Runs a consensus test, passing a mock pool controller to it. -pub async fn _consensus_pool_test( - cfg: ConsensusConfig, - boot_graph: Option, - test: F, -) where - F: FnOnce( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - ) -> V, - V: Future< - Output = ( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - ), - >, -{ - let mut storage: Storage = Storage::create_root(); - if let Some(ref graph) = boot_graph { - for export_block in &graph.final_blocks { - storage.store_block(export_block.block.clone()); - } - } - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - // launch consensus controller - let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller, - }, - boot_graph, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. 
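// Editor's note (not part of the original patch): the test closure hands its controllers
// back so the harness can run an orderly teardown: stop consensus while protocol
// commands are drained via `ignore_commands_while`, then flip `stop_sinks` and join the
// execution sink thread.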
- let ( - _pool_controller, - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - ) = test( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Runs a consensus test, passing a mock pool controller to it. -pub async fn consensus_pool_test_with_storage( - cfg: ConsensusConfig, - boot_graph: Option, - test: F, -) where - F: FnOnce( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Storage, - Receiver, - ) -> V, - V: Future< - Output = ( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Receiver, - ), - >, -{ - let mut storage: Storage = Storage::create_root(); - if let Some(ref graph) = boot_graph { - for export_block in &graph.final_blocks { - storage.store_block(export_block.block.clone()); - } - } - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller: selector_controller, - }, - boot_graph, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. - let ( - _pool_controller, - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - storage, - selector_receiver, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} -*/ - -/// Runs a consensus test, without passing a mock pool controller to it. 
-pub async fn consensus_without_pool_test(cfg: ConsensusConfig, test: F) -where - F: FnOnce( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ) -> V, - V: Future< - Output = ( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ), - >, -{ - let storage: Storage = Storage::create_root(); - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - let (mut selector_manager, selector_controller) = - start_selector_worker(selector_config).unwrap(); - // for now, execution_rx is ignored: clique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller: selector_controller.clone(), - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. - let ( - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - selector_manager.stop(); - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -/// Runs a consensus test, without passing a mock pool controller to it, -/// and passing a reference to storage. 
-pub async fn consensus_without_pool_with_storage_test<F, V>(cfg: ConsensusConfig, test: F) -where - F: FnOnce( - Storage, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box<dyn SelectorController>, - ) -> V, - V: Future< - Output = ( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box<dyn SelectorController>, - ), - >, -{ - let storage: Storage = Storage::create_root(); - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: clique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - let (mut selector_manager, selector_controller) = - start_selector_worker(selector_config).unwrap(); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller: selector_controller.clone(), - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. 
- let ( - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - storage, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - selector_manager.stop(); - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -pub fn get_cliques(graph: &BlockGraphExport, hash: BlockId) -> HashSet<usize> { - let mut res = HashSet::new(); - for (i, clique) in graph.max_cliques.iter().enumerate() { - if clique.block_ids.contains(&hash) { - res.insert(i); - } - } - res -} diff --git a/massa-consensus-worker/src/tools.rs b/massa-consensus-worker/src/tools.rs deleted file mode 100644 index 2d771e2de71..00000000000 --- a/massa-consensus-worker/src/tools.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::consensus_worker::ConsensusWorker; -use massa_consensus_exports::settings::ConsensusConfig; -use massa_consensus_exports::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - error::{ConsensusError, ConsensusResult as Result}, - events::ConsensusEvent, - settings::{ConsensusChannels, ConsensusWorkerChannels}, - ConsensusCommandSender, ConsensusEventReceiver, ConsensusManager, -}; -use massa_graph::{settings::GraphConfig, BlockGraph, BootstrapableGraph}; -use massa_storage::Storage; -use tokio::sync::mpsc; -use tracing::{debug, error, info}; - -/// Creates a new consensus controller. -/// -/// # Arguments -/// * `cfg`: consensus configuration -/// * `protocol_command_sender`: a `ProtocolCommandSender` instance to send commands to Protocol. -/// * `protocol_event_receiver`: a `ProtocolEventReceiver` instance to receive events from Protocol. -#[allow(clippy::too_many_arguments)] -pub async fn start_consensus_controller( - cfg: ConsensusConfig, - channels: ConsensusChannels, - boot_graph: Option<BootstrapableGraph>, - storage: Storage, - clock_compensation: i64, -) -> Result<( - ConsensusCommandSender, - ConsensusEventReceiver, - ConsensusManager, -)> { - debug!("starting consensus controller"); - massa_trace!( - "consensus.consensus_controller.start_consensus_controller", - {} - ); - - // todo that is checked when loading the config, should be removed - // ensure that the parameters are sane - if cfg.thread_count == 0 { - return Err(ConsensusError::ConfigError( - "thread_count should be strictly more than 0".to_string(), - )); - } - if cfg.t0 == 0.into() { - return Err(ConsensusError::ConfigError( - "t0 should be strictly more than 0".to_string(), - )); - } - if cfg.t0.checked_rem_u64(cfg.thread_count as u64)? 
!= 0.into() { - return Err(ConsensusError::ConfigError( - "thread_count should divide t0".to_string(), - )); - } - - // start worker - let block_db = BlockGraph::new( - GraphConfig::from(&cfg), - boot_graph, - storage.clone_without_refs(), - channels.selector_controller.clone(), - ) - .await?; - let (command_tx, command_rx) = mpsc::channel::<ConsensusCommand>(cfg.channel_size); - let (event_tx, event_rx) = mpsc::channel::<ConsensusEvent>(cfg.channel_size); - let (manager_tx, manager_rx) = mpsc::channel::<ConsensusManagementCommand>(1); - let cfg_copy = cfg.clone(); - let join_handle = tokio::spawn(async move { - let res = ConsensusWorker::new( - cfg_copy, - ConsensusWorkerChannels { - protocol_command_sender: channels.protocol_command_sender, - protocol_event_receiver: channels.protocol_event_receiver, - execution_controller: channels.execution_controller, - pool_command_sender: channels.pool_command_sender, - selector_controller: channels.selector_controller, - controller_command_rx: command_rx, - controller_event_tx: event_tx, - controller_manager_rx: manager_rx, - }, - block_db, - clock_compensation, - ) - .await? - .run_loop() - .await; - match res { - Err(err) => { - error!("consensus worker crashed: {}", err); - Err(err) - } - Ok(v) => { - info!("consensus worker finished cleanly"); - Ok(v) - } - } - }); - Ok(( - ConsensusCommandSender(command_tx), - ConsensusEventReceiver(event_rx), - ConsensusManager { - manager_tx, - join_handle, - }, - )) -} diff --git a/massa-factory-exports/Cargo.toml b/massa-factory-exports/Cargo.toml index 17dbe926558..ae3e8e696c8 100644 --- a/massa-factory-exports/Cargo.toml +++ b/massa-factory-exports/Cargo.toml @@ -23,7 +23,6 @@ massa_ledger_exports = { path = "../massa-ledger-exports" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_pos_exports = { path = "../massa-pos-exports" } -massa_consensus_exports = { path = "../massa-consensus-exports" } massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } diff --git a/massa-factory-worker/Cargo.toml b/massa-factory-worker/Cargo.toml index 86afe9b3336..ff29c1011fb 100644 --- a/massa-factory-worker/Cargo.toml +++ b/massa-factory-worker/Cargo.toml @@ -22,7 +22,6 @@ massa_wallet = { path = "../massa-wallet" } massa_hash = { path = "../massa-hash" } massa_pos_exports = { path = "../massa-pos-exports" } massa_serialization = { path = "../massa-serialization" } -massa_consensus_exports = { path = "../massa-consensus-exports" } massa_pool_exports = { path = "../massa-pool-exports" } [dev-dependencies] massa_protocol_exports = { path = "../massa-protocol-exports", features=["testing"] } massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } massa_wallet = { path = "../massa-wallet", features=["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features=["testing"] } -massa_consensus_exports = { path = "../massa-consensus-exports", features=["testing"] } massa_pool_exports = { path = "../massa-pool-exports", features=["testing"] } [features] sandbox = [] -testing = ["massa_factory_exports/testing", "massa_pos_exports/testing", "massa_pool_exports/testing", "massa_consensus_exports/testing", "massa_protocol_exports/testing", "massa_wallet/testing"] +testing = ["massa_factory_exports/testing", "massa_pos_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing", "massa_wallet/testing"] diff --git 
a/massa-graph-2-exports/Cargo.toml b/massa-graph-2-exports/Cargo.toml index 3cb982dc969..087e465ac04 100644 --- a/massa-graph-2-exports/Cargo.toml +++ b/massa-graph-2-exports/Cargo.toml @@ -9,15 +9,18 @@ edition = "2021" [dependencies] crossbeam-channel = "0.5.6" displaydoc = "0.2" +nom = "7.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +thiserror = "1.0" #custom modules +massa_hash = { path = "../massa-hash"} massa_execution_exports = { path = "../massa-execution-exports" } -massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } massa_pool_exports = { path = "../massa-pool-exports" } massa_pos_exports = { path = "../massa-pos-exports" } massa_protocol_exports ={ path = "../massa-protocol-exports" } massa_storage = { path = "../massa-storage" } +massa_serialization = { path = "../massa-serialization" } massa_time = { path = "../massa-time" } massa_signature = { path = "../massa-signature" } \ No newline at end of file diff --git a/massa-graph/src/bootstrapable_graph.rs b/massa-graph-2-exports/src/bootstrapable_graph.rs similarity index 100% rename from massa-graph/src/bootstrapable_graph.rs rename to massa-graph-2-exports/src/bootstrapable_graph.rs diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs index b20acc3934f..480fb105480 100644 --- a/massa-graph-2-exports/src/controller_trait.rs +++ b/massa-graph-2-exports/src/controller_trait.rs @@ -1,5 +1,5 @@ use crate::block_graph_export::BlockGraphExport; -use massa_graph::{error::GraphResult, BootstrapableGraph}; +use crate::{bootstrapable_graph::BootstrapableGraph, error::GraphResult}; use massa_models::{ api::BlockGraphStatus, block::{BlockHeader, BlockId}, diff --git a/massa-graph/src/error.rs b/massa-graph-2-exports/src/error.rs similarity index 100% rename from massa-graph/src/error.rs rename to massa-graph-2-exports/src/error.rs diff --git a/massa-graph/src/export_active_block.rs b/massa-graph-2-exports/src/export_active_block.rs similarity index 100% rename from massa-graph/src/export_active_block.rs rename to massa-graph-2-exports/src/export_active_block.rs diff --git a/massa-graph-2-exports/src/lib.rs b/massa-graph-2-exports/src/lib.rs index b7a285a0058..bc95375851c 100644 --- a/massa-graph-2-exports/src/lib.rs +++ b/massa-graph-2-exports/src/lib.rs @@ -7,7 +7,10 @@ mod settings; pub mod block_graph_export; pub mod block_status; +pub mod bootstrapable_graph; +pub mod error; pub mod events; +pub mod export_active_block; pub use channels::GraphChannels; pub use controller_trait::{GraphController, GraphManager}; diff --git a/massa-graph-2-worker/Cargo.toml b/massa-graph-2-worker/Cargo.toml index 5d76d822b13..4b60ebb7e5e 100644 --- a/massa-graph-2-worker/Cargo.toml +++ b/massa-graph-2-worker/Cargo.toml @@ -15,10 +15,13 @@ serde_json = "1.0" parking_lot = { version = "0.12", features = ["deadlock_detection"] } #custom modules massa_graph_2_exports = { path = "../massa-graph-2-exports" } -massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } + +[features] + +sandbox = [] \ No newline at end of file diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs index ba0809f5243..2f690a0cbf6 100644 --- a/massa-graph-2-worker/src/commands.rs +++ 
b/massa-graph-2-worker/src/commands.rs @@ -9,5 +9,5 @@ use massa_storage::Storage; pub enum GraphCommand { RegisterBlock(BlockId, Slot, Storage), RegisterBlockHeader(BlockId, Wrapped<BlockHeader, BlockId>), - MarkInvalidBlock(BlockId, Wrapped<BlockHeader, BlockId>) + MarkInvalidBlock(BlockId, Wrapped<BlockHeader, BlockId>), } diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index e1c23016004..f94db0db2ea 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -1,10 +1,10 @@ -use massa_graph::{ +use massa_graph_2_exports::{ + block_graph_export::BlockGraphExport, + block_status::BlockStatus, + bootstrapable_graph::BootstrapableGraph, error::{GraphError, GraphResult}, export_active_block::ExportActiveBlock, - BootstrapableGraph, -}; -use massa_graph_2_exports::{ - block_graph_export::BlockGraphExport, block_status::BlockStatus, GraphController, + GraphController, }; use massa_models::{ api::BlockGraphStatus, diff --git a/massa-graph-2-worker/src/state/verifications.rs b/massa-graph-2-worker/src/state/verifications.rs index ffb4aef32a0..2fc9fb4986a 100644 --- a/massa-graph-2-worker/src/state/verifications.rs +++ b/massa-graph-2-worker/src/state/verifications.rs @@ -1,7 +1,9 @@ use super::GraphState; -use massa_graph::error::{GraphError, GraphResult}; -use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason}; +use massa_graph_2_exports::{ + block_status::{BlockStatus, DiscardReason}, + error::GraphError, +}; use massa_logging::massa_trace; use massa_models::{ block::{BlockId, WrappedHeader}, @@ -67,7 +69,7 @@ impl GraphState { header: &WrappedHeader, current_slot: Option<Slot>, read_shared_state: &GraphState, - ) -> GraphResult<HeaderCheckOutcome> { + ) -> Result<HeaderCheckOutcome, GraphError> { massa_trace!("consensus.block_graph.check_header", { "block_id": block_id }); @@ -273,7 +275,7 @@ impl GraphState { .filter(|&sibling_h| sibling_h != block_id) .try_for_each(|&sibling_h| { incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); - GraphResult::<()>::Ok(()) + Result::<(), GraphError>::Ok(()) })?; // grandpa incompatibility test @@ -375,7 +377,7 @@ impl GraphState { pub fn check_endorsements( &self, header: &WrappedHeader, - ) -> GraphResult<EndorsementsCheckOutcome> { + ) -> Result<EndorsementsCheckOutcome, GraphError> { // check endorsements let endorsement_draws = match self .channels diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index eb3fdddbb4b..152e643547f 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -1,13 +1,9 @@ -use std::{ - collections::{HashMap, VecDeque}, - sync::{mpsc, Arc}, -}; - -use massa_graph::{ +use massa_graph_2_exports::{ + block_status::BlockStatus, + bootstrapable_graph::BootstrapableGraph, error::{GraphError, GraphResult}, - BootstrapableGraph, + GraphConfig, }; -use massa_graph_2_exports::{block_status::BlockStatus, GraphConfig}; use massa_hash::Hash; use massa_models::{ active_block::ActiveBlock, @@ -21,6 +17,10 @@ use massa_models::{ use massa_storage::Storage; use massa_time::MassaTime; use parking_lot::RwLock; +use std::{ + collections::{HashMap, VecDeque}, + sync::{mpsc, Arc}, +}; use tracing::log::info; use crate::{commands::GraphCommand, state::GraphState}; diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index 20bf62fadd6..d1ff46acfe1 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -1,6 +1,6 @@ use std::{sync::mpsc, time::Instant}; -use massa_graph::error::GraphResult; +use 
massa_graph_2_exports::error::GraphResult; use massa_models::{ slot::Slot, timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-graph-2-worker/src/worker/mod.rs index 0839a29b0f2..daa1da3f9a6 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-graph-2-worker/src/worker/mod.rs @@ -1,5 +1,7 @@ -use massa_graph::BootstrapableGraph; -use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphController, GraphManager}; +use massa_graph_2_exports::{ + bootstrapable_graph::BootstrapableGraph, GraphChannels, GraphConfig, GraphController, + GraphManager, +}; use massa_models::block::BlockId; use massa_models::clique::Clique; use massa_models::prehash::PreHashSet; diff --git a/massa-graph/Cargo.toml b/massa-graph/Cargo.toml deleted file mode 100644 index d8277889fa2..00000000000 --- a/massa-graph/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "massa_graph" -version = "0.1.0" -authors = ["Massa Labs "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -displaydoc = "0.2" -num = { version = "0.4", features = ["serde"] } -nom = "7.1" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -thiserror = "1.0" -tracing = "0.1" -# custom modules -massa_pos_exports = { path = "../massa-pos-exports" } -massa_execution_exports = { path = "../massa-execution-exports" } -massa_hash = { path = "../massa-hash" } -massa_logging = { path = "../massa-logging" } -massa_models = { path = "../massa-models" } -massa_storage = { path = "../massa-storage" } -massa_signature = { path = "../massa-signature" } -massa_protocol_exports = { path = "../massa-protocol-exports" } -massa_serialization = { path = "../massa-serialization"} -massa_time = { path = "../massa-time" } - diff --git a/massa-graph/src/block_graph.rs b/massa-graph/src/block_graph.rs deleted file mode 100644 index 66549955784..00000000000 --- a/massa-graph/src/block_graph.rs +++ /dev/null @@ -1,2742 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! All information concerning blocks, the block graph and cliques is managed here. -use crate::{ - bootstrapable_graph::BootstrapableGraph, - error::{GraphError, GraphResult as Result}, - export_active_block::ExportActiveBlock, - settings::GraphConfig, -}; -use massa_hash::Hash; -use massa_logging::massa_trace; -use massa_models::prehash::{CapacityAllocator, PreHashMap, PreHashSet}; -use massa_models::{ - active_block::ActiveBlock, api::BlockGraphStatus, clique::Clique, wrapped::WrappedContent, -}; -use massa_models::{ - address::Address, - block::{ - Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock, - WrappedHeader, - }, - slot::Slot, -}; -use massa_pos_exports::SelectorController; -use massa_signature::PublicKey; -use massa_storage::Storage; -use serde::{Deserialize, Serialize}; -use std::collections::{hash_map, BTreeSet, HashMap, VecDeque}; -use std::mem; -use tracing::{debug, info}; - -#[derive(Debug, Clone)] -#[allow(clippy::large_enum_variant)] -enum HeaderOrBlock { - Header(WrappedHeader), - Block { - id: BlockId, - slot: Slot, - storage: Storage, - }, -} - -impl HeaderOrBlock { - /// Gets slot for that header or block - pub fn get_slot(&self) -> Slot { - match self { - HeaderOrBlock::Header(header) => header.content.slot, - HeaderOrBlock::Block { slot, .. 
} => *slot, - } - } -} - -/// Something can be discarded -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum DiscardReason { - /// Block is invalid, either structurally, or because of some incompatibility. The String contains the reason for info or debugging. - Invalid(String), - /// Block is incompatible with a final block. - Stale, - /// Block has enough fitness. - Final, -} - -/// Enum used in `BlockGraph`'s state machine -#[derive(Debug, Clone)] -enum BlockStatus { - /// The block/header has reached consensus but no consensus-level check has been performed. - /// It will be processed during the next iteration - Incoming(HeaderOrBlock), - /// The block's or header's slot is too much in the future. - /// It will be processed at the block/header slot - WaitingForSlot(HeaderOrBlock), - /// The block references an unknown Block id - WaitingForDependencies { - /// Given header/block - header_or_block: HeaderOrBlock, - /// includes self if it's only a header - unsatisfied_dependencies: PreHashSet<BlockId>, - /// Used to limit and sort the number of blocks/headers waiting for dependencies - sequence_number: u64, - }, - /// The block was checked and included in the blockgraph - Active { - a_block: Box<ActiveBlock>, - storage: Storage, - }, - /// The block was discarded and is kept to avoid reprocessing it - Discarded { - /// Just the slot of that block - slot: Slot, - /// Address of the creator of the block - creator: Address, - /// Ids of parents blocks - parents: Vec<BlockId>, - /// why it was discarded - reason: DiscardReason, - /// Used to limit and sort the number of blocks/headers waiting for dependencies - sequence_number: u64, - }, -} - -/// Block status in the graph that can be exported. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum ExportBlockStatus { - /// received but not yet graph processed - Incoming, - /// waiting for its slot - WaitingForSlot, - /// waiting for a missing dependency - WaitingForDependencies, - /// valid and not yet final - Active(Block), - /// immutable - Final(Block), - /// not part of the graph - Discarded(DiscardReason), -} - -/// The block version that can be exported. -/// Note that the detailed list of operations is not exported -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExportCompiledBlock { - /// Header of the corresponding block. - pub header: WrappedHeader, - /// For (i, set) in children, - /// set contains the headers' hashes - /// of blocks referencing exported block as a parent, - /// in thread i. - pub children: Vec<PreHashSet<BlockId>>, - /// Active or final - pub is_final: bool, -} - -/// Status -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub enum Status { - /// without enough fitness to be part of immutable history - Active, - /// with enough fitness to be part of immutable history - Final, -} - -impl<'a> BlockGraphExport { - /// Conversion from blockgraph. 
- pub fn extract_from( - block_graph: &'a BlockGraph, - slot_start: Option<Slot>, - slot_end: Option<Slot>, - ) -> Result<Self> { - let mut export = BlockGraphExport { - genesis_blocks: block_graph.genesis_hashes.clone(), - active_blocks: PreHashMap::with_capacity(block_graph.block_statuses.len()), - discarded_blocks: PreHashMap::with_capacity(block_graph.block_statuses.len()), - best_parents: block_graph.best_parents.clone(), - latest_final_blocks_periods: block_graph.latest_final_blocks_periods.clone(), - gi_head: block_graph.gi_head.clone(), - max_cliques: block_graph.max_cliques.clone(), - }; - - let filter = |&s| { - if let Some(s_start) = slot_start { - if s < s_start { - return false; - } - } - if let Some(s_end) = slot_end { - if s >= s_end { - return false; - } - } - true - }; - - for (hash, block) in block_graph.block_statuses.iter() { - match block { - BlockStatus::Discarded { - slot, - creator, - parents, - reason, - .. - } => { - if filter(slot) { - export - .discarded_blocks - .insert(*hash, (reason.clone(), (*slot, *creator, parents.clone()))); - } - } - BlockStatus::Active { a_block, storage } => { - if filter(&a_block.slot) { - let stored_block = - storage.read_blocks().get(hash).cloned().ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block in BlockGraphExport::extract_from: {}", - hash - )) - })?; - export.active_blocks.insert( - *hash, - ExportCompiledBlock { - header: stored_block.content.header, - children: a_block - .children - .iter() - .map(|thread| { - thread.keys().copied().collect::<PreHashSet<BlockId>>() - }) - .collect(), - is_final: a_block.is_final, - }, - ); - } - } - _ => continue, - } - } - - Ok(export) - } -} - -/// Bootstrap compatible version of the block graph -#[derive(Debug, Clone)] -#[allow(clippy::type_complexity)] -pub struct BlockGraphExport { - /// Genesis blocks. - pub genesis_blocks: Vec<BlockId>, - /// Map of active blocks, where blocks are in their exported version. - pub active_blocks: PreHashMap<BlockId, ExportCompiledBlock>, - /// Finite cache of discarded blocks, in exported version `(slot, creator_address, parents)`. - pub discarded_blocks: PreHashMap<BlockId, (DiscardReason, (Slot, Address, Vec<BlockId>))>, - /// Best parents hashes in each thread. - pub best_parents: Vec<(BlockId, u64)>, - /// Latest final period and block hash in each thread. - pub latest_final_blocks_periods: Vec<(BlockId, u64)>, - /// Head of the incompatibility graph. - pub gi_head: PreHashMap<BlockId, PreHashSet<BlockId>>, - /// List of maximal cliques of compatible blocks. 
- pub max_cliques: Vec<Clique>, -} - -/// Graph management -pub struct BlockGraph { - /// Consensus related configuration - cfg: GraphConfig, - /// Block ids of genesis blocks - genesis_hashes: Vec<BlockId>, - /// Used to limit the number of waiting and discarded blocks - sequence_counter: u64, - /// Every block we know about - block_statuses: PreHashMap<BlockId, BlockStatus>, - /// Ids of incoming blocks/headers - incoming_index: PreHashSet<BlockId>, - /// ids of waiting for slot blocks/headers - waiting_for_slot_index: PreHashSet<BlockId>, - /// ids of waiting for dependencies blocks/headers - waiting_for_dependencies_index: PreHashSet<BlockId>, - /// ids of active blocks - active_index: PreHashSet<BlockId>, - /// ids of discarded blocks - discarded_index: PreHashSet<BlockId>, - /// One (block id, period) per thread - latest_final_blocks_periods: Vec<(BlockId, u64)>, - /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` - best_parents: Vec<(BlockId, u64)>, - /// Incompatibility graph: maps a block id to the block ids it is incompatible with - /// One entry per Active Block - gi_head: PreHashMap<BlockId, PreHashSet<BlockId>>, - /// All the cliques - max_cliques: Vec<Clique>, - /// Blocks that need to be propagated - to_propagate: PreHashMap<BlockId, Storage>, - /// List of block ids we think are attack attempts - attack_attempts: Vec<BlockId>, - /// Newly final blocks - new_final_blocks: PreHashSet<BlockId>, - /// Newly stale block mapped to creator and slot - new_stale_blocks: PreHashMap<BlockId, (Address, Slot)>, - /// Shared storage, - storage: Storage, - /// Selector controller - selector_controller: Box<dyn SelectorController>, -} - -/// Possible output of a header check -#[derive(Debug)] -enum HeaderCheckOutcome { - /// it's ok and here are some useful values - Proceed { - /// one (parent block id, parent's period) per thread - parents_hash_period: Vec<(BlockId, u64)>, - /// blocks that header is incompatible with - incompatibilities: PreHashSet<BlockId>, - /// number of incompatibilities that are inherited from the parents - inherited_incompatibilities_count: usize, - /// fitness - fitness: u64, - }, - /// there is something wrong with that header - Discard(DiscardReason), - /// it must wait for its slot to be fully processed - WaitForSlot, - /// it must wait for these block ids to be fully processed - WaitForDependencies(PreHashSet<BlockId>), -} - -/// Possible outcomes of endorsements check -#[derive(Debug)] -enum EndorsementsCheckOutcome { - /// Everything is ok - Proceed, - /// There is something wrong with that endorsement - Discard(DiscardReason), - /// It must wait for its slot to be fully processed - WaitForSlot, -} - -/// Creates genesis block in given thread. -/// -/// # Arguments -/// * `cfg`: consensus configuration -/// * `thread_number`: thread in which we want a genesis block -pub fn create_genesis_block( - cfg: &GraphConfig, - thread_number: u8, -) -> Result<(BlockId, WrappedBlock)> { - let keypair = &cfg.genesis_key; - let header = BlockHeader::new_wrapped( - BlockHeader { - slot: Slot::new(0, thread_number), - parents: Vec::new(), - operation_merkle_root: Hash::compute_from(&Vec::new()), - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - keypair, - )?; - - Ok(( - header.id, - Block::new_wrapped( - Block { - header, - operations: Default::default(), - }, - BlockSerializer::new(), - keypair, - )?, - )) -} - -impl BlockGraph { - /// Creates a new `BlockGraph`. - /// - /// # Argument - /// * `cfg`: consensus configuration. - /// * `init`: A bootstrap graph to start the graph with - /// * `storage`: A shared storage that shares data across all modules. 
- /// * `selector_controller`: Access to the PoS selector to get draws - pub async fn new( - cfg: GraphConfig, - init: Option<BootstrapableGraph>, - storage: Storage, - selector_controller: Box<dyn SelectorController>, - ) -> Result<Self> { - // load genesis blocks - - let mut block_statuses = PreHashMap::default(); - let mut genesis_block_ids = Vec::with_capacity(cfg.thread_count as usize); - for thread in 0u8..cfg.thread_count { - let (block_id, block) = create_genesis_block(&cfg, thread).map_err(|err| { - GraphError::GenesisCreationError(format!("genesis error {}", err)) - })?; - let mut storage = storage.clone_without_refs(); - storage.store_block(block.clone()); - genesis_block_ids.push(block_id); - block_statuses.insert( - block_id, - BlockStatus::Active { - a_block: Box::new(ActiveBlock { - creator_address: block.creator_address, - parents: Vec::new(), - children: vec![PreHashMap::default(); cfg.thread_count as usize], - descendants: Default::default(), - is_final: true, - block_id, - slot: block.content.header.content.slot, - fitness: block.get_fitness(), - }), - storage, - }, - ); - } - - massa_trace!("consensus.block_graph.new", {}); - if let Some(BootstrapableGraph { final_blocks }) = init { - // load final blocks - let final_blocks: Vec<(ActiveBlock, Storage)> = final_blocks - .into_iter() - .map(|export_b| export_b.to_active_block(&storage, cfg.thread_count)) - .collect::<Result<_>>()?; - - // compute latest_final_blocks_periods - let mut latest_final_blocks_periods: Vec<(BlockId, u64)> = - genesis_block_ids.iter().map(|id| (*id, 0u64)).collect(); - for (b, _) in &final_blocks { - if let Some(v) = latest_final_blocks_periods.get_mut(b.slot.thread as usize) { - if b.slot.period > v.1 { - *v = (b.block_id, b.slot.period); - } - } - } - - // generate graph - let mut res_graph = BlockGraph { - cfg: cfg.clone(), - sequence_counter: 0, - genesis_hashes: genesis_block_ids, - active_index: final_blocks.iter().map(|(b, _)| b.block_id).collect(), - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), - discarded_index: Default::default(), - best_parents: latest_final_blocks_periods.clone(), - latest_final_blocks_periods, - gi_head: Default::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::<BlockId>::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - selector_controller, - block_statuses: final_blocks - .into_iter() - .map(|(b, s)| { - Ok(( - b.block_id, - BlockStatus::Active { - a_block: Box::new(b), - storage: s, - }, - )) - }) - .collect::<Result<_>>()?, - }; - - // claim parent refs - for (_b_id, block_status) in res_graph.block_statuses.iter_mut() { - if let BlockStatus::Active { - a_block, - storage: block_storage, - } = block_status - { - // claim parent refs - let n_claimed_parents = block_storage - .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) - .len(); - - if !a_block.is_final { - // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals - if n_claimed_parents != cfg.thread_count as usize { - return Err(GraphError::MissingBlock( - "block storage could not claim refs to all parent blocks".into(), - )); - } - } - } - } - - // list active block parents - let active_blocks_map: PreHashMap<BlockId, (Slot, Vec<BlockId>)> = res_graph - .block_statuses - .iter() - .filter_map(|(h, s)| { - if let BlockStatus::Active { a_block: a, .. 
} = s { - return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); - } - None - }) - .collect(); - // deduce children and descendants - for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { - // deduce children - for parent_id in &b_parents { - if let Some(BlockStatus::Active { - a_block: parent, .. - }) = res_graph.block_statuses.get_mut(parent_id) - { - parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); - } - } - - // deduce descendants - let mut ancestors: VecDeque<BlockId> = b_parents.into_iter().collect(); - let mut visited: PreHashSet<BlockId> = Default::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - res_graph.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(b_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } - } - Ok(res_graph) - } else { - Ok(BlockGraph { - cfg, - sequence_counter: 0, - block_statuses, - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), - active_index: genesis_block_ids.iter().copied().collect(), - discarded_index: Default::default(), - latest_final_blocks_periods: genesis_block_ids.iter().map(|h| (*h, 0)).collect(), - best_parents: genesis_block_ids.iter().map(|v| (*v, 0)).collect(), - genesis_hashes: genesis_block_ids, - gi_head: PreHashMap::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::<BlockId>::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - selector_controller, - }) - } - } - - /// export full graph in a bootstrap compatible version - pub fn export_bootstrap_graph(&self) -> Result<BootstrapableGraph> { - let mut required_final_blocks: PreHashSet<_> = self.list_required_active_blocks()?; - required_final_blocks.retain(|b_id| { - if let Some(BlockStatus::Active { a_block, .. }) = self.block_statuses.get(b_id) { - if a_block.is_final { - // filter only final actives - return true; - } - } - false - }); - let mut final_blocks: Vec<ExportActiveBlock> = - Vec::with_capacity(required_final_blocks.len()); - for b_id in &required_final_blocks { - if let Some(BlockStatus::Active { a_block, storage }) = self.block_statuses.get(b_id) { - final_blocks.push(ExportActiveBlock::from_active_block(a_block, storage)); - } else { - return Err(GraphError::ContainerInconsistency(format!( - "block {} was expected to be active but wasn't on bootstrap graph export", - b_id - ))); - } - } - - Ok(BootstrapableGraph { final_blocks }) - } - - /// Gets latest final blocks (hash, period) for each thread. - pub fn get_latest_final_blocks_periods(&self) -> &Vec<(BlockId, u64)> { - &self.latest_final_blocks_periods - } - - /// Gets best parents. - pub fn get_best_parents(&self) -> &Vec<(BlockId, u64)> { - &self.best_parents - } - - /// Gets the list of cliques. - pub fn get_cliques(&self) -> Vec<Clique> { - self.max_cliques.clone() - } - - /// Returns the list of block IDs created by a given address, and their finality statuses - pub fn get_block_ids_by_creator(&self, address: &Address) -> PreHashMap<BlockId, Status> { - // iterate on active (final and non-final) blocks - self.active_index - .iter() - .filter_map(|block_id| match self.block_statuses.get(block_id) { - Some(BlockStatus::Active { a_block, .. 
}) => { - if a_block.creator_address == *address { - Some(( - *block_id, - if a_block.is_final { - Status::Final - } else { - Status::Active - }, - )) - } else { - None - } - } - _ => None, - }) - .collect() - } - - /// Gets whole compiled block corresponding to given hash, if it is active. - /// - /// # Argument - /// * `block_id`: block ID - pub fn get_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { - BlockGraph::get_full_active_block(&self.block_statuses, *block_id) - } - - /// get block graph status - pub fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus { - match self.block_statuses.get(block_id) { - None => BlockGraphStatus::NotFound, - Some(BlockStatus::Active { a_block, .. }) => { - if a_block.is_final { - BlockGraphStatus::Final - } else if self - .max_cliques - .iter() - .find(|clique| clique.is_blockclique) - .expect("blockclique absent") - .block_ids - .contains(block_id) - { - BlockGraphStatus::ActiveInBlockclique - } else { - BlockGraphStatus::ActiveInAlternativeCliques - } - } - Some(BlockStatus::Discarded { .. }) => BlockGraphStatus::Discarded, - Some(BlockStatus::Incoming(_)) => BlockGraphStatus::Incoming, - Some(BlockStatus::WaitingForDependencies { .. }) => { - BlockGraphStatus::WaitingForDependencies - } - Some(BlockStatus::WaitingForSlot(_)) => BlockGraphStatus::WaitingForSlot, - } - } - - /// signal new slot - pub fn slot_tick(&mut self, current_slot: Option<Slot>) -> Result<()> { - // list all elements for which the time has come - let to_process: BTreeSet<(Slot, BlockId)> = self - .waiting_for_slot_index - .iter() - .filter_map(|b_id| match self.block_statuses.get(b_id) { - Some(BlockStatus::WaitingForSlot(header_or_block)) => { - let slot = header_or_block.get_slot(); - if Some(slot) <= current_slot { - Some((slot, *b_id)) - } else { - None - } - } - _ => None, - }) - .collect(); - - massa_trace!("consensus.block_graph.slot_tick", {}); - // process those elements - self.rec_process(to_process, current_slot)?; - - Ok(()) - } - - /// A new header has come ! - /// - /// Checks performed: - /// - Ignore genesis blocks. - /// - See `process`. - pub fn incoming_header( - &mut self, - block_id: BlockId, - header: WrappedHeader, - current_slot: Option<Slot>, - ) -> Result<()> { - // ignore genesis blocks - if self.genesis_hashes.contains(&block_id) { - return Ok(()); - } - - debug!( - "received header {} for slot {}", - block_id, header.content.slot - ); - massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); - let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); - match self.block_statuses.entry(block_id) { - // if absent => add as Incoming, call rec_ack on it - hash_map::Entry::Vacant(vac) => { - to_ack.insert((header.content.slot, block_id)); - vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); - self.incoming_index.insert(block_id); - } - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - BlockStatus::Discarded { - sequence_number, .. - } => { - // promote if discarded - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - BlockStatus::WaitingForDependencies { .. } => { - // promote in dependencies - self.promote_dep_tree(block_id)?; - } - _ => {} - }, - } - - // process - self.rec_process(to_ack, current_slot)?; - - Ok(()) - } - - /// A new block has come - /// - /// Checks performed: - /// - Ignore genesis blocks. - /// - See `process`. 
- pub fn incoming_block( - &mut self, - block_id: BlockId, - slot: Slot, - current_slot: Option<Slot>, - storage: Storage, - ) -> Result<()> { - // ignore genesis blocks - if self.genesis_hashes.contains(&block_id) { - return Ok(()); - } - - debug!("received block {} for slot {}", block_id, slot); - - let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); - match self.block_statuses.entry(block_id) { - // if absent => add as Incoming, call rec_ack on it - hash_map::Entry::Vacant(vac) => { - to_ack.insert((slot, block_id)); - vac.insert(BlockStatus::Incoming(HeaderOrBlock::Block { - id: block_id, - slot, - storage, - })); - self.incoming_index.insert(block_id); - } - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - BlockStatus::Discarded { - sequence_number, .. - } => { - // promote if discarded - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - BlockStatus::WaitingForSlot(header_or_block) => { - // promote to full block - *header_or_block = HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }; - } - BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - .. - } => { - // promote to full block and satisfy self-dependency - if unsatisfied_dependencies.remove(&block_id) { - // a dependency was satisfied: process - to_ack.insert((slot, block_id)); - } - *header_or_block = HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }; - // promote in dependencies - self.promote_dep_tree(block_id)?; - } - _ => return Ok(()), - }, - } - - // process - self.rec_process(to_ack, current_slot)?; - - Ok(()) - } - - fn new_sequence_number(sequence_counter: &mut u64) -> u64 { - let res = *sequence_counter; - *sequence_counter += 1; - res - } - - /// acknowledge a set of items recursively - fn rec_process( - &mut self, - mut to_ack: BTreeSet<(Slot, BlockId)>, - current_slot: Option<Slot>, - ) -> Result<()> { - // order processing by (slot, hash) - while let Some((_slot, hash)) = to_ack.pop_first() { - to_ack.extend(self.process(hash, current_slot)?) - } - Ok(()) - } - - /// Acknowledge a single item, return a set of items to re-ack - fn process( - &mut self, - block_id: BlockId, - current_slot: Option<Slot>, - ) -> Result<BTreeSet<(Slot, BlockId)>> { - // list items to reprocess - let mut reprocess = BTreeSet::new(); - - massa_trace!("consensus.block_graph.process", { "block_id": block_id }); - // control all the waiting states and try to get a valid block - let ( - valid_block_creator, - valid_block_slot, - valid_block_parents_hash_period, - valid_block_incomp, - valid_block_inherited_incomp_count, - valid_block_storage, - valid_block_fitness, - ) = match self.block_statuses.get(&block_id) { - None => return Ok(BTreeSet::new()), // disappeared before being processed: do nothing - - // discarded: do nothing - Some(BlockStatus::Discarded { .. }) => { - massa_trace!("consensus.block_graph.process.discarded", { - "block_id": block_id - }); - return Ok(BTreeSet::new()); - } - - // already active: do nothing - Some(BlockStatus::Active { .. 
}) => { - massa_trace!("consensus.block_graph.process.active", { - "block_id": block_id - }); - return Ok(BTreeSet::new()); - } - - // incoming header - Some(BlockStatus::Incoming(HeaderOrBlock::Header(_))) => { - massa_trace!("consensus.block_graph.process.incoming_header", { - "block_id": block_id - }); - // remove header - let header = if let Some(BlockStatus::Incoming(HeaderOrBlock::Header(header))) = - self.block_statuses.remove(&block_id) - { - self.incoming_index.remove(&block_id); - header - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses removing incoming header {}", - block_id - ))); - }; - match self.check_header(&block_id, &header, current_slot)? { - HeaderCheckOutcome::Proceed { .. } => { - // set as waiting dependencies - let mut dependencies = PreHashSet::<BlockId>::default(); - dependencies.insert(block_id); // add self as unsatisfied - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - - massa_trace!( - "consensus.block_graph.process.incoming_header.waiting_for_self", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForDependencies(mut dependencies) => { - // set as waiting dependencies - dependencies.insert(block_id); // add self as unsatisfied - massa_trace!("consensus.block_graph.process.incoming_header.waiting_for_dependencies", {"block_id": block_id, "dependencies": dependencies}); - - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForSlot => { - // make it wait for slot - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForSlot(HeaderOrBlock::Header(header)), - ); - self.waiting_for_slot_index.insert(block_id); - - massa_trace!( - "consensus.block_graph.process.incoming_header.waiting_for_slot", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::Discard(reason) => { - self.maybe_note_attack_attempt(&reason, &block_id); - massa_trace!("consensus.block_graph.process.incoming_header.discarded", {"block_id": block_id, "reason": reason}); - // count stales - if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); - } - // discard - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents, - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - - return Ok(BTreeSet::new()); - } - } - } - - // incoming block - Some(BlockStatus::Incoming(HeaderOrBlock::Block { id: block_id, .. 
})) => { - let block_id = *block_id; - massa_trace!("consensus.block_graph.process.incoming_block", { - "block_id": block_id - }); - let (slot, storage) = - if let Some(BlockStatus::Incoming(HeaderOrBlock::Block { - slot, storage, .. - })) = self.block_statuses.remove(&block_id) - { - self.incoming_index.remove(&block_id); - (slot, storage) - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses removing incoming block {}", - block_id - ))); - }; - let stored_block = storage - .read_blocks() - .get(&block_id) - .cloned() - .expect("incoming block not found in storage"); - - match self.check_header(&block_id, &stored_block.content.header, current_slot)? { - HeaderCheckOutcome::Proceed { - parents_hash_period, - incompatibilities, - inherited_incompatibilities_count, - fitness, - } => { - // block is valid: remove it from Incoming and return it - massa_trace!("consensus.block_graph.process.incoming_block.valid", { - "block_id": block_id - }); - ( - stored_block.content.header.creator_public_key, - slot, - parents_hash_period, - incompatibilities, - inherited_incompatibilities_count, - storage, - fitness, - ) - } - HeaderCheckOutcome::WaitForDependencies(dependencies) => { - // set as waiting dependencies - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }, - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - massa_trace!( - "consensus.block_graph.process.incoming_block.waiting_for_dependencies", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForSlot => { - // set as waiting for slot - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForSlot(HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }), - ); - self.waiting_for_slot_index.insert(block_id); - - massa_trace!( - "consensus.block_graph.process.incoming_block.waiting_for_slot", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::Discard(reason) => { - self.maybe_note_attack_attempt(&reason, &block_id); - massa_trace!("consensus.block_graph.process.incoming_block.discarded", {"block_id": block_id, "reason": reason}); - // count stales - if reason == DiscardReason::Stale { - self.new_stale_blocks.insert( - block_id, - ( - stored_block.content.header.creator_address, - stored_block.content.header.content.slot, - ), - ); - } - // add to discard - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: stored_block.content.header.content.slot, - creator: stored_block.creator_address, - parents: stored_block.content.header.content.parents.clone(), - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - - return Ok(BTreeSet::new()); - } - } - } - - Some(BlockStatus::WaitingForSlot(header_or_block)) => { - massa_trace!("consensus.block_graph.process.waiting_for_slot", { - "block_id": block_id - }); - let slot = header_or_block.get_slot(); - if Some(slot) > current_slot { - massa_trace!( - "consensus.block_graph.process.waiting_for_slot.in_the_future", - { "block_id": block_id } - ); - // in the future: ignore - return Ok(BTreeSet::new()); - } - // send back as incoming and ask for reprocess - if let 
Some(BlockStatus::WaitingForSlot(header_or_block)) = - self.block_statuses.remove(&block_id) - { - self.waiting_for_slot_index.remove(&block_id); - self.block_statuses - .insert(block_id, BlockStatus::Incoming(header_or_block)); - self.incoming_index.insert(block_id); - reprocess.insert((slot, block_id)); - massa_trace!( - "consensus.block_graph.process.waiting_for_slot.reprocess", - { "block_id": block_id } - ); - return Ok(reprocess); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); - }; - } - - Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) => { - massa_trace!("consensus.block_graph.process.waiting_for_dependencies", { - "block_id": block_id - }); - if !unsatisfied_dependencies.is_empty() { - // still has unsatisfied dependencies: ignore - return Ok(BTreeSet::new()); - } - // send back as incoming and ask for reprocess - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, .. - }) = self.block_statuses.remove(&block_id) - { - self.waiting_for_dependencies_index.remove(&block_id); - reprocess.insert((header_or_block.get_slot(), block_id)); - self.block_statuses - .insert(block_id, BlockStatus::Incoming(header_or_block)); - self.incoming_index.insert(block_id); - massa_trace!( - "consensus.block_graph.process.waiting_for_dependencies.reprocess", - { "block_id": block_id } - ); - return Ok(reprocess); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot header or block {}", block_id))); - } - } - }; - - // add block to graph - self.add_block_to_graph( - block_id, - valid_block_parents_hash_period, - valid_block_creator, - valid_block_slot, - valid_block_incomp, - valid_block_inherited_incomp_count, - valid_block_fitness, - valid_block_storage, - )?; - - // if the block was added, update linked dependencies and mark satisfied ones for recheck - if let Some(BlockStatus::Active { storage, .. }) = self.block_statuses.get(&block_id) { - massa_trace!("consensus.block_graph.process.is_active", { - "block_id": block_id - }); - self.to_propagate.insert(block_id, storage.clone()); - for itm_block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - .. - }) = self.block_statuses.get_mut(itm_block_id) - { - if unsatisfied_dependencies.remove(&block_id) { - // a dependency was satisfied: retry - reprocess.insert((header_or_block.get_slot(), *itm_block_id)); - } - } - } - } - - Ok(reprocess) - } - - /// Mark a block as invalid - pub fn invalid_block( - &mut self, - block_id: &BlockId, - header: WrappedHeader, - ) -> Result<(), GraphError> { - let reason = DiscardReason::Invalid("invalid".to_string()); - self.maybe_note_attack_attempt(&reason, block_id); - massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); - - // add to discard - self.block_statuses.insert( - *block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents, - reason, - sequence_number: BlockGraph::new_sequence_number(&mut self.sequence_counter), - }, - ); - self.discarded_index.insert(*block_id); - - Ok(()) - } - - /// Note an attack attempt if the discard reason indicates one. 
- fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { - massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason}); - // If invalid, note the attack attempt. - if let DiscardReason::Invalid(reason) = reason { - info!( - "consensus.block_graph.maybe_note_attack_attempt DiscardReason::Invalid:{}", - reason - ); - self.attack_attempts.push(*hash); - } - } - - /// Gets whole `ActiveBlock` corresponding to given `block_id` - /// - /// # Argument - /// * `block_id`: block ID - fn get_full_active_block( - block_statuses: &PreHashMap<BlockId, BlockStatus>, - block_id: BlockId, - ) -> Option<(&ActiveBlock, &Storage)> { - match block_statuses.get(&block_id) { - Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), - _ => None, - } - } - - /// Gets a block and all its descendants - /// - /// # Argument - /// * hash : hash of the given block - fn get_active_block_and_descendants(&self, block_id: &BlockId) -> Result<PreHashSet<BlockId>> { - let mut to_visit = vec![*block_id]; - let mut result = PreHashSet::<BlockId>::default(); - while let Some(visit_h) = to_visit.pop() { - if !result.insert(visit_h) { - continue; // already visited - } - BlockGraph::get_full_active_block(&self.block_statuses, visit_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h)))? - .0 - .children - .iter() - .for_each(|thread_children| to_visit.extend(thread_children.keys())); - } - Ok(result) - } - - /// Process an incoming header. - /// - /// Checks performed: - /// - Number of parents matches thread count. - /// - Slot above 0. - /// - Valid thread. - /// - Check that the block is older than the latest final one in thread. - /// - Check that the block slot is not too much into the future, - /// as determined by the configuration `future_block_processing_max_periods`. - /// - Check if it was the creator's turn to create this block. - /// - TODO: check for double staking. - /// - Check parents are present. - /// - Check the topological consistency of the parents. - /// - Check endorsements. - /// - Check thread incompatibility test. - /// - Check grandpa incompatibility test. - /// - Check if the block is incompatible with a parent. - /// - Check if the block is incompatible with a final block. 
- fn check_header( - &self, - block_id: &BlockId, - header: &WrappedHeader, - current_slot: Option<Slot>, - ) -> Result<HeaderCheckOutcome> { - massa_trace!("consensus.block_graph.check_header", { - "block_id": block_id - }); - let mut parents: Vec<(BlockId, u64)> = Vec::with_capacity(self.cfg.thread_count as usize); - let mut incomp = PreHashSet::<BlockId>::default(); - let mut missing_deps = PreHashSet::<BlockId>::default(); - let creator_addr = header.creator_address; - - // check that it is older than the latest final block in that thread - // Note: this excludes genesis blocks - if header.content.slot.period - <= self.latest_final_blocks_periods[header.content.slot.thread as usize].1 - { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - - // check if block slot is too much in the future - if let Some(cur_slot) = current_slot { - if header.content.slot.period - > cur_slot - .period - .saturating_add(self.cfg.future_block_processing_max_periods) - { - return Ok(HeaderCheckOutcome::WaitForSlot); - } - } - - // check if it was the creator's turn to create this block - let slot_draw_address = match self.selector_controller.get_producer(header.content.slot) { - Ok(draw) => draw, - Err(_) => return Ok(HeaderCheckOutcome::WaitForSlot), // TODO properly handle PoS errors - }; - if creator_addr != slot_draw_address { - // it was not the creator's turn to create a block for this slot - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - format!("Bad creator turn for the slot:{}", header.content.slot), - ))); - } - - // check if block is in the future: queue it - // note: do it after testing signature + draw to prevent queue flooding/DoS - // note: Some(x) > None - if Some(header.content.slot) > current_slot { - return Ok(HeaderCheckOutcome::WaitForSlot); - } - - // Note: here we will check if we already have a block for that slot - // and if someone double staked, they will be denounced - - // list parents and ensure they are present - let parent_set: PreHashSet<BlockId> = header.content.parents.iter().copied().collect(); - for parent_thread in 0u8..self.cfg.thread_count { - let parent_hash = header.content.parents[parent_thread as usize]; - match self.block_statuses.get(&parent_hash) { - Some(BlockStatus::Discarded { reason, .. }) => { - // parent is discarded - return Ok(HeaderCheckOutcome::Discard(match reason { - DiscardReason::Invalid(invalid_reason) => DiscardReason::Invalid(format!( - "discarded because a parent was discarded for the following reason: {}", - invalid_reason - )), - r => r.clone(), - })); - } - Some(BlockStatus::Active { - a_block: parent, .. 
- }) => { - // parent is active - - // check that the parent is from an earlier slot in the right thread - if parent.slot.thread != parent_thread || parent.slot >= header.content.slot { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - format!( - "Bad parent {} in thread:{} or slot:{} for {}.", - parent_hash, parent_thread, parent.slot, header.content.slot - ), - ))); - } - - // inherit parent incompatibilities - // and ensure parents are mutually compatible - if let Some(p_incomp) = self.gi_head.get(&parent_hash) { - if !p_incomp.is_disjoint(&parent_set) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "Parent not mutually compatible".to_string(), - ))); - } - incomp.extend(p_incomp); - } - - parents.push((parent_hash, parent.slot.period)); - } - _ => { - // parent is missing or queued - if self.genesis_hashes.contains(&parent_hash) { - // forbid depending on discarded genesis block - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - missing_deps.insert(parent_hash); - } - } - } - if !missing_deps.is_empty() { - return Ok(HeaderCheckOutcome::WaitForDependencies(missing_deps)); - } - let inherited_incomp_count = incomp.len(); - - // check the topological consistency of the parents - { - let mut gp_max_slots = vec![0u64; self.cfg.thread_count as usize]; - for parent_i in 0..self.cfg.thread_count { - let (parent_h, parent_period) = parents[parent_i as usize]; - let parent = self - .get_active_block(&parent_h) - .ok_or_else(|| { - GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses searching parent {} of block {}", - parent_h, block_id - )) - })? - .0; - if parent_period < gp_max_slots[parent_i as usize] { - // a parent is earlier than a block known by another parent in that thread - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "a parent is earlier than a block known by another parent in that thread" - .to_string(), - ))); - } - gp_max_slots[parent_i as usize] = parent_period; - if parent_period == 0 { - // genesis - continue; - } - for gp_i in 0..self.cfg.thread_count { - if gp_i == parent_i { - continue; - } - let gp_h = parent.parents[gp_i as usize].0; - match self.block_statuses.get(&gp_h) { - // this grandpa is discarded - Some(BlockStatus::Discarded { reason, .. }) => { - return Ok(HeaderCheckOutcome::Discard(reason.clone())); - } - // this grandpa is active - Some(BlockStatus::Active { a_block: gp, .. }) => { - if gp.slot.period > gp_max_slots[gp_i as usize] { - if gp_i < parent_i { - return Ok(HeaderCheckOutcome::Discard( - DiscardReason::Invalid( - "grandpa error: gp_i < parent_i".to_string(), - ), - )); - } - gp_max_slots[gp_i as usize] = gp.slot.period; - } - } - // this grandpa is missing, assume stale - _ => return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)), - } - } - } - } - - // get parent in own thread - let parent_in_own_thread = BlockGraph::get_full_active_block( - &self.block_statuses, - parents[header.content.slot.thread as usize].0, - ) - .ok_or_else(|| { - GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses searching parent {} in own thread of block {}", - parents[header.content.slot.thread as usize].0, block_id - )) - })? - .0; - - // check endorsements - match self.check_endorsements(header)? 
{ - EndorsementsCheckOutcome::Proceed => {} - EndorsementsCheckOutcome::Discard(reason) => { - return Ok(HeaderCheckOutcome::Discard(reason)) - } - EndorsementsCheckOutcome::WaitForSlot => return Ok(HeaderCheckOutcome::WaitForSlot), - } - - // thread incompatibility test - parent_in_own_thread.children[header.content.slot.thread as usize] - .keys() - .filter(|&sibling_h| sibling_h != block_id) - .try_for_each(|&sibling_h| { - incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); - Result::<()>::Ok(()) - })?; - - // grandpa incompatibility test - for tau in (0u8..self.cfg.thread_count).filter(|&t| t != header.content.slot.thread) { - // for each parent in a different thread tau - // traverse parent's descendants in tau - let mut to_explore = vec![(0usize, header.content.parents[tau as usize])]; - while let Some((cur_gen, cur_h)) = to_explore.pop() { - let cur_b = BlockGraph::get_full_active_block(&self.block_statuses, cur_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?.0; - - // traverse but do not check up to generation 1 - if cur_gen <= 1 { - to_explore.extend( - cur_b.children[tau as usize] - .keys() - .map(|&c_h| (cur_gen + 1, c_h)), - ); - continue; - } - - let parent_id = { - self.storage - .read_blocks() - .get(&cur_b.block_id) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block in grandpa incomp test: {}", - cur_b.block_id - )) - })? - .content - .header - .content - .parents[header.content.slot.thread as usize] - }; - - // check if the parent in tauB has a strictly lower period number than B's parent in tauB - // note: cur_b cannot be genesis at gen > 1 - if BlockGraph::get_full_active_block( - &self.block_statuses, - parent_id, - ) - .ok_or_else(|| - GraphError::ContainerInconsistency( - format!("inconsistency inside block statuses searching {} check if the parent in tauB has a strictly lower period number than B's parent in tauB while checking grandpa incompatibility of block {}", - parent_id, - block_id) - ))? - .0 - .slot - .period - < parent_in_own_thread.slot.period - { - // GPI detected - incomp.extend(self.get_active_block_and_descendants(&cur_h)?); - } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse - } - } - - // check if the block is incompatible with a parent - if !incomp.is_disjoint(&parents.iter().map(|(h, _p)| *h).collect()) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "Block incompatible with a parent".to_string(), - ))); - } - - // check if the block is incompatible with a final block - if !incomp.is_disjoint( - &self - .active_index - .iter() - .filter_map(|h| { - if let Some(BlockStatus::Active { a_block: a, .. 
}) = self.block_statuses.get(h) - { - if a.is_final { - return Some(*h); - } - } - None - }) - .collect(), - ) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - massa_trace!("consensus.block_graph.check_header.ok", { - "block_id": block_id - }); - - Ok(HeaderCheckOutcome::Proceed { - parents_hash_period: parents, - incompatibilities: incomp, - inherited_incompatibilities_count: inherited_incomp_count, - fitness: header.get_fitness(), - }) - } - - /// check endorsements: - /// * endorser was selected for that (slot, index) - /// * endorsed slot is `parent_in_own_thread` slot - fn check_endorsements(&self, header: &WrappedHeader) -> Result<EndorsementsCheckOutcome> { - // check endorsements - let endorsement_draws = match self.selector_controller.get_selection(header.content.slot) { - Ok(sel) => sel.endorsements, - Err(_) => return Ok(EndorsementsCheckOutcome::WaitForSlot), - }; - for endorsement in header.content.endorsements.iter() { - // check that the draw is correct - if endorsement.creator_address != endorsement_draws[endorsement.content.index as usize] - { - return Ok(EndorsementsCheckOutcome::Discard(DiscardReason::Invalid( - format!( - "endorser draw mismatch for header in slot: {}", - header.content.slot - ), - ))); - } - - // note that the following aspects are checked in protocol - // * signature - // * index reuse - // * slot matching the block's - // * the endorsed block is the containing block's parent - } - - Ok(EndorsementsCheckOutcome::Proceed) - } - - /// get genesis block ids - pub fn get_genesis_block_ids(&self) -> &Vec<BlockId> { - &self.genesis_hashes - } - - /// Computes max cliques of compatible blocks - pub fn compute_max_cliques(&self) -> Vec<PreHashSet<BlockId>> { - let mut max_cliques: Vec<PreHashSet<BlockId>> = Vec::new(); - - // algorithm adapted from IK_GPX as summarized in: - // Cazals et al., "A note on the problem of reporting maximal cliques" - // Theoretical Computer Science, 2008 - // https://doi.org/10.1016/j.tcs.2008.05.010 - - // stack: r, p, x - let mut stack: Vec<( - PreHashSet<BlockId>, - PreHashSet<BlockId>, - PreHashSet<BlockId>, - )> = vec![( - PreHashSet::<BlockId>::default(), - self.gi_head.keys().cloned().collect(), - PreHashSet::<BlockId>::default(), - )]; - while let Some((r, mut p, mut x)) = stack.pop() { - if p.is_empty() && x.is_empty() { - max_cliques.push(r); - continue; - } - // choose the pivot vertex following the GPX scheme: - // u_p = node from (p \/ x) that maximizes the cardinality of (P \ (Neighbors(u_p, GI) \/ {u_p})) - let &u_p = p - .union(&x) - .max_by_key(|&u| { - p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) - .count() - }) - .unwrap(); // p was checked to be non-empty before - - // iterate over u_set = (p /\ (Neighbors(u_p, GI) \/ {u_p})) - let u_set: PreHashSet<BlockId> = - &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); - for u_i in u_set.into_iter() { - p.remove(&u_i); - let u_i_set: PreHashSet<BlockId> = vec![u_i].into_iter().collect(); - let comp_n_u_i: PreHashSet<BlockId> = &self.gi_head[&u_i] | &u_i_set; - stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); - x.insert(u_i); - } - } - if max_cliques.is_empty() { - // make sure at least one clique remains - max_cliques = vec![PreHashSet::<BlockId>::default()]; - } - max_cliques - } - - #[allow(clippy::too_many_arguments)] - fn add_block_to_graph( - &mut self, - add_block_id: BlockId, - parents_hash_period: Vec<(BlockId, u64)>, - add_block_creator: PublicKey, - add_block_slot: Slot, - incomp: PreHashSet<BlockId>, - inherited_incomp_count: usize, - fitness: u64, - mut storage: Storage, - ) -> Result<()> { - massa_trace!("consensus.block_graph.add_block_to_graph", { - "block_id":
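`compute_max_cliques` above is an iterative Bron-Kerbosch with GPX pivoting, run on the complement of the incompatibility graph `gi_head`, so the reported cliques are maximal sets of mutually compatible blocks. A self-contained sketch of the same recursion shape, assuming plain `u32` ids and std `HashSet` in place of `BlockId` and `PreHashSet`:

    use std::collections::{HashMap, HashSet};

    // Maximal sets of mutually compatible nodes, where `gi` maps each node
    // to the nodes it is incompatible with (same convention as `gi_head`).
    fn max_compatible_cliques(gi: &HashMap<u32, HashSet<u32>>) -> Vec<HashSet<u32>> {
        // N_GI(u) plus u itself: everything a clique containing u must exclude
        let closed = |u: u32| -> HashSet<u32> {
            let mut n = gi[&u].clone();
            n.insert(u);
            n
        };
        let mut cliques = Vec::new();
        // stack of (r, p, x): current clique, candidates, already-explored
        let mut stack = vec![(
            HashSet::new(),
            gi.keys().copied().collect::<HashSet<u32>>(),
            HashSet::new(),
        )];
        while let Some((r, mut p, mut x)) = stack.pop() {
            if p.is_empty() && x.is_empty() {
                cliques.push(r);
                continue;
            }
            // GPX pivot: maximize the number of candidates compatible with u_p
            let u_p = *p
                .union(&x)
                .max_by_key(|u| p.difference(&closed(**u)).count())
                .unwrap();
            // only branch on candidates that are *incompatible* with the pivot
            for u_i in p.intersection(&closed(u_p)).copied().collect::<Vec<_>>() {
                p.remove(&u_i);
                let excl = closed(u_i);
                let mut r_next = r.clone();
                r_next.insert(u_i);
                stack.push((
                    r_next,
                    p.difference(&excl).copied().collect(),
                    x.difference(&excl).copied().collect(),
                ));
                x.insert(u_i);
            }
        }
        if cliques.is_empty() {
            cliques.push(HashSet::new()); // keep at least one (empty) clique
        }
        cliques
    }

    fn main() {
        // 1 and 2 conflict; expected maximal cliques: {0, 1} and {0, 2}
        let gi = HashMap::from([
            (0, HashSet::new()),
            (1, HashSet::from([2])),
            (2, HashSet::from([1])),
        ]);
        println!("{:?}", max_compatible_cliques(&gi));
    }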
add_block_id - }); - - // Ensure block parents are claimed by the block's storage. - // Note that operations and endorsements should already be there (claimed in Protocol). - storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect()); - - // add block to status structure - self.block_statuses.insert( - add_block_id, - BlockStatus::Active { - a_block: Box::new(ActiveBlock { - creator_address: Address::from_public_key(&add_block_creator), - parents: parents_hash_period.clone(), - descendants: PreHashSet::<BlockId>::default(), - block_id: add_block_id, - children: vec![Default::default(); self.cfg.thread_count as usize], - is_final: false, - slot: add_block_slot, - fitness, - }), - storage, - }, - ); - self.active_index.insert(add_block_id); - - // add as child to parents - for (parent_h, _parent_period) in parents_hash_period.iter() { - if let Some(BlockStatus::Active { - a_block: a_parent, .. - }) = self.block_statuses.get_mut(parent_h) - { - a_parent.children[add_block_slot.thread as usize] - .insert(add_block_id, add_block_slot.period); - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses adding child {} of block {}", - add_block_id, parent_h - ))); - } - } - - // add as descendant to ancestors. Note: descendants are never removed. - { - let mut ancestors: VecDeque<BlockId> = - parents_hash_period.iter().map(|(h, _)| *h).collect(); - let mut visited = PreHashSet::<BlockId>::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - self.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(add_block_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } - } - - // add incompatibilities to gi_head - massa_trace!( - "consensus.block_graph.add_block_to_graph.add_incompatibilities", - {} - ); - for incomp_h in incomp.iter() { - self.gi_head - .get_mut(incomp_h) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when adding incomp to gi_head: {}", - incomp_h - )) - })?
- .insert(add_block_id); - } - self.gi_head.insert(add_block_id, incomp.clone()); - - // max cliques update - massa_trace!( - "consensus.block_graph.add_block_to_graph.max_cliques_update", - {} - ); - if incomp.len() == inherited_incomp_count { - // clique optimization routine: - // the block only has incompatibilities inherited from its parents - // therefore it is not forking and can simply be added to the cliques it is compatible with - self.max_cliques - .iter_mut() - .filter(|c| incomp.is_disjoint(&c.block_ids)) - .for_each(|c| { - c.block_ids.insert(add_block_id); - }); - } else { - // fully recompute max cliques - massa_trace!( - "consensus.block_graph.add_block_to_graph.clique_full_computing", - { "hash": add_block_id } - ); - let before = self.max_cliques.len(); - self.max_cliques = self - .compute_max_cliques() - .into_iter() - .map(|c| Clique { - block_ids: c, - fitness: 0, - is_blockclique: false, - }) - .collect(); - let after = self.max_cliques.len(); - if before != after { - massa_trace!( - "consensus.block_graph.add_block_to_graph.clique_full_computing more than one clique", - { "cliques": self.max_cliques, "gi_head": self.gi_head } - ); - // gi_head - debug!( - "clique number went from {} to {} after adding {}", - before, after, add_block_id - ); - } - } - - // compute clique fitnesses and find blockclique - massa_trace!("consensus.block_graph.add_block_to_graph.compute_clique_fitnesses_and_find_blockclique", {}); - // note: clique_fitnesses is pair (fitness, -hash_sum) where the second parameter is negative for sorting - { - let mut blockclique_i = 0usize; - let mut max_clique_fitness = (0u64, num::BigInt::default()); - for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { - clique.fitness = 0; - clique.is_blockclique = false; - let mut sum_hash = num::BigInt::default(); - for block_h in clique.block_ids.iter() { - clique.fitness = clique.fitness - .checked_add( - BlockGraph::get_full_active_block(&self.block_statuses, *block_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h)))? - .0.fitness, - ) - .ok_or(GraphError::FitnessOverflow)?; - sum_hash -= - num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes()); - } - let cur_fit = (clique.fitness, sum_hash); - if cur_fit > max_clique_fitness { - blockclique_i = clique_i; - max_clique_fitness = cur_fit; - } - } - self.max_cliques[blockclique_i].is_blockclique = true; - } - - // update best parents - massa_trace!( - "consensus.block_graph.add_block_to_graph.update_best_parents", - {} - ); - { - // find blockclique - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let blockclique = &self.max_cliques[blockclique_i]; - - // init best parents as latest_final_blocks_periods - self.best_parents = self.latest_final_blocks_periods.clone(); - // for each blockclique block, set it as best_parent in its own thread - // if its period is higher than the current best_parent in that thread - for block_h in blockclique.block_ids.iter() { - let b_slot = BlockGraph::get_full_active_block(&self.block_statuses, *block_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h)))? 
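The blockclique choice above reduces to a lexicographic max over `(fitness, -hash_sum)` pairs: highest total fitness wins, and ties go to the clique with the smaller id sum. A compact sketch of building that key, assuming the `num` crate (the code above already uses `num::BigInt`) and hypothetical 32-byte ids:

    use num::bigint::Sign;
    use num::BigInt;

    // Key used to pick the blockclique: compare (fitness, -sum of ids)
    // lexicographically, so ties on fitness go to the smaller id sum.
    fn clique_key(fitness: u64, member_ids: &[[u8; 32]]) -> (u64, BigInt) {
        let mut neg_sum = BigInt::default();
        for id in member_ids {
            neg_sum -= BigInt::from_bytes_be(Sign::Plus, id);
        }
        (fitness, neg_sum)
    }

    fn main() {
        let a = clique_key(10, &[[1u8; 32]]);
        let b = clique_key(10, &[[2u8; 32]]);
        assert!(a > b); // same fitness: the smaller hash sum wins
    }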
- .0.slot; - if b_slot.period > self.best_parents[b_slot.thread as usize].1 { - self.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); - } - } - } - - // list stale blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_stale_blocks", - {} - ); - let stale_blocks = { - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let fitness_threshold = self.max_cliques[blockclique_i] - .fitness - .saturating_sub(self.cfg.delta_f0); - // iterate from largest to smallest to minimize reallocations - let mut indices: Vec<usize> = (0..self.max_cliques.len()).collect(); - indices - .sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); - let mut high_set = PreHashSet::<BlockId>::default(); - let mut low_set = PreHashSet::<BlockId>::default(); - for clique_i in indices.into_iter() { - if self.max_cliques[clique_i].fitness >= fitness_threshold { - high_set.extend(&self.max_cliques[clique_i].block_ids); - } else { - low_set.extend(&self.max_cliques[clique_i].block_ids); - } - } - self.max_cliques.retain(|c| c.fitness >= fitness_threshold); - &low_set - &high_set - }; - // mark stale blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.mark_stale_blocks", - {} - ); - for stale_block_hash in stale_blocks.into_iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - storage: _storage, - }) = self.block_statuses.remove(&stale_block_hash) - { - self.active_index.remove(&stale_block_hash); - if active_block.is_final { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, stale_block_hash))); - } - - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&stale_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&stale_block_hash); - } - } - } - - // remove from cliques - let stale_block_fitness = active_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&stale_block_hash) { - c.fitness -= stale_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::<BlockId>::default(), - fitness: 0, - is_blockclique: true, - }]; - } - - // remove from parent's children - for (parent_h, _parent_period) in active_block.parents.iter() { - if let Some(BlockStatus::Active { - a_block: parent_active_block, - ..
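Stale marking above is a pure set partition: cliques at or above `blockclique_fitness - delta_f0` survive, and a block becomes stale only if it appears in no surviving clique. The rule in isolation, with illustrative integer ids:

    use std::collections::HashSet;

    // ids that appear only in cliques whose fitness is below the threshold
    fn stale_ids(
        cliques: &[(u64, HashSet<u32>)],
        blockclique_fitness: u64,
        delta_f0: u64,
    ) -> HashSet<u32> {
        let threshold = blockclique_fitness.saturating_sub(delta_f0);
        let (mut high, mut low) = (HashSet::new(), HashSet::new());
        for (fitness, ids) in cliques {
            if *fitness >= threshold {
                high.extend(ids.iter().copied());
            } else {
                low.extend(ids.iter().copied());
            }
        }
        &low - &high
    }

    fn main() {
        let cliques = vec![
            (100, HashSet::from([1u32, 2])), // blockclique
            (10, HashSet::from([1, 3])),     // low-fitness fork
        ];
        // with delta_f0 = 50, block 3 only lives in the weak clique: stale
        assert_eq!(stale_ids(&cliques, 100, 50), HashSet::from([3]));
    }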
- }) = self.block_statuses.get_mut(parent_h) - { - parent_active_block.children[active_block.slot.thread as usize] - .remove(&stale_block_hash); - } - } - - massa_trace!("consensus.block_graph.add_block_to_graph.stale", { - "hash": stale_block_hash - }); - - // mark as stale - self.new_stale_blocks.insert( - stale_block_hash, - (active_block.creator_address, active_block.slot), - ); - self.block_statuses.insert( - stale_block_hash, - BlockStatus::Discarded { - slot: active_block.slot, - creator: active_block.creator_address, - parents: active_block.parents.iter().map(|(h, _)| *h).collect(), - reason: DiscardReason::Stale, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(stale_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, stale_block_hash))); - } - } - - // list final blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks", - {} - ); - let final_blocks = { - // short-circuiting intersection of cliques from smallest to largest - let mut indices: Vec<usize> = (0..self.max_cliques.len()).collect(); - indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); - let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); - for i in 1..indices.len() { - final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); - if final_candidates.is_empty() { - break; - } - } - - // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest) - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", - {} - ); - indices.retain(|&i| self.max_cliques[i].fitness > self.cfg.delta_f0); - indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); - - let mut final_blocks = PreHashSet::<BlockId>::default(); - for clique_i in indices.into_iter() { - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks.loop", - { "clique_i": clique_i } - ); - // check in cliques from highest to lowest fitness - if final_candidates.is_empty() { - // no more final candidates - break; - } - let clique = &self.max_cliques[clique_i]; - - // compute the total fitness of all the descendants of the candidate within the clique - let loc_candidates = final_candidates.clone(); - for candidate_h in loc_candidates.into_iter() { - let desc_fit: u64 = - BlockGraph::get_full_active_block(&self.block_statuses, candidate_h) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when computing total fitness of descendants: {}", - candidate_h - )) - })? - .0 - .descendants - .intersection(&clique.block_ids) - .map(|h| { - if let Some(BlockStatus::Active { a_block: ab, ..
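Finality above starts from the intersection of all maximal cliques (a final block must sit in every one), visiting the smallest clique first so the candidate set shrinks fast. Note that the loop above indexes `self.max_cliques[i]` with the raw counter rather than `indices[i]`, so when `indices[0]` is not 0 the clique stored at position 0 never takes part in the intersection. A standalone sketch of the intended short-circuiting intersection, with integer ids standing in for `BlockId`:

    use std::collections::HashSet;

    // Intersect all cliques, smallest first, stopping as soon as the
    // candidate set is empty; every step goes through `indices[i]`.
    fn final_candidates(cliques: &[HashSet<u32>]) -> HashSet<u32> {
        let mut indices: Vec<usize> = (0..cliques.len()).collect();
        indices.sort_unstable_by_key(|&i| cliques[i].len());
        let mut candidates = match indices.first() {
            Some(&smallest) => cliques[smallest].clone(),
            None => return HashSet::new(),
        };
        for &i in &indices[1..] {
            candidates.retain(|v| cliques[i].contains(v));
            if candidates.is_empty() {
                break; // nothing can be final: stop intersecting
            }
        }
        candidates
    }

    fn main() {
        let cliques = vec![
            HashSet::from([1u32, 2, 3]),
            HashSet::from([2, 3]),
            HashSet::from([3, 4]),
        ];
        assert_eq!(final_candidates(&cliques), HashSet::from([3]));
    }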
}) = - self.block_statuses.get(h) - { - return ab.fitness; - } - 0 - }) - .sum(); - if desc_fit > self.cfg.delta_f0 { - // candidate is final - final_candidates.remove(&candidate_h); - final_blocks.insert(candidate_h); - } - } - } - final_blocks - }; - - // mark final blocks and update latest_final_blocks_periods - massa_trace!( - "consensus.block_graph.add_block_to_graph.mark_final_blocks", - {} - ); - for final_block_hash in final_blocks.into_iter() { - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&final_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&final_block_hash); - } - } - } - - // mark as final and update latest_final_blocks_periods - if let Some(BlockStatus::Active { - a_block: final_block, - .. - }) = self.block_statuses.get_mut(&final_block_hash) - { - massa_trace!("consensus.block_graph.add_block_to_graph.final", { - "hash": final_block_hash - }); - final_block.is_final = true; - // remove from cliques - let final_block_fitness = final_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&final_block_hash) { - c.fitness -= final_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::<BlockId>::default(), - fitness: 0, - is_blockclique: true, - }]; - } - // update latest final blocks - if final_block.slot.period - > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 - { - self.latest_final_blocks_periods[final_block.slot.thread as usize] = - (final_block_hash, final_block.slot.period); - } - // update new final blocks list - self.new_final_blocks.insert(final_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, final_block_hash))); - } - } - - massa_trace!("consensus.block_graph.add_block_to_graph.end", {}); - Ok(()) - } - - fn list_required_active_blocks(&self) -> Result<PreHashSet<BlockId>> { - // list all active blocks - let mut retain_active: PreHashSet<BlockId> = - PreHashSet::<BlockId>::with_capacity(self.active_index.len()); - - let latest_final_blocks: Vec<BlockId> = self - .latest_final_blocks_periods - .iter() - .map(|(hash, _)| *hash) - .collect(); - - // retain all non-final active blocks, - // the current "best parents", - // and the dependencies for both. - for block_id in self.active_index.iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - ..
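The finality test above sums, for each candidate, the fitness of its descendants restricted to one sufficiently fit clique, and declares the candidate final once that sum exceeds `delta_f0`. A minimal sketch of the predicate, with flat illustrative types in place of the block-status lookups:

    use std::collections::HashSet;

    // A candidate becomes final once the summed fitness of its descendants
    // *within one sufficiently fit clique* exceeds delta_f0.
    fn is_final(
        descendants: &HashSet<u32>,
        clique: &HashSet<u32>,
        fitness_of: impl Fn(u32) -> u64,
        delta_f0: u64,
    ) -> bool {
        let desc_fit: u64 = descendants
            .intersection(clique)
            .map(|&b| fitness_of(b))
            .sum();
        desc_fit > delta_f0
    }

    fn main() {
        let descendants = HashSet::from([1u32, 2, 3]);
        let clique = HashSet::from([2u32, 3, 4]);
        // two descendants fall inside the clique, fitness 1 each
        assert!(is_final(&descendants, &clique, |_| 1, 1));
        assert!(!is_final(&descendants, &clique, |_| 1, 2));
    }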
- }) = self.block_statuses.get(block_id) - { - if !active_block.is_final - || self.best_parents.iter().any(|(b, _p)| b == block_id) - || latest_final_blocks.contains(block_id) - { - retain_active.extend(active_block.parents.iter().map(|(p, _)| *p)); - retain_active.insert(*block_id); - } - } - } - - // retain best parents - retain_active.extend(self.best_parents.iter().map(|(b, _p)| *b)); - - // retain last final blocks - retain_active.extend(self.latest_final_blocks_periods.iter().map(|(h, _)| *h)); - - for (thread, id) in latest_final_blocks.iter().enumerate() { - let mut current_block_id = *id; - while let Some((current_block, _)) = self.get_active_block(&current_block_id) { - let parent_id = { - if !current_block.parents.is_empty() { - Some(current_block.parents[thread].0) - } else { - None - } - }; - - // retain block - retain_active.insert(current_block_id); - - // stop traversing when reaching a block with period number low enough - // so that any of its operations will have their validity period expired at the latest final block in thread - // note: one more is kept because of the way we iterate - if current_block.slot.period - < self.latest_final_blocks_periods[thread] - .1 - .saturating_sub(self.cfg.operation_validity_periods) - { - break; - } - - // if not genesis, traverse parent - match parent_id { - Some(p_id) => current_block_id = p_id, - None => break, - } - } - } - - // grow with parents & fill thread holes twice - for _ in 0..2 { - // retain the parents of the selected blocks - let retain_clone = retain_active.clone(); - - for retain_h in retain_clone.into_iter() { - retain_active.extend( - self.get_active_block(&retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? - .0.parents - .iter() - .map(|(b_id, _p)| *b_id), - ) - } - - // find earliest kept slots in each thread - let mut earliest_retained_periods: Vec<u64> = self - .latest_final_blocks_periods - .iter() - .map(|(_, p)| *p) - .collect(); - for retain_h in retain_active.iter() { - let retain_slot = &self - .get_active_block(retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? - .0.slot; - earliest_retained_periods[retain_slot.thread as usize] = std::cmp::min( - earliest_retained_periods[retain_slot.thread as usize], - retain_slot.period, - ); - } - - // fill up from the latest final block back to the earliest for each thread - for thread in 0..self.cfg.thread_count { - let mut cursor = self.latest_final_blocks_periods[thread as usize].0; // hash of the latest final block in that thread - while let Some((c_block, _)) = self.get_active_block(&cursor) { - if c_block.slot.period < earliest_retained_periods[thread as usize] { - break; - } - retain_active.insert(cursor); - if c_block.parents.is_empty() { - // genesis - break; - } - cursor = c_block.parents[thread as usize].0; - } - } - } - - Ok(retain_active) - } - - /// prune active blocks that are no longer needed and return the discarded final blocks - fn prune_active(&mut self) -> Result<PreHashMap<BlockId, ActiveBlock>> { - // list required active blocks - let mut retain_active = self.list_required_active_blocks()?; - - // retain extra history according to the config - // this is useful to avoid desync on temporary connection loss - for a_block in self.active_index.iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - ..
- }) = self.block_statuses.get(a_block) - { - let (_b_id, latest_final_period) = - self.latest_final_blocks_periods[active_block.slot.thread as usize]; - if active_block.slot.period - >= latest_final_period.saturating_sub(self.cfg.force_keep_final_periods) - { - retain_active.insert(*a_block); - } - } - } - - // remove unused final active blocks - let mut discarded_finals: PreHashMap<BlockId, ActiveBlock> = PreHashMap::default(); - let to_remove: Vec<BlockId> = self - .active_index - .difference(&retain_active) - .copied() - .collect(); - for discard_active_h in to_remove { - let block_slot; - let block_creator; - let block_parents; - { - let read_blocks = self.storage.read_blocks(); - let block = read_blocks.get(&discard_active_h).ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when removing unused final active blocks: {}", - discard_active_h - )) - })?; - block_slot = block.content.header.content.slot; - block_creator = block.creator_address; - block_parents = block.content.header.content.parents.clone(); - }; - - let discarded_active = if let Some(BlockStatus::Active { - a_block: discarded_active, - .. - }) = self.block_statuses.remove(&discard_active_h) - { - self.active_index.remove(&discard_active_h); - discarded_active - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and removing unused final active blocks - {} is missing", discard_active_h))); - }; - - // remove from parent's children - for (parent_h, _parent_period) in discarded_active.parents.iter() { - if let Some(BlockStatus::Active { - a_block: parent_active_block, - .. - }) = self.block_statuses.get_mut(parent_h) - { - parent_active_block.children[discarded_active.slot.thread as usize] - .remove(&discard_active_h); - } - } - - massa_trace!("consensus.block_graph.prune_active", {"hash": discard_active_h, "reason": DiscardReason::Final}); - - // mark as final - self.block_statuses.insert( - discard_active_h, - BlockStatus::Discarded { - slot: block_slot, - creator: block_creator, - parents: block_parents, - reason: DiscardReason::Final, - sequence_number: BlockGraph::new_sequence_number(&mut self.sequence_counter), - }, - ); - self.discarded_index.insert(discard_active_h); - - discarded_finals.insert(discard_active_h, *discarded_active); - } - - Ok(discarded_finals) - } - - fn promote_dep_tree(&mut self, hash: BlockId) -> Result<()> { - let mut to_explore = vec![hash]; - let mut to_promote: PreHashMap<BlockId, (Slot, u64)> = PreHashMap::default(); - while let Some(h) = to_explore.pop() { - if to_promote.contains_key(&h) { - continue; - } - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - sequence_number, - .. - }) = self.block_statuses.get(&h) - { - // promote current block - to_promote.insert(h, (header_or_block.get_slot(), *sequence_number)); - // register dependencies for exploration - to_explore.extend(unsatisfied_dependencies); - } - } - - let mut to_promote: Vec<(Slot, u64, BlockId)> = to_promote - .into_iter() - .map(|(h, (slot, seq))| (slot, seq, h)) - .collect(); - to_promote.sort_unstable(); // last ones should have the highest seq number - for (_slot, _seq, h) in to_promote.into_iter() { - if let Some(BlockStatus::WaitingForDependencies { - sequence_number, ..
- }) = self.block_statuses.get_mut(&h) - { - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - } - Ok(()) - } - - fn prune_waiting_for_dependencies(&mut self) -> Result<()> { - let mut to_discard: PreHashMap<BlockId, Option<DiscardReason>> = PreHashMap::default(); - let mut to_keep: PreHashMap<BlockId, (u64, Slot)> = PreHashMap::default(); - - // list items that are older than the latest final blocks in their threads or have deps that are discarded - { - for block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - sequence_number, - }) = self.block_statuses.get(block_id) - { - // has already discarded dependencies => discard (choose worst reason) - let mut discard_reason = None; - let mut discarded_dep_found = false; - for dep in unsatisfied_dependencies.iter() { - if let Some(BlockStatus::Discarded { reason, .. }) = - self.block_statuses.get(dep) - { - discarded_dep_found = true; - match reason { - DiscardReason::Invalid(reason) => { - discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", block_id, reason))); - break; - } - DiscardReason::Stale => discard_reason = Some(DiscardReason::Stale), - DiscardReason::Final => discard_reason = Some(DiscardReason::Stale), - } - } - } - if discarded_dep_found { - to_discard.insert(*block_id, discard_reason); - continue; - } - - // is at least as old as the latest final block in its thread => discard as stale - let slot = header_or_block.get_slot(); - if slot.period <= self.latest_final_blocks_periods[slot.thread as usize].1 { - to_discard.insert(*block_id, Some(DiscardReason::Stale)); - continue; - } - - // otherwise, mark as to_keep - to_keep.insert(*block_id, (*sequence_number, header_or_block.get_slot())); - } - } - } - - // discard in chain and because of limited size - while !to_keep.is_empty() { - // mark entries as to_discard and remove them from to_keep - for (hash, _old_order) in to_keep.clone().into_iter() { - if let Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) = self.block_statuses.get(&hash) - { - // has dependencies that will be discarded => discard (choose worst reason) - let mut discard_reason = None; - let mut dep_to_discard_found = false; - for dep in unsatisfied_dependencies.iter() { - if let Some(reason) = to_discard.get(dep) { - dep_to_discard_found = true; - match reason { - Some(DiscardReason::Invalid(reason)) => { - discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", hash, reason))); - break; - } - Some(DiscardReason::Stale) => { - discard_reason = Some(DiscardReason::Stale) - } - Some(DiscardReason::Final) => { - discard_reason = Some(DiscardReason::Stale) - } - None => {} // leave as None - } - } - } - if dep_to_discard_found { - to_keep.remove(&hash); - to_discard.insert(hash, discard_reason); - continue; - } - } - } - - // remove worst excess element - if to_keep.len() > self.cfg.max_dependency_blocks { - let remove_elt = to_keep - .iter() - .filter_map(|(hash, _old_order)| { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - sequence_number, - ..
- }) = self.block_statuses.get(hash) - { - return Some((sequence_number, header_or_block.get_slot(), *hash)); - } - None - }) - .min(); - if let Some((_seq_num, _slot, hash)) = remove_elt { - to_keep.remove(&hash); - to_discard.insert(hash, None); - continue; - } - } - - // nothing happened: stop loop - break; - } - - // transition states to Discarded if there is a reason, otherwise just drop - for (block_id, reason_opt) in to_discard.drain() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, .. - }) = self.block_statuses.remove(&block_id) - { - self.waiting_for_dependencies_index.remove(&block_id); - let header = match header_or_block { - HeaderOrBlock::Header(h) => h, - HeaderOrBlock::Block { id: block_id, .. } => self - .storage - .read_blocks() - .get(&block_id) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when pruning waiting for deps: {}", - block_id - )) - })? - .content - .header - .clone(), - }; - massa_trace!("consensus.block_graph.prune_waiting_for_dependencies", {"hash": block_id, "reason": reason_opt}); - - if let Some(reason) = reason_opt { - // add to stats if reason is Stale - if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); - } - // transition to Discarded only if there is a reason - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents.clone(), - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - } - } - } - - Ok(()) - } - - fn prune_slot_waiting(&mut self) { - if self.waiting_for_slot_index.len() <= self.cfg.max_future_processing_blocks { - return; - } - let mut slot_waiting: Vec<(Slot, BlockId)> = self - .waiting_for_slot_index - .iter() - .filter_map(|block_id| { - if let Some(BlockStatus::WaitingForSlot(header_or_block)) = - self.block_statuses.get(block_id) - { - return Some((header_or_block.get_slot(), *block_id)); - } - None - }) - .collect(); - slot_waiting.sort_unstable(); - let len_slot_waiting = slot_waiting.len(); - (self.cfg.max_future_processing_blocks..len_slot_waiting).for_each(|idx| { - let (_slot, block_id) = &slot_waiting[idx]; - self.block_statuses.remove(block_id); - self.waiting_for_slot_index.remove(block_id); - }); - } - - fn prune_discarded(&mut self) -> Result<()> { - if self.discarded_index.len() <= self.cfg.max_discarded_blocks { - return Ok(()); - } - let mut discard_hashes: Vec<(u64, BlockId)> = self - .discarded_index - .iter() - .filter_map(|block_id| { - if let Some(BlockStatus::Discarded { - sequence_number, .. 
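When a dependency of a waiting block is discarded, the code above folds the dependency's reason into one reason for the dependent: `Invalid` is terminal and stops the scan, while `Stale` and `Final` both degrade to `Stale`. A small sketch of that merge with a local `DiscardReason` stand-in; the message wording here is illustrative and names the dependency rather than the dependent:

    #[derive(Clone, Debug, PartialEq)]
    enum DiscardReason {
        Invalid(String),
        Stale,
        Final,
    }

    // Reason a dependent block inherits from one discarded dependency.
    fn inherited_reason(dep_id: u64, dep_reason: &DiscardReason) -> DiscardReason {
        match dep_reason {
            // terminal: the dependent is invalid too, keep the chain of causes
            DiscardReason::Invalid(cause) => DiscardReason::Invalid(format!(
                "depends on block {} discarded as invalid: {}",
                dep_id, cause
            )),
            // a dependency that went stale or final out from under the block
            // just makes the dependent stale
            DiscardReason::Stale | DiscardReason::Final => DiscardReason::Stale,
        }
    }

    fn main() {
        assert_eq!(inherited_reason(7, &DiscardReason::Final), DiscardReason::Stale);
    }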
- }) = self.block_statuses.get(block_id) - { - return Some((*sequence_number, *block_id)); - } - None - }) - .collect(); - discard_hashes.sort_unstable(); - discard_hashes.truncate(self.discarded_index.len() - self.cfg.max_discarded_blocks); - for (_, block_id) in discard_hashes.iter() { - self.block_statuses.remove(block_id); - self.discarded_index.remove(block_id); - } - Ok(()) - } - - /// prune the block graph and return the discarded final blocks - pub fn prune(&mut self) -> Result<PreHashMap<BlockId, ActiveBlock>> { - let before = self.max_cliques.len(); - // Step 1: discard final blocks that are not useful to the graph anymore and return them - let discarded_finals = self.prune_active()?; - - // Step 2: prune slot waiting blocks - self.prune_slot_waiting(); - - // Step 3: prune dependency waiting blocks - self.prune_waiting_for_dependencies()?; - - // Step 4: prune discarded - self.prune_discarded()?; - - let after = self.max_cliques.len(); - if before != after { - debug!( - "clique number went from {} to {} after pruning", - before, after - ); - } - - Ok(discarded_finals) - } - - /// get the current block wishlist, with the header when it is already known - pub fn get_block_wishlist(&self) -> Result<PreHashMap<BlockId, Option<WrappedHeader>>> { - let mut wishlist = PreHashMap::<BlockId, Option<WrappedHeader>>::default(); - for block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) = self.block_statuses.get(block_id) - { - for unsatisfied_h in unsatisfied_dependencies.iter() { - match self.block_statuses.get(unsatisfied_h) { - Some(BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - .. - }) => { - wishlist.insert(header.id, Some(header.clone())); - } - None => { - wishlist.insert(*unsatisfied_h, None); - } - _ => {} - } - } - } - } - - Ok(wishlist) - } - - /// get clique count - pub fn get_clique_count(&self) -> usize { - self.max_cliques.len() - } - - /// get the clique of highest fitness - pub fn get_blockclique(&self) -> &PreHashSet<BlockId> { - &self - .max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("blockclique missing") - .block_ids - } - - /// get the blockclique (or final) block ID at a given slot, if any - pub fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option<BlockId> { - // List all blocks at this slot. - // The list should be small: make a copy of it to avoid holding the storage lock. - let blocks_at_slot = { - let storage_read = self.storage.read_blocks(); - let returned = match storage_read.get_blocks_by_slot(slot) { - Some(v) => v.clone(), - None => return None, - }; - returned - }; - - // search for the block in the blockclique - let search_in_blockclique = blocks_at_slot - .intersection( - &self - .max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("expected one clique to be the blockclique") - .block_ids, - ) - .next(); - if let Some(found_id) = search_in_blockclique { - return Some(*found_id); - } - - // block not found in the blockclique: search in the final blocks - blocks_at_slot - .into_iter() - .find(|b_id| match self.block_statuses.get(b_id) { - Some(BlockStatus::Active { a_block, ..
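`prune_discarded` above bounds the discard cache by sorting on sequence numbers and evicting the entries with the lowest ones, i.e. the oldest. The same pattern in isolation, assuming `(sequence_number, block id)` pairs as plain integers:

    // Evict the oldest entries (lowest sequence numbers) down to `max_len`.
    fn evict_oldest(entries: &mut Vec<(u64, u64)>, max_len: usize) -> Vec<(u64, u64)> {
        if entries.len() <= max_len {
            return Vec::new();
        }
        entries.sort_unstable(); // lowest sequence numbers first
        let evicted: Vec<(u64, u64)> = entries.drain(..entries.len() - max_len).collect();
        evicted
    }

    fn main() {
        let mut cache = vec![(3, 30), (1, 10), (2, 20)];
        assert_eq!(evict_oldest(&mut cache, 2), vec![(1, 10)]);
        assert_eq!(cache, vec![(2, 20), (3, 30)]);
    }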
}) => a_block.is_final, - _ => false, - }) - } - - /// get the latest blockclique (or final) block ID that is the most recent, but still strictly older than `slot`, in the same thread as `slot` - pub fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId { - let (mut best_block_id, mut best_block_period) = self - .latest_final_blocks_periods - .get(slot.thread as usize) - .unwrap_or_else(|| panic!("unexpected not found latest final block period")); - - self.max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("expected one clique to be the blockclique") - .block_ids - .iter() - .for_each(|id| match self.block_statuses.get(id) { - Some(BlockStatus::Active { - a_block, - storage: _, - }) => { - if a_block.is_final { - panic!( - "unexpected final block on getting latest blockclique block at slot" - ); - } - if a_block.slot.thread == slot.thread - && a_block.slot.period < slot.period - && a_block.slot.period > best_block_period - { - best_block_period = a_block.slot.period; - best_block_id = *id; - } - } - _ => { - panic!("expected to find only active block but found another status") - } - }); - best_block_id - } - - /// Gets all stored final blocks, not only the still-useful ones - /// This is used when initializing Execution from Consensus. - /// Since the Execution bootstrap snapshot is older than the Consensus snapshot, - /// we might need to signal older final blocks for Execution to catch up. - pub fn get_all_final_blocks(&self) -> HashMap<BlockId, Slot> { - self.active_index - .iter() - .map(|b_id| { - let (a_block, _storage) = - self.get_active_block(b_id).expect("active block missing"); - (*b_id, a_block.slot) - }) - .collect() - } - - /// Get the block ids to be propagated. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_blocks_to_propagate(&mut self) -> PreHashMap<BlockId, Storage> { - mem::take(&mut self.to_propagate) - } - - /// Get the hashes of objects that were attack attempts. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_attack_attempts(&mut self) -> Vec<BlockId> { - mem::take(&mut self.attack_attempts) - } - - /// Get the ids of blocks that became final. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_new_final_blocks(&mut self) -> PreHashSet<BlockId> { - mem::take(&mut self.new_final_blocks) - } - - /// Get the ids of blocks that became stale. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_new_stale_blocks(&mut self) -> PreHashMap<BlockId, (Address, Slot)> { - mem::take(&mut self.new_stale_blocks) - } -} diff --git a/massa-graph/src/lib.rs b/massa-graph/src/lib.rs deleted file mode 100644 index 6f78cb49505..00000000000 --- a/massa-graph/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -//! 
graph management -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] - -extern crate massa_logging; - -/// useful structures -pub mod export_active_block; - -mod bootstrapable_graph; -pub use bootstrapable_graph::{ - BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, -}; - -mod block_graph; -pub use block_graph::*; - -/// graph errors -pub mod error; - -/// graph settings -pub mod settings; diff --git a/massa-graph/src/settings.rs b/massa-graph/src/settings.rs deleted file mode 100644 index 751df7f0bf6..00000000000 --- a/massa-graph/src/settings.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -#![allow(clippy::assertions_on_constants)] -use massa_signature::KeyPair; -use serde::{Deserialize, Serialize}; - -/// Graph configuration -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct GraphConfig { - /// Number of threads - pub thread_count: u8, - /// Keypair to sign genesis blocks. - pub genesis_key: KeyPair, - /// Maximum number of blocks allowed in discarded blocks. - pub max_discarded_blocks: usize, - /// If a block `is future_block_processing_max_periods` periods in the future, it is just discarded. - pub future_block_processing_max_periods: u64, - /// Maximum number of blocks allowed in `FutureIncomingBlocks`. - pub max_future_processing_blocks: usize, - /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. - pub max_dependency_blocks: usize, - /// Threshold for fitness. - pub delta_f0: u64, - /// Maximum operation validity period count - pub operation_validity_periods: u64, - /// cycle duration in periods - pub periods_per_cycle: u64, - /// force keep at least this number of final periods in RAM for each thread - pub force_keep_final_periods: u64, - /// target number of endorsement per block - pub endorsement_count: u32, - /// pub `block_db_prune_interval`: `MassaTime`, - pub max_item_return_count: usize, -} diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 02cc64e6902..d48496dccde 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -27,8 +27,6 @@ dialoguer = "0.10" massa_api = { path = "../massa-api" } massa_async_pool = { path = "../massa-async-pool" } massa_bootstrap = { path = "../massa-bootstrap" } -massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_consensus_worker = { path = "../massa-consensus-worker" } massa_execution_exports = { path = "../massa-execution-exports" } massa_execution_worker = { path = "../massa-execution-worker" } massa_signature = { path = "../massa-signature" } @@ -59,8 +57,7 @@ beta = [] deadlock_detection = [] sandbox = [ "massa_bootstrap/sandbox", - "massa_consensus_exports/sandbox", - "massa_consensus_worker/sandbox", + "massa_graph_2_worker/sandbox", "massa_execution_worker/sandbox", "massa_final_state/sandbox", "massa_models/sandbox", diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index d63f76fb238..848e4843953 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -355,16 +355,15 @@ async fn launch( max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, }; - let (protocol_command_sender, _protocol_event_receiver, protocol_manager) = - start_protocol_controller( - protocol_config, - network_command_sender.clone(), - network_event_receiver, - pool_controller.clone(), - shared_storage.clone(), - 
) - .await - .expect("could not start protocol controller"); + let (protocol_command_sender, protocol_manager) = start_protocol_controller( + protocol_config, + network_command_sender.clone(), + network_event_receiver, + pool_controller.clone(), + shared_storage.clone(), + ) + .await + .expect("could not start protocol controller"); // // init consensus configuration // let consensus_config = ConsensusConfig { @@ -610,6 +609,7 @@ async fn stop( // stop factory factory_manager.stop(); + // stop graph graph_manager.stop(); // stop pool diff --git a/massa-protocol-exports/src/error.rs b/massa-protocol-exports/src/error.rs index ead3ae5d7d6..778654bf8f3 100644 --- a/massa-protocol-exports/src/error.rs +++ b/massa-protocol-exports/src/error.rs @@ -1,6 +1,5 @@ // Copyright (c) 2022 MASSA LABS -use crate::ProtocolEvent; use displaydoc::Display; use massa_models::error::ModelsError; use massa_network_exports::ConnectionId; @@ -22,8 +21,6 @@ pub enum ProtocolError { TokioTaskJoinError(#[from] tokio::task::JoinError), /// error receiving one shot response : {0} TokioRecvError(#[from] tokio::sync::oneshot::error::RecvError), - /// error sending protocol event: {0} - TokioSendError(#[from] Box<tokio::sync::mpsc::error::SendError<ProtocolEvent>>), /// Error during network connection:`{0:?}` PeerConnectionError(NetworkConnectionErrorType), /// The ip:`{0}` address is not valid diff --git a/massa-protocol-exports/src/lib.rs b/massa-protocol-exports/src/lib.rs index dd2562b6b9a..ab550c45e2e 100644 --- a/massa-protocol-exports/src/lib.rs +++ b/massa-protocol-exports/src/lib.rs @@ -11,8 +11,8 @@ mod settings; pub use error::ProtocolError; pub use protocol_controller::{ - BlocksResults, ProtocolCommand, ProtocolCommandSender, ProtocolEvent, ProtocolEventReceiver, - ProtocolManagementCommand, ProtocolManager, + BlocksResults, ProtocolCommand, ProtocolCommandSender, ProtocolManagementCommand, + ProtocolManager, }; pub use settings::ProtocolConfig; diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index 67fb69a1463..c0ff2d28c9d 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -1,53 +1,19 @@ // Copyright (c) 2022 MASSA LABS -use std::collections::VecDeque; - use crate::error::ProtocolError; use massa_logging::massa_trace; +use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::{ block::{BlockId, WrappedHeader}, endorsement::EndorsementId, operation::OperationId, }; -use massa_models::{ - prehash::{PreHashMap, PreHashSet}, - slot::Slot, -}; use massa_network_exports::NetworkEventReceiver; use massa_storage::Storage; use serde::Serialize; use tokio::{sync::mpsc, task::JoinHandle}; -use tracing::{debug, info}; - -/// Possible types of events that can happen. -#[allow(clippy::large_enum_variant)] -#[derive(Debug)] -pub enum ProtocolEvent { - /// A block with a valid signature has been received. - ReceivedBlock { - /// block ID - block_id: BlockId, - /// block slot - slot: Slot, - /// storage instance containing the block and its dependencies (except the parents) - storage: Storage, - }, - /// A message to tell the consensus that a block is invalid - InvalidBlock { - /// block ID - block_id: BlockId, - /// header - header: WrappedHeader, - }, - /// A block header with a valid signature has been received. 
- ReceivedBlockHeader { - /// its id - block_id: BlockId, - /// The header - header: WrappedHeader, - }, -} +use tracing::info; /// block result: map block id to /// ```md @@ -166,36 +132,6 @@ impl ProtocolCommandSender { } } -/// Protocol event receiver -pub struct ProtocolEventReceiver(pub mpsc::Receiver<ProtocolEvent>); - -impl ProtocolEventReceiver { - /// Receives the next `ProtocolEvent` from a connected node. - /// An error is returned when all sender halves have dropped, - /// indicating that no further values can be sent on the channel - pub async fn wait_event(&mut self) -> Result<ProtocolEvent, ProtocolError> { - massa_trace!("protocol.event_receiver.wait_event", {}); - self.0.recv().await.ok_or_else(|| { - ProtocolError::ChannelError( - "DefaultProtocolController wait_event channel recv failed".into(), - ) - }) - } - - /// drains remaining events and returns them in a `VecDeque` - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque<ProtocolEvent> { - let mut remaining_events: VecDeque<ProtocolEvent> = VecDeque::new(); - while let Some(evt) = self.0.recv().await { - debug!( - "after receiving event from ProtocolEventReceiver.0 in protocol_controller drain" - ); - remaining_events.push_back(evt); - } - remaining_events - } -} - /// protocol manager used to stop the protocol pub struct ProtocolManager { join_handle: JoinHandle<Result<NetworkEventReceiver, ProtocolError>>, diff --git a/massa-protocol-exports/src/tests/tools.rs b/massa-protocol-exports/src/tests/tools.rs index 03c06ed9788..1a736cc913f 100644 --- a/massa-protocol-exports/src/tests/tools.rs +++ b/massa-protocol-exports/src/tests/tools.rs @@ -1,8 +1,8 @@ // Copyright (c) 2022 MASSA LABS use super::mock_network_controller::MockNetworkController; -use crate::protocol_controller::{ProtocolCommandSender, ProtocolEventReceiver}; -use crate::{ProtocolConfig, ProtocolEvent}; +use crate::protocol_controller::ProtocolCommandSender; +use crate::ProtocolConfig; use massa_hash::Hash; use massa_models::node::NodeId; use massa_models::operation::OperationSerializer; @@ -168,7 +168,6 @@ pub async fn send_and_propagate_block( block: WrappedBlock, valid: bool, source_node_id: NodeId, - protocol_event_receiver: &mut ProtocolEventReceiver, protocol_command_sender: &mut ProtocolCommandSender, operations: Vec<WrappedOperation>, ) { @@ -202,22 +201,23 @@ pub async fn send_and_propagate_block( .send_block_info(source_node_id, info) .await; + // TODO: re-add // Check protocol sends block to consensus. - let hash = match wait_protocol_event(protocol_event_receiver, 1000.into(), |evt| match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - _ => None, - }) - .await - { - Some(ProtocolEvent::ReceivedBlock { block_id, .. }) => Some(block_id), - None => None, - _ => panic!("Unexpected or no protocol event."), - }; - if valid { - assert_eq!(expected_hash, hash.unwrap()); - } else { - assert!(hash.is_none(), "unexpected protocol event") - } + // let hash = match wait_protocol_event(protocol_event_receiver, 1000.into(), |evt| match evt { + // evt @ ProtocolEvent::ReceivedBlock { ..
}) => Some(block_id), + // None => None, + // _ => panic!("Unexpected or no protocol event."), + // }; + // if valid { + // assert_eq!(expected_hash, hash.unwrap()); + // } else { + // assert!(hash.is_none(), "unexpected protocol event") + // } } /// Creates an endorsement for use in protocol tests, @@ -287,28 +287,6 @@ pub fn create_protocol_config() -> ProtocolConfig { } } -/// wait protocol event -pub async fn wait_protocol_event<F, T>( - protocol_event_receiver: &mut ProtocolEventReceiver, - timeout: MassaTime, - filter_map: F, -) -> Option<T> -where - F: Fn(ProtocolEvent) -> Option<T>, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - evt_opt = protocol_event_receiver.wait_event() => match evt_opt { - Ok(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => return None - }, - _ = &mut timer => return None - } - } -} - /// assert block id has been asked to node pub async fn assert_hash_asked_to_node( hash_1: BlockId, diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index 18b7f04dd9e..f469a1f5313 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -15,6 +15,7 @@ rayon = "1.5" massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } +massa_graph_2_exports = { path = "../massa-graph-2-exports" } massa_network_exports = { path = "../massa-network-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index d4e79f6d7a9..581a38a1a45 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -17,7 +17,7 @@ use massa_models::{ wrapped::{Id, Wrapped}, }; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkEvent}; -use massa_protocol_exports::{ProtocolError, ProtocolEvent}; +use massa_protocol_exports::ProtocolError; use massa_serialization::Serializer; use massa_storage::Storage; use std::pin::Pin; @@ -98,11 +98,8 @@ impl ProtocolWorker { self.note_header_from_node(&header, &source_node_id).await?
{ if is_new - self.send_protocol_event(ProtocolEvent::ReceivedBlockHeader { - block_id, - header, - }) - .await; + self.graph_controller + .register_block_header(block_id, header); } self.update_ask_block(block_ask_timer).await?; } else { @@ -428,7 +425,7 @@ impl ProtocolWorker { return Ok(()); } - let protocol_event_full_block = match self.block_wishlist.entry(block_id) { + match self.block_wishlist.entry(block_id) { Entry::Occupied(mut entry) => { let info = entry.get_mut(); let header = if let Some(header) = &info.header { @@ -471,7 +468,7 @@ impl ProtocolWorker { warn!("Node id {} sent us full operations for block id {} but they exceed max size.", from_node_id, block_id); let _ = self.ban_node(&from_node_id).await; self.block_wishlist.remove(&block_id); - ProtocolEvent::InvalidBlock { block_id, header } + self.graph_controller.mark_invalid_block(block_id, header); } else { if known_operations != block_ids_set { warn!( @@ -516,11 +513,10 @@ impl ProtocolWorker { let slot = wrapped_block.content.header.content.slot; // add block to local storage and claim ref block_storage.store_block(wrapped_block); - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage: block_storage, - } + + // Send to graph + self.graph_controller + .register_block(block_id, slot, block_storage); } } Entry::Vacant(_) => { @@ -532,8 +528,6 @@ impl ProtocolWorker { return Ok(()); } }; - // Send to graph - self.send_protocol_event(protocol_event_full_block).await; // Update ask block let remove_hashes = vec![block_id].into_iter().collect(); diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index b26234100ad..9c1cc380c46 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -5,6 +5,7 @@ use crate::checked_operations::CheckedOperations; use crate::sig_verifier::verify_sigs_batch; use crate::{node_info::NodeInfo, worker_operations_impl::OperationBatchBuffer}; +use massa_graph_2_exports::GraphController; use massa_logging::massa_trace; use massa_models::slot::Slot; @@ -20,8 +21,8 @@ use massa_models::{ use massa_network_exports::{AskForBlocksInfo, NetworkCommandSender, NetworkEventReceiver}; use massa_pool_exports::PoolController; use massa_protocol_exports::{ - ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolError, ProtocolEvent, - ProtocolEventReceiver, ProtocolManagementCommand, ProtocolManager, + ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolError, + ProtocolManagementCommand, ProtocolManager, }; use massa_models::wrapped::Id; @@ -32,7 +33,6 @@ use std::mem; use std::pin::Pin; use tokio::{ sync::mpsc, - sync::mpsc::error::SendTimeoutError, time::{sleep, sleep_until, Instant, Sleep}, }; use tracing::{debug, error, info, warn}; @@ -51,20 +51,13 @@ pub async fn start_protocol_controller( config: ProtocolConfig, network_command_sender: NetworkCommandSender, network_event_receiver: NetworkEventReceiver, + graph_controller: Box<dyn GraphController>, pool_controller: Box<dyn PoolController>, storage: Storage, -) -> Result< - ( - ProtocolCommandSender, - ProtocolEventReceiver, - ProtocolManager, - ), - ProtocolError, -> { +) -> Result<(ProtocolCommandSender, ProtocolManager), ProtocolError> { debug!("starting protocol controller"); // launch worker - let (controller_event_tx, event_rx) = mpsc::channel::<ProtocolEvent>(config.event_channel_size); let (command_tx, controller_command_rx) = mpsc::channel::<ProtocolCommand>(config.controller_channel_size); let (manager_tx, controller_manager_rx) = mpsc::channel::<ProtocolManagementCommand>(1); @@ -75,10 +68,10 @@ pub async fn
start_protocol_controller( ProtocolWorkerChannels { network_command_sender, network_event_receiver, - controller_event_tx, controller_command_rx, controller_manager_rx, }, + graph_controller, pool_controller, storage, ) @@ -98,7 +91,6 @@ pub async fn start_protocol_controller( debug!("protocol controller ready"); Ok(( ProtocolCommandSender(command_tx), - ProtocolEventReceiver(event_rx), ProtocolManager::new(join_handle, manager_tx), )) } @@ -132,12 +124,12 @@ impl BlockInfo { pub struct ProtocolWorker { /// Protocol configuration. pub(crate) config: ProtocolConfig, + /// Graph controller + pub(crate) graph_controller: Box<dyn GraphController>, /// Associated network command sender. pub(crate) network_command_sender: NetworkCommandSender, /// Associated network event receiver. network_event_receiver: NetworkEventReceiver, - /// Channel to send protocol events to the controller. - controller_event_tx: mpsc::Sender<ProtocolEvent>, /// Channel to send protocol pool events to the controller. pool_controller: Box<dyn PoolController>, /// Channel receiving commands from the controller. @@ -171,8 +163,6 @@ pub struct ProtocolWorkerChannels { pub network_command_sender: NetworkCommandSender, /// network event receiver pub network_event_receiver: NetworkEventReceiver, - /// protocol event sender - pub controller_event_tx: mpsc::Sender<ProtocolEvent>, /// protocol command receiver pub controller_command_rx: mpsc::Receiver<ProtocolCommand>, /// protocol management command receiver pub controller_manager_rx: mpsc::Receiver<ProtocolManagementCommand>, @@ -193,10 +183,10 @@ impl ProtocolWorker { ProtocolWorkerChannels { network_command_sender, network_event_receiver, - controller_event_tx, controller_command_rx, controller_manager_rx, }: ProtocolWorkerChannels, + graph_controller: Box<dyn GraphController>, pool_controller: Box<dyn PoolController>, storage: Storage, ) -> ProtocolWorker { @@ -204,7 +194,7 @@ impl ProtocolWorker { config, network_command_sender, network_event_receiver, - controller_event_tx, + graph_controller, pool_controller, controller_command_rx, controller_manager_rx, @@ -224,25 +214,6 @@ impl ProtocolWorker { } } - pub(crate) async fn send_protocol_event(&self, event: ProtocolEvent) { - let result = self - .controller_event_tx - .send_timeout(event, self.config.max_send_wait.to_duration()) - .await; - match result { - Ok(()) => {} - Err(SendTimeoutError::Closed(event)) => { - warn!( - "Failed to send ProtocolEvent due to channel closure: {:?}.", - event - ); - } - Err(SendTimeoutError::Timeout(event)) => { - warn!("Failed to send ProtocolEvent due to timeout: {:?}.", event); - } - } - } - /// Main protocol worker loop. Consumes self. /// It is mostly a `tokio::select!` inside a loop /// waiting on : From 2e0881be5c77e6ecf1ac0bd79e23427e68acf913 Mon Sep 17 00:00:00 2001 From: AurelienFT <aurelien.foucault@epitech.eu> Date: Wed, 19 Oct 2022 16:10:50 +0200 Subject: [PATCH 24/40] Fix usage of old graph crate.
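The preceding hunks replace the `ProtocolEvent` channel, with its send timeout and closed-channel handling, by synchronous calls on a shared graph controller handle. A minimal sketch of that inversion of control; `Graph` and `ChannelBacked` are illustrative stand-ins, not the real massa API, and the real trait is additionally `Send + Sync` so the boxed handle can be shared across threads:

    use std::sync::mpsc;

    // Stand-in for the controller trait: the worker calls the consensus
    // side directly instead of awaiting a bounded event channel.
    trait Graph {
        fn register_block_header(&self, block_id: u64);
    }

    // One possible backing: forward into a channel the consensus side
    // drains, fire-and-forget, so the network thread never blocks.
    struct ChannelBacked(mpsc::Sender<u64>);

    impl Graph for ChannelBacked {
        fn register_block_header(&self, block_id: u64) {
            let _ = self.0.send(block_id);
        }
    }

    fn main() {
        let (tx, rx) = mpsc::channel();
        let graph: Box<dyn Graph> = Box::new(ChannelBacked(tx));
        graph.register_block_header(42);
        assert_eq!(rx.recv().unwrap(), 42);
    }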
--- massa-api/src/error.rs | 2 +- massa-bootstrap/src/error.rs | 2 +- massa-bootstrap/src/lib.rs | 2 +- massa-bootstrap/src/messages.rs | 2 +- massa-graph-2-exports/src/controller_trait.rs | 8 ++-- massa-graph-2-exports/src/error.rs | 6 --- .../src/export_active_block.rs | 2 +- massa-graph-2-worker/src/controller.rs | 15 +++---- massa-graph-2-worker/src/state/graph.rs | 6 ++- massa-graph-2-worker/src/state/mod.rs | 12 +++--- massa-graph-2-worker/src/state/process.rs | 16 +++---- .../src/state/process_commands.rs | 10 +++-- massa-graph-2-worker/src/state/stats.rs | 39 +++++++++-------- massa-graph-2-worker/src/state/tick.rs | 5 +-- massa-graph-2-worker/src/worker/init.rs | 15 +++---- massa-graph-2-worker/src/worker/main_loop.rs | 4 +- massa-node/src/main.rs | 42 +------------------ 17 files changed, 73 insertions(+), 115 deletions(-) diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index abdb186220e..a27ef70919c 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -2,7 +2,7 @@ use displaydoc::Display; use massa_execution_exports::ExecutionError; -use massa_graph::error::GraphError; +use massa_graph_2_exports::error::GraphError; use massa_hash::MassaHashError; use massa_models::error::ModelsError; use massa_network_exports::NetworkError; diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index a421fdfd059..432b72c6f75 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -3,7 +3,7 @@ use crate::messages::{BootstrapClientMessage, BootstrapServerMessage}; use displaydoc::Display; use massa_final_state::FinalStateError; -use massa_graph::error::GraphError; +use massa_graph_2_exports::error::GraphError; use massa_hash::MassaHashError; use massa_network_exports::NetworkError; use massa_pos_exports::PosError; diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index ba29b6cc02c..b9ff13e16db 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -18,7 +18,7 @@ extern crate fix_hidden_lifetime_bug; pub use establisher::types::Establisher; use massa_final_state::FinalState; -use massa_graph::BootstrapableGraph; +use massa_graph_2_exports::bootstrapable_graph::BootstrapableGraph; use massa_network_exports::BootstrapPeers; use parking_lot::RwLock; use std::sync::Arc; diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index f2b71b5b753..31d6329cfbf 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -6,7 +6,7 @@ use massa_final_state::{ ExecutedOpsStreamingStepSerializer, StateChanges, StateChangesDeserializer, StateChangesSerializer, }; -use massa_graph_2_exports::{ +use massa_graph_2_exports::bootstrapable_graph::{ BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, }; use massa_ledger_exports::{KeyDeserializer, KeySerializer}; diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs index 480fb105480..40f0c856ec3 100644 --- a/massa-graph-2-exports/src/controller_trait.rs +++ b/massa-graph-2-exports/src/controller_trait.rs @@ -1,5 +1,5 @@ use crate::block_graph_export::BlockGraphExport; -use crate::{bootstrapable_graph::BootstrapableGraph, error::GraphResult}; +use crate::{bootstrapable_graph::BootstrapableGraph, error::GraphError}; use massa_models::{ api::BlockGraphStatus, block::{BlockHeader, BlockId}, @@ -16,15 +16,15 @@ pub trait GraphController: Send + Sync { &self, start_slot: Option, end_slot: Option, - ) -> GraphResult; + ) -> Result; fn 
get_block_statuses(&self, ids: &Vec) -> Vec; fn get_cliques(&self) -> Vec; - fn get_bootstrap_graph(&self) -> GraphResult; + fn get_bootstrap_graph(&self) -> Result; - fn get_stats(&self) -> GraphResult; + fn get_stats(&self) -> Result; fn get_best_parents(&self) -> Vec<(BlockId, u64)>; diff --git a/massa-graph-2-exports/src/error.rs b/massa-graph-2-exports/src/error.rs index 36e12b98e5f..3389675d298 100644 --- a/massa-graph-2-exports/src/error.rs +++ b/massa-graph-2-exports/src/error.rs @@ -7,12 +7,6 @@ use massa_time::TimeError; use std::array::TryFromSliceError; use thiserror::Error; -/// Result used in the graph -pub type GraphResult = core::result::Result; - -/// Result used in the ledger -pub type LedgerResult = core::result::Result; - /// Graph error #[non_exhaustive] #[derive(Display, Error, Debug)] diff --git a/massa-graph-2-exports/src/export_active_block.rs b/massa-graph-2-exports/src/export_active_block.rs index dbf93c88f14..9c08f9ec37b 100644 --- a/massa-graph-2-exports/src/export_active_block.rs +++ b/massa-graph-2-exports/src/export_active_block.rs @@ -1,4 +1,4 @@ -use crate::error::{GraphError, GraphResult as Result}; +use crate::error::GraphError; use massa_hash::HashDeserializer; use massa_models::{ active_block::ActiveBlock, diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index f94db0db2ea..3f35b7e65b8 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -1,10 +1,7 @@ use massa_graph_2_exports::{ - block_graph_export::BlockGraphExport, - block_status::BlockStatus, - bootstrapable_graph::BootstrapableGraph, - error::{GraphError, GraphResult}, - export_active_block::ExportActiveBlock, - GraphController, + block_graph_export::BlockGraphExport, block_status::BlockStatus, + bootstrapable_graph::BootstrapableGraph, error::GraphError, + export_active_block::ExportActiveBlock, GraphController, }; use massa_models::{ api::BlockGraphStatus, @@ -59,7 +56,7 @@ impl GraphController for GraphControllerImpl { &self, start_slot: Option, end_slot: Option, - ) -> GraphResult { + ) -> Result { self.shared_state .read() .extract_block_graph_part(start_slot, end_slot) @@ -92,7 +89,7 @@ impl GraphController for GraphControllerImpl { /// /// # Returns: /// A portion of the graph - fn get_bootstrap_graph(&self) -> GraphResult { + fn get_bootstrap_graph(&self) -> Result { let read_shared_state = self.shared_state.read(); let mut required_final_blocks: PreHashSet<_> = read_shared_state.list_required_active_blocks()?; @@ -126,7 +123,7 @@ impl GraphController for GraphControllerImpl { } /// Get the stats of the consensus - fn get_stats(&self) -> GraphResult { + fn get_stats(&self) -> Result { self.shared_state.read().get_stats() } diff --git a/massa-graph-2-worker/src/state/graph.rs b/massa-graph-2-worker/src/state/graph.rs index 2d0a4177e1e..a6967f9e1f3 100644 --- a/massa-graph-2-worker/src/state/graph.rs +++ b/massa-graph-2-worker/src/state/graph.rs @@ -1,7 +1,9 @@ use std::collections::VecDeque; -use massa_graph::error::GraphError; -use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason}; +use massa_graph_2_exports::{ + block_status::{BlockStatus, DiscardReason}, + error::GraphError, +}; use massa_logging::massa_trace; use massa_models::{block::BlockId, clique::Clique, prehash::PreHashSet, slot::Slot}; diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-graph-2-worker/src/state/mod.rs index 206ba788070..986db860385 100644 --- a/massa-graph-2-worker/src/state/mod.rs +++ 
b/massa-graph-2-worker/src/state/mod.rs @@ -1,9 +1,9 @@ use std::collections::{HashMap, VecDeque}; -use massa_graph::error::{GraphError, GraphResult}; use massa_graph_2_exports::{ block_graph_export::BlockGraphExport, block_status::{BlockStatus, ExportCompiledBlock, HeaderOrBlock}, + error::GraphError, GraphChannels, GraphConfig, }; use massa_models::{ @@ -198,7 +198,7 @@ impl GraphState { } } - pub fn list_required_active_blocks(&self) -> GraphResult> { + pub fn list_required_active_blocks(&self) -> Result, GraphError> { // list all active blocks let mut retain_active: PreHashSet = PreHashSet::::with_capacity(self.active_index.len()); @@ -323,7 +323,7 @@ impl GraphState { &self, slot_start: Option, slot_end: Option, - ) -> GraphResult { + ) -> Result { let mut export = BlockGraphExport { genesis_blocks: self.genesis_hashes.clone(), active_blocks: PreHashMap::with_capacity(self.block_statuses.len()), @@ -415,7 +415,9 @@ impl GraphState { } /// get the current block wish list, including the operations hash. - pub fn get_block_wishlist(&self) -> GraphResult>> { + pub fn get_block_wishlist( + &self, + ) -> Result>, GraphError> { let mut wishlist = PreHashMap::>::default(); for block_id in self.waiting_for_dependencies_index.iter() { if let Some(BlockStatus::WaitingForDependencies { @@ -450,7 +452,7 @@ impl GraphState { pub fn get_active_block_and_descendants( &self, block_id: &BlockId, - ) -> GraphResult> { + ) -> Result, GraphError> { let mut to_visit = vec![*block_id]; let mut result = PreHashSet::::default(); while let Some(visit_h) = to_visit.pop() { diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-graph-2-worker/src/state/process.rs index 02d4d26d029..ca57af10a50 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-graph-2-worker/src/state/process.rs @@ -3,8 +3,10 @@ use std::{ mem, }; -use massa_graph::error::{GraphError, GraphResult}; -use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason, HeaderOrBlock}; +use massa_graph_2_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::GraphError, +}; use massa_logging::massa_trace; use massa_models::{ active_block::ActiveBlock, @@ -36,7 +38,7 @@ impl GraphState { &mut self, mut to_ack: BTreeSet<(Slot, BlockId)>, current_slot: Option, - ) -> GraphResult<()> { + ) -> Result<(), GraphError> { // order processing by (slot, hash) while let Some((_slot, hash)) = to_ack.pop_first() { to_ack.extend(self.process(hash, current_slot)?) @@ -56,7 +58,7 @@ impl GraphState { &mut self, block_id: BlockId, current_slot: Option, - ) -> GraphResult> { + ) -> Result, GraphError> { // list items to reprocess let mut reprocess = BTreeSet::new(); @@ -424,7 +426,7 @@ impl GraphState { } /// TODO: Doc - pub fn promote_dep_tree(&mut self, hash: BlockId) -> GraphResult<()> { + pub fn promote_dep_tree(&mut self, hash: BlockId) -> Result<(), GraphError> { let mut to_explore = vec![hash]; let mut to_promote: PreHashMap = PreHashMap::default(); while let Some(h) = to_explore.pop() { @@ -486,7 +488,7 @@ impl GraphState { inherited_incomp_count: usize, fitness: u64, mut storage: Storage, - ) -> GraphResult<()> { + ) -> Result<(), GraphError> { massa_trace!("consensus.block_graph.add_block_to_graph", { "block_id": add_block_id }); @@ -755,7 +757,7 @@ impl GraphState { /// 9. notify protocol of block wish list /// 10. note new latest final periods (prune graph if changed) /// 11. 
add stale blocks to stats - pub fn block_db_changed(&mut self) -> GraphResult<()> { + pub fn block_db_changed(&mut self) -> Result<(), GraphError> { let final_block_slots = { massa_trace!("consensus.consensus_worker.block_db_changed", {}); diff --git a/massa-graph-2-worker/src/state/process_commands.rs b/massa-graph-2-worker/src/state/process_commands.rs index 823eca00227..151f7b4451b 100644 --- a/massa-graph-2-worker/src/state/process_commands.rs +++ b/massa-graph-2-worker/src/state/process_commands.rs @@ -1,7 +1,9 @@ use std::collections::{hash_map::Entry, BTreeSet}; -use massa_graph::error::GraphResult; -use massa_graph_2_exports::block_status::{BlockStatus, DiscardReason, HeaderOrBlock}; +use massa_graph_2_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::GraphError, +}; use massa_logging::massa_trace; use massa_models::{ block::{BlockId, WrappedHeader}, @@ -27,7 +29,7 @@ impl GraphState { block_id: BlockId, header: WrappedHeader, current_slot: Option, - ) -> GraphResult<()> { + ) -> Result<(), GraphError> { // ignore genesis blocks if self.genesis_hashes.contains(&block_id) { return Ok(()); @@ -84,7 +86,7 @@ impl GraphState { slot: Slot, current_slot: Option, storage: Storage, - ) -> GraphResult<()> { + ) -> Result<(), GraphError> { // ignore genesis blocks if self.genesis_hashes.contains(&block_id) { return Ok(()); diff --git a/massa-graph-2-worker/src/state/stats.rs b/massa-graph-2-worker/src/state/stats.rs index 2e1be5a53ac..d587531ec7e 100644 --- a/massa-graph-2-worker/src/state/stats.rs +++ b/massa-graph-2-worker/src/state/stats.rs @@ -1,14 +1,12 @@ use super::GraphState; -use massa_graph::error::GraphResult; -use massa_graph_2_exports::events::GraphEvent; +use massa_graph_2_exports::error::GraphError; use massa_models::stats::ConsensusStats; use massa_time::MassaTime; use std::cmp::max; -use tracing::log::warn; impl GraphState { /// Calculate and return stats about graph - pub fn get_stats(&self) -> GraphResult { + pub fn get_stats(&self) -> Result { let timespan_end = max( self.launch_time, MassaTime::now(self.config.clock_compensation_millis)?, @@ -38,25 +36,26 @@ impl GraphState { } /// Must be called each tick to update stats. 
Will detect if a desynchronization happened
-    pub fn stats_tick(&mut self) -> GraphResult<()> {
-        let now = MassaTime::now(self.config.clock_compensation_millis)?;
-
+    pub fn stats_tick(&mut self) -> Result<(), GraphError> {
         // check if there are any final blocks coming from protocol
         // if none => we are probably desync
         #[cfg(not(feature = "sandbox"))]
-        if now
-            > max(self.config.genesis_timestamp, self.launch_time)
-                .saturating_add(self.stats_desync_detection_timespan)
-            && !self
-                .final_block_stats
-                .iter()
-                .any(|(time, _, is_from_protocol)| {
-                    time > &now.saturating_sub(self.stats_desync_detection_timespan)
-                        && *is_from_protocol
-                }) {
-            warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node");
-            let _ = self.channels.controller_event_tx.send(GraphEvent::NeedSync);
+        {
+            let now = MassaTime::now(self.config.clock_compensation_millis)?;
+            if now
+                > max(self.config.genesis_timestamp, self.launch_time)
+                    .saturating_add(self.stats_desync_detection_timespan)
+                && !self
+                    .final_block_stats
+                    .iter()
+                    .any(|(time, _, is_from_protocol)| {
+                        time > &now.saturating_sub(self.stats_desync_detection_timespan)
+                            && *is_from_protocol
+                    })
+            {
+                warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node");
+                let _ = self.channels.controller_event_tx.send(GraphEvent::NeedSync);
+            }
         }
         // prune stats
         self.prune_stats()?;
@@ -64,7 +63,7 @@ impl GraphState {
     }
 
     /// Remove old stats from graph storage
-    pub fn prune_stats(&mut self) -> GraphResult<()> {
+    pub fn prune_stats(&mut self) -> Result<(), GraphError> {
         let start_time = MassaTime::now(self.config.clock_compensation_millis)?
             .saturating_sub(self.stats_history_timespan);
         while let Some((t, _, _)) = self.final_block_stats.front() {
diff --git a/massa-graph-2-worker/src/state/tick.rs b/massa-graph-2-worker/src/state/tick.rs
index b1ea02f9fcd..31a022390fe 100644
--- a/massa-graph-2-worker/src/state/tick.rs
+++ b/massa-graph-2-worker/src/state/tick.rs
@@ -1,7 +1,6 @@
 use std::collections::BTreeSet;
 
-use massa_graph::error::GraphResult;
-use massa_graph_2_exports::block_status::BlockStatus;
+use massa_graph_2_exports::{block_status::BlockStatus, error::GraphError};
 use massa_logging::massa_trace;
 use massa_models::{block::BlockId, slot::Slot};
 
@@ -15,7 +14,7 @@ impl GraphState {
     ///
     /// # Returns:
     /// Error if the process of a block returned an error.
- pub fn slot_tick(&mut self, current_slot: Slot) -> GraphResult<()> { + pub fn slot_tick(&mut self, current_slot: Slot) -> Result<(), GraphError> { massa_trace!("consensus.consensus_worker.slot_tick", { "slot": current_slot }); diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-graph-2-worker/src/worker/init.rs index 152e643547f..6ae10f6757d 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-graph-2-worker/src/worker/init.rs @@ -1,7 +1,5 @@ use massa_graph_2_exports::{ - block_status::BlockStatus, - bootstrapable_graph::BootstrapableGraph, - error::{GraphError, GraphResult}, + block_status::BlockStatus, bootstrapable_graph::BootstrapableGraph, error::GraphError, GraphConfig, }; use massa_hash::Hash; @@ -35,7 +33,10 @@ use super::GraphWorker; /// /// # Returns /// A genesis block -pub fn create_genesis_block(cfg: &GraphConfig, thread_number: u8) -> GraphResult { +pub fn create_genesis_block( + cfg: &GraphConfig, + thread_number: u8, +) -> Result { let keypair = &cfg.genesis_key; let header = BlockHeader::new_wrapped( BlockHeader { @@ -77,7 +78,7 @@ impl GraphWorker { shared_state: Arc>, init_graph: Option, storage: Storage, - ) -> GraphResult { + ) -> Result { let now = MassaTime::now(config.clock_compensation_millis) .expect("Couldn't init timer consensus"); let previous_slot = get_latest_block_slot_at_timestamp( @@ -207,7 +208,7 @@ impl GraphWorker { }, )) }) - .collect::>()?; + .collect::>()?; write_shared_state.final_block_stats = final_block_stats; } @@ -264,7 +265,7 @@ impl GraphWorker { } /// Internal function used at initialization of the `GraphWorker` to link blocks with their parents - fn claim_parent_refs(&mut self) -> GraphResult<()> { + fn claim_parent_refs(&mut self) -> Result<(), GraphError> { let mut write_shared_state = self.shared_state.write(); for (_b_id, block_status) in write_shared_state.block_statuses.iter_mut() { if let BlockStatus::Active { diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index d1ff46acfe1..1b28374aa37 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -1,6 +1,6 @@ use std::{sync::mpsc, time::Instant}; -use massa_graph_2_exports::error::GraphResult; +use massa_graph_2_exports::error::GraphError; use massa_models::{ slot::Slot, timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, @@ -26,7 +26,7 @@ impl GraphWorker { /// /// # Returns: /// An error if the command failed - fn manage_command(&mut self, command: GraphCommand) -> GraphResult<()> { + fn manage_command(&mut self, command: GraphCommand) -> Result<(), GraphError> { let mut write_shared_state = self.shared_state.write(); match command { GraphCommand::RegisterBlockHeader(block_id, header) => { diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 848e4843953..a62da570169 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -359,53 +359,13 @@ async fn launch( protocol_config, network_command_sender.clone(), network_event_receiver, + graph_controller.clone(), pool_controller.clone(), shared_storage.clone(), ) .await .expect("could not start protocol controller"); - // // init consensus configuration - // let consensus_config = ConsensusConfig { - // genesis_timestamp: *GENESIS_TIMESTAMP, - // end_timestamp: *END_TIMESTAMP, - // thread_count: THREAD_COUNT, - // t0: T0, - // genesis_key: GENESIS_KEY.clone(), - // max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, - // 
future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, - // max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, - // max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, - // delta_f0: DELTA_F0, - // operation_validity_periods: OPERATION_VALIDITY_PERIODS, - // periods_per_cycle: PERIODS_PER_CYCLE, - // stats_timespan: SETTINGS.consensus.stats_timespan, - // max_send_wait: SETTINGS.consensus.max_send_wait, - // force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, - // endorsement_count: ENDORSEMENT_COUNT, - // block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, - // max_item_return_count: SETTINGS.consensus.max_item_return_count, - // max_gas_per_block: MAX_GAS_PER_BLOCK, - // channel_size: CHANNEL_SIZE, - // }; - // // launch consensus controller - // let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - // start_consensus_controller( - // consensus_config.clone(), - // ConsensusChannels { - // execution_controller: execution_controller.clone(), - // protocol_command_sender: protocol_command_sender.clone(), - // protocol_event_receiver, - // pool_command_sender: pool_controller.clone(), - // selector_controller: selector_controller.clone(), - // }, - // bootstrap_state.graph, - // shared_storage.clone(), - // bootstrap_state.compensation_millis, - // ) - // .await - // .expect("could not start consensus controller"); - let graph_config = GraphConfig { genesis_timestamp: *GENESIS_TIMESTAMP, end_timestamp: *END_TIMESTAMP, From a986ad035a3b936a9df44b0944913e19e6aa874e Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 19 Oct 2022 18:37:02 +0200 Subject: [PATCH 25/40] Add mock graph. --- Cargo.lock | 1 + massa-factory-worker/Cargo.toml | 1 + massa-factory-worker/src/tests/tools.rs | 14 +- massa-graph-2-exports/Cargo.toml | 5 +- massa-graph-2-exports/src/lib.rs | 5 + .../src/test_exports/config.rs | 32 +++ .../src/test_exports/mock.rs | 217 ++++++++++++++++++ massa-graph-2-exports/src/test_exports/mod.rs | 7 + massa-node/src/main.rs | 93 ++++---- .../src/protocol_controller.rs | 6 +- .../src/test_exports/mock.rs | 36 +-- massa-protocol-worker/src/protocol_worker.rs | 14 +- massa-protocol-worker/src/tests/tools.rs | 5 +- 13 files changed, 333 insertions(+), 103 deletions(-) create mode 100644 massa-graph-2-exports/src/test_exports/config.rs create mode 100644 massa-graph-2-exports/src/test_exports/mock.rs create mode 100644 massa-graph-2-exports/src/test_exports/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 756647dd29e..23804cd86f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1838,6 +1838,7 @@ version = "0.1.0" dependencies = [ "anyhow", "massa_factory_exports", + "massa_graph_2_exports", "massa_hash", "massa_models", "massa_pool_exports", diff --git a/massa-factory-worker/Cargo.toml b/massa-factory-worker/Cargo.toml index ff29c1011fb..eb793992fa6 100644 --- a/massa-factory-worker/Cargo.toml +++ b/massa-factory-worker/Cargo.toml @@ -27,6 +27,7 @@ massa_pool_exports = { path = "../massa-pool-exports" } [dev-dependencies] serial_test = "0.9" massa_protocol_exports = { path = "../massa-protocol-exports", features=["testing"] } +massa_graph_2_exports = { path = "../massa-graph-2-exports", features = ["testing"] } massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } massa_wallet = { path = "../massa-wallet", features=["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features=["testing"] } diff --git 
a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index 4e1c0cf82c3..9cf4d19ccdd 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -1,3 +1,4 @@ +use massa_graph_2_exports::test_exports::MockGraphController; use parking_lot::RwLock; use std::{ sync::{mpsc::Receiver, Arc}, @@ -5,7 +6,6 @@ use std::{ time::Duration, }; -use massa_consensus_exports::{commands::ConsensusCommand, test_exports::MockConsensusController}; use massa_factory_exports::{ test_exports::create_empty_block, FactoryChannels, FactoryConfig, FactoryManager, }; @@ -34,7 +34,7 @@ use massa_wallet::test_exports::create_test_wallet; /// You can use the method `new` to build all the mocks and make the connections /// Then you can use the method `get_next_created_block` that will manage the answers from the mock to the factory depending on the parameters you gave. pub struct TestFactory { - consensus_controller: MockConsensusController, + graph_controller: MockGraphController, pool_receiver: PoolEventReceiver, selector_receiver: Receiver, factory_config: FactoryConfig, @@ -53,13 +53,11 @@ impl TestFactory { /// - `TestFactory`: the structure that will be used to manage the tests pub fn new(default_keypair: &KeyPair) -> TestFactory { let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - let (consensus_controller, consensus_command_sender, _consensus_event_receiver) = - MockConsensusController::new_with_receiver(); + let (graph_controller, consensus_command_sender) = MockGraphController::new_with_receiver(); let (pool_controller, pool_receiver) = MockPoolController::new_with_receiver(); let mut storage = Storage::create_root(); let mut factory_config = FactoryConfig::default(); - let (_protocol_controller, protocol_command_sender, _protocol_event_receiver) = - MockProtocolController::new(); + let (_protocol_controller, protocol_command_sender) = MockProtocolController::new(); let producer_keypair = default_keypair; let producer_address = Address::from_public_key(&producer_keypair.get_public_key()); let mut accounts = PreHashMap::default(); @@ -82,7 +80,7 @@ impl TestFactory { Arc::new(RwLock::new(create_test_wallet(Some(accounts)))), FactoryChannels { selector: selector_controller.clone(), - consensus: consensus_command_sender, + graph: graph_controller, pool: pool_controller.clone(), protocol: protocol_command_sender, storage: storage.clone_without_refs(), @@ -90,7 +88,7 @@ impl TestFactory { ); TestFactory { - consensus_controller, + graph_controller, pool_receiver, selector_receiver, factory_config, diff --git a/massa-graph-2-exports/Cargo.toml b/massa-graph-2-exports/Cargo.toml index 087e465ac04..f7893f91945 100644 --- a/massa-graph-2-exports/Cargo.toml +++ b/massa-graph-2-exports/Cargo.toml @@ -23,4 +23,7 @@ massa_protocol_exports ={ path = "../massa-protocol-exports" } massa_storage = { path = "../massa-storage" } massa_serialization = { path = "../massa-serialization" } massa_time = { path = "../massa-time" } -massa_signature = { path = "../massa-signature" } \ No newline at end of file +massa_signature = { path = "../massa-signature" } + +[features] +testing = ["massa_models/testing", "massa_execution_exports/testing", "massa_pool_exports/testing", "massa_pos_exports/testing", "massa_protocol_exports/testing", "massa_storage/testing"] \ No newline at end of file diff --git a/massa-graph-2-exports/src/lib.rs b/massa-graph-2-exports/src/lib.rs index bc95375851c..38ca3838da6 100644 --- 
a/massa-graph-2-exports/src/lib.rs
+++ b/massa-graph-2-exports/src/lib.rs
@@ -15,3 +15,8 @@ pub mod export_active_block;
 pub use channels::GraphChannels;
 pub use controller_trait::{GraphController, GraphManager};
 pub use settings::GraphConfig;
+
+/// Test utils
+#[cfg(feature = "testing")]
+/// Exports related to tests as Mocks and configurations
+pub mod test_exports;
diff --git a/massa-graph-2-exports/src/test_exports/config.rs b/massa-graph-2-exports/src/test_exports/config.rs
new file mode 100644
index 00000000000..96082463088
--- /dev/null
+++ b/massa-graph-2-exports/src/test_exports/config.rs
@@ -0,0 +1,32 @@
+use massa_models::config::constants::{GENESIS_TIMESTAMP, T0, THREAD_COUNT, GENESIS_KEY, MAX_GAS_PER_BLOCK, DELTA_F0, OPERATION_VALIDITY_PERIODS, PERIODS_PER_CYCLE, ENDORSEMENT_COUNT, CHANNEL_SIZE};
+use massa_time::MassaTime;
+
+use crate::GraphConfig;
+
+impl Default for GraphConfig {
+    fn default() -> Self {
+        Self {
+            clock_compensation_millis: 0,
+            genesis_timestamp: *GENESIS_TIMESTAMP,
+            t0: T0,
+            thread_count: THREAD_COUNT,
+            genesis_key: GENESIS_KEY.clone(),
+            max_discarded_blocks: 10000,
+            future_block_processing_max_periods: 100,
+            max_future_processing_blocks: 100,
+            max_dependency_blocks: 2048,
+            max_send_wait: MassaTime::from_millis(100),
+            block_db_prune_interval: MassaTime::from_millis(5000),
+            max_item_return_count: 100,
+            max_gas_per_block: MAX_GAS_PER_BLOCK,
+            delta_f0: DELTA_F0,
+            operation_validity_periods: OPERATION_VALIDITY_PERIODS,
+            periods_per_cycle: PERIODS_PER_CYCLE,
+            force_keep_final_periods: 20,
+            endorsement_count: ENDORSEMENT_COUNT,
+            end_timestamp: None,
+            stats_timespan: MassaTime::from_millis(60000),
+            channel_size: CHANNEL_SIZE,
+        }
+    }
+}
diff --git a/massa-graph-2-exports/src/test_exports/mock.rs b/massa-graph-2-exports/src/test_exports/mock.rs
new file mode 100644
index 00000000000..8d017767ab5
--- /dev/null
+++ b/massa-graph-2-exports/src/test_exports/mock.rs
@@ -0,0 +1,217 @@
+// Copyright (c) 2022 MASSA LABS <info@massa.net>
+
+use std::sync::{
+    mpsc::{self, Receiver},
+    Arc, Mutex,
+};
+
+use massa_models::{
+    block::{BlockId, BlockHeader}, api::BlockGraphStatus, slot::Slot, clique::Clique, stats::ConsensusStats, wrapped::Wrapped,
+};
+use massa_storage::Storage;
+use massa_time::MassaTime;
+
+use crate::{GraphController, error::GraphError, block_graph_export::BlockGraphExport, bootstrapable_graph::BootstrapableGraph};
+
+/// Test tool to mock graph controller responses
+pub struct GraphEventReceiver(pub Receiver<MockGraphControllerMessage>);
+
+/// List of possible messages you can receive from the mock.
+/// Each variant corresponds to a unique method in `GraphController`.
+/// Some variants wait for a response on their `response_tx` field, if present.
+/// See the documentation of `GraphController` for details on parameters and return values.
+pub enum MockGraphControllerMessage {
+    GetBlockStatuses {
+        block_ids: Vec<BlockId>,
+        response_tx: mpsc::Sender<Vec<BlockGraphStatus>>,
+    },
+    GetBlockGraphStatuses {
+        start_slot: Option<Slot>,
+        end_slot: Option<Slot>,
+        response_tx: mpsc::Sender<Result<BlockGraphExport, GraphError>>,
+    },
+    GetCliques {
+        response_tx: mpsc::Sender<Vec<Clique>>
+    },
+    GetBootstrapableGraph {
+        response_tx: mpsc::Sender<Result<BootstrapableGraph, GraphError>>
+    },
+    GetStats {
+        response_tx: mpsc::Sender<Result<ConsensusStats, GraphError>>
+    },
+    GetBestParents {
+        response_tx: mpsc::Sender<Vec<(BlockId, u64)>>
+    },
+    GetBlockcliqueBlockAtSlot {
+        slot: Slot,
+        response_tx: mpsc::Sender<Option<BlockId>>
+    },
+    GetLatestBlockcliqueBlockAtSlot {
+        slot: Slot,
+        response_tx: mpsc::Sender<BlockId>
+    },
+    MarkInvalidBlock {
+        block_id: BlockId,
+        header: Wrapped<BlockHeader, BlockId>
+    },
+    RegisterBlock {
+        block_id: BlockId,
+        slot: Slot,
+        block_storage: Storage
+    },
+    RegisterBlockHeader {
+        block_id: BlockId,
+        header: Wrapped<BlockHeader, BlockId>
+    }
+}
+
+/// A mocked graph controller that will intercept calls on its methods
+/// and emit corresponding `MockGraphControllerMessage` messages through an MPSC in a thread-safe way.
+/// For messages with a `response_tx` field, the mock will await a response through their `response_tx` channel
+/// in order to simulate returning this value at the end of the call.
+#[derive(Clone)]
+pub struct MockGraphController(Arc<Mutex<mpsc::Sender<MockGraphControllerMessage>>>);
+
+impl MockGraphController {
+    /// Create a new pair (mock graph controller, mpsc receiver for emitted messages)
+    /// Note that unbounded mpsc channels are used
+    pub fn new_with_receiver() -> (Box<dyn GraphController>, GraphEventReceiver) {
+        let (tx, rx) = mpsc::channel();
+        (
+            Box::new(MockGraphController(Arc::new(Mutex::new(tx)))),
+            GraphEventReceiver(rx),
+        )
+    }
+}
+
+impl GraphEventReceiver {
+    /// wait command
+    pub fn wait_command<F, T>(&mut self, timeout: MassaTime, filter_map: F) -> Option<T>
+    where
+        F: Fn(MockGraphControllerMessage) -> Option<T>,
+    {
+        match self.0.recv_timeout(timeout.into()) {
+            Ok(msg) => filter_map(msg),
+            Err(_) => None,
+        }
+    }
+}
+
+/// Implements all the methods of the `GraphController` trait,
+/// but simply makes them emit a `MockGraphControllerMessage`.
+/// If the message contains a `response_tx`,
+/// a response from that channel is read and returned as the return value.
+/// See the documentation of `GraphController` for details on each function.
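+///
+/// A minimal usage sketch (hypothetical test code; the component under test and
+/// the empty parents list are assumptions made for illustration):
+/// ```ignore
+/// let (controller, mut rx) = MockGraphController::new_with_receiver();
+/// // hand `controller` to the component under test, then serve its request:
+/// let served = rx.wait_command(MassaTime::from_millis(100), |msg| match msg {
+///     MockGraphControllerMessage::GetBestParents { response_tx } => {
+///         // answer with an empty parent list for this sketch
+///         response_tx.send(vec![]).unwrap();
+///         Some(())
+///     }
+///     _ => None,
+/// });
+/// assert!(served.is_some());
+/// ```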
+impl GraphController for MockGraphController {
+    fn get_block_graph_status(
+        &self,
+        start_slot: Option<Slot>,
+        end_slot: Option<Slot>,
+    ) -> Result<BlockGraphExport, GraphError> {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetBlockGraphStatuses { start_slot, end_slot, response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn get_block_statuses(&self, ids: &Vec<BlockId>) -> Vec<BlockGraphStatus> {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetBlockStatuses { block_ids: ids.clone(), response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn get_cliques(&self) -> Vec<Clique> {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetCliques { response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn get_bootstrap_graph(&self) -> Result<BootstrapableGraph, GraphError> {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetBootstrapableGraph { response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn get_stats(&self) -> Result<ConsensusStats, GraphError> {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetStats { response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn get_best_parents(&self) -> Vec<(BlockId, u64)> {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetBestParents { response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option<BlockId> {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetBlockcliqueBlockAtSlot { slot, response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId {
+        let (response_tx, response_rx) = mpsc::channel();
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::GetLatestBlockcliqueBlockAtSlot { slot, response_tx })
+            .unwrap();
+        response_rx.recv().unwrap()
+    }
+
+    fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>) {
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::MarkInvalidBlock { block_id, header })
+            .unwrap();
+    }
+
+    fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage) {
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::RegisterBlock { block_id, slot, block_storage })
+            .unwrap();
+    }
+
+    fn register_block_header(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>) {
+        self.0
+            .lock()
+            .unwrap()
+            .send(MockGraphControllerMessage::RegisterBlockHeader{ block_id, header })
+            .unwrap();
+    }
+
+    fn clone_box(&self) -> Box<dyn GraphController> {
+        Box::new(self.clone())
+    }
+}
diff --git a/massa-graph-2-exports/src/test_exports/mod.rs b/massa-graph-2-exports/src/test_exports/mod.rs
new file mode 100644
index 00000000000..aeddfb526b0
--- /dev/null
+++ b/massa-graph-2-exports/src/test_exports/mod.rs
@@ -0,0 +1,7 @@
+// Copyright (c) 2022 MASSA LABS <info@massa.net>
+
+mod config;
+mod mock;
+
+pub use config::*;
+pub use mock::*;
diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs
index a62da570169..9531c57b4b1 100644
--- a/massa-node/src/main.rs
+++ b/massa-node/src/main.rs
@@ -48,7 +48,9 @@ use massa_pool_exports::{PoolConfig, PoolManager};
 use massa_pool_worker::start_pool_controller;
 use massa_pos_exports::{SelectorConfig, SelectorManager};
 use
massa_pos_worker::start_selector_worker; -use massa_protocol_exports::{ProtocolConfig, ProtocolManager}; +use massa_protocol_exports::{ProtocolCommand, ProtocolConfig, ProtocolManager, ProtocolCommandSender}; use massa_protocol_worker::start_protocol_controller; use massa_storage::Storage; use massa_time::MassaTime; @@ -324,6 +324,49 @@ async fn launch( let (pool_manager, pool_controller) = start_pool_controller(pool_config, &shared_storage, execution_controller.clone()); + let (protocol_command_sender, protocol_command_receiver) = + mpsc::channel::(PROTOCOL_CONTROLLER_CHANNEL_SIZE); + + let graph_config = GraphConfig { + genesis_timestamp: *GENESIS_TIMESTAMP, + end_timestamp: *END_TIMESTAMP, + thread_count: THREAD_COUNT, + t0: T0, + genesis_key: GENESIS_KEY.clone(), + max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, + future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, + max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, + max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, + delta_f0: DELTA_F0, + operation_validity_periods: OPERATION_VALIDITY_PERIODS, + periods_per_cycle: PERIODS_PER_CYCLE, + stats_timespan: SETTINGS.consensus.stats_timespan, + max_send_wait: SETTINGS.consensus.max_send_wait, + force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, + endorsement_count: ENDORSEMENT_COUNT, + block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, + max_item_return_count: SETTINGS.consensus.max_item_return_count, + max_gas_per_block: MAX_GAS_PER_BLOCK, + channel_size: CHANNEL_SIZE, + clock_compensation_millis: bootstrap_state.compensation_millis, + }; + + let (graph_event_sender, graph_event_receiver) = crossbeam_channel::bounded(CHANNEL_SIZE); + let graph_channels = GraphChannels { + execution_controller: execution_controller.clone(), + selector_controller: selector_controller.clone(), + pool_command_sender: pool_controller.clone(), + controller_event_tx: graph_event_sender, + protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), + }; + + let (graph_controller, graph_manager) = start_graph_worker( + graph_config, + graph_channels, + bootstrap_state.graph, + shared_storage.clone(), + ); + // launch protocol controller let protocol_config = ProtocolConfig { thread_count: THREAD_COUNT, @@ -355,10 +398,12 @@ async fn launch( max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, }; - let (protocol_command_sender, protocol_manager) = start_protocol_controller( + + let protocol_manager = start_protocol_controller( protocol_config, network_command_sender.clone(), network_event_receiver, + protocol_command_receiver, graph_controller.clone(), pool_controller.clone(), shared_storage.clone(), @@ -366,46 +411,6 @@ async fn launch( .await .expect("could not start protocol controller"); - let graph_config = GraphConfig { - genesis_timestamp: *GENESIS_TIMESTAMP, - end_timestamp: *END_TIMESTAMP, - thread_count: THREAD_COUNT, - t0: T0, - genesis_key: GENESIS_KEY.clone(), - max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, - future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, - max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, - max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, - delta_f0: DELTA_F0, - operation_validity_periods: 
OPERATION_VALIDITY_PERIODS, - periods_per_cycle: PERIODS_PER_CYCLE, - stats_timespan: SETTINGS.consensus.stats_timespan, - max_send_wait: SETTINGS.consensus.max_send_wait, - force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, - endorsement_count: ENDORSEMENT_COUNT, - block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, - max_item_return_count: SETTINGS.consensus.max_item_return_count, - max_gas_per_block: MAX_GAS_PER_BLOCK, - channel_size: CHANNEL_SIZE, - clock_compensation_millis: bootstrap_state.compensation_millis, - }; - - let (graph_event_sender, graph_event_receiver) = crossbeam_channel::bounded(CHANNEL_SIZE); - let graph_channels = GraphChannels { - execution_controller: execution_controller.clone(), - selector_controller: selector_controller.clone(), - pool_command_sender: pool_controller.clone(), - controller_event_tx: graph_event_sender, - protocol_command_sender: protocol_command_sender.clone(), - }; - - let (graph_controller, graph_manager) = start_graph_worker( - graph_config, - graph_channels, - bootstrap_state.graph, - shared_storage.clone(), - ); - // launch factory let factory_config = FactoryConfig { thread_count: THREAD_COUNT, @@ -420,7 +425,7 @@ async fn launch( selector: selector_controller.clone(), graph: graph_controller.clone(), pool: pool_controller.clone(), - protocol: protocol_command_sender.clone(), + protocol: ProtocolCommandSender(protocol_command_sender.clone()), storage: shared_storage.clone(), }; let factory_manager = start_factory(factory_config, node_wallet.clone(), factory_channels); @@ -471,7 +476,7 @@ async fn launch( api_config, selector_controller.clone(), pool_controller.clone(), - protocol_command_sender.clone(), + ProtocolCommandSender(protocol_command_sender.clone()), network_config, *VERSION, network_command_sender.clone(), diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index c0ff2d28c9d..4f03f75de56 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -152,14 +152,10 @@ impl ProtocolManager { /// Stop the protocol controller pub async fn stop( - self, - //TODO: FIX - //protocol_event_receiver: ProtocolEventReceiver, - //protocol_pool_event_receiver: ProtocolPoolEventReceiver, + self ) -> Result { info!("stopping protocol controller..."); drop(self.manager_tx); - //let _remaining_events = protocol_event_receiver.drain().await; let network_event_receiver = self.join_handle.await??; info!("protocol controller stopped"); Ok(network_event_receiver) diff --git a/massa-protocol-exports/src/test_exports/mock.rs b/massa-protocol-exports/src/test_exports/mock.rs index 5639ab80d92..557e1091251 100644 --- a/massa-protocol-exports/src/test_exports/mock.rs +++ b/massa-protocol-exports/src/test_exports/mock.rs @@ -1,14 +1,10 @@ // Copyright (c) 2022 MASSA LABS use crate::{ - protocol_controller::ProtocolEventReceiver, ProtocolCommand, ProtocolCommandSender, - ProtocolEvent, -}; + ProtocolCommand, ProtocolCommandSender}; use massa_models::{ - block::{BlockId, WrappedHeader}, - slot::Slot, + block::BlockId }; -use massa_storage::Storage; use massa_time::MassaTime; use tokio::{sync::mpsc, time::sleep}; @@ -16,22 +12,17 @@ use tokio::{sync::mpsc, time::sleep}; /// TODO: Improve doc pub struct MockProtocolController { protocol_command_rx: mpsc::Receiver, - protocol_event_tx: mpsc::Sender, } impl MockProtocolController { /// Creates a new protocol mock - /// TODO: Improve doc - pub fn new() -> 
(Self, ProtocolCommandSender, ProtocolEventReceiver) { + pub fn new() -> (Self, ProtocolCommandSender) { let (protocol_command_tx, protocol_command_rx) = mpsc::channel::(256); - let (protocol_event_tx, protocol_event_rx) = mpsc::channel::(256); ( MockProtocolController { - protocol_event_tx, protocol_command_rx, }, ProtocolCommandSender(protocol_command_tx), - ProtocolEventReceiver(protocol_event_rx), ) } @@ -53,27 +44,6 @@ impl MockProtocolController { } } - /// Note: if you care about the operation set, use another method. - pub async fn receive_block(&mut self, block_id: BlockId, slot: Slot, storage: Storage) { - self.protocol_event_tx - .send(ProtocolEvent::ReceivedBlock { - block_id, - slot, - storage, - }) - .await - .expect("could not send protocol event"); - } - - /// Send a receive header to the protocol event channel - pub async fn receive_header(&mut self, header: WrappedHeader) { - let block_id = header.id; - self.protocol_event_tx - .send(ProtocolEvent::ReceivedBlockHeader { block_id, header }) - .await - .expect("could not send protocol event"); - } - /// Not implemented pub async fn receive_get_active_blocks(&mut self, _list: Vec) {} diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index 9c1cc380c46..398cf70e995 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -21,7 +21,7 @@ use massa_models::{ use massa_network_exports::{AskForBlocksInfo, NetworkCommandSender, NetworkEventReceiver}; use massa_pool_exports::PoolController; use massa_protocol_exports::{ - ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolError, + ProtocolCommand, ProtocolConfig, ProtocolError, ProtocolManagementCommand, ProtocolManager, }; @@ -51,15 +51,14 @@ pub async fn start_protocol_controller( config: ProtocolConfig, network_command_sender: NetworkCommandSender, network_event_receiver: NetworkEventReceiver, + protocol_command_receiver: mpsc::Receiver, graph_controller: Box, pool_controller: Box, storage: Storage, -) -> Result<(ProtocolCommandSender, ProtocolManager), ProtocolError> { +) -> Result { debug!("starting protocol controller"); // launch worker - let (command_tx, controller_command_rx) = - mpsc::channel::(config.controller_channel_size); let (manager_tx, controller_manager_rx) = mpsc::channel::(1); let pool_controller = pool_controller.clone(); let join_handle = tokio::spawn(async move { @@ -68,7 +67,7 @@ pub async fn start_protocol_controller( ProtocolWorkerChannels { network_command_sender, network_event_receiver, - controller_command_rx, + controller_command_rx: protocol_command_receiver, controller_manager_rx, }, graph_controller, @@ -89,10 +88,7 @@ pub async fn start_protocol_controller( } }); debug!("protocol controller ready"); - Ok(( - ProtocolCommandSender(command_tx), - ProtocolManager::new(join_handle, manager_tx), - )) + Ok(ProtocolManager::new(join_handle, manager_tx)) } /// Info about a block we've seen diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index 3ef0904ecba..d6efa0b278e 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -32,9 +32,8 @@ where let (pool_controller, pool_event_receiver) = MockPoolController::new_with_receiver(); // start protocol controller - let (protocol_command_sender, protocol_event_receiver, protocol_manager): ( + let (protocol_command_sender, protocol_manager): ( ProtocolCommandSender, - ProtocolEventReceiver, 
ProtocolManager, ) = start_protocol_controller( *protocol_config, @@ -62,7 +61,7 @@ where .await; protocol_manager - .stop(protocol_event_receiver) + .stop() .await .expect("Failed to shutdown protocol."); } From 82a3b51444d98870bcced65eff19c0e8e492ff67 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 21 Oct 2022 12:11:51 +0200 Subject: [PATCH 26/40] Fix test compilation. --- massa-bootstrap/src/tests/scenarios.rs | 32 ++-- massa-bootstrap/src/tests/tools.rs | 31 +--- massa-factory-worker/src/tests/tools.rs | 60 +++---- .../src/test_exports/config.rs | 5 +- .../src/test_exports/mock.rs | 87 +++++---- massa-graph-2-worker/src/state/stats.rs | 3 +- massa-node/src/main.rs | 4 +- .../src/protocol_controller.rs | 4 +- .../src/test_exports/mock.rs | 7 +- massa-protocol-exports/src/tests/tools.rs | 58 ------ massa-protocol-worker/src/protocol_worker.rs | 3 +- .../src/tests/ask_block_scenarios.rs | 107 ++++++----- .../src/tests/ban_nodes_scenarios.rs | 167 +++++++++--------- .../src/tests/endorsements_scenarios.rs | 46 ++--- .../tests/in_block_operations_scenarios.rs | 19 +- .../src/tests/operations_scenarios.rs | 76 ++++---- massa-protocol-worker/src/tests/scenarios.rs | 96 +++++----- massa-protocol-worker/src/tests/tools.rs | 129 +++++++++++--- 18 files changed, 483 insertions(+), 451 deletions(-) diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 23834bbf4b0..d79c8893339 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -4,7 +4,7 @@ use super::{ mock_establisher, tools::{ bridge_mock_streams, get_boot_state, get_peers, get_random_final_state_bootstrap, - get_random_ledger_changes, wait_consensus_command, wait_network_command, + get_random_ledger_changes, wait_network_command, }, }; use crate::tests::tools::{ @@ -15,8 +15,8 @@ use crate::{ get_state, start_bootstrap_server, tests::tools::{assert_eq_bootstrap_graph, get_bootstrap_config}, }; -use massa_consensus_exports::{commands::ConsensusCommand, ConsensusCommandSender}; use massa_final_state::{test_exports::assert_eq_final_state, FinalState, StateChanges}; +use massa_graph_2_exports::test_exports::{MockGraphController, MockGraphControllerMessage}; use massa_models::{address::Address, slot::Slot, version::Version}; use massa_network_exports::{NetworkCommand, NetworkCommandSender}; use massa_pos_exports::{test_exports::assert_eq_pos_selection, PoSFinalState, SelectorConfig}; @@ -59,7 +59,7 @@ async fn test_bootstrap_server() { }) .expect("could not start client selector controller"); - let (consensus_cmd_tx, mut consensus_cmd_rx) = mpsc::channel::(5); + let (graph_controller, mut graph_event_receiver) = MockGraphController::new_with_receiver(); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); let final_state_bootstrap = get_random_final_state_bootstrap( PoSFinalState::new( @@ -75,7 +75,7 @@ async fn test_bootstrap_server() { let (bootstrap_establisher, bootstrap_interface) = mock_establisher::new(); let bootstrap_manager = start_bootstrap_server( - ConsensusCommandSender(consensus_cmd_tx), + graph_controller, NetworkCommandSender(network_cmd_tx), final_state.clone(), bootstrap_config.clone(), @@ -168,18 +168,18 @@ async fn test_bootstrap_server() { // wait for bootstrap to ask consensus for bootstrap graph, send it let wait_graph = async move || { let response = - match wait_consensus_command(&mut consensus_cmd_rx, 1000.into(), |cmd| match cmd { - ConsensusCommand::GetBootstrapState(resp) => Some(resp), - _ => None, - }) - .await 
- { - Some(resp) => resp, - None => panic!("timeout waiting for get boot graph consensus command"), - }; - let sent_graph = get_boot_state(); - response.send(Box::new(sent_graph.clone())).await.unwrap(); - sent_graph + graph_event_receiver.wait_command(MassaTime::from_millis(1000), |cmd| match cmd { + MockGraphControllerMessage::GetBootstrapableGraph { response_tx } => { + let sent_graph = get_boot_state(); + response_tx.send(Ok(sent_graph.clone())).unwrap(); + Some(sent_graph) + } + _ => panic!("timeout waiting for get boot graph consensus command"), + }); + match response { + Some(graph) => graph, + None => panic!("error waiting for get boot graph consensus command"), + } }; // launch the modifier thread diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 342737da7ee..39210f5d014 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -5,12 +5,14 @@ use crate::settings::BootstrapConfig; use bitvec::vec::BitVec; use massa_async_pool::test_exports::{create_async_pool, get_random_message}; use massa_async_pool::{AsyncPoolChanges, Change}; -use massa_consensus_exports::commands::ConsensusCommand; use massa_final_state::test_exports::create_final_state; use massa_final_state::{ExecutedOps, FinalState}; -use massa_graph::export_active_block::ExportActiveBlockSerializer; -use massa_graph::{export_active_block::ExportActiveBlock, BootstrapableGraph}; -use massa_graph::{BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; +use massa_graph_2_exports::{ + bootstrapable_graph::{ + BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, + }, + export_active_block::{ExportActiveBlock, ExportActiveBlockSerializer}, +}; use massa_hash::Hash; use massa_ledger_exports::{LedgerChanges, LedgerEntry, SetUpdateOrDelete}; use massa_ledger_worker::test_exports::create_final_ledger; @@ -294,27 +296,6 @@ pub fn get_bootstrap_config(bootstrap_public_key: PublicKey) -> BootstrapConfig } } -pub async fn wait_consensus_command( - consensus_command_receiver: &mut Receiver, - timeout: MassaTime, - filter_map: F, -) -> Option -where - F: Fn(ConsensusCommand) -> Option, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - cmd = consensus_command_receiver.recv() => match cmd { - Some(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => panic!("network event channel died") - }, - _ = &mut timer => return None - } - } -} - pub async fn wait_network_command( network_command_receiver: &mut Receiver, timeout: MassaTime, diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index 9cf4d19ccdd..0bebac3eb91 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -1,4 +1,6 @@ -use massa_graph_2_exports::test_exports::MockGraphController; +use massa_graph_2_exports::test_exports::{ + GraphEventReceiver, MockGraphController, MockGraphControllerMessage, +}; use parking_lot::RwLock; use std::{ sync::{mpsc::Receiver, Arc}, @@ -34,7 +36,7 @@ use massa_wallet::test_exports::create_test_wallet; /// You can use the method `new` to build all the mocks and make the connections /// Then you can use the method `get_next_created_block` that will manage the answers from the mock to the factory depending on the parameters you gave. 
pub struct TestFactory { - graph_controller: MockGraphController, + graph_event_receiver: GraphEventReceiver, pool_receiver: PoolEventReceiver, selector_receiver: Receiver, factory_config: FactoryConfig, @@ -53,7 +55,7 @@ impl TestFactory { /// - `TestFactory`: the structure that will be used to manage the tests pub fn new(default_keypair: &KeyPair) -> TestFactory { let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - let (graph_controller, consensus_command_sender) = MockGraphController::new_with_receiver(); + let (graph_controller, graph_event_receiver) = MockGraphController::new_with_receiver(); let (pool_controller, pool_receiver) = MockPoolController::new_with_receiver(); let mut storage = Storage::create_root(); let mut factory_config = FactoryConfig::default(); @@ -88,7 +90,7 @@ impl TestFactory { ); TestFactory { - graph_controller, + graph_event_receiver, pool_receiver, selector_receiver, factory_config, @@ -148,17 +150,16 @@ impl TestFactory { _ => panic!("unexpected message"), } } - match self - .consensus_controller - .consensus_command_rx - .blocking_recv() - .unwrap() - { - ConsensusCommand::GetBestParents { response_tx } => { - response_tx.send(self.genesis_blocks.clone()).unwrap(); - } - _ => panic!("unexpected message"), - } + self.graph_event_receiver + .wait_command(MassaTime::from_millis(100), |command| { + if let MockGraphControllerMessage::GetBestParents { response_tx } = command { + response_tx.send(self.genesis_blocks.clone()).unwrap(); + Some(()) + } else { + None + } + }) + .unwrap(); self.pool_receiver .wait_command(MassaTime::from_millis(100), |command| match command { MockPoolControllerMessage::GetBlockEndorsements { @@ -201,23 +202,20 @@ impl TestFactory { _ => panic!("unexpected message"), }) .unwrap(); - match self - .consensus_controller - .consensus_command_rx - .blocking_recv() + self.graph_event_receiver + .wait_command(MassaTime::from_millis(100), |command| { + if let MockGraphControllerMessage::RegisterBlock { + block_id, + block_storage, + slot: _, + } = command + { + Some((block_id, block_storage)) + } else { + None + } + }) .unwrap() - { - ConsensusCommand::SendBlock { - block_id, - block_storage, - slot: _, - response_tx, - } => { - response_tx.send(()).unwrap(); - (block_id, block_storage) - } - _ => panic!("unexpected message"), - } } } diff --git a/massa-graph-2-exports/src/test_exports/config.rs b/massa-graph-2-exports/src/test_exports/config.rs index 96082463088..41bfac061e2 100644 --- a/massa-graph-2-exports/src/test_exports/config.rs +++ b/massa-graph-2-exports/src/test_exports/config.rs @@ -1,4 +1,7 @@ -use massa_models::config::constants::{GENESIS_TIMESTAMP, T0, THREAD_COUNT, GENESIS_KEY, MAX_GAS_PER_BLOCK, DELTA_F0, OPERATION_VALIDITY_PERIODS, PERIODS_PER_CYCLE, ENDORSEMENT_COUNT, CHANNEL_SIZE}; +use massa_models::config::constants::{ + CHANNEL_SIZE, DELTA_F0, ENDORSEMENT_COUNT, GENESIS_KEY, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, + OPERATION_VALIDITY_PERIODS, PERIODS_PER_CYCLE, T0, THREAD_COUNT, +}; use massa_time::MassaTime; use crate::GraphConfig; diff --git a/massa-graph-2-exports/src/test_exports/mock.rs b/massa-graph-2-exports/src/test_exports/mock.rs index 8d017767ab5..27263335f00 100644 --- a/massa-graph-2-exports/src/test_exports/mock.rs +++ b/massa-graph-2-exports/src/test_exports/mock.rs @@ -6,12 +6,20 @@ use std::sync::{ }; use massa_models::{ - block::{BlockId, BlockHeader}, api::BlockGraphStatus, slot::Slot, clique::Clique, stats::ConsensusStats, wrapped::Wrapped, + 
api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + slot::Slot, + stats::ConsensusStats, + wrapped::Wrapped, }; use massa_storage::Storage; use massa_time::MassaTime; -use crate::{GraphController, error::GraphError, block_graph_export::BlockGraphExport, bootstrapable_graph::BootstrapableGraph}; +use crate::{ + block_graph_export::BlockGraphExport, bootstrapable_graph::BootstrapableGraph, + error::GraphError, GraphController, +}; /// Test tool to mock graph controller responses pub struct GraphEventReceiver(pub Receiver); @@ -31,38 +39,38 @@ pub enum MockGraphControllerMessage { response_tx: mpsc::Sender>, }, GetCliques { - response_tx: mpsc::Sender> + response_tx: mpsc::Sender>, }, GetBootstrapableGraph { - response_tx: mpsc::Sender> + response_tx: mpsc::Sender>, }, GetStats { - response_tx: mpsc::Sender> + response_tx: mpsc::Sender>, }, GetBestParents { - response_tx: mpsc::Sender> + response_tx: mpsc::Sender>, }, GetBlockcliqueBlockAtSlot { slot: Slot, - response_tx: mpsc::Sender> + response_tx: mpsc::Sender>, }, GetLatestBlockcliqueBlockAtSlot { slot: Slot, - response_tx: mpsc::Sender + response_tx: mpsc::Sender, }, MarkInvalidBlock { block_id: BlockId, - header: Wrapped + header: Wrapped, }, RegisterBlock { block_id: BlockId, slot: Slot, - block_storage: Storage + block_storage: Storage, }, RegisterBlockHeader { block_id: BlockId, - header: Wrapped - } + header: Wrapped, + }, } /// A mocked graph controller that will intercept calls on its methods @@ -104,17 +112,21 @@ impl GraphEventReceiver { /// See the documentation of `GraphController` for details on each function. impl GraphController for MockGraphController { fn get_block_graph_status( - &self, - start_slot: Option, - end_slot: Option, - ) -> Result { - let (response_tx, response_rx) = mpsc::channel(); - self.0 - .lock() - .unwrap() - .send(MockGraphControllerMessage::GetBlockGraphStatuses { start_slot, end_slot, response_tx }) - .unwrap(); - response_rx.recv().unwrap() + &self, + start_slot: Option, + end_slot: Option, + ) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockGraphControllerMessage::GetBlockGraphStatuses { + start_slot, + end_slot, + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() } fn get_block_statuses(&self, ids: &Vec) -> Vec { @@ -122,7 +134,10 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetBlockStatuses { block_ids: ids.clone(), response_tx }) + .send(MockGraphControllerMessage::GetBlockStatuses { + block_ids: ids.clone(), + response_tx, + }) .unwrap(); response_rx.recv().unwrap() } @@ -174,7 +189,7 @@ impl GraphController for MockGraphController { .unwrap() .send(MockGraphControllerMessage::GetBlockcliqueBlockAtSlot { slot, response_tx }) .unwrap(); - response_rx.recv().unwrap() + response_rx.recv().unwrap() } fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { @@ -184,7 +199,7 @@ impl GraphController for MockGraphController { .unwrap() .send(MockGraphControllerMessage::GetLatestBlockcliqueBlockAtSlot { slot, response_tx }) .unwrap(); - response_rx.recv().unwrap() + response_rx.recv().unwrap() } fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { @@ -197,18 +212,22 @@ impl GraphController for MockGraphController { fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage) { self.0 - .lock() - .unwrap() - .send(MockGraphControllerMessage::RegisterBlock { block_id, slot, block_storage }) - 
.unwrap(); + .lock() + .unwrap() + .send(MockGraphControllerMessage::RegisterBlock { + block_id, + slot, + block_storage, + }) + .unwrap(); } fn register_block_header(&self, block_id: BlockId, header: Wrapped) { self.0 - .lock() - .unwrap() - .send(MockGraphControllerMessage::RegisterBlockHeader{ block_id, header }) - .unwrap(); + .lock() + .unwrap() + .send(MockGraphControllerMessage::RegisterBlockHeader { block_id, header }) + .unwrap(); } fn clone_box(&self) -> Box { diff --git a/massa-graph-2-worker/src/state/stats.rs b/massa-graph-2-worker/src/state/stats.rs index d587531ec7e..c527108ddcd 100644 --- a/massa-graph-2-worker/src/state/stats.rs +++ b/massa-graph-2-worker/src/state/stats.rs @@ -1,8 +1,9 @@ use super::GraphState; -use massa_graph_2_exports::error::GraphError; +use massa_graph_2_exports::{error::GraphError, events::GraphEvent}; use massa_models::stats::ConsensusStats; use massa_time::MassaTime; use std::cmp::max; +use tracing::log::warn; impl GraphState { /// Calculate and return stats about graph diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 9531c57b4b1..940063215e4 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -48,7 +48,9 @@ use massa_pool_exports::{PoolConfig, PoolManager}; use massa_pool_worker::start_pool_controller; use massa_pos_exports::{SelectorConfig, SelectorManager}; use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::{ProtocolCommand, ProtocolConfig, ProtocolManager, ProtocolCommandSender}; +use massa_protocol_exports::{ + ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolManager, +}; use massa_protocol_worker::start_protocol_controller; use massa_storage::Storage; use massa_time::MassaTime; diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index 4f03f75de56..4f615dc194c 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -151,9 +151,7 @@ impl ProtocolManager { } /// Stop the protocol controller - pub async fn stop( - self - ) -> Result { + pub async fn stop(self) -> Result { info!("stopping protocol controller..."); drop(self.manager_tx); let network_event_receiver = self.join_handle.await??; diff --git a/massa-protocol-exports/src/test_exports/mock.rs b/massa-protocol-exports/src/test_exports/mock.rs index 557e1091251..b38436b1606 100644 --- a/massa-protocol-exports/src/test_exports/mock.rs +++ b/massa-protocol-exports/src/test_exports/mock.rs @@ -1,10 +1,7 @@ // Copyright (c) 2022 MASSA LABS -use crate::{ - ProtocolCommand, ProtocolCommandSender}; -use massa_models::{ - block::BlockId -}; +use crate::{ProtocolCommand, ProtocolCommandSender}; +use massa_models::block::BlockId; use massa_time::MassaTime; use tokio::{sync::mpsc, time::sleep}; diff --git a/massa-protocol-exports/src/tests/tools.rs b/massa-protocol-exports/src/tests/tools.rs index 1a736cc913f..a5173bc81b5 100644 --- a/massa-protocol-exports/src/tests/tools.rs +++ b/massa-protocol-exports/src/tests/tools.rs @@ -162,64 +162,6 @@ pub fn create_block_with_endorsements( .unwrap() } -/// send a block and assert it has been propagate (or not) -pub async fn send_and_propagate_block( - network_controller: &mut MockNetworkController, - block: WrappedBlock, - valid: bool, - source_node_id: NodeId, - protocol_command_sender: &mut ProtocolCommandSender, - operations: Vec, -) { - let expected_hash = block.id; - - network_controller - .send_header(source_node_id, block.content.header.clone()) - .await; 
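
The tests below all lean on two helpers whose bodies never appear in this patch excerpt: `MockGraphController::new_with_receiver` and `GraphEventReceiver::wait_command`. A minimal sketch of their assumed shape, inferred from the `self.0.lock().unwrap().send(...)` calls above (the real code lives in `massa-graph-2-exports/src/test_exports/mock.rs`; the boxed return type, the tuple-struct layout and the `to_duration()` conversion are assumptions, not confirmed by the patch):

    use std::sync::{mpsc, Arc, Mutex};

    impl MockGraphController {
        /// Build a boxed mock controller plus the receiving end a test drives.
        pub fn new_with_receiver() -> (Box<dyn GraphController>, GraphEventReceiver) {
            let (tx, rx) = mpsc::channel();
            (
                Box::new(MockGraphController(Arc::new(Mutex::new(tx)))),
                GraphEventReceiver(rx),
            )
        }
    }

    impl GraphEventReceiver {
        /// Wait up to `timeout` for a command accepted by `filter_map`;
        /// `None` signals a timeout or a closed channel.
        pub fn wait_command<F, T>(&mut self, timeout: MassaTime, filter_map: F) -> Option<T>
        where
            F: Fn(MockGraphControllerMessage) -> Option<T>,
        {
            match self.0.recv_timeout(timeout.to_duration()) {
                Ok(msg) => filter_map(msg),
                Err(_) => None,
            }
        }
    }

Note the split in `MockGraphControllerMessage`: query variants (`Get*`) carry a `response_tx` the test must answer on, while `RegisterBlock`, `RegisterBlockHeader` and `MarkInvalidBlock` are fire-and-forget, which is why some `wait_command` calls below reply and others merely observe.
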
- - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); - - // Send block info to protocol. - let info = vec![( - block.id, - BlockInfoReply::Info(block.content.operations.clone()), - )]; - network_controller - .send_block_info(source_node_id, info) - .await; - - // Send full ops. - let info = vec![(block.id, BlockInfoReply::Operations(operations))]; - network_controller - .send_block_info(source_node_id, info) - .await; - - //TODO: Readd - // Check protocol sends block to consensus. - // let hash = match wait_protocol_event(protocol_event_receiver, 1000.into(), |evt| match evt { - // evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - // _ => None, - // }) - // .await - // { - // Some(ProtocolEvent::ReceivedBlock { block_id, .. }) => Some(block_id), - // None => None, - // _ => panic!("Unexpected or no protocol event."), - // }; - // if valid { - // assert_eq!(expected_hash, hash.unwrap()); - // } else { - // assert!(hash.is_none(), "unexpected protocol event") - // } -} - /// Creates an endorsement for use in protocol tests, /// without paying attention to consensus related things. pub fn create_endorsement() -> WrappedEndorsement { diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index 398cf70e995..ac45094a636 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -21,8 +21,7 @@ use massa_models::{ use massa_network_exports::{AskForBlocksInfo, NetworkCommandSender, NetworkEventReceiver}; use massa_pool_exports::PoolController; use massa_protocol_exports::{ - ProtocolCommand, ProtocolConfig, ProtocolError, - ProtocolManagementCommand, ProtocolManager, + ProtocolCommand, ProtocolConfig, ProtocolError, ProtocolManagementCommand, ProtocolManager, }; use massa_models::wrapped::Id; diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index 123a57620be..1dc8bd90235 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -1,12 +1,13 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; +use massa_graph_2_exports::test_exports::MockGraphControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{block::BlockId, slot::Slot}; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand}; use massa_protocol_exports::tests::tools; use massa_protocol_exports::tests::tools::{asked_list, assert_hash_asked_to_node}; -use massa_protocol_exports::ProtocolEvent; +use massa_time::MassaTime; use serial_test::serial; #[tokio::test] @@ -18,9 +19,9 @@ async fn test_full_ask_block_workflow() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -106,26 +107,37 @@ async fn test_full_ask_block_workflow() { // Protocol sends expected block to consensus. 
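// The old ProtocolEvent::ReceivedBlock wait becomes a poll on the mock graph
// controller: the block body now arrives inside the Storage attached to the
// RegisterBlock command and is read back through block_storage.read_blocks().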
loop { - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); + match protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockGraphControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!(received_block.content.operations, block.content.operations); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { break; } - _evt => continue, - }; + None => { + continue; + } + } } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -142,9 +154,9 @@ async fn test_empty_block() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -208,26 +220,37 @@ async fn test_empty_block() { // Protocol sends expected block to consensus. loop { - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); + match protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockGraphControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!(received_block.content.operations, block.content.operations); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { break; } - _evt => continue, - }; + None => { + continue; + } + } } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -243,9 +266,9 @@ async fn test_someone_knows_it() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -276,10 +299,12 @@ async fn test_someone_knows_it() { .send_header(node_c.id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(100), |command| { + match command { + MockGraphControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("unexpected protocol event"), + } + }); // send wishlist protocol_command_sender @@ -326,9 +351,9 @@ async fn test_someone_knows_it() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -344,9 +369,9 @@ async fn test_dont_want_it_anymore() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -400,9 +425,9 @@ async fn test_dont_want_it_anymore() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -419,9 +444,9 @@ async fn test_no_one_has_it() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -481,9 +506,9 @@ async fn test_no_one_has_it() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -499,9 +524,9 @@ async fn test_multiple_blocks_without_a_priori() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -554,9 +579,9 @@ async fn test_multiple_blocks_without_a_priori() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index 46fb541fafc..71edc2dff1f 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; +use massa_graph_2_exports::test_exports::MockGraphControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; @@ -9,8 +10,8 @@ use massa_models::{block::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools; -use massa_protocol_exports::ProtocolEvent; use massa_signature::KeyPair; +use massa_time::MassaTime; use serial_test::serial; use std::collections::HashSet; use std::time::Duration; @@ -22,9 +23,9 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. 
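// This scenario asserts a negative: after the invalid signature, no
// RegisterBlock, RegisterBlockHeader or MarkInvalidBlock command may reach
// the mock graph controller before the timeout elapses.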
let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -46,23 +47,25 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // Check protocol does not send block to consensus. - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - evt @ ProtocolEvent::InvalidBlock { .. } => Some(evt), + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockGraphControllerMessage::RegisterBlockHeader { .. } => { + panic!("Protocol unexpectedly sent header.") + } + MockGraphControllerMessage::MarkInvalidBlock { .. } => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), } - }) - .await - { - None => {} - _ => panic!("Protocol unexpectedly sent block or header."), - } + }); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -125,9 +128,9 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -150,10 +153,12 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .send_header(to_ban_node.id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + } + }); // send wishlist protocol_command_sender @@ -197,23 +202,25 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .await; // Check protocol does not send block to consensus. - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - evt @ ProtocolEvent::InvalidBlock { .. } => Some(evt), + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockGraphControllerMessage::RegisterBlockHeader { .. } => { + panic!("Protocol unexpectedly sent header.") + } + MockGraphControllerMessage::MarkInvalidBlock { .. 
} => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), } - }) - .await - { - None => {} - _ => panic!("Protocol unexpectedly sent header coming from banned node."), - } + }); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -228,9 +235,9 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { let ask_for_block_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd), @@ -250,18 +257,15 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h .await; // Check protocol sends header to consensus. - let received_hash = - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let received_hash = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), }) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + .unwrap(); // 3. Check that protocol sent the right header to consensus. let expected_hash = block.id; @@ -299,9 +303,9 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -316,9 +320,9 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { let send_block_or_header_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::SendBlockInfo { .. } => Some(cmd), @@ -387,9 +391,9 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -404,9 +408,9 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 4 nodes. let nodes = tools::create_and_connect_nodes(4, &mut network_controller).await; @@ -425,34 +429,30 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { // Check protocol sends header to consensus (only the 1st time: later, there is caching). if idx == 0 { - let received_hash = match tools::wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. 
}) => block_id, - Some(evt) => panic!("Unexpected protocol event {:?}", evt), - None => panic!("no protocol event"), - }; + let received_hash = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); // Check that protocol sent the right header to consensus. assert_eq!(expected_hash, received_hash); } else { assert!( - tools::wait_protocol_event( - &mut protocol_event_receiver, - 150.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - .is_none(), + protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => None, + } + }) + .is_none(), "caching was ignored" ); } @@ -493,9 +493,9 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -510,9 +510,9 @@ async fn test_protocol_removes_banned_node_on_disconnection() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -539,27 +539,24 @@ async fn test_protocol_removes_banned_node_on_disconnection() { .await; // Check protocol sends header to consensus. - let received_hash = - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let received_hash = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), }) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + .unwrap(); // Check that protocol sent the right header to consensus. 
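// wait_command returns an Option, so the unwrap() above doubles as the
// assertion that the header actually reached the mock graph controller.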
let expected_hash = block.id; assert_eq!(expected_hash, received_hash); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/endorsements_scenarios.rs b/massa-protocol-worker/src/tests/endorsements_scenarios.rs index 41273569bf4..6386f703b62 100644 --- a/massa-protocol-worker/src/tests/endorsements_scenarios.rs +++ b/massa-protocol-worker/src/tests/endorsements_scenarios.rs @@ -3,12 +3,13 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::protocol_test; +use massa_graph_2_exports::test_exports::MockGraphControllerMessage; use massa_models::{address::Address, slot::Slot}; use massa_network_exports::NetworkCommand; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools; -use massa_protocol_exports::ProtocolEvent; use massa_storage::Storage; +use massa_time::MassaTime; use serial_test::serial; use std::thread; use std::time::Duration; @@ -20,9 +21,9 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -56,9 +57,9 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -73,9 +74,9 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -103,9 +104,9 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -120,9 +121,9 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver| { // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -170,9 +171,9 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -187,9 +188,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver| { // Create 1 node. 
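// The endorsement scenarios never consult the graph mock; protocol_test still
// threads protocol_graph_event_receiver through each closure, presumably so
// the command channel stays open while the protocol worker runs.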
let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -246,9 +247,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -264,9 +265,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -318,9 +319,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -337,9 +338,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -392,9 +393,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -410,9 +411,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -440,13 +441,12 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Wait for the event to be sure that the node is connected, // and noted as knowing the block and its endorsements. - let _ = tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("Node isn't connected or didn't mark block as known."), } - }) - .await; + }); // Send the endorsement to protocol // it should not propagate to the node that already knows about it @@ -478,9 +478,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -495,9 +495,9 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. 
let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -543,9 +543,9 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index 1daca60df23..05aaa493064 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -1,6 +1,6 @@ // Copyright (c) 2022 MASSA LABS -use super::tools::protocol_test; +use super::tools::{protocol_test, send_and_propagate_block}; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::wrapped::{Id, WrappedContent}; @@ -13,7 +13,6 @@ use massa_network_exports::NetworkCommand; use massa_protocol_exports::tests::tools; use massa_protocol_exports::tests::tools::{ create_and_connect_nodes, create_block_with_operations, create_operation_with_expire_period, - send_and_propagate_block, }; use massa_signature::KeyPair; use serial_test::serial; @@ -25,9 +24,9 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 2 node. let mut nodes = create_and_connect_nodes(2, &mut network_controller).await; @@ -58,7 +57,7 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { block, true, creator_node.id, - &mut protocol_event_receiver, + &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) @@ -81,9 +80,9 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { }; ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -104,9 +103,9 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. 
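// send_and_propagate_block (redefined in massa-protocol-worker/src/tests/tools.rs,
// shown near the end of this patch) now drives the mock graph receiver: with
// valid == true it unwraps a RegisterBlock command, with valid == false it
// asserts that none arrives before the timeout.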
let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -138,7 +137,7 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { block, true, creator_node.id, - &mut protocol_event_receiver, + &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) @@ -180,7 +179,7 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { block, false, creator_node.id, - &mut protocol_event_receiver, + &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) @@ -203,7 +202,7 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { block, false, creator_node.id, - &mut protocol_event_receiver, + &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) @@ -212,9 +211,9 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 6c39b788d18..92616316a12 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -3,12 +3,13 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::{protocol_test, protocol_test_with_storage}; +use massa_graph_2_exports::test_exports::MockGraphControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{self, address::Address, amount::Amount, block::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools::{self, assert_hash_asked_to_node}; -use massa_protocol_exports::ProtocolEvent; +use massa_time::MassaTime; use serial_test::serial; use std::str::FromStr; use std::time::Duration; @@ -20,9 +21,9 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -73,9 +74,9 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -90,9 +91,9 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver| { // Create 1 node. 
let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -119,9 +120,9 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -136,9 +137,9 @@ async fn test_protocol_propagates_operations_to_active_nodes() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. @@ -186,9 +187,9 @@ async fn test_protocol_propagates_operations_to_active_nodes() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -203,9 +204,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver, mut storage| { // Create 1 nodes. @@ -259,9 +260,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -277,9 +278,9 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver, _storage| { // Create 2 nodes. @@ -321,9 +322,9 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -339,9 +340,9 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. @@ -398,9 +399,9 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -416,9 +417,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. @@ -439,10 +440,12 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .send_header(nodes[0].id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("unexpected protocol event"), + } + }); // send wishlist protocol_command_sender @@ -499,9 +502,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -518,9 +521,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 3 nodes. @@ -589,13 +592,12 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .await; // Wait for the event to be sure that the node is connected. - let _ = tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), } - }) - .await; + }); // Send the operation to protocol // it should propagate to the node because it isn't in the block. @@ -622,9 +624,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -640,9 +642,9 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, mut pool_event_receiver| { // Create 2 nodes. let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -686,9 +688,9 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, pool_event_receiver, ) }, @@ -703,9 +705,9 @@ async fn test_protocol_ask_operations_on_batch_received() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -738,9 +740,9 @@ async fn test_protocol_ask_operations_on_batch_received() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -755,9 +757,9 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 3 node. 
let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -811,9 +813,9 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after }; ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -828,9 +830,9 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 3 node. let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -886,9 +888,9 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -903,9 +905,9 @@ async fn test_protocol_on_ask_operations() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. @@ -950,9 +952,9 @@ async fn test_protocol_on_ask_operations() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs index 55d1d24e4dd..081cf3af672 100644 --- a/massa-protocol-worker/src/tests/scenarios.rs +++ b/massa-protocol-worker/src/tests/scenarios.rs @@ -3,14 +3,16 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::{protocol_test, protocol_test_with_storage}; +use massa_graph_2_exports::test_exports::MockGraphControllerMessage; use massa_models::block::BlockId; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_network_exports::{AskForBlocksInfo, NetworkCommand}; use massa_protocol_exports::tests::tools; use massa_protocol_exports::{ - tests::tools::{create_and_connect_nodes, create_block, wait_protocol_event}, - BlocksResults, ProtocolEvent, + tests::tools::{create_and_connect_nodes, create_block}, + BlocksResults, }; +use massa_time::MassaTime; use serial_test::serial; use std::collections::HashSet; @@ -22,9 +24,9 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver| { let ask_for_block_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd), @@ -46,19 +48,15 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { .await; // Check protocol sends header to consensus. - let received_hash = match wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. 
}) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + let received_hash = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); // 4. Check that protocol sent the right header to consensus. let expected_hash = block.id; @@ -100,9 +98,9 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -117,9 +115,9 @@ async fn test_protocol_sends_blocks_when_asked_for() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, mut storage| { let send_block_info_cmd_filter = |cmd| match cmd { @@ -186,9 +184,9 @@ async fn test_protocol_sends_blocks_when_asked_for() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -203,9 +201,9 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 4 nodes. @@ -233,19 +231,15 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f // node[1] asks for that block // Check protocol sends header to consensus. - let (ref_hash, _) = match wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, header }) => (block_id, header), - _ => panic!("Unexpected or no protocol event."), - }; + let ref_hash = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); storage.store_block(ref_block.clone()); @@ -299,9 +293,9 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -317,9 +311,9 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_graph_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 4 nodes. @@ -347,19 +341,15 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl // node[1] asks for that block // Check protocol sends header to consensus. - let (ref_hash, _) = match wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. 
} => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, header }) => (block_id, header), - _ => panic!("Unexpected or no protocol event."), - }; + let ref_hash = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); storage.store_block(ref_block.clone()); // 5. Propagate header. @@ -438,9 +428,9 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -456,9 +446,9 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -474,9 +464,9 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, @@ -491,9 +481,9 @@ async fn test_protocol_block_not_found() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -529,9 +519,9 @@ async fn test_protocol_block_not_found() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_graph_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index d6efa0b278e..6c8c137f669 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -1,27 +1,39 @@ use crate::start_protocol_controller; use futures::Future; +use massa_graph_2_exports::test_exports::{ + GraphEventReceiver, MockGraphController, MockGraphControllerMessage, +}; +use massa_models::{ + block::{BlockId, WrappedBlock}, + node::NodeId, + operation::WrappedOperation, + prehash::PreHashSet, +}; +use massa_network_exports::BlockInfoReply; use massa_pool_exports::test_exports::{MockPoolController, PoolEventReceiver}; use massa_protocol_exports::{ tests::mock_network_controller::MockNetworkController, ProtocolCommandSender, ProtocolConfig, - ProtocolEventReceiver, ProtocolManager, + ProtocolManager, }; use massa_storage::Storage; +use massa_time::MassaTime; +use tokio::sync::mpsc; pub async fn protocol_test(protocol_config: &ProtocolConfig, test: F) where F: FnOnce( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + GraphEventReceiver, PoolEventReceiver, ) -> V, V: Future< Output = ( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + GraphEventReceiver, PoolEventReceiver, ), >, @@ -30,32 +42,35 @@ where MockNetworkController::new(); let (pool_controller, pool_event_receiver) = MockPoolController::new_with_receiver(); - + let (graph_controller, graph_event_receiver) = 
MockGraphController::new_with_receiver(); // start protocol controller - let (protocol_command_sender, protocol_manager): ( - ProtocolCommandSender, - ProtocolManager, - ) = start_protocol_controller( + let (protocol_command_sender, protocol_command_receiver) = + mpsc::channel(protocol_config.controller_channel_size); + // start protocol controller + let protocol_manager: ProtocolManager = start_protocol_controller( *protocol_config, network_command_sender, network_event_receiver, + protocol_command_receiver, + graph_controller, pool_controller, Storage::create_root(), ) .await .expect("could not start protocol controller"); + let protocol_command_sender = ProtocolCommandSender(protocol_command_sender); let ( _network_controller, - protocol_event_receiver, _protocol_command_sender, protocol_manager, + _graph_event_receiver, _pool_event_receiver, ) = test( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + graph_event_receiver, pool_event_receiver, ) .await; @@ -70,18 +85,18 @@ pub async fn protocol_test_with_storage(protocol_config: &ProtocolConfig, where F: FnOnce( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + GraphEventReceiver, PoolEventReceiver, Storage, ) -> V, V: Future< Output = ( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + GraphEventReceiver, PoolEventReceiver, ), >, @@ -89,37 +104,101 @@ where let (network_controller, network_command_sender, network_event_receiver) = MockNetworkController::new(); let (pool_controller, mock_pool_receiver) = MockPoolController::new_with_receiver(); + let (graph_controller, mock_graph_receiver) = MockGraphController::new_with_receiver(); let storage = Storage::create_root(); // start protocol controller - let (protocol_command_sender, protocol_event_receiver, protocol_manager) = - start_protocol_controller( - *protocol_config, - network_command_sender, - network_event_receiver, - pool_controller, - storage.clone(), - ) - .await - .expect("could not start protocol controller"); + let (protocol_command_sender, protocol_command_receiver) = + mpsc::channel(protocol_config.controller_channel_size); + let protocol_manager = start_protocol_controller( + *protocol_config, + network_command_sender, + network_event_receiver, + protocol_command_receiver, + graph_controller, + pool_controller, + storage.clone(), + ) + .await + .expect("could not start protocol controller"); + let protocol_command_sender = ProtocolCommandSender(protocol_command_sender); let ( _network_controller, - protocol_event_receiver, _protocol_command_sender, protocol_manager, + _graph_event_receiver, _protocol_pool_event_receiver, ) = test( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + mock_graph_receiver, mock_pool_receiver, storage, ) .await; protocol_manager - .stop(protocol_event_receiver) + .stop() .await .expect("Failed to shutdown protocol."); } + +/// send a block and assert it has been propagate (or not) +pub async fn send_and_propagate_block( + network_controller: &mut MockNetworkController, + block: WrappedBlock, + valid: bool, + source_node_id: NodeId, + protocol_graph_event_receiver: &mut GraphEventReceiver, + protocol_command_sender: &mut ProtocolCommandSender, + operations: Vec, +) { + let expected_hash = block.id; + + network_controller + .send_header(source_node_id, block.content.header.clone()) + .await; + + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, 
Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + + // Send block info to protocol. + let info = vec![( + block.id, + BlockInfoReply::Info(block.content.operations.clone()), + )]; + network_controller + .send_block_info(source_node_id, info) + .await; + + // Send full ops. + let info = vec![(block.id, BlockInfoReply::Operations(operations))]; + network_controller + .send_block_info(source_node_id, info) + .await; + + // Check protocol sends block to consensus. + let hash = + protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { + match command { + MockGraphControllerMessage::RegisterBlock { + block_id, + slot: _, + block_storage: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + } + }); + if valid { + assert_eq!(expected_hash, hash.unwrap()); + } else { + assert!(hash.is_none(), "unexpected protocol event") + } +} From f412e4cc051e2641c966650c7648d086c24d888b Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 21 Oct 2022 13:50:12 +0200 Subject: [PATCH 27/40] Fix warnings and format. --- massa-api/src/config.rs | 2 +- massa-api/src/public.rs | 8 ++++---- massa-bootstrap/src/server.rs | 1 + massa-bootstrap/src/tests/scenarios.rs | 2 +- massa-graph-2-exports/src/controller_trait.rs | 2 +- massa-graph-2-exports/src/test_exports/mock.rs | 4 ++-- massa-graph-2-worker/src/controller.rs | 2 +- massa-graph-2-worker/src/worker/init.rs | 4 ++-- massa-graph-2-worker/src/worker/mod.rs | 2 +- massa-protocol-exports/src/tests/tools.rs | 4 +--- 10 files changed, 15 insertions(+), 16 deletions(-) diff --git a/massa-api/src/config.rs b/massa-api/src/config.rs index 830db341187..89e03e5945d 100644 --- a/massa-api/src/config.rs +++ b/massa-api/src/config.rs @@ -30,7 +30,7 @@ pub struct APIConfig { pub max_parameter_size: u32, /// thread count pub thread_count: u8, - /// genesis_timestamp + /// `genesis_timestamp` pub genesis_timestamp: MassaTime, /// t0 pub t0: MassaTime, diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index aa80e8535b8..38f9e1218a5 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -299,7 +299,7 @@ impl Endpoints for API { let pool_command_sender = self.0.pool_command_sender.clone(); let node_id = self.0.node_id; let config = CompactConfig::default(); - let api_config = self.0.api_settings.clone(); + let api_config = self.0.api_settings; let closure = async move || { let now = MassaTime::now(compensation_millis)?; let last_slot = get_latest_block_slot_at_timestamp( @@ -361,7 +361,7 @@ impl Endpoints for API { fn get_stakers(&self) -> BoxFuture, ApiError>> { let execution_controller = self.0.execution_controller.clone(); - let api_config = self.0.api_settings.clone(); + let api_config = self.0.api_settings; let compensation_millis = self.0.compensation_millis; let closure = async move || { @@ -561,7 +561,7 @@ impl Endpoints for API { }; let graph_status = graph_controller - .get_block_statuses(&vec![id]) + .get_block_statuses(&[id]) .into_iter() .next() .expect("expected get_block_statuses to return one element"); @@ -613,7 +613,7 @@ impl Endpoints for API { time: TimeInterval, ) -> BoxFuture, ApiError>> { let graph_controller = self.0.graph_controller.clone(); - let api_config = self.0.api_settings.clone(); + let api_config = self.0.api_settings; let closure = async move || { // filter blocks from graph_export let (start_slot, end_slot) = time_range_to_slot_range( diff --git a/massa-bootstrap/src/server.rs 
b/massa-bootstrap/src/server.rs index 59b2df44c6d..31f9182f59f 100644
--- a/massa-bootstrap/src/server.rs
+++ b/massa-bootstrap/src/server.rs
@@ -409,6 +409,7 @@ pub async fn send_final_state_stream(
     Ok(())
 }

+#[allow(clippy::manual_async_fn)]
 #[allow(clippy::too_many_arguments)]
 #[fix_hidden_lifetime_bug]
 async fn manage_bootstrap(
diff --git a/massa-bootstrap/src/tests/scenarios.rs index d79c8893339..529bc8e7265 100644
--- a/massa-bootstrap/src/tests/scenarios.rs
+++ b/massa-bootstrap/src/tests/scenarios.rs
@@ -168,7 +168,7 @@ async fn test_bootstrap_server() {
     // wait for bootstrap to ask consensus for bootstrap graph, send it
     let wait_graph = async move || {
         let response =
-            graph_event_receiver.wait_command(MassaTime::from_millis(1000), |cmd| match cmd {
+            graph_event_receiver.wait_command(MassaTime::from_millis(2000), |cmd| match cmd {
                 MockGraphControllerMessage::GetBootstrapableGraph { response_tx } => {
                     let sent_graph = get_boot_state();
                     response_tx.send(Ok(sent_graph.clone())).unwrap();
diff --git a/massa-graph-2-exports/src/controller_trait.rs index 40f0c856ec3..4affec7f21a 100644
--- a/massa-graph-2-exports/src/controller_trait.rs
+++ b/massa-graph-2-exports/src/controller_trait.rs
@@ -18,7 +18,7 @@ pub trait GraphController: Send + Sync {
         end_slot: Option<Slot>,
     ) -> Result;

-    fn get_block_statuses(&self, ids: &Vec<BlockId>) -> Vec<BlockGraphStatus>;
+    fn get_block_statuses(&self, ids: &[BlockId]) -> Vec<BlockGraphStatus>;

     fn get_cliques(&self) -> Vec<Clique>;

diff --git a/massa-graph-2-exports/src/test_exports/mock.rs index 27263335f00..cc000bd3ba2 100644
--- a/massa-graph-2-exports/src/test_exports/mock.rs
+++ b/massa-graph-2-exports/src/test_exports/mock.rs
@@ -129,13 +129,13 @@ impl GraphController for MockGraphController {
         response_rx.recv().unwrap()
     }

-    fn get_block_statuses(&self, ids: &Vec<BlockId>) -> Vec<BlockGraphStatus> {
+    fn get_block_statuses(&self, ids: &[BlockId]) -> Vec<BlockGraphStatus> {
         let (response_tx, response_rx) = mpsc::channel();
         self.0
             .lock()
             .unwrap()
             .send(MockGraphControllerMessage::GetBlockStatuses {
-                block_ids: ids.clone(),
+                block_ids: ids.to_vec(),
                 response_tx,
             })
             .unwrap();
diff --git a/massa-graph-2-worker/src/controller.rs index 3f35b7e65b8..181c807c996 100644
--- a/massa-graph-2-worker/src/controller.rs
+++ b/massa-graph-2-worker/src/controller.rs
@@ -69,7 +69,7 @@ impl GraphController for GraphControllerImpl {
     ///
     /// # Returns:
     /// A vector of statuses sorted by the order of the block ids
-    fn get_block_statuses(&self, ids: &Vec<BlockId>) -> Vec<BlockGraphStatus> {
+    fn get_block_statuses(&self, ids: &[BlockId]) -> Vec<BlockGraphStatus> {
         let read_shared_state = self.shared_state.read();
         ids.iter()
             .map(|id| read_shared_state.get_block_status(id))
diff --git a/massa-graph-2-worker/src/worker/init.rs index 6ae10f6757d..915c6511a30 100644
--- a/massa-graph-2-worker/src/worker/init.rs
+++ b/massa-graph-2-worker/src/worker/init.rs
@@ -67,11 +67,11 @@ impl GraphWorker {
     /// * `command_receiver`: channel to receive commands from controller
     /// * `channels`: channels to communicate with other workers
     /// * `shared_state`: shared state with the controller
-    /// * `init_graph`: Optional graph of blocks to init the worker
+    /// * `init_graph`: Optional graph of blocks to initialize the worker
     /// * `storage`: shared storage
     ///
     /// # Returns:
-    /// A GraphWorker, to interact with it use the GraphController
+    /// A `GraphWorker`; use the `GraphController` to interact with it
pub fn new(
         config: GraphConfig,
         command_receiver: mpsc::Receiver<GraphCommand>,
diff --git a/massa-graph-2-worker/src/worker/mod.rs index daa1da3f9a6..5324c1df32f 100644
--- a/massa-graph-2-worker/src/worker/mod.rs
+++ b/massa-graph-2-worker/src/worker/mod.rs
@@ -18,7 +18,7 @@ use crate::controller::GraphControllerImpl;
 use crate::manager::GraphManagerImpl;
 use crate::state::GraphState;

-/// The graph worker structure that contains all informations and tools for the graph worker thread.
+/// The graph worker structure that contains all information and tools for the graph worker thread.
 pub struct GraphWorker {
     /// Channel to receive command from the controller
     command_receiver: mpsc::Receiver<GraphCommand>,
diff --git a/massa-protocol-exports/src/tests/tools.rs index a5173bc81b5..3544c84c7fa 100644
--- a/massa-protocol-exports/src/tests/tools.rs
+++ b/massa-protocol-exports/src/tests/tools.rs
@@ -1,12 +1,10 @@
 // Copyright (c) 2022 MASSA LABS

 use super::mock_network_controller::MockNetworkController;
-use crate::protocol_controller::ProtocolCommandSender;
 use crate::ProtocolConfig;
 use massa_hash::Hash;
 use massa_models::node::NodeId;
 use massa_models::operation::OperationSerializer;
-use massa_models::prehash::PreHashSet;
 use massa_models::wrapped::WrappedContent;
 use massa_models::{
     address::Address,
@@ -16,7 +14,7 @@ use massa_models::{
     operation::{Operation, OperationType, WrappedOperation},
     slot::Slot,
 };
-use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand};
+use massa_network_exports::{AskForBlocksInfo, NetworkCommand};
 use massa_signature::KeyPair;
 use massa_time::MassaTime;
 use std::collections::HashMap;

From 2d0bbd883a2401de7605278b4f98e1f31686b531 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Mon, 24 Oct 2022 16:24:40 +0200
Subject: [PATCH 28/40] Fix calls to blocking functions from async contexts in tests.
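Calling the mock receivers' blocking `wait_command` directly from an async
test stalls a tokio worker thread, so these waits now run on the blocking
thread pool via `tokio::task::spawn_blocking`, with the receiver handed
back to the caller for reuse. A minimal, self-contained sketch of the
pattern (illustrative only, assuming a plain `std::sync::mpsc` channel like
the one backing the mock receivers; the real tests wrap
`GraphEventReceiver::wait_command` the same way):

    use std::sync::mpsc;
    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        let (tx, rx) = mpsc::channel::<u32>();
        tx.send(42).unwrap();
        // `recv_timeout` blocks the calling thread, so run it on the
        // blocking pool and return the receiver for further use.
        let (rx, got) = tokio::task::spawn_blocking(move || {
            let got = rx.recv_timeout(Duration::from_millis(1000)).ok();
            (rx, got)
        })
        .await
        .unwrap();
        assert_eq!(got, Some(42));
        drop(rx);
    }

For the same reason, `ProtocolCommandSender` switches from `blocking_send`
to `try_send`, which fails fast instead of blocking the runtime.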
--- massa-bootstrap/Cargo.toml | 1 + massa-bootstrap/src/tests/scenarios.rs | 35 ++-- .../src/test_exports/mock.rs | 1 + .../src/protocol_controller.rs | 10 +- massa-protocol-worker/Cargo.toml | 4 + massa-protocol-worker/src/protocol_network.rs | 1 + .../src/tests/ask_block_scenarios.rs | 133 ++++++++------ .../src/tests/ban_nodes_scenarios.rs | 168 ++++++++++-------- .../src/tests/endorsements_scenarios.rs | 18 +- .../tests/in_block_operations_scenarios.rs | 117 +++++++++++- .../src/tests/operations_scenarios.rs | 36 ++-- massa-protocol-worker/src/tests/scenarios.rs | 60 ++++--- massa-protocol-worker/src/tests/tools.rs | 27 +-- 13 files changed, 389 insertions(+), 222 deletions(-) diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index ce25981cf08..ff5a97b9461 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -56,6 +56,7 @@ massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"] } testing = [ "massa_final_state/testing", "massa_ledger_worker/testing", + "massa_graph_2_exports/testing", "massa_async_pool/testing", ] sandbox = [ diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 529bc8e7265..89ad8edee9b 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -165,23 +165,6 @@ async fn test_bootstrap_server() { sent_peers }; - // wait for bootstrap to ask consensus for bootstrap graph, send it - let wait_graph = async move || { - let response = - graph_event_receiver.wait_command(MassaTime::from_millis(2000), |cmd| match cmd { - MockGraphControllerMessage::GetBootstrapableGraph { response_tx } => { - let sent_graph = get_boot_state(); - response_tx.send(Ok(sent_graph.clone())).unwrap(); - Some(sent_graph) - } - _ => panic!("timeout waiting for get boot graph consensus command"), - }); - match response { - Some(graph) => graph, - None => panic!("error waiting for get boot graph consensus command"), - } - }; - // launch the modifier thread let list_changes: Arc>> = Arc::new(RwLock::new(Vec::new())); let list_changes_clone = list_changes.clone(); @@ -207,7 +190,23 @@ async fn test_bootstrap_server() { let sent_peers = wait_peers().await; // wait for peers and graph - let sent_graph = wait_graph().await; + let sent_graph = tokio::task::spawn_blocking(move || { + let response = + graph_event_receiver.wait_command(MassaTime::from_millis(10000), |cmd| match cmd { + MockGraphControllerMessage::GetBootstrapableGraph { response_tx } => { + let sent_graph = get_boot_state(); + response_tx.send(Ok(sent_graph.clone())).unwrap(); + Some(sent_graph) + } + _ => panic!("bad command for get boot graph consensus command"), + }); + match response { + Some(graph) => graph, + None => panic!("error waiting for get boot graph consensus command"), + } + }) + .await + .unwrap(); // wait for get_state let bootstrap_res = get_state_h diff --git a/massa-graph-2-exports/src/test_exports/mock.rs b/massa-graph-2-exports/src/test_exports/mock.rs index cc000bd3ba2..9c9b575a339 100644 --- a/massa-graph-2-exports/src/test_exports/mock.rs +++ b/massa-graph-2-exports/src/test_exports/mock.rs @@ -28,6 +28,7 @@ pub struct GraphEventReceiver(pub Receiver); /// Each variant corresponds to a unique method in `GraphController`, /// Some variants wait for a response on their `response_tx` field, if present. /// See the documentation of `GraphController` for details on parameters and return values. 
+#[derive(Clone, Debug)] pub enum MockGraphControllerMessage { GetBlockStatuses { block_ids: Vec, diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index 4f615dc194c..3fec37b70ac 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -75,7 +75,7 @@ impl ProtocolCommandSender { "block_id": block_id }); self.0 - .blocking_send(ProtocolCommand::IntegratedBlock { block_id, storage }) + .try_send(ProtocolCommand::IntegratedBlock { block_id, storage }) .map_err(|_| ProtocolError::ChannelError("block_integrated command send error".into())) } @@ -85,7 +85,7 @@ impl ProtocolCommandSender { "block_id": block_id }); self.0 - .blocking_send(ProtocolCommand::AttackBlockDetected(block_id)) + .try_send(ProtocolCommand::AttackBlockDetected(block_id)) .map_err(|_| { ProtocolError::ChannelError("notify_block_attack command send error".into()) }) @@ -99,7 +99,7 @@ impl ProtocolCommandSender { ) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.send_wishlist_delta", { "new": new, "remove": remove }); self.0 - .blocking_send(ProtocolCommand::WishlistDelta { new, remove }) + .try_send(ProtocolCommand::WishlistDelta { new, remove }) .map_err(|_| { ProtocolError::ChannelError("send_wishlist_delta command send error".into()) }) @@ -113,7 +113,7 @@ impl ProtocolCommandSender { "operations": operations.get_op_refs() }); self.0 - .blocking_send(ProtocolCommand::PropagateOperations(operations)) + .try_send(ProtocolCommand::PropagateOperations(operations)) .map_err(|_| { ProtocolError::ChannelError("propagate_operation command send error".into()) }) @@ -125,7 +125,7 @@ impl ProtocolCommandSender { "endorsements": endorsements.get_endorsement_refs() }); self.0 - .blocking_send(ProtocolCommand::PropagateEndorsements(endorsements)) + .try_send(ProtocolCommand::PropagateEndorsements(endorsements)) .map_err(|_| { ProtocolError::ChannelError("propagate_endorsements command send error".into()) }) diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index f469a1f5313..a554c52d978 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -31,3 +31,7 @@ futures = "0.3" massa_signature = { path = "../massa-signature" } massa_pool_exports = { path = "../massa-pool-exports", features = ["testing"] } + +[features] + +testing = ["massa_graph_2_exports/testing", "massa_network_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing"] \ No newline at end of file diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index 581a38a1a45..6bc0daaafd1 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -564,6 +564,7 @@ impl ProtocolWorker { // Send operations to pool, // before performing the below checks, // and wait for them to have been procesed(i.e. added to storage). 
+ println!("AURELIEN: Full ops received"); self.on_block_full_operations_received(from_node_id, block_id, operations, op_timer) .await } diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index 1dc8bd90235..5a831ee40e4 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -105,34 +105,43 @@ async fn test_full_ask_block_workflow() { ) .await; - // Protocol sends expected block to consensus. - loop { - match protocol_graph_event_receiver.wait_command( - MassaTime::from_millis(100), - |command| match command { - MockGraphControllerMessage::RegisterBlock { - slot, - block_id, - block_storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = - block_storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); - Some(()) + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + // Protocol sends expected block to consensus. + loop { + match protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockGraphControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!( + received_block.content.operations, + block.content.operations + ); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { + break; + } + None => { + continue; } - _evt => None, - }, - ) { - Some(()) => { - break; - } - None => { - continue; } } - } + return protocol_graph_event_receiver; + }) + .await + .unwrap(); + ( network_controller, protocol_command_sender, @@ -219,33 +228,41 @@ async fn test_empty_block() { ); // Protocol sends expected block to consensus. 
- loop { - match protocol_graph_event_receiver.wait_command( - MassaTime::from_millis(100), - |command| match command { - MockGraphControllerMessage::RegisterBlock { - slot, - block_id, - block_storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = - block_storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); - Some(()) + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + loop { + match protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockGraphControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!( + received_block.content.operations, + block.content.operations + ); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { + break; + } + None => { + continue; } - _evt => None, - }, - ) { - Some(()) => { - break; - } - None => { - continue; } } - } + protocol_graph_event_receiver + }) + .await + .unwrap(); ( network_controller, protocol_command_sender, @@ -299,12 +316,18 @@ async fn test_someone_knows_it() { .send_header(node_c.id, block.content.header.clone()) .await; - protocol_graph_event_receiver.wait_command(MassaTime::from_millis(100), |command| { - match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), - _ => panic!("unexpected protocol event"), - } - }); + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_graph_event_receiver + }) + .await + .unwrap(); // send wishlist protocol_command_sender diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index 71edc2dff1f..af9478977ec 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -47,20 +47,26 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // Check protocol does not send block to consensus. - protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { - match command { - MockGraphControllerMessage::RegisterBlock { .. } => { - panic!("Protocol unexpectedly sent block.") - } - MockGraphControllerMessage::RegisterBlockHeader { .. } => { - panic!("Protocol unexpectedly sent header.") - } - MockGraphControllerMessage::MarkInvalidBlock { .. } => { - panic!("Protocol unexpectedly sent invalid block.") - } - _ => Some(()), - } - }); + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockGraphControllerMessage::RegisterBlockHeader { .. } => { + panic!("Protocol unexpectedly sent header.") + } + MockGraphControllerMessage::MarkInvalidBlock { .. 
} => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), + }, + ); + protocol_graph_event_receiver + }) + .await + .unwrap(); ( network_controller, protocol_command_sender, @@ -153,12 +159,18 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .send_header(to_ban_node.id, block.content.header.clone()) .await; - protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { - match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), - _ => panic!("unexpected protocol event"), - } - }); + let mut protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_graph_event_receiver + }) + .await + .unwrap(); // send wishlist protocol_command_sender @@ -202,20 +214,26 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .await; // Check protocol does not send block to consensus. - protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { - match command { - MockGraphControllerMessage::RegisterBlock { .. } => { - panic!("Protocol unexpectedly sent block.") - } - MockGraphControllerMessage::RegisterBlockHeader { .. } => { - panic!("Protocol unexpectedly sent header.") - } - MockGraphControllerMessage::MarkInvalidBlock { .. } => { - panic!("Protocol unexpectedly sent invalid block.") - } - _ => Some(()), - } - }); + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockGraphControllerMessage::RegisterBlockHeader { .. } => { + panic!("Protocol unexpectedly sent header.") + } + MockGraphControllerMessage::MarkInvalidBlock { .. } => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), + }, + ); + protocol_graph_event_receiver + }) + .await + .unwrap(); ( network_controller, protocol_command_sender, @@ -257,14 +275,20 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h .await; // Check protocol sends header to consensus. - let received_hash = protocol_graph_event_receiver - .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { - block_id, - header: _, - } => Some(block_id), - _ => panic!("unexpected protocol event"), + let (protocol_graph_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_graph_event_receiver, id) }) + .await .unwrap(); // 3. Check that protocol sent the right header to consensus. 
@@ -427,34 +451,30 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { .send_header(creator_node.id, block.content.header.clone()) .await; + let (old_protocol_graph_event_receiver, optional_block_id) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }, + ); + (protocol_graph_event_receiver, id) + }) + .await + .unwrap(); + protocol_graph_event_receiver = old_protocol_graph_event_receiver; // Check protocol sends header to consensus (only the 1st time: later, there is caching). if idx == 0 { - let received_hash = protocol_graph_event_receiver - .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { - block_id, - header: _, - } => Some(block_id), - _ => panic!("unexpected protocol event"), - }) - .unwrap(); + let received_hash = optional_block_id.unwrap(); // Check that protocol sent the right header to consensus. assert_eq!(expected_hash, received_hash); } else { - assert!( - protocol_graph_event_receiver - .wait_command(MassaTime::from_millis(1000), |command| { - match command { - MockGraphControllerMessage::RegisterBlockHeader { - block_id, - header: _, - } => Some(block_id), - _ => None, - } - }) - .is_none(), - "caching was ignored" - ); + assert!(optional_block_id.is_none(), "caching was ignored"); } } @@ -539,14 +559,20 @@ async fn test_protocol_removes_banned_node_on_disconnection() { .await; // Check protocol sends header to consensus. - let received_hash = protocol_graph_event_receiver - .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { - block_id, - header: _, - } => Some(block_id), - _ => panic!("unexpected protocol event"), + let (protocol_graph_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_graph_event_receiver, id) }) + .await .unwrap(); // Check that protocol sent the right header to consensus. diff --git a/massa-protocol-worker/src/tests/endorsements_scenarios.rs b/massa-protocol-worker/src/tests/endorsements_scenarios.rs index 6386f703b62..e1ff633606d 100644 --- a/massa-protocol-worker/src/tests/endorsements_scenarios.rs +++ b/massa-protocol-worker/src/tests/endorsements_scenarios.rs @@ -441,12 +441,18 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Wait for the event to be sure that the node is connected, // and noted as knowing the block and its endorsements. - protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { - match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), - _ => panic!("Node isn't connected or didn't mark block as known."), - } - }); + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("Node isn't connected or didn't mark block as known."), + }, + ); + protocol_graph_event_receiver + }) + .await + .unwrap(); // Send the endorsement to protocol // it should not propagate to the node that already knows about it diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index 05aaa493064..2b692266ea9 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::tools::{protocol_test, send_and_propagate_block}; +use massa_graph_2_exports::test_exports::MockGraphControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::wrapped::{Id, WrappedContent}; @@ -15,6 +16,7 @@ use massa_protocol_exports::tests::tools::{ create_and_connect_nodes, create_block_with_operations, create_operation_with_expire_period, }; use massa_signature::KeyPair; +use massa_time::MassaTime; use serial_test::serial; #[tokio::test] @@ -52,17 +54,45 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { Slot::new(1, op_thread), vec![op.clone()], ); + let block_id = block.id; send_and_propagate_block( &mut network_controller, block, - true, creator_node.id, - &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + // Check protocol sends block to consensus. + let (protocol_graph_event_receiver, expected_hash) = + tokio::task::spawn_blocking(move || { + let header_id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + let id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlock { + block_id, + slot: _, + block_storage: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + assert_eq!(header_id, id); + (protocol_graph_event_receiver, id) + }) + .await + .unwrap(); + assert_eq!(expected_hash, block_id); + // Propagates the operation found in the block. if let Some(NetworkCommand::SendOperationAnnouncements { to_node, batch }) = network_controller @@ -132,16 +162,45 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { Slot::new(1, op_thread), vec![op.clone()], ); + let block_id = block.id; send_and_propagate_block( &mut network_controller, block, - true, creator_node.id, - &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol sends block to consensus. 
+ let (new_protocol_graph_event_receiver, expected_hash) = + tokio::task::spawn_blocking(move || { + let header_id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + let id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlock { + block_id, + slot: _, + block_storage: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + assert_eq!(header_id, id); + (protocol_graph_event_receiver, id) + }) + .await + .unwrap(); + protocol_graph_event_receiver = new_protocol_graph_event_receiver; + assert_eq!(expected_hash, block_id); } // block with wrong merkle root @@ -177,13 +236,34 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { send_and_propagate_block( &mut network_controller, block, - false, creator_node.id, - &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol didn't send block to consensus. + let (new_protocol_graph_event_receiver, optional_expected_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => None, + }, + ); + (protocol_graph_event_receiver, id) + }) + .await + .unwrap(); + protocol_graph_event_receiver = new_protocol_graph_event_receiver; + assert!( + optional_expected_hash.is_none(), + "Block sent to consensus but shouldn't." + ); } // block with operation with wrong signature @@ -200,13 +280,34 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { send_and_propagate_block( &mut network_controller, block, - false, creator_node.id, - &mut protocol_graph_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol didn't send block to consensus. + let (new_protocol_graph_event_receiver, optional_expected_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => None, + }, + ); + (protocol_graph_event_receiver, id) + }) + .await + .unwrap(); + protocol_graph_event_receiver = new_protocol_graph_event_receiver; + assert!( + optional_expected_hash.is_none(), + "Block sent to consensus but shouldn't." + ); } ( diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 92616316a12..52c4e8ad2cb 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -440,12 +440,18 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .send_header(nodes[0].id, block.content.header.clone()) .await; - protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { - match command { - MockGraphControllerMessage::RegisterBlockHeader { .. 
} => Some(()), - _ => panic!("unexpected protocol event"), - } - }); + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_graph_event_receiver + }) + .await + .unwrap(); // send wishlist protocol_command_sender @@ -592,12 +598,18 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .await; // Wait for the event to be sure that the node is connected. - protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| { - match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), - _ => panic!("unexpected protocol event"), - } - }); + let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + protocol_graph_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_graph_event_receiver + }) + .await + .unwrap(); // Send the operation to protocol // it should propagate to the node because it isn't in the block. diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs index 081cf3af672..4e1003021db 100644 --- a/massa-protocol-worker/src/tests/scenarios.rs +++ b/massa-protocol-worker/src/tests/scenarios.rs @@ -48,14 +48,20 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { .await; // Check protocol sends header to consensus. - let received_hash = protocol_graph_event_receiver - .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { - block_id, - header: _, - } => Some(block_id), - _ => panic!("unexpected protocol event"), + let (protocol_graph_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_graph_event_receiver, id) }) + .await .unwrap(); // 4. Check that protocol sent the right header to consensus. @@ -231,14 +237,20 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f // node[1] asks for that block // Check protocol sends header to consensus. 
- let ref_hash = protocol_graph_event_receiver - .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { - block_id, - header: _, - } => Some(block_id), - _ => panic!("unexpected protocol event"), + let (protocol_graph_event_receiver, ref_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_graph_event_receiver, id) }) + .await .unwrap(); storage.store_block(ref_block.clone()); @@ -341,14 +353,20 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl // node[1] asks for that block // Check protocol sends header to consensus. - let ref_hash = protocol_graph_event_receiver - .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { - block_id, - header: _, - } => Some(block_id), - _ => panic!("unexpected protocol event"), + let (protocol_graph_event_receiver, ref_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_graph_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockGraphControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_graph_event_receiver, id) }) + .await .unwrap(); storage.store_block(ref_block.clone()); diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index 6c8c137f669..1125abdcc3b 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -1,8 +1,6 @@ use crate::start_protocol_controller; use futures::Future; -use massa_graph_2_exports::test_exports::{ - GraphEventReceiver, MockGraphController, MockGraphControllerMessage, -}; +use massa_graph_2_exports::test_exports::{GraphEventReceiver, MockGraphController}; use massa_models::{ block::{BlockId, WrappedBlock}, node::NodeId, @@ -16,7 +14,6 @@ use massa_protocol_exports::{ ProtocolManager, }; use massa_storage::Storage; -use massa_time::MassaTime; use tokio::sync::mpsc; pub async fn protocol_test(protocol_config: &ProtocolConfig, test: F) @@ -148,14 +145,10 @@ where pub async fn send_and_propagate_block( network_controller: &mut MockNetworkController, block: WrappedBlock, - valid: bool, source_node_id: NodeId, - protocol_graph_event_receiver: &mut GraphEventReceiver, protocol_command_sender: &mut ProtocolCommandSender, operations: Vec, ) { - let expected_hash = block.id; - network_controller .send_header(source_node_id, block.content.header.clone()) .await; @@ -183,22 +176,4 @@ pub async fn send_and_propagate_block( network_controller .send_block_info(source_node_id, info) .await; - - // Check protocol sends block to consensus. 
-    let hash =
-        protocol_graph_event_receiver.wait_command(MassaTime::from_millis(1000), |command| {
-            match command {
-                MockGraphControllerMessage::RegisterBlock {
-                    block_id,
-                    slot: _,
-                    block_storage: _,
-                } => Some(block_id),
-                _ => panic!("Unexpected or no protocol event."),
-            }
-        });
-    if valid {
-        assert_eq!(expected_hash, hash.unwrap());
-    } else {
-        assert!(hash.is_none(), "unexpected protocol event")
-    }
 }

From d8e0e9debeffcf5f03add2be925f1e0a77ff930a Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Mon, 24 Oct 2022 16:28:05 +0200
Subject: [PATCH 29/40] Fix doc tests.
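Doctests compile as if written by an external user of the crate, so the
`use` paths in the examples must follow the types that moved out of
`massa_graph`. A sketch of the change (illustrative; the real doctests
below also exercise the serializers):

    // Old path, which no longer resolves after the move:
    // use massa_graph::BootstrapableGraph;
    // New public path:
    use massa_graph_2_exports::bootstrapable_graph::BootstrapableGraph;

These can be checked in isolation with `cargo test --doc -p
massa_graph_2_exports` (package name taken from its Cargo.toml).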
--- massa-bootstrap/src/server.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index bcda8120b31..61bfab003ad 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -10,7 +10,6 @@ use futures::StreamExt; use massa_async_pool::AsyncMessageId; use massa_final_state::FinalState; use massa_graph_2_exports::GraphController; -use massa_ledger_exports::get_address_from_key; use massa_logging::massa_trace; use massa_models::{ operation::OperationId, slot::Slot, streaming_step::StreamingStep, version::Version, From c55bf24806189bbdf800899d86104c032e5e485e Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Mon, 24 Oct 2022 16:43:38 +0200 Subject: [PATCH 31/40] Revert config client. --- massa-client/base_config/config.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/massa-client/base_config/config.toml b/massa-client/base_config/config.toml index 6f558ecbe2c..f1055375ceb 100644 --- a/massa-client/base_config/config.toml +++ b/massa-client/base_config/config.toml @@ -3,7 +3,6 @@ history_file_path = "config/.massa_history" timeout = 1000 [default_node] -ip = "158.69.23.120" -#ip = "127.0.0.1" +ip = "127.0.0.1" private_port = 33034 public_port = 33035 \ No newline at end of file From 337aa9b7ad9fcb491c3363f08313f092e44b7d67 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 25 Oct 2022 00:31:23 +0200 Subject: [PATCH 32/40] Add doc and fix stop --- massa-graph-2-exports/src/controller_trait.rs | 61 +++++++++++++++++++ massa-graph-2-worker/src/controller.rs | 2 +- massa-graph-2-worker/src/state/stats.rs | 7 ++- massa-node/src/main.rs | 12 ++-- 4 files changed, 74 insertions(+), 8 deletions(-) diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs index 4affec7f21a..ee7ccae59d5 100644 --- a/massa-graph-2-exports/src/controller_trait.rs +++ b/massa-graph-2-exports/src/controller_trait.rs @@ -12,30 +12,91 @@ use massa_storage::Storage; /// interface that communicates with the graph worker thread pub trait GraphController: Send + Sync { + /// Get an export of a part of the graph + /// + /// # Arguments + /// * `start_slot`: the slot to start the export from, if None, the export starts from the genesis + /// * `end_slot`: the slot to end the export at, if None, the export ends at the current slot + /// + /// # Returns + /// The export of the graph fn get_block_graph_status( &self, start_slot: Option, end_slot: Option, ) -> Result; + /// Get statuses of a list of blocks + /// + /// # Arguments + /// * `block_ids`: the list of block ids to get the status of + /// + /// # Returns + /// The statuses of the blocks sorted by the order of the input list fn get_block_statuses(&self, ids: &[BlockId]) -> Vec; + /// Get all the cliques of the graph + /// + /// # Returns + /// The list of cliques fn get_cliques(&self) -> Vec; + /// Get a graph to bootstrap from + /// + /// # Returns + /// The graph to bootstrap from fn get_bootstrap_graph(&self) -> Result; + /// Get the stats of the consensus + /// + /// # Returns + /// The stats of the consensus fn get_stats(&self) -> Result; + /// Get the best parents for the next block to be produced + /// + /// # Returns + /// The id of best parents for the next block to be produced along with their period fn get_best_parents(&self) -> Vec<(BlockId, u64)>; + /// Get the block id of the block at a specific slot in the blockclique + /// + /// # Arguments + /// * `slot`: the slot to get the block id of + /// + /// # 
Returns + /// The block id of the block at the specified slot if exists fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option; + /// Get the latest block, that is in the blockclique, in the thread of the given slot and before this `slot`. + /// + /// # Arguments: + /// * `slot`: the slot that will give us the thread and the upper bound + /// + /// # Returns: + /// The block id of the latest block in the thread of the given slot and before this slot fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId; + /// Register a block in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to register + /// * `slot`: the slot of the block + /// * `block_storage`: the storage that contains all the objects of the block fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage); + /// Register a block header in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to register + /// * `header`: the header of the block to register fn register_block_header(&self, block_id: BlockId, header: Wrapped); + /// Mark a block as invalid in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to mark as invalid + /// * `header`: the header of the block to mark as invalid fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped); /// Returns a boxed clone of self. diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index 181c807c996..9a5fd57abca 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -154,7 +154,7 @@ impl GraphController for GraphControllerImpl { /// * `slot`: the slot that will give us the thread and the upper bound /// /// # Returns: - /// The block id of the latest block in the thread of the given slot and before this slot if exists + /// The block id of the latest block in the thread of the given slot and before this slot fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { self.shared_state .read() diff --git a/massa-graph-2-worker/src/state/stats.rs b/massa-graph-2-worker/src/state/stats.rs index c527108ddcd..e931bb9c0d7 100644 --- a/massa-graph-2-worker/src/state/stats.rs +++ b/massa-graph-2-worker/src/state/stats.rs @@ -1,10 +1,15 @@ use super::GraphState; -use massa_graph_2_exports::{error::GraphError, events::GraphEvent}; +use massa_graph_2_exports::error::GraphError; use massa_models::stats::ConsensusStats; use massa_time::MassaTime; use std::cmp::max; + +#[cfg(not(feature = "sandbox"))] use tracing::log::warn; +#[cfg(not(feature = "sandbox"))] +use massa_graph_2_exports::events::GraphEvent; + impl GraphState { /// Calculate and return stats about graph pub fn get_stats(&self) -> Result { diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 4a46a509584..56791b6361f 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -583,6 +583,12 @@ async fn stop( // stop factory factory_manager.stop(); + // stop protocol controller + let network_event_receiver = protocol_manager + .stop() + .await + .expect("protocol shutdown failed"); + // stop graph graph_manager.stop(); @@ -599,12 +605,6 @@ async fn stop( // TODO //let protocol_pool_event_receiver = pool_manager.stop().await.expect("pool shutdown failed"); - // stop protocol controller - let network_event_receiver = protocol_manager - .stop() - .await - .expect("protocol shutdown failed"); - // stop network controller network_manager .stop(network_event_receiver) From 
70cc951aea1edb63d7b7b62fcab1441744c7cc9e Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 25 Oct 2022 09:42:50 +0200 Subject: [PATCH 33/40] Add storage of protocol blocks in graph. --- massa-factory-worker/src/block_factory.rs | 2 +- massa-factory-worker/src/tests/tools.rs | 1 + massa-graph-2-exports/src/controller_trait.rs | 29 ++++++++++--------- .../src/test_exports/mock.rs | 4 ++- massa-graph-2-worker/src/commands.rs | 2 +- massa-graph-2-worker/src/controller.rs | 3 +- .../src/state/process_commands.rs | 9 ++++++ massa-graph-2-worker/src/worker/main_loop.rs | 3 +- massa-protocol-worker/src/protocol_network.rs | 2 +- .../src/tests/ask_block_scenarios.rs | 2 ++ .../tests/in_block_operations_scenarios.rs | 2 ++ 11 files changed, 39 insertions(+), 20 deletions(-) diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index 8055d3a94e5..eb74b0855ac 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -233,7 +233,7 @@ impl BlockFactoryWorker { // send full block to consensus self.channels .graph - .register_block(block_id, slot, block_storage); + .register_block(block_id, slot, block_storage, true); } /// main run loop of the block creator thread diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index 0bebac3eb91..b2151a39351 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -208,6 +208,7 @@ impl TestFactory { block_id, block_storage, slot: _, + created: _, } = command { Some((block_id, block_storage)) diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-graph-2-exports/src/controller_trait.rs index ee7ccae59d5..169c40af0d5 100644 --- a/massa-graph-2-exports/src/controller_trait.rs +++ b/massa-graph-2-exports/src/controller_trait.rs @@ -13,11 +13,11 @@ use massa_storage::Storage; /// interface that communicates with the graph worker thread pub trait GraphController: Send + Sync { /// Get an export of a part of the graph - /// + /// /// # Arguments /// * `start_slot`: the slot to start the export from, if None, the export starts from the genesis /// * `end_slot`: the slot to end the export at, if None, the export ends at the current slot - /// + /// /// # Returns /// The export of the graph fn get_block_graph_status( @@ -27,43 +27,43 @@ pub trait GraphController: Send + Sync { ) -> Result; /// Get statuses of a list of blocks - /// + /// /// # Arguments /// * `block_ids`: the list of block ids to get the status of - /// + /// /// # Returns /// The statuses of the blocks sorted by the order of the input list fn get_block_statuses(&self, ids: &[BlockId]) -> Vec; /// Get all the cliques of the graph - /// + /// /// # Returns /// The list of cliques fn get_cliques(&self) -> Vec; /// Get a graph to bootstrap from - /// + /// /// # Returns /// The graph to bootstrap from fn get_bootstrap_graph(&self) -> Result; /// Get the stats of the consensus - /// + /// /// # Returns /// The stats of the consensus fn get_stats(&self) -> Result; /// Get the best parents for the next block to be produced - /// + /// /// # Returns /// The id of best parents for the next block to be produced along with their period fn get_best_parents(&self) -> Vec<(BlockId, u64)>; /// Get the block id of the block at a specific slot in the blockclique - /// + /// /// # Arguments /// * `slot`: the slot to get the block id of - /// + /// /// # Returns /// The block id of the block at the specified slot if exists fn 
get_blockclique_block_at_slot(&self, slot: Slot) -> Option; @@ -78,22 +78,23 @@ pub trait GraphController: Send + Sync { fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId; /// Register a block in the graph - /// + /// /// # Arguments /// * `block_id`: the id of the block to register /// * `slot`: the slot of the block /// * `block_storage`: the storage that contains all the objects of the block - fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage); + /// * `created`: is the block created by our node ? + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool); /// Register a block header in the graph - /// + /// /// # Arguments /// * `block_id`: the id of the block to register /// * `header`: the header of the block to register fn register_block_header(&self, block_id: BlockId, header: Wrapped); /// Mark a block as invalid in the graph - /// + /// /// # Arguments /// * `block_id`: the id of the block to mark as invalid /// * `header`: the header of the block to mark as invalid diff --git a/massa-graph-2-exports/src/test_exports/mock.rs b/massa-graph-2-exports/src/test_exports/mock.rs index 9c9b575a339..098b63b0b3f 100644 --- a/massa-graph-2-exports/src/test_exports/mock.rs +++ b/massa-graph-2-exports/src/test_exports/mock.rs @@ -67,6 +67,7 @@ pub enum MockGraphControllerMessage { block_id: BlockId, slot: Slot, block_storage: Storage, + created: bool, }, RegisterBlockHeader { block_id: BlockId, @@ -211,7 +212,7 @@ impl GraphController for MockGraphController { .unwrap(); } - fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage) { + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { self.0 .lock() .unwrap() @@ -219,6 +220,7 @@ impl GraphController for MockGraphController { block_id, slot, block_storage, + created, }) .unwrap(); } diff --git a/massa-graph-2-worker/src/commands.rs b/massa-graph-2-worker/src/commands.rs index 2f690a0cbf6..083618c344e 100644 --- a/massa-graph-2-worker/src/commands.rs +++ b/massa-graph-2-worker/src/commands.rs @@ -7,7 +7,7 @@ use massa_storage::Storage; #[allow(clippy::large_enum_variant)] pub enum GraphCommand { - RegisterBlock(BlockId, Slot, Storage), + RegisterBlock(BlockId, Slot, Storage, bool), RegisterBlockHeader(BlockId, Wrapped), MarkInvalidBlock(BlockId, Wrapped), } diff --git a/massa-graph-2-worker/src/controller.rs b/massa-graph-2-worker/src/controller.rs index 9a5fd57abca..0df8213276e 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-graph-2-worker/src/controller.rs @@ -161,11 +161,12 @@ impl GraphController for GraphControllerImpl { .get_latest_blockclique_block_at_slot(&slot) } - fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage) { + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { let _ = self.command_sender.try_send(GraphCommand::RegisterBlock( block_id, slot, block_storage, + created, )); } diff --git a/massa-graph-2-worker/src/state/process_commands.rs b/massa-graph-2-worker/src/state/process_commands.rs index 151f7b4451b..ef9951e6780 100644 --- a/massa-graph-2-worker/src/state/process_commands.rs +++ b/massa-graph-2-worker/src/state/process_commands.rs @@ -10,6 +10,7 @@ use massa_models::{ slot::Slot, }; use massa_storage::Storage; +use massa_time::MassaTime; use tracing::debug; use super::GraphState; @@ -77,6 +78,7 @@ impl GraphState { /// * `slot`: the slot of the block /// * `current_slot`: the 
slot when this function is called /// * `storage`: Storage containing the whole content of the block + /// * `created`: is the block created by the node or received from the network /// /// # Returns: /// Success or error if the block is invalid or too old @@ -86,12 +88,19 @@ impl GraphState { slot: Slot, current_slot: Option, storage: Storage, + created: bool, ) -> Result<(), GraphError> { // ignore genesis blocks if self.genesis_hashes.contains(&block_id) { return Ok(()); } + // Block is coming from protocol mark it for desync calculation + if !created { + let now = MassaTime::now(self.config.clock_compensation_millis)?; + self.protocol_blocks.push_back((now, block_id)); + } + debug!("received block {} for slot {}", block_id, slot); let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-graph-2-worker/src/worker/main_loop.rs index 1b28374aa37..e605a66c870 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-graph-2-worker/src/worker/main_loop.rs @@ -33,12 +33,13 @@ impl GraphWorker { write_shared_state.register_block_header(block_id, header, self.previous_slot)?; write_shared_state.block_db_changed() } - GraphCommand::RegisterBlock(block_id, slot, block_storage) => { + GraphCommand::RegisterBlock(block_id, slot, block_storage, created) => { write_shared_state.register_block( block_id, slot, self.previous_slot, block_storage, + created, )?; write_shared_state.block_db_changed() } diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index 6bc0daaafd1..be0faada554 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -516,7 +516,7 @@ impl ProtocolWorker { // Send to graph self.graph_controller - .register_block(block_id, slot, block_storage); + .register_block(block_id, slot, block_storage, false); } } Entry::Vacant(_) => { diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index 5a831ee40e4..bf2c9d096fe 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -115,6 +115,7 @@ async fn test_full_ask_block_workflow() { slot, block_id, block_storage, + created: _, } => { assert_eq!(slot, block.content.header.content.slot); assert_eq!(block_id, block.id); @@ -237,6 +238,7 @@ async fn test_empty_block() { slot, block_id, block_storage, + created: _, } => { assert_eq!(slot, block.content.header.content.slot); assert_eq!(block_id, block.id); diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index 2b692266ea9..0497805d564 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -82,6 +82,7 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { block_id, slot: _, block_storage: _, + created: _, } => Some(block_id), _ => panic!("Unexpected or no protocol event."), }) @@ -190,6 +191,7 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { block_id, slot: _, block_storage: _, + created: _, } => Some(block_id), _ => panic!("Unexpected or no protocol event."), }) From e8ec65c0f9a40d1151fb957b747ef850ae499d80 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 25 Oct 2022 11:10:19 +0200 Subject: [PATCH 34/40] Rename graph to consensus. 
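The rename is mechanical: the `massa-graph-2-exports` and
`massa-graph-2-worker` crates become `massa-consensus-exports` and
`massa-consensus-worker`, and every dependent updates its paths; behavior
is intended to stay the same. An illustrative before/after for a dependent
crate (sketch only; whether individual `Graph*` items are also renamed is
handled file by file in the diffs below):

    // Before the rename:
    // use massa_graph_2_exports::GraphController;
    // After the crate rename (item name shown unchanged for illustration):
    use massa_consensus_exports::GraphController;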
--- Cargo.lock | 96 +++++++++---------- Cargo.toml | 4 +- massa-api/Cargo.toml | 2 +- massa-api/src/error.rs | 6 +- massa-api/src/lib.rs | 6 +- massa-api/src/public.rs | 38 ++++---- massa-bootstrap/Cargo.toml | 4 +- massa-bootstrap/src/error.rs | 6 +- massa-bootstrap/src/lib.rs | 2 +- massa-bootstrap/src/messages.rs | 6 +- massa-bootstrap/src/server.rs | 16 ++-- massa-bootstrap/src/tests/scenarios.rs | 10 +- massa-bootstrap/src/tests/tools.rs | 6 +- .../Cargo.toml | 2 +- .../src/block_graph_export.rs | 0 .../src/block_status.rs | 0 .../src/bootstrapable_graph.rs | 4 +- .../src/channels.rs | 6 +- .../src/controller_trait.rs | 30 +++--- .../src/error.rs | 4 +- .../src/events.rs | 4 +- .../src/export_active_block.rs | 8 +- .../src/lib.rs | 6 +- .../src/settings.rs | 2 +- .../src/test_exports/config.rs | 4 +- .../src/test_exports/mock.rs | 70 +++++++------- .../src/test_exports/mod.rs | 0 .../Cargo.toml | 4 +- .../src/commands.rs | 2 +- .../src/controller.rs | 38 ++++---- .../src/lib.rs | 2 +- massa-consensus-worker/src/manager.rs | 23 +++++ .../src/state/graph.rs | 28 +++--- .../src/state/mod.rs | 30 +++--- .../src/state/process.rs | 30 +++--- .../src/state/process_commands.rs | 12 +-- .../src/state/stats.rs | 20 ++-- .../src/state/tick.rs | 8 +- .../src/state/verifications.rs | 26 ++--- .../src/worker/init.rs | 56 +++++------ .../src/worker/main_loop.rs | 18 ++-- .../src/worker/mod.rs | 64 ++++++------- massa-factory-exports/Cargo.toml | 2 +- massa-factory-exports/src/types.rs | 6 +- massa-factory-worker/Cargo.toml | 2 +- massa-factory-worker/src/block_factory.rs | 4 +- .../src/endorsement_factory.rs | 2 +- massa-factory-worker/src/tests/tools.rs | 20 ++-- massa-graph-2-worker/src/manager.rs | 23 ----- massa-node/Cargo.toml | 6 +- massa-node/src/main.rs | 58 +++++------ massa-protocol-worker/Cargo.toml | 4 +- massa-protocol-worker/src/protocol_network.rs | 10 +- massa-protocol-worker/src/protocol_worker.rs | 14 +-- .../src/tests/ask_block_scenarios.rs | 50 +++++----- .../src/tests/ban_nodes_scenarios.rs | 84 ++++++++-------- .../src/tests/endorsements_scenarios.rs | 42 ++++---- .../tests/in_block_operations_scenarios.rs | 56 +++++------ .../src/tests/operations_scenarios.rs | 70 +++++++------- massa-protocol-worker/src/tests/scenarios.rs | 50 +++++----- massa-protocol-worker/src/tests/tools.rs | 26 ++--- 61 files changed, 616 insertions(+), 616 deletions(-) rename {massa-graph-2-exports => massa-consensus-exports}/Cargo.toml (97%) rename {massa-graph-2-exports => massa-consensus-exports}/src/block_graph_export.rs (100%) rename {massa-graph-2-exports => massa-consensus-exports}/src/block_status.rs (100%) rename {massa-graph-2-exports => massa-consensus-exports}/src/bootstrapable_graph.rs (95%) rename {massa-graph-2-exports => massa-consensus-exports}/src/channels.rs (83%) rename {massa-graph-2-exports => massa-consensus-exports}/src/controller_trait.rs (82%) rename {massa-graph-2-exports => massa-consensus-exports}/src/error.rs (97%) rename {massa-graph-2-exports => massa-consensus-exports}/src/events.rs (61%) rename {massa-graph-2-exports => massa-consensus-exports}/src/export_active_block.rs (97%) rename {massa-graph-2-exports => massa-consensus-exports}/src/lib.rs (75%) rename {massa-graph-2-exports => massa-consensus-exports}/src/settings.rs (98%) rename {massa-graph-2-exports => massa-consensus-exports}/src/test_exports/config.rs (95%) rename {massa-graph-2-exports => massa-consensus-exports}/src/test_exports/mock.rs (68%) rename {massa-graph-2-exports => 
massa-consensus-exports}/src/test_exports/mod.rs (100%) rename {massa-graph-2-worker => massa-consensus-worker}/Cargo.toml (88%) rename {massa-graph-2-worker => massa-consensus-worker}/src/commands.rs (92%) rename {massa-graph-2-worker => massa-consensus-worker}/src/controller.rs (83%) rename {massa-graph-2-worker => massa-consensus-worker}/src/lib.rs (70%) create mode 100644 massa-consensus-worker/src/manager.rs rename {massa-graph-2-worker => massa-consensus-worker}/src/state/graph.rs (93%) rename {massa-graph-2-worker => massa-consensus-worker}/src/state/mod.rs (94%) rename {massa-graph-2-worker => massa-consensus-worker}/src/state/process.rs (97%) rename {massa-graph-2-worker => massa-consensus-worker}/src/state/process_commands.rs (97%) rename {massa-graph-2-worker => massa-consensus-worker}/src/state/stats.rs (84%) rename {massa-graph-2-worker => massa-consensus-worker}/src/state/tick.rs (90%) rename {massa-graph-2-worker => massa-consensus-worker}/src/state/verifications.rs (95%) rename {massa-graph-2-worker => massa-consensus-worker}/src/worker/init.rs (88%) rename {massa-graph-2-worker => massa-consensus-worker}/src/worker/main_loop.rs (91%) rename {massa-graph-2-worker => massa-consensus-worker}/src/worker/mod.rs (62%) delete mode 100644 massa-graph-2-worker/src/manager.rs diff --git a/Cargo.lock b/Cargo.lock index 23804cd86f8..31033cdff74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1602,13 +1602,13 @@ dependencies = [ "massa_api", "massa_async_pool", "massa_bootstrap", + "massa_consensus_exports", + "massa_consensus_worker", "massa_execution_exports", "massa_execution_worker", "massa_factory_exports", "massa_factory_worker", "massa_final_state", - "massa_graph_2_exports", - "massa_graph_2_worker", "massa_ledger_exports", "massa_ledger_worker", "massa_logging", @@ -1666,8 +1666,8 @@ dependencies = [ "jsonrpc-core", "jsonrpc-derive", "jsonrpc-http-server", + "massa_consensus_exports", "massa_execution_exports", - "massa_graph_2_exports", "massa_hash", "massa_models", "massa_network_exports", @@ -1721,8 +1721,8 @@ dependencies = [ "futures 0.3.24", "lazy_static", "massa_async_pool", + "massa_consensus_exports", "massa_final_state", - "massa_graph_2_exports", "massa_hash", "massa_ledger_exports", "massa_ledger_worker", @@ -1762,6 +1762,47 @@ dependencies = [ "thiserror", ] +[[package]] +name = "massa_consensus_exports" +version = "0.1.0" +dependencies = [ + "crossbeam-channel", + "displaydoc", + "massa_execution_exports", + "massa_hash", + "massa_models", + "massa_pool_exports", + "massa_pos_exports", + "massa_protocol_exports", + "massa_serialization", + "massa_signature", + "massa_storage", + "massa_time", + "nom 7.1.1", + "serde 1.0.145", + "serde_json", + "thiserror", +] + +[[package]] +name = "massa_consensus_worker" +version = "0.1.0" +dependencies = [ + "displaydoc", + "massa_consensus_exports", + "massa_hash", + "massa_logging", + "massa_models", + "massa_signature", + "massa_storage", + "massa_time", + "num", + "parking_lot", + "serde 1.0.145", + "serde_json", + "tracing", +] + [[package]] name = "massa_execution_exports" version = "0.1.0" @@ -1812,8 +1853,8 @@ version = "0.1.0" dependencies = [ "anyhow", "displaydoc", + "massa_consensus_exports", "massa_execution_exports", - "massa_graph_2_exports", "massa_hash", "massa_ledger_exports", "massa_models", @@ -1837,8 +1878,8 @@ name = "massa_factory_worker" version = "0.1.0" dependencies = [ "anyhow", + "massa_consensus_exports", "massa_factory_exports", - "massa_graph_2_exports", "massa_hash", "massa_models", 
"massa_pool_exports", @@ -1874,47 +1915,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "massa_graph_2_exports" -version = "0.1.0" -dependencies = [ - "crossbeam-channel", - "displaydoc", - "massa_execution_exports", - "massa_hash", - "massa_models", - "massa_pool_exports", - "massa_pos_exports", - "massa_protocol_exports", - "massa_serialization", - "massa_signature", - "massa_storage", - "massa_time", - "nom 7.1.1", - "serde 1.0.145", - "serde_json", - "thiserror", -] - -[[package]] -name = "massa_graph_2_worker" -version = "0.1.0" -dependencies = [ - "displaydoc", - "massa_graph_2_exports", - "massa_hash", - "massa_logging", - "massa_models", - "massa_signature", - "massa_storage", - "massa_time", - "num", - "parking_lot", - "serde 1.0.145", - "serde_json", - "tracing", -] - [[package]] name = "massa_hash" version = "0.1.0" @@ -2134,7 +2134,7 @@ version = "0.1.0" dependencies = [ "futures 0.3.24", "lazy_static", - "massa_graph_2_exports", + "massa_consensus_exports", "massa_hash", "massa_logging", "massa_models", diff --git a/Cargo.toml b/Cargo.toml index 55a6de4e949..7ec33ddf67d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,8 +9,8 @@ members = [ "massa-execution-worker", "massa-factory-exports", "massa-factory-worker", - "massa-graph-2-exports", - "massa-graph-2-worker", + "massa-consensus-exports", + "massa-consensus-worker", "massa-hash", "massa-logging", "massa-models", diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index 0b80cacffc3..51c1035c758 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -17,7 +17,7 @@ tracing = "0.1" itertools = "0.10" parking_lot = { version = "0.12", features = ["deadlock_detection"] } # custom modules -massa_graph_2_exports = { path = "../massa-graph-2-exports" } +massa_consensus_exports = { path = "../massa-consensus-exports" } massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_network_exports = { path = "../massa-network-exports" } diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index a27ef70919c..fa301ead59a 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -1,8 +1,8 @@ // Copyright (c) 2022 MASSA LABS use displaydoc::Display; +use massa_consensus_exports::error::ConsensusError; use massa_execution_exports::ExecutionError; -use massa_graph_2_exports::error::GraphError; use massa_hash::MassaHashError; use massa_models::error::ModelsError; use massa_network_exports::NetworkError; @@ -22,8 +22,8 @@ pub enum ApiError { ReceiveChannelError(String), /// `massa_hash` error: {0} MassaHashError(#[from] MassaHashError), - /// graph error: {0} - GraphError(#[from] GraphError), + /// consensus error: {0} + ConsensusError(#[from] ConsensusError), /// execution error: {0} ExecutionError(#[from] ExecutionError), /// network error: {0} diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index 0ed983b2020..139d6e4a72d 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -8,8 +8,8 @@ use error::ApiError; use jsonrpc_core::{BoxFuture, IoHandler, Value}; use jsonrpc_derive::rpc; use jsonrpc_http_server::{CloseHandle, ServerBuilder}; +use massa_consensus_exports::ConsensusController; use massa_execution_exports::ExecutionController; -use massa_graph_2_exports::GraphController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, EndorsementInfo, EventFilter, NodeStatus, OperationInfo, OperationInput, @@ -52,8 +52,8 @@ pub use config::APIConfig; /// Public API component pub struct Public { - /// link to 
the graph component - pub graph_controller: Box, + /// link to the consensus component + pub consensus_controller: Box, /// link to the execution component pub execution_controller: Box, /// link to the selector component diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 38f9e1218a5..d920179300a 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -5,11 +5,11 @@ use crate::config::APIConfig; use crate::error::ApiError; use crate::{Endpoints, Public, RpcServer, StopHandle, API}; use jsonrpc_core::BoxFuture; +use massa_consensus_exports::block_status::DiscardReason; +use massa_consensus_exports::ConsensusController; use massa_execution_exports::{ ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; -use massa_graph_2_exports::block_status::DiscardReason; -use massa_graph_2_exports::GraphController; use massa_models::api::{ BlockGraphStatus, DatastoreEntryInput, DatastoreEntryOutput, OperationInput, ReadOnlyBytecodeExecution, ReadOnlyCall, SlotAmount, @@ -57,7 +57,7 @@ use std::net::{IpAddr, SocketAddr}; impl API { /// generate a new public API pub fn new( - graph_controller: Box, + consensus_controller: Box, execution_controller: Box, api_settings: APIConfig, selector_controller: Box, @@ -71,7 +71,7 @@ impl API { storage: Storage, ) -> Self { API(Public { - graph_controller, + consensus_controller, api_settings, pool_command_sender, network_settings, @@ -291,7 +291,7 @@ impl Endpoints for API { fn get_status(&self) -> BoxFuture> { let execution_controller = self.0.execution_controller.clone(); - let graph_controller = self.0.graph_controller.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let network_command_sender = self.0.network_command_sender.clone(); let network_config = self.0.network_settings.clone(); let version = self.0.version; @@ -310,7 +310,7 @@ impl Endpoints for API { )?; let execution_stats = execution_controller.get_stats(); - let graph_stats = graph_controller.get_stats()?; + let consensus_stats = consensus_controller.get_stats()?; let (network_stats, peers) = tokio::join!( network_command_sender.get_network_stats(), @@ -341,7 +341,7 @@ impl Endpoints for API { .unwrap_or_else(|| Slot::new(0, 0)) .get_next_slot(api_config.thread_count)?, execution_stats, - consensus_stats: graph_stats, + consensus_stats, network_stats: network_stats?, pool_stats, config, @@ -354,8 +354,8 @@ impl Endpoints for API { } fn get_cliques(&self) -> BoxFuture, ApiError>> { - let graph_controller = self.0.graph_controller.clone(); - let closure = async move || Ok(graph_controller.get_cliques()); + let consensus_controller = self.0.consensus_controller.clone(); + let closure = async move || Ok(consensus_controller.get_cliques()); Box::pin(closure()) } @@ -413,7 +413,7 @@ impl Endpoints for API { let in_pool = self.0.pool_command_sender.contains_operations(&ops); let api_cfg = self.0.api_settings; - let graph_controller = self.0.graph_controller.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let closure = async move || { if ops.len() as u64 > api_cfg.max_arguments { return Err(ApiError::TooManyArguments("too many arguments".into())); @@ -427,7 +427,7 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = graph_controller.get_block_statuses(&involved_blocks); + let involved_block_statuses = consensus_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() 
.zip(involved_block_statuses.into_iter()) @@ -494,7 +494,7 @@ impl Endpoints for API { // ask pool whether it carries the operations let in_pool = self.0.pool_command_sender.contains_endorsements(&eds); - let graph_controller = self.0.graph_controller.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let api_cfg = self.0.api_settings; let closure = async move || { if eds.len() as u64 > api_cfg.max_arguments { @@ -509,7 +509,7 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = graph_controller.get_block_statuses(&involved_blocks); + let involved_block_statuses = consensus_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() .zip(involved_block_statuses.into_iter()) @@ -550,7 +550,7 @@ impl Endpoints for API { /// gets a block. Returns None if not found /// only active blocks are returned fn get_block(&self, id: BlockId) -> BoxFuture> { - let graph_controller = self.0.graph_controller.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let storage = self.0.storage.clone_without_refs(); let closure = async move || { let block = match storage.read_blocks().get(&id).cloned() { @@ -560,7 +560,7 @@ impl Endpoints for API { } }; - let graph_status = graph_controller + let graph_status = consensus_controller .get_block_statuses(&[id]) .into_iter() .next() @@ -590,10 +590,10 @@ impl Endpoints for API { &self, slot: Slot, ) -> BoxFuture, ApiError>> { - let graph_controller = self.0.graph_controller.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let storage = self.0.storage.clone_without_refs(); let closure = async move || { - let block_id = match graph_controller.get_blockclique_block_at_slot(slot) { + let block_id = match consensus_controller.get_blockclique_block_at_slot(slot) { Some(id) => id, None => return Ok(None), }; @@ -612,7 +612,7 @@ impl Endpoints for API { &self, time: TimeInterval, ) -> BoxFuture, ApiError>> { - let graph_controller = self.0.graph_controller.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let api_config = self.0.api_settings; let closure = async move || { // filter blocks from graph_export @@ -623,7 +623,7 @@ impl Endpoints for API { time.start, time.end, )?; - let graph = graph_controller.get_block_graph_status(start_slot, end_slot)?; + let graph = consensus_controller.get_block_graph_status(start_slot, end_slot)?; let mut res = Vec::with_capacity(graph.active_blocks.len()); let blockclique = graph .max_cliques diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index ff5a97b9461..54e8d722434 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -27,7 +27,7 @@ tracing = "0.1" # custom modules massa_async_pool = { path = "../massa-async-pool" } massa_final_state = { path = "../massa-final-state" } -massa_graph_2_exports = { path = "../massa-graph-2-exports" } +massa_consensus_exports = { path = "../massa-consensus-exports" } massa_hash = { path = "../massa-hash" } massa_ledger_exports = { path = "../massa-ledger-exports" } massa_logging = { path = "../massa-logging" } @@ -56,7 +56,7 @@ massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"] } testing = [ "massa_final_state/testing", "massa_ledger_worker/testing", - "massa_graph_2_exports/testing", + "massa_consensus_exports/testing", "massa_async_pool/testing", ] sandbox = [ diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index 
432b72c6f75..5783ccce87a 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -2,8 +2,8 @@ use crate::messages::{BootstrapClientMessage, BootstrapServerMessage}; use displaydoc::Display; +use massa_consensus_exports::error::ConsensusError; use massa_final_state::FinalStateError; -use massa_graph_2_exports::error::GraphError; use massa_hash::MassaHashError; use massa_network_exports::NetworkError; use massa_pos_exports::PosError; @@ -30,8 +30,8 @@ pub enum BootstrapError { UnexpectedConnectionDrop, /// `massa_hash` error: {0} MassaHashError(#[from] MassaHashError), - /// `massa_graph` error: {0} - MassaGraphError(#[from] GraphError), + /// `massa_consensus` error: {0} + MassaConsensusError(#[from] ConsensusError), /// `massa_signature` error {0} MassaSignatureError(#[from] massa_signature::MassaSignatureError), /// time error: {0} diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index b9ff13e16db..64ae6f7763c 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -17,8 +17,8 @@ extern crate fix_hidden_lifetime_bug; pub use establisher::types::Establisher; +use massa_consensus_exports::bootstrapable_graph::BootstrapableGraph; use massa_final_state::FinalState; -use massa_graph_2_exports::bootstrapable_graph::BootstrapableGraph; use massa_network_exports::BootstrapPeers; use parking_lot::RwLock; use std::sync::Arc; diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index 76462f96aa1..36c51b86b0a 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -4,13 +4,13 @@ use massa_async_pool::{ AsyncMessage, AsyncMessageId, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, AsyncPoolDeserializer, AsyncPoolSerializer, }; +use massa_consensus_exports::bootstrapable_graph::{ + BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, +}; use massa_final_state::{ ExecutedOps, ExecutedOpsDeserializer, ExecutedOpsSerializer, StateChanges, StateChangesDeserializer, StateChangesSerializer, }; -use massa_graph_2_exports::bootstrapable_graph::{ - BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, -}; use massa_ledger_exports::{KeyDeserializer, KeySerializer}; use massa_models::operation::{OperationId, OperationIdDeserializer, OperationIdSerializer}; use massa_models::serialization::{VecU8Deserializer, VecU8Serializer}; diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index 61bfab003ad..37b52be0460 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -8,8 +8,8 @@ use std::{ use futures::stream::FuturesUnordered; use futures::StreamExt; use massa_async_pool::AsyncMessageId; +use massa_consensus_exports::ConsensusController; use massa_final_state::FinalState; -use massa_graph_2_exports::GraphController; use massa_logging::massa_trace; use massa_models::{ operation::OperationId, slot::Slot, streaming_step::StreamingStep, version::Version, @@ -52,7 +52,7 @@ impl BootstrapManager { /// start a bootstrap server. /// Once your node will be ready, you may want other to bootstrap from you. 
pub async fn start_bootstrap_server( - graph_controller: Box, + consensus_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, bootstrap_config: BootstrapConfig, @@ -66,7 +66,7 @@ pub async fn start_bootstrap_server( let (manager_tx, manager_rx) = mpsc::channel::<()>(1); let join_handle = tokio::spawn(async move { BootstrapServer { - graph_controller, + consensus_controller, network_command_sender, final_state, establisher, @@ -91,7 +91,7 @@ pub async fn start_bootstrap_server( } struct BootstrapServer { - graph_controller: Box, + consensus_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, establisher: Establisher, @@ -208,14 +208,14 @@ impl BootstrapServer { let compensation_millis = self.compensation_millis; let version = self.version; let data_execution = self.final_state.clone(); - let graph_controller = self.graph_controller.clone(); + let consensus_controller = self.consensus_controller.clone(); let network_command_sender = self.network_command_sender.clone(); let keypair = self.keypair.clone(); let config = self.bootstrap_config.clone(); bootstrap_sessions.push(async move { let mut server = BootstrapServerBinder::new(dplx, keypair, config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes); - match manage_bootstrap(&config, &mut server, data_execution, compensation_millis, version, graph_controller, network_command_sender).await { + match manage_bootstrap(&config, &mut server, data_execution, compensation_millis, version, consensus_controller, network_command_sender).await { Ok(_) => { info!("bootstrapped peer {}", remote_addr) }, @@ -392,7 +392,7 @@ async fn manage_bootstrap( final_state: Arc>, compensation_millis: i64, version: Version, - graph_controller: Box, + consensus_controller: Box, network_command_sender: NetworkCommandSender, ) -> Result<(), BootstrapError> { massa_trace!("bootstrap.lib.manage_bootstrap", {}); @@ -495,7 +495,7 @@ async fn manage_bootstrap( match tokio::time::timeout( write_timeout, server.send(BootstrapServerMessage::ConsensusState { - graph: graph_controller.get_bootstrap_graph()?, + graph: consensus_controller.get_bootstrap_graph()?, }), ) .await diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 89ad8edee9b..ee16b1ed723 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -15,8 +15,8 @@ use crate::{ get_state, start_bootstrap_server, tests::tools::{assert_eq_bootstrap_graph, get_bootstrap_config}, }; +use massa_consensus_exports::test_exports::{MockConsensusController, MockConsensusControllerMessage}; use massa_final_state::{test_exports::assert_eq_final_state, FinalState, StateChanges}; -use massa_graph_2_exports::test_exports::{MockGraphController, MockGraphControllerMessage}; use massa_models::{address::Address, slot::Slot, version::Version}; use massa_network_exports::{NetworkCommand, NetworkCommandSender}; use massa_pos_exports::{test_exports::assert_eq_pos_selection, PoSFinalState, SelectorConfig}; @@ -59,7 +59,7 @@ async fn test_bootstrap_server() { }) .expect("could not start client selector controller"); - let (graph_controller, mut graph_event_receiver) = MockGraphController::new_with_receiver(); + let (consensus_controller, mut consensus_event_receiver) = MockConsensusController::new_with_receiver(); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); let final_state_bootstrap = 
get_random_final_state_bootstrap( PoSFinalState::new( @@ -75,7 +75,7 @@ async fn test_bootstrap_server() { let (bootstrap_establisher, bootstrap_interface) = mock_establisher::new(); let bootstrap_manager = start_bootstrap_server( - graph_controller, + consensus_controller, NetworkCommandSender(network_cmd_tx), final_state.clone(), bootstrap_config.clone(), @@ -192,8 +192,8 @@ async fn test_bootstrap_server() { // wait for peers and graph let sent_graph = tokio::task::spawn_blocking(move || { let response = - graph_event_receiver.wait_command(MassaTime::from_millis(10000), |cmd| match cmd { - MockGraphControllerMessage::GetBootstrapableGraph { response_tx } => { + consensus_event_receiver.wait_command(MassaTime::from_millis(10000), |cmd| match cmd { + MockConsensusControllerMessage::GetBootstrapableGraph { response_tx } => { let sent_graph = get_boot_state(); response_tx.send(Ok(sent_graph.clone())).unwrap(); Some(sent_graph) diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index b37fed9de7e..e27f4c7433c 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -5,14 +5,14 @@ use crate::settings::BootstrapConfig; use bitvec::vec::BitVec; use massa_async_pool::test_exports::{create_async_pool, get_random_message}; use massa_async_pool::{AsyncPoolChanges, Change}; -use massa_final_state::test_exports::create_final_state; -use massa_final_state::{ExecutedOps, FinalState}; -use massa_graph_2_exports::{ +use massa_consensus_exports::{ bootstrapable_graph::{ BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, }, export_active_block::{ExportActiveBlock, ExportActiveBlockSerializer}, }; +use massa_final_state::test_exports::create_final_state; +use massa_final_state::{ExecutedOps, FinalState}; use massa_hash::Hash; use massa_ledger_exports::{LedgerChanges, LedgerEntry, SetUpdateOrDelete}; use massa_ledger_worker::test_exports::create_final_ledger; diff --git a/massa-graph-2-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml similarity index 97% rename from massa-graph-2-exports/Cargo.toml rename to massa-consensus-exports/Cargo.toml index f7893f91945..7286e9bd634 100644 --- a/massa-graph-2-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "massa_graph_2_exports" +name = "massa_consensus_exports" version = "0.1.0" authors = ["Massa Labs "] edition = "2021" diff --git a/massa-graph-2-exports/src/block_graph_export.rs b/massa-consensus-exports/src/block_graph_export.rs similarity index 100% rename from massa-graph-2-exports/src/block_graph_export.rs rename to massa-consensus-exports/src/block_graph_export.rs diff --git a/massa-graph-2-exports/src/block_status.rs b/massa-consensus-exports/src/block_status.rs similarity index 100% rename from massa-graph-2-exports/src/block_status.rs rename to massa-consensus-exports/src/block_status.rs diff --git a/massa-graph-2-exports/src/bootstrapable_graph.rs b/massa-consensus-exports/src/bootstrapable_graph.rs similarity index 95% rename from massa-graph-2-exports/src/bootstrapable_graph.rs rename to massa-consensus-exports/src/bootstrapable_graph.rs index ae31f239988..9f2f0f32a64 100644 --- a/massa-graph-2-exports/src/bootstrapable_graph.rs +++ b/massa-consensus-exports/src/bootstrapable_graph.rs @@ -36,7 +36,7 @@ impl BootstrapableGraphSerializer { impl Serializer for BootstrapableGraphSerializer { /// ## Example /// ```rust - /// use massa_graph_2_exports::bootstrapable_graph::{BootstrapableGraph, 
BootstrapableGraphSerializer}; + /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphSerializer}; /// use massa_serialization::Serializer; /// use massa_hash::Hash; /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; @@ -115,7 +115,7 @@ impl BootstrapableGraphDeserializer { impl Deserializer<BootstrapableGraph> for BootstrapableGraphDeserializer { /// ## Example /// ```rust - /// use massa_graph_2_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; + /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; /// use massa_serialization::{Deserializer, Serializer, DeserializeError}; /// use massa_hash::Hash; /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; diff --git a/massa-graph-2-exports/src/channels.rs b/massa-consensus-exports/src/channels.rs similarity index 83% rename from massa-graph-2-exports/src/channels.rs rename to massa-consensus-exports/src/channels.rs index 28e564ac350..a895b7cfc97 100644 --- a/massa-graph-2-exports/src/channels.rs +++ b/massa-consensus-exports/src/channels.rs @@ -4,15 +4,15 @@ use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; use massa_protocol_exports::ProtocolCommandSender; -use crate::events::GraphEvent; +use crate::events::ConsensusEvent; /// Contains a reference to the pool, selector and execution controller /// Contains a channel to send info to protocol #[derive(Clone)] -pub struct GraphChannels { +pub struct ConsensusChannels { pub execution_controller: Box<dyn ExecutionController>, pub selector_controller: Box<dyn SelectorController>, pub pool_command_sender: Box<dyn PoolController>, - pub controller_event_tx: Sender<GraphEvent>, + pub controller_event_tx: Sender<ConsensusEvent>, pub protocol_command_sender: ProtocolCommandSender, } diff --git a/massa-graph-2-exports/src/controller_trait.rs b/massa-consensus-exports/src/controller_trait.rs similarity index 82% rename from massa-graph-2-exports/src/controller_trait.rs rename to massa-consensus-exports/src/controller_trait.rs index 169c40af0d5..a8aa52b675a 100644 --- a/massa-graph-2-exports/src/controller_trait.rs +++ b/massa-consensus-exports/src/controller_trait.rs @@ -1,5 +1,5 @@ use crate::block_graph_export::BlockGraphExport; -use crate::{bootstrapable_graph::BootstrapableGraph, error::GraphError}; +use crate::{bootstrapable_graph::BootstrapableGraph, error::ConsensusError}; use massa_models::{ api::BlockGraphStatus, block::{BlockHeader, BlockId}, @@ -24,7 +24,7 @@ pub trait GraphController: Send + Sync { &self, start_slot: Option<Slot>, end_slot: Option<Slot>, - ) -> Result<BlockGraphExport, GraphError>; + ) -> Result<BlockGraphExport, ConsensusError>; /// Get statuses of a list of blocks /// @@ -45,13 +45,13 @@ pub trait GraphController: Send + Sync { /// /// # Returns /// The graph to bootstrap from - fn get_bootstrap_graph(&self) -> Result<BootstrapableGraph, GraphError>; + fn get_bootstrap_graph(&self) -> Result<BootstrapableGraph, ConsensusError>; /// Get the stats of the consensus /// /// # Returns /// The stats of the consensus - fn get_stats(&self) -> Result<ConsensusStats, GraphError>; + fn get_stats(&self) -> Result<ConsensusStats, ConsensusError>; /// Get the best parents for the next block to be produced /// @@ -101,23 +101,23 @@ pub trait GraphController: Send + Sync { fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>); /// Returns a boxed clone of self. - /// Useful to allow cloning `Box<dyn GraphController>`. - fn clone_box(&self) -> Box<dyn GraphController>; + /// Useful to allow cloning `Box<dyn ConsensusController>`. + fn clone_box(&self) -> Box<dyn ConsensusController>; } -/// Allow cloning `Box<dyn GraphController>` -/// Uses `GraphController::clone_box` internally -impl Clone for Box<dyn GraphController> { - fn clone(&self) -> Box<dyn GraphController> { +/// Allow cloning `Box<dyn ConsensusController>` +/// Uses `ConsensusController::clone_box` internally +impl Clone for Box<dyn ConsensusController> { + fn clone(&self) -> Box<dyn ConsensusController> { self.clone_box() } } -/// Graph manager used to stop the graph thread -pub trait GraphManager { - /// Stop the graph thread +/// Consensus manager used to stop the consensus thread +pub trait ConsensusManager { + /// Stop the consensus thread /// Note that we do not take self by value to consume it - /// because it is not allowed to move out of Box<dyn GraphManager> + /// because it is not allowed to move out of Box<dyn ConsensusManager> /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. fn stop(&mut self); } diff --git a/massa-graph-2-exports/src/error.rs b/massa-consensus-exports/src/error.rs similarity index 97% rename from massa-graph-2-exports/src/error.rs rename to massa-consensus-exports/src/error.rs index 3389675d298..417a1d6ce9d 100644 --- a/massa-graph-2-exports/src/error.rs +++ b/massa-consensus-exports/src/error.rs @@ -7,10 +7,10 @@ use massa_time::TimeError; use std::array::TryFromSliceError; use thiserror::Error; -/// Graph error +/// Consensus error #[non_exhaustive] #[derive(Display, Error, Debug)] -pub enum GraphError { +pub enum ConsensusError { /// execution error: {0} ExecutionError(#[from] ExecutionError), /// models error: {0} diff --git a/massa-graph-2-exports/src/events.rs b/massa-consensus-exports/src/events.rs similarity index 61% rename from massa-graph-2-exports/src/events.rs rename to massa-consensus-exports/src/events.rs index bf38b3f00a6..e48b4803379 100644 --- a/massa-graph-2-exports/src/events.rs +++ b/massa-consensus-exports/src/events.rs @@ -1,6 +1,6 @@ -/// Events that are emitted by graph. +/// Events that are emitted by consensus.
#[derive(Debug, Clone)] -pub enum GraphEvent { +pub enum ConsensusEvent { /// probable desynchronization detected, need re-synchronization NeedSync, } diff --git a/massa-graph-2-exports/src/export_active_block.rs b/massa-consensus-exports/src/export_active_block.rs similarity index 97% rename from massa-graph-2-exports/src/export_active_block.rs rename to massa-consensus-exports/src/export_active_block.rs index 8cc018ccb95..05b8e49ca13 100644 --- a/massa-graph-2-exports/src/export_active_block.rs +++ b/massa-consensus-exports/src/export_active_block.rs @@ -1,4 +1,4 @@ -use crate::error::GraphError; +use crate::error::ConsensusError; use massa_hash::HashDeserializer; use massa_models::{ active_block::ActiveBlock, @@ -78,7 +78,7 @@ impl ExportActiveBlock { self, ref_storage: &Storage, thread_count: u8, - ) -> Result<(ActiveBlock, Storage), GraphError> { + ) -> Result<(ActiveBlock, Storage), ConsensusError> { // create resulting storage let mut storage = ref_storage.clone_without_refs(); @@ -95,7 +95,7 @@ impl ExportActiveBlock { .cloned() .collect::>() { - return Err(GraphError::MissingOperation( + return Err(ConsensusError::MissingOperation( "operation list mismatch on active block conversion".into(), )); } @@ -233,7 +233,7 @@ impl ExportActiveBlockDeserializer { impl Deserializer for ExportActiveBlockDeserializer { /// ## Example: /// ```rust - /// use massa_graph_2_exports::export_active_block::{ExportActiveBlock, ExportActiveBlockDeserializer, ExportActiveBlockSerializer}; + /// use massa_consensus_exports::export_active_block::{ExportActiveBlock, ExportActiveBlockDeserializer, ExportActiveBlockSerializer}; /// use massa_models::{ledger_models::LedgerChanges, config::THREAD_COUNT, rolls::RollUpdates, block::{BlockId, Block, BlockSerializer, BlockHeader, BlockHeaderSerializer}, prehash::PreHashSet, endorsement::{Endorsement, EndorsementSerializerLW}, slot::Slot, wrapped::WrappedContent}; /// use massa_hash::Hash; /// use std::collections::HashSet; diff --git a/massa-graph-2-exports/src/lib.rs b/massa-consensus-exports/src/lib.rs similarity index 75% rename from massa-graph-2-exports/src/lib.rs rename to massa-consensus-exports/src/lib.rs index 38ca3838da6..13eb8124690 100644 --- a/massa-graph-2-exports/src/lib.rs +++ b/massa-consensus-exports/src/lib.rs @@ -12,9 +12,9 @@ pub mod error; pub mod events; pub mod export_active_block; -pub use channels::GraphChannels; -pub use controller_trait::{GraphController, GraphManager}; -pub use settings::GraphConfig; +pub use channels::ConsensusChannels; +pub use controller_trait::{ConsensusController, ConsensusManager}; +pub use settings::ConsensusConfig; /// Test utils #[cfg(feature = "testing")] diff --git a/massa-graph-2-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs similarity index 98% rename from massa-graph-2-exports/src/settings.rs rename to massa-consensus-exports/src/settings.rs index 83bea96dc8d..bbaab35c25e 100644 --- a/massa-graph-2-exports/src/settings.rs +++ b/massa-consensus-exports/src/settings.rs @@ -3,7 +3,7 @@ use massa_time::MassaTime; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Deserialize, Serialize)] -pub struct GraphConfig { +pub struct ConsensusConfig { /// Clock compensation pub clock_compensation_millis: i64, /// Genesis timestamp diff --git a/massa-graph-2-exports/src/test_exports/config.rs b/massa-consensus-exports/src/test_exports/config.rs similarity index 95% rename from massa-graph-2-exports/src/test_exports/config.rs rename to massa-consensus-exports/src/test_exports/config.rs index 
41bfac061e2..5520031b9a4 100644 --- a/massa-graph-2-exports/src/test_exports/config.rs +++ b/massa-consensus-exports/src/test_exports/config.rs @@ -4,9 +4,9 @@ use massa_models::config::constants::{ }; use massa_time::MassaTime; -use crate::GraphConfig; +use crate::ConsensusConfig; -impl Default for GraphConfig { +impl Default for ConsensusConfig { fn default() -> Self { Self { clock_compensation_millis: 0, diff --git a/massa-graph-2-exports/src/test_exports/mock.rs b/massa-consensus-exports/src/test_exports/mock.rs similarity index 68% rename from massa-graph-2-exports/src/test_exports/mock.rs rename to massa-consensus-exports/src/test_exports/mock.rs index 098b63b0b3f..b7d58cb71cd 100644 --- a/massa-graph-2-exports/src/test_exports/mock.rs +++ b/massa-consensus-exports/src/test_exports/mock.rs @@ -18,18 +18,18 @@ use massa_time::MassaTime; use crate::{ block_graph_export::BlockGraphExport, bootstrapable_graph::BootstrapableGraph, - error::GraphError, GraphController, + error::ConsensusError, ConsensusController, }; /// Test tool to mock graph controller responses -pub struct GraphEventReceiver(pub Receiver); +pub struct ConsensusEventReceiver(pub Receiver); /// List of possible messages you can receive from the mock -/// Each variant corresponds to a unique method in `GraphController`, +/// Each variant corresponds to a unique method in `ConsensusController`, /// Some variants wait for a response on their `response_tx` field, if present. -/// See the documentation of `GraphController` for details on parameters and return values. +/// See the documentation of `ConsensusController` for details on parameters and return values. #[derive(Clone, Debug)] -pub enum MockGraphControllerMessage { +pub enum MockConsensusControllerMessage { GetBlockStatuses { block_ids: Vec, response_tx: mpsc::Sender>, @@ -37,16 +37,16 @@ pub enum MockGraphControllerMessage { GetBlockGraphStatuses { start_slot: Option, end_slot: Option, - response_tx: mpsc::Sender>, + response_tx: mpsc::Sender>, }, GetCliques { response_tx: mpsc::Sender>, }, GetBootstrapableGraph { - response_tx: mpsc::Sender>, + response_tx: mpsc::Sender>, }, GetStats { - response_tx: mpsc::Sender>, + response_tx: mpsc::Sender>, }, GetBestParents { response_tx: mpsc::Sender>, @@ -76,29 +76,29 @@ pub enum MockGraphControllerMessage { } /// A mocked graph controller that will intercept calls on its methods -/// and emit corresponding `MockGraphControllerMessage` messages through a MPSC in a thread-safe way. +/// and emit corresponding `MockConsensusControllerMessage` messages through a MPSC in a thread-safe way. /// For messages with a `response_tx` field, the mock will await a response through their `response_tx` channel /// in order to simulate returning this value at the end of the call. 
#[derive(Clone)] -pub struct MockGraphController(Arc>>); +pub struct MockConsensusController(Arc>>); -impl MockGraphController { +impl MockConsensusController { /// Create a new pair (mock graph controller, mpsc receiver for emitted messages) /// Note that unbounded mpsc channels are used - pub fn new_with_receiver() -> (Box, GraphEventReceiver) { + pub fn new_with_receiver() -> (Box, ConsensusEventReceiver) { let (tx, rx) = mpsc::channel(); ( - Box::new(MockGraphController(Arc::new(Mutex::new(tx)))), - GraphEventReceiver(rx), + Box::new(MockConsensusController(Arc::new(Mutex::new(tx)))), + ConsensusEventReceiver(rx), ) } } -impl GraphEventReceiver { +impl ConsensusEventReceiver { /// wait command pub fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option where - F: Fn(MockGraphControllerMessage) -> Option, + F: Fn(MockConsensusControllerMessage) -> Option, { match self.0.recv_timeout(timeout.into()) { Ok(msg) => filter_map(msg), @@ -107,22 +107,22 @@ impl GraphEventReceiver { } } -/// Implements all the methods of the `GraphController` trait, -/// but simply make them emit a `MockGraphControllerMessage`. +/// Implements all the methods of the `ConsensusController` trait, +/// but simply make them emit a `MockConsensusControllerMessage`. /// If the message contains a `response_tx`, /// a response from that channel is read and returned as return value. -/// See the documentation of `GraphController` for details on each function. -impl GraphController for MockGraphController { +/// See the documentation of `ConsensusController` for details on each function. +impl ConsensusController for MockConsensusController { fn get_block_graph_status( &self, start_slot: Option, end_slot: Option, - ) -> Result { + ) -> Result { let (response_tx, response_rx) = mpsc::channel(); self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetBlockGraphStatuses { + .send(MockConsensusControllerMessage::GetBlockGraphStatuses { start_slot, end_slot, response_tx, @@ -136,7 +136,7 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetBlockStatuses { + .send(MockConsensusControllerMessage::GetBlockStatuses { block_ids: ids.to_vec(), response_tx, }) @@ -149,27 +149,27 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetCliques { response_tx }) + .send(MockConsensusControllerMessage::GetCliques { response_tx }) .unwrap(); response_rx.recv().unwrap() } - fn get_bootstrap_graph(&self) -> Result { + fn get_bootstrap_graph(&self) -> Result { let (response_tx, response_rx) = mpsc::channel(); self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetBootstrapableGraph { response_tx }) + .send(MockConsensusControllerMessage::GetBootstrapableGraph { response_tx }) .unwrap(); response_rx.recv().unwrap() } - fn get_stats(&self) -> Result { + fn get_stats(&self) -> Result { let (response_tx, response_rx) = mpsc::channel(); self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetStats { response_tx }) + .send(MockConsensusControllerMessage::GetStats { response_tx }) .unwrap(); response_rx.recv().unwrap() } @@ -179,7 +179,7 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetBestParents { response_tx }) + .send(MockConsensusControllerMessage::GetBestParents { response_tx }) .unwrap(); response_rx.recv().unwrap() } @@ -189,7 +189,7 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - 
.send(MockGraphControllerMessage::GetBlockcliqueBlockAtSlot { slot, response_tx }) + .send(MockConsensusControllerMessage::GetBlockcliqueBlockAtSlot { slot, response_tx }) .unwrap(); response_rx.recv().unwrap() } @@ -199,7 +199,7 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::GetLatestBlockcliqueBlockAtSlot { slot, response_tx }) + .send(MockConsensusControllerMessage::GetLatestBlockcliqueBlockAtSlot { slot, response_tx }) .unwrap(); response_rx.recv().unwrap() } @@ -208,7 +208,7 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::MarkInvalidBlock { block_id, header }) + .send(MockConsensusControllerMessage::MarkInvalidBlock { block_id, header }) .unwrap(); } @@ -216,7 +216,7 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::RegisterBlock { + .send(MockConsensusControllerMessage::RegisterBlock { block_id, slot, block_storage, @@ -229,11 +229,11 @@ impl GraphController for MockGraphController { self.0 .lock() .unwrap() - .send(MockGraphControllerMessage::RegisterBlockHeader { block_id, header }) + .send(MockConsensusControllerMessage::RegisterBlockHeader { block_id, header }) .unwrap(); } - fn clone_box(&self) -> Box { + fn clone_box(&self) -> Box { Box::new(self.clone()) } } diff --git a/massa-graph-2-exports/src/test_exports/mod.rs b/massa-consensus-exports/src/test_exports/mod.rs similarity index 100% rename from massa-graph-2-exports/src/test_exports/mod.rs rename to massa-consensus-exports/src/test_exports/mod.rs diff --git a/massa-graph-2-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml similarity index 88% rename from massa-graph-2-worker/Cargo.toml rename to massa-consensus-worker/Cargo.toml index 4b60ebb7e5e..a1cc2dd58ed 100644 --- a/massa-graph-2-worker/Cargo.toml +++ b/massa-consensus-worker/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "massa_graph_2_worker" +name = "massa_consensus_worker" version = "0.1.0" authors = ["Massa Labs "] edition = "2021" @@ -14,7 +14,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" parking_lot = { version = "0.12", features = ["deadlock_detection"] } #custom modules -massa_graph_2_exports = { path = "../massa-graph-2-exports" } +massa_consensus_exports = { path = "../massa-consensus-exports" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_signature = { path = "../massa-signature" } diff --git a/massa-graph-2-worker/src/commands.rs b/massa-consensus-worker/src/commands.rs similarity index 92% rename from massa-graph-2-worker/src/commands.rs rename to massa-consensus-worker/src/commands.rs index 083618c344e..4ca74d79f94 100644 --- a/massa-graph-2-worker/src/commands.rs +++ b/massa-consensus-worker/src/commands.rs @@ -6,7 +6,7 @@ use massa_models::{ use massa_storage::Storage; #[allow(clippy::large_enum_variant)] -pub enum GraphCommand { +pub enum ConsensusCommand { RegisterBlock(BlockId, Slot, Storage, bool), RegisterBlockHeader(BlockId, Wrapped), MarkInvalidBlock(BlockId, Wrapped), diff --git a/massa-graph-2-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs similarity index 83% rename from massa-graph-2-worker/src/controller.rs rename to massa-consensus-worker/src/controller.rs index 0df8213276e..89a5bfdf3cb 100644 --- a/massa-graph-2-worker/src/controller.rs +++ b/massa-consensus-worker/src/controller.rs @@ -1,7 +1,7 @@ -use massa_graph_2_exports::{ +use 
massa_consensus_exports::{ block_graph_export::BlockGraphExport, block_status::BlockStatus, - bootstrapable_graph::BootstrapableGraph, error::GraphError, - export_active_block::ExportActiveBlock, GraphController, + bootstrapable_graph::BootstrapableGraph, error::ConsensusError, + export_active_block::ExportActiveBlock, ConsensusController, }; use massa_models::{ api::BlockGraphStatus, @@ -16,7 +16,7 @@ use massa_storage::Storage; use parking_lot::RwLock; use std::sync::{mpsc::SyncSender, Arc}; -use crate::{commands::GraphCommand, state::GraphState}; +use crate::{commands::ConsensusCommand, state::ConsensusState}; /// The retrieval of data is made using a shared state and modifications are asked by sending message to a channel. /// This is done mostly to be able to: @@ -26,15 +26,15 @@ use crate::{commands::GraphCommand, state::GraphState}; /// /// Note that sending commands and reading the state is done from different, mutually-asynchronous tasks and they can have data that are not sync yet. #[derive(Clone)] -pub struct GraphControllerImpl { - command_sender: SyncSender<GraphCommand>, - shared_state: Arc<RwLock<GraphState>>, +pub struct ConsensusControllerImpl { + command_sender: SyncSender<ConsensusCommand>, + shared_state: Arc<RwLock<ConsensusState>>, } -impl GraphControllerImpl { +impl ConsensusControllerImpl { pub fn new( - command_sender: SyncSender<GraphCommand>, - shared_state: Arc<RwLock<GraphState>>, + command_sender: SyncSender<ConsensusCommand>, + shared_state: Arc<RwLock<ConsensusState>>, ) -> Self { Self { command_sender, @@ -43,7 +43,7 @@ impl GraphControllerImpl { } } -impl GraphController for GraphControllerImpl { +impl ConsensusController for ConsensusControllerImpl { /// Get a block graph export in a given period. /// /// # Arguments: @@ -56,7 +56,7 @@ impl GraphController for GraphControllerImpl { &self, start_slot: Option<Slot>, end_slot: Option<Slot>, - ) -> Result<BlockGraphExport, GraphError> { + ) -> Result<BlockGraphExport, ConsensusError> { self.shared_state .read() .extract_block_graph_part(start_slot, end_slot) @@ -89,7 +89,7 @@ impl GraphController for GraphControllerImpl { /// /// # Returns: /// A portion of the graph - fn get_bootstrap_graph(&self) -> Result<BootstrapableGraph, GraphError> { + fn get_bootstrap_graph(&self) -> Result<BootstrapableGraph, ConsensusError> { let read_shared_state = self.shared_state.read(); let mut required_final_blocks: PreHashSet<_> = read_shared_state.list_required_active_blocks()?; @@ -112,7 +112,7 @@ impl GraphController for GraphControllerImpl { { final_blocks.push(ExportActiveBlock::from_active_block(a_block, storage)); } else { - return Err(GraphError::ContainerInconsistency(format!( + return Err(ConsensusError::ContainerInconsistency(format!( "block {} was expected to be active but wasn't on bootstrap graph export", b_id ))); @@ -123,7 +123,7 @@ impl GraphController for GraphControllerImpl { } /// Get the stats of the consensus - fn get_stats(&self) -> Result<ConsensusStats, GraphError> { + fn get_stats(&self) -> Result<ConsensusStats, ConsensusError> { self.shared_state.read().get_stats() } @@ -162,7 +162,7 @@ impl GraphController for GraphControllerImpl { } fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { - let _ = self.command_sender.try_send(GraphCommand::RegisterBlock( + let _ = self.command_sender.try_send(ConsensusCommand::RegisterBlock( block_id, slot, block_storage, @@ -173,16 +173,16 @@ impl GraphController for GraphControllerImpl { fn register_block_header(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>) { let _ = self .command_sender - .try_send(GraphCommand::RegisterBlockHeader(block_id, header)); + .try_send(ConsensusCommand::RegisterBlockHeader(block_id, header)); } fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>) { let _ = self .command_sender -
.try_send(GraphCommand::MarkInvalidBlock(block_id, header)); + .try_send(ConsensusCommand::MarkInvalidBlock(block_id, header)); } - fn clone_box(&self) -> Box { + fn clone_box(&self) -> Box { Box::new(self.clone()) } } diff --git a/massa-graph-2-worker/src/lib.rs b/massa-consensus-worker/src/lib.rs similarity index 70% rename from massa-graph-2-worker/src/lib.rs rename to massa-consensus-worker/src/lib.rs index 589f3b77c2d..b7c05664fc0 100644 --- a/massa-graph-2-worker/src/lib.rs +++ b/massa-consensus-worker/src/lib.rs @@ -6,4 +6,4 @@ mod manager; mod state; mod worker; -pub use worker::start_graph_worker; +pub use worker::start_consensus_worker; diff --git a/massa-consensus-worker/src/manager.rs b/massa-consensus-worker/src/manager.rs new file mode 100644 index 00000000000..d2ef67e5272 --- /dev/null +++ b/massa-consensus-worker/src/manager.rs @@ -0,0 +1,23 @@ +use massa_consensus_exports::ConsensusManager; +use std::{sync::mpsc::SyncSender, thread::JoinHandle}; +use tracing::log::info; + +use crate::commands::ConsensusCommand; + +pub struct ConsensusManagerImpl { + pub consensus_thread: Option<(SyncSender, JoinHandle<()>)>, +} + +impl ConsensusManager for ConsensusManagerImpl { + fn stop(&mut self) { + info!("stopping consensus worker..."); + // join the consensus thread + if let Some((tx, join_handle)) = self.consensus_thread.take() { + drop(tx); + join_handle + .join() + .expect("consensus thread panicked on try to join"); + } + info!("consensus worker stopped"); + } +} diff --git a/massa-graph-2-worker/src/state/graph.rs b/massa-consensus-worker/src/state/graph.rs similarity index 93% rename from massa-graph-2-worker/src/state/graph.rs rename to massa-consensus-worker/src/state/graph.rs index a6967f9e1f3..b2c08e5c6d9 100644 --- a/massa-graph-2-worker/src/state/graph.rs +++ b/massa-consensus-worker/src/state/graph.rs @@ -1,15 +1,15 @@ use std::collections::VecDeque; -use massa_graph_2_exports::{ +use massa_consensus_exports::{ block_status::{BlockStatus, DiscardReason}, - error::GraphError, + error::ConsensusError, }; use massa_logging::massa_trace; use massa_models::{block::BlockId, clique::Clique, prehash::PreHashSet, slot::Slot}; -use super::GraphState; +use super::ConsensusState; -impl GraphState { +impl ConsensusState { pub fn insert_parents_descendants( &mut self, add_block_id: BlockId, @@ -48,7 +48,7 @@ impl GraphState { pub fn compute_fitness_find_blockclique( &mut self, add_block_id: &BlockId, - ) -> Result { + ) -> Result { let mut blockclique_i = 0usize; let mut max_clique_fitness = (0u64, num::BigInt::default()); for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { @@ -58,12 +58,12 @@ impl GraphState { for block_h in clique.block_ids.iter() { let fitness = match self.block_statuses.get(block_h) { Some(BlockStatus::Active { a_block, storage: _ }) => a_block.fitness, - _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))), + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))), }; clique.fitness = clique .fitness .checked_add(fitness) - .ok_or(GraphError::FitnessOverflow)?; + .ok_or(ConsensusError::FitnessOverflow)?; sum_hash -= num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes()); } let cur_fit = (clique.fitness, sum_hash); @@ -96,7 +96,7 @@ impl GraphState { &mut self, add_block_id: &BlockId, block_id: 
&BlockId, - ) -> Result<(), GraphError> { + ) -> Result<(), ConsensusError> { if let Some(BlockStatus::Active { a_block: active_block, storage: _storage, @@ -104,7 +104,7 @@ impl GraphState { { self.active_index.remove(block_id); if active_block.is_final { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, block_id))); + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, block_id))); } // remove from gi_head @@ -168,11 +168,11 @@ impl GraphState { self.discarded_index.insert(*block_id); Ok(()) } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, block_id))); + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, block_id))); } } - pub fn list_final_blocks(&self) -> Result, GraphError> { + pub fn list_final_blocks(&self) -> Result, ConsensusError> { // short-circuiting intersection of cliques from smallest to largest let mut indices: Vec = (0..self.max_cliques.len()).collect(); indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); @@ -214,7 +214,7 @@ impl GraphState { storage: _, }) => &a_block.descendants, _ => { - return Err(GraphError::MissingBlock(format!( + return Err(ConsensusError::MissingBlock(format!( "missing block when computing total fitness of descendants: {}", candidate_h ))) @@ -307,7 +307,7 @@ impl GraphState { &mut self, add_block_id: &BlockId, final_blocks: PreHashSet, - ) -> Result<(), GraphError> { + ) -> Result<(), ConsensusError> { for block_id in final_blocks.into_iter() { // remove from gi_head if let Some(other_incomps) = self.gi_head.remove(&block_id) { @@ -354,7 +354,7 @@ impl GraphState { // update new final blocks list self.new_final_blocks.insert(block_id); } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, block_id))); + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, block_id))); } } Ok(()) diff --git a/massa-graph-2-worker/src/state/mod.rs b/massa-consensus-worker/src/state/mod.rs similarity index 94% rename from massa-graph-2-worker/src/state/mod.rs rename to massa-consensus-worker/src/state/mod.rs index 986db860385..88ebbd9006c 100644 --- a/massa-graph-2-worker/src/state/mod.rs +++ b/massa-consensus-worker/src/state/mod.rs @@ -1,10 +1,10 @@ use std::collections::{HashMap, VecDeque}; -use massa_graph_2_exports::{ +use massa_consensus_exports::{ block_graph_export::BlockGraphExport, block_status::{BlockStatus, ExportCompiledBlock, HeaderOrBlock}, - error::GraphError, - GraphChannels, GraphConfig, + error::ConsensusError, + ConsensusChannels, ConsensusConfig, }; use massa_models::{ active_block::ActiveBlock, @@ -26,11 +26,11 @@ mod tick; mod verifications; #[derive(Clone)] -pub struct GraphState { +pub struct ConsensusState { /// Configuration - pub config: GraphConfig, + pub config: ConsensusConfig, /// Channels to communicate with other modules - pub channels: GraphChannels, + pub channels: ConsensusChannels, /// Storage pub storage: Storage, 
/// Block ids of genesis blocks @@ -86,7 +86,7 @@ pub struct GraphState { pub prev_blockclique: PreHashMap, } -impl GraphState { +impl ConsensusState { /// Get a full active block pub fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { match self.block_statuses.get(block_id) { @@ -198,7 +198,7 @@ impl GraphState { } } - pub fn list_required_active_blocks(&self) -> Result, GraphError> { + pub fn list_required_active_blocks(&self) -> Result, ConsensusError> { // list all active blocks let mut retain_active: PreHashSet = PreHashSet::::with_capacity(self.active_index.len()); @@ -275,7 +275,7 @@ impl GraphState { for retain_h in retain_clone.into_iter() { retain_active.extend( self.get_full_active_block(&retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? + .ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? .0.parents .iter() .map(|(b_id, _p)| *b_id), @@ -291,7 +291,7 @@ impl GraphState { for retain_h in retain_active.iter() { let retain_slot = &self .get_full_active_block(retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? + .ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? .0.slot; earliest_retained_periods[retain_slot.thread as usize] = std::cmp::min( earliest_retained_periods[retain_slot.thread as usize], @@ -323,7 +323,7 @@ impl GraphState { &self, slot_start: Option, slot_end: Option, - ) -> Result { + ) -> Result { let mut export = BlockGraphExport { genesis_blocks: self.genesis_hashes.clone(), active_blocks: PreHashMap::with_capacity(self.block_statuses.len()), @@ -367,7 +367,7 @@ impl GraphState { if filter(&a_block.slot) { let stored_block = storage.read_blocks().get(hash).cloned().ok_or_else(|| { - GraphError::MissingBlock(format!( + ConsensusError::MissingBlock(format!( "missing block in BlockGraphExport::extract_from: {}", hash )) @@ -417,7 +417,7 @@ impl GraphState { /// get the current block wish list, including the operations hash. 
pub fn get_block_wishlist( &self, - ) -> Result>, GraphError> { + ) -> Result>, ConsensusError> { let mut wishlist = PreHashMap::>::default(); for block_id in self.waiting_for_dependencies_index.iter() { if let Some(BlockStatus::WaitingForDependencies { @@ -452,7 +452,7 @@ impl GraphState { pub fn get_active_block_and_descendants( &self, block_id: &BlockId, - ) -> Result, GraphError> { + ) -> Result, ConsensusError> { let mut to_visit = vec![*block_id]; let mut result = PreHashSet::::default(); while let Some(visit_h) = to_visit.pop() { @@ -465,7 +465,7 @@ impl GraphState { .children.iter() .for_each(|thread_children| to_visit.extend(thread_children.keys())) }, - _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h))), + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h))), } } Ok(result) diff --git a/massa-graph-2-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs similarity index 97% rename from massa-graph-2-worker/src/state/process.rs rename to massa-consensus-worker/src/state/process.rs index ca57af10a50..722cec343d0 100644 --- a/massa-graph-2-worker/src/state/process.rs +++ b/massa-consensus-worker/src/state/process.rs @@ -3,9 +3,9 @@ use std::{ mem, }; -use massa_graph_2_exports::{ +use massa_consensus_exports::{ block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, - error::GraphError, + error::ConsensusError, }; use massa_logging::massa_trace; use massa_models::{ @@ -23,9 +23,9 @@ use tracing::log::{debug, info}; use crate::state::verifications::HeaderCheckOutcome; -use super::GraphState; +use super::ConsensusState; -impl GraphState { +impl ConsensusState { /// Acknowledge a set of items recursively and process them /// /// # Arguments: @@ -38,7 +38,7 @@ impl GraphState { &mut self, mut to_ack: BTreeSet<(Slot, BlockId)>, current_slot: Option, - ) -> Result<(), GraphError> { + ) -> Result<(), ConsensusError> { // order processing by (slot, hash) while let Some((_slot, hash)) = to_ack.pop_first() { to_ack.extend(self.process(hash, current_slot)?) 
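Note: the `ack_block_set` hunk above is the core of the new worker's processing loop: a worklist keyed by `(Slot, BlockId)` kept in a `BTreeSet`, so any items that `process` discovers are re-queued and still handled in slot order. A minimal, self-contained sketch of the pattern follows, with hypothetical `Slot`/`BlockId` stand-ins rather than the real Massa types:

    use std::collections::BTreeSet;

    // Hypothetical stand-ins for massa_models::slot::Slot and BlockId.
    type Slot = u64;
    type BlockId = u32;

    // Processing one item may unblock others; returning them re-queues the work.
    fn process(slot: Slot, id: BlockId) -> Vec<(Slot, BlockId)> {
        println!("acknowledging block {id} at slot {slot}");
        Vec::new()
    }

    // Drain the worklist in ascending (slot, id) order. BTreeSet::pop_first
    // (stable since Rust 1.66) always yields the smallest remaining key, so
    // newly discovered items are still acknowledged in slot order.
    fn ack_block_set(mut to_ack: BTreeSet<(Slot, BlockId)>) {
        while let Some((slot, id)) = to_ack.pop_first() {
            to_ack.extend(process(slot, id));
        }
    }

    fn main() {
        let to_ack: BTreeSet<_> = [(2, 7), (1, 3)].into_iter().collect();
        ack_block_set(to_ack); // processes slot 1 before slot 2
    }
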
@@ -58,7 +58,7 @@ impl GraphState { &mut self, block_id: BlockId, current_slot: Option, - ) -> Result, GraphError> { + ) -> Result, ConsensusError> { // list items to reprocess let mut reprocess = BTreeSet::new(); @@ -103,7 +103,7 @@ impl GraphState { self.incoming_index.remove(&block_id); header } else { - return Err(GraphError::ContainerInconsistency(format!( + return Err(ConsensusError::ContainerInconsistency(format!( "inconsistency inside block statuses removing incoming header {}", block_id ))); @@ -211,7 +211,7 @@ impl GraphState { self.incoming_index.remove(&block_id); (slot, storage) } else { - return Err(GraphError::ContainerInconsistency(format!( + return Err(ConsensusError::ContainerInconsistency(format!( "inconsistency inside block statuses removing incoming block {}", block_id ))); @@ -353,7 +353,7 @@ impl GraphState { ); return Ok(reprocess); } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); }; } @@ -384,7 +384,7 @@ impl GraphState { ); return Ok(reprocess); } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot header or block {}", block_id))); + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot header or block {}", block_id))); } } }; @@ -426,7 +426,7 @@ impl GraphState { } /// TODO: Doc - pub fn promote_dep_tree(&mut self, hash: BlockId) -> Result<(), GraphError> { + pub fn promote_dep_tree(&mut self, hash: BlockId) -> Result<(), ConsensusError> { let mut to_explore = vec![hash]; let mut to_promote: PreHashMap = PreHashMap::default(); while let Some(h) = to_explore.pop() { @@ -488,7 +488,7 @@ impl GraphState { inherited_incomp_count: usize, fitness: u64, mut storage: Storage, - ) -> Result<(), GraphError> { + ) -> Result<(), ConsensusError> { massa_trace!("consensus.block_graph.add_block_to_graph", { "block_id": add_block_id }); @@ -533,7 +533,7 @@ impl GraphState { self.gi_head .get_mut(incomp_h) .ok_or_else(|| { - GraphError::MissingBlock(format!( + ConsensusError::MissingBlock(format!( "missing block when adding incomp to gi_head: {}", incomp_h )) @@ -607,7 +607,7 @@ impl GraphState { for block_h in blockclique.block_ids.iter() { let b_slot = match self.block_statuses.get(block_h) { Some(BlockStatus::Active { a_block, storage: _ }) => a_block.slot, - _ => return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h))), + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h))), }; if b_slot.period > self.best_parents[b_slot.thread as usize].1 { self.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); @@ -757,7 +757,7 @@ impl GraphState { /// 9. notify protocol of block wish list /// 10. note new latest final periods (prune graph if changed) /// 11. 
add stale blocks to stats - pub fn block_db_changed(&mut self) -> Result<(), GraphError> { + pub fn block_db_changed(&mut self) -> Result<(), ConsensusError> { let final_block_slots = { massa_trace!("consensus.consensus_worker.block_db_changed", {}); diff --git a/massa-graph-2-worker/src/state/process_commands.rs b/massa-consensus-worker/src/state/process_commands.rs similarity index 97% rename from massa-graph-2-worker/src/state/process_commands.rs rename to massa-consensus-worker/src/state/process_commands.rs index ef9951e6780..fd923648f10 100644 --- a/massa-graph-2-worker/src/state/process_commands.rs +++ b/massa-consensus-worker/src/state/process_commands.rs @@ -1,8 +1,8 @@ use std::collections::{hash_map::Entry, BTreeSet}; -use massa_graph_2_exports::{ +use massa_consensus_exports::{ block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, - error::GraphError, + error::ConsensusError, }; use massa_logging::massa_trace; use massa_models::{ @@ -13,9 +13,9 @@ use massa_storage::Storage; use massa_time::MassaTime; use tracing::debug; -use super::GraphState; +use super::ConsensusState; -impl GraphState { +impl ConsensusState { /// Register a block header in the graph. Ignore genesis hashes. /// /// # Arguments: @@ -30,7 +30,7 @@ impl GraphState { block_id: BlockId, header: WrappedHeader, current_slot: Option, - ) -> Result<(), GraphError> { + ) -> Result<(), ConsensusError> { // ignore genesis blocks if self.genesis_hashes.contains(&block_id) { return Ok(()); @@ -89,7 +89,7 @@ impl GraphState { current_slot: Option, storage: Storage, created: bool, - ) -> Result<(), GraphError> { + ) -> Result<(), ConsensusError> { // ignore genesis blocks if self.genesis_hashes.contains(&block_id) { return Ok(()); diff --git a/massa-graph-2-worker/src/state/stats.rs b/massa-consensus-worker/src/state/stats.rs similarity index 84% rename from massa-graph-2-worker/src/state/stats.rs rename to massa-consensus-worker/src/state/stats.rs index e931bb9c0d7..aaea0b70489 100644 --- a/massa-graph-2-worker/src/state/stats.rs +++ b/massa-consensus-worker/src/state/stats.rs @@ -1,5 +1,5 @@ -use super::GraphState; -use massa_graph_2_exports::error::GraphError; +use super::ConsensusState; +use massa_consensus_exports::error::ConsensusError; use massa_models::stats::ConsensusStats; use massa_time::MassaTime; use std::cmp::max; @@ -8,11 +8,11 @@ use std::cmp::max; use tracing::log::warn; #[cfg(not(feature = "sandbox"))] -use massa_graph_2_exports::events::GraphEvent; +use massa_consensus_exports::events::ConsensusEvent; -impl GraphState { - /// Calculate and return stats about graph - pub fn get_stats(&self) -> Result { +impl ConsensusState { + /// Calculate and return stats about consensus + pub fn get_stats(&self) -> Result { let timespan_end = max( self.launch_time, MassaTime::now(self.config.clock_compensation_millis)?, @@ -42,7 +42,7 @@ impl GraphState { } /// Must be called each tick to update stats. 
Will detect if a desynchronization happened - pub fn stats_tick(&mut self) -> Result<(), GraphError> { + pub fn stats_tick(&mut self) -> Result<(), ConsensusError> { // check if there are any final blocks is coming from protocol // if none => we are probably desync #[cfg(not(feature = "sandbox"))] @@ -60,7 +60,7 @@ impl GraphState { }) { warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); - let _ = self.channels.controller_event_tx.send(GraphEvent::NeedSync); + let _ = self.channels.controller_event_tx.send(ConsensusEvent::NeedSync); } } // prune stats @@ -68,8 +68,8 @@ impl GraphState { Ok(()) } - /// Remove old stats from graph storage - pub fn prune_stats(&mut self) -> Result<(), GraphError> { + /// Remove old stats from consensus storage + pub fn prune_stats(&mut self) -> Result<(), ConsensusError> { let start_time = MassaTime::now(self.config.clock_compensation_millis)? .saturating_sub(self.stats_history_timespan); while let Some((t, _, _)) = self.final_block_stats.front() { diff --git a/massa-graph-2-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs similarity index 90% rename from massa-graph-2-worker/src/state/tick.rs rename to massa-consensus-worker/src/state/tick.rs index 31a022390fe..3165bc82669 100644 --- a/massa-graph-2-worker/src/state/tick.rs +++ b/massa-consensus-worker/src/state/tick.rs @@ -1,12 +1,12 @@ use std::collections::BTreeSet; -use massa_graph_2_exports::{block_status::BlockStatus, error::GraphError}; +use massa_consensus_exports::{block_status::BlockStatus, error::ConsensusError}; use massa_logging::massa_trace; use massa_models::{block::BlockId, slot::Slot}; -use super::GraphState; +use super::ConsensusState; -impl GraphState { +impl ConsensusState { /// This function should be called each tick and will check if there is a block in the graph that should be processed at this slot, and if so, process it. /// /// # Arguments: @@ -14,7 +14,7 @@ impl GraphState { /// /// # Returns: /// Error if the process of a block returned an error. - pub fn slot_tick(&mut self, current_slot: Slot) -> Result<(), GraphError> { + pub fn slot_tick(&mut self, current_slot: Slot) -> Result<(), ConsensusError> { massa_trace!("consensus.consensus_worker.slot_tick", { "slot": current_slot }); diff --git a/massa-graph-2-worker/src/state/verifications.rs b/massa-consensus-worker/src/state/verifications.rs similarity index 95% rename from massa-graph-2-worker/src/state/verifications.rs rename to massa-consensus-worker/src/state/verifications.rs index 2fc9fb4986a..9fc6dc11be8 100644 --- a/massa-graph-2-worker/src/state/verifications.rs +++ b/massa-consensus-worker/src/state/verifications.rs @@ -1,8 +1,8 @@ -use super::GraphState; +use super::ConsensusState; -use massa_graph_2_exports::{ +use massa_consensus_exports::{ block_status::{BlockStatus, DiscardReason}, - error::GraphError, + error::ConsensusError, }; use massa_logging::massa_trace; use massa_models::{ @@ -44,7 +44,7 @@ pub enum EndorsementsCheckOutcome { WaitForSlot, } -impl GraphState { +impl ConsensusState { /// Process an incoming header. 
/// /// Checks performed: @@ -68,8 +68,8 @@ impl GraphState { block_id: &BlockId, header: &WrappedHeader, current_slot: Option, - read_shared_state: &GraphState, - ) -> Result { + read_shared_state: &ConsensusState, + ) -> Result { massa_trace!("consensus.block_graph.check_header", { "block_id": block_id }); @@ -194,7 +194,7 @@ impl GraphState { storage: _, }) => a_block, _ => { - return Err(GraphError::ContainerInconsistency(format!( + return Err(ConsensusError::ContainerInconsistency(format!( "inconsistency inside block statuses searching parent {} of block {}", parent_h, block_id ))) @@ -254,7 +254,7 @@ impl GraphState { _ => None, } .ok_or_else(|| { - GraphError::ContainerInconsistency(format!( + ConsensusError::ContainerInconsistency(format!( "inconsistency inside block statuses searching parent {} in own thread of block {}", parents[header.content.slot.thread as usize].0, block_id )) @@ -275,7 +275,7 @@ impl GraphState { .filter(|&sibling_h| sibling_h != block_id) .try_for_each(|&sibling_h| { incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); - Result::<(), GraphError>::Ok(()) + Result::<(), ConsensusError>::Ok(()) })?; // grandpa incompatibility test @@ -287,7 +287,7 @@ impl GraphState { let cur_b = match read_shared_state.block_statuses.get(&cur_h) { Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), _ => None, - }.ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?; + }.ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?; // traverse but do not check up to generation 1 if cur_gen <= 1 { @@ -304,7 +304,7 @@ impl GraphState { .read_blocks() .get(&cur_b.block_id) .ok_or_else(|| { - GraphError::MissingBlock(format!( + ConsensusError::MissingBlock(format!( "missing block in grandpa incomp test: {}", cur_b.block_id )) @@ -321,7 +321,7 @@ impl GraphState { Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), _ => None, }.ok_or_else(|| - GraphError::ContainerInconsistency( + ConsensusError::ContainerInconsistency( format!("inconsistency inside block statuses searching {} check if the parent in tauB has a strictly lower period number than B's parent in tauB while checking grandpa incompatibility of block {}", parent_id, block_id) @@ -377,7 +377,7 @@ impl GraphState { pub fn check_endorsements( &self, header: &WrappedHeader, - ) -> Result { + ) -> Result { // check endorsements let endorsement_draws = match self .channels diff --git a/massa-graph-2-worker/src/worker/init.rs b/massa-consensus-worker/src/worker/init.rs similarity index 88% rename from massa-graph-2-worker/src/worker/init.rs rename to massa-consensus-worker/src/worker/init.rs index 915c6511a30..5af5969165c 100644 --- a/massa-graph-2-worker/src/worker/init.rs +++ b/massa-consensus-worker/src/worker/init.rs @@ -1,6 +1,6 @@ -use massa_graph_2_exports::{ - block_status::BlockStatus, bootstrapable_graph::BootstrapableGraph, error::GraphError, - GraphConfig, +use massa_consensus_exports::{ + block_status::BlockStatus, bootstrapable_graph::BootstrapableGraph, error::ConsensusError, + ConsensusConfig, }; use massa_hash::Hash; use massa_models::{ @@ -21,22 +21,22 @@ use std::{ }; use tracing::log::info; -use crate::{commands::GraphCommand, state::GraphState}; +use crate::{commands::ConsensusCommand, state::ConsensusState}; -use 
super::GraphWorker; +use super::ConsensusWorker; /// Creates genesis block in given thread. /// /// # Arguments -/// * `cfg`: graph configuration +/// * `cfg`: consensus configuration /// * `thread_number`: thread in which we want a genesis block /// /// # Returns /// A genesis block pub fn create_genesis_block( - cfg: &GraphConfig, + cfg: &ConsensusConfig, thread_number: u8, -) -> Result { +) -> Result { let keypair = &cfg.genesis_key; let header = BlockHeader::new_wrapped( BlockHeader { @@ -59,11 +59,11 @@ pub fn create_genesis_block( )?) } -impl GraphWorker { - /// Creates a new Graph worker. +impl ConsensusWorker { + /// Creates a new consensus worker. /// /// # Arguments - /// * `config`: graph configuration + /// * `config`: consensus configuration /// * `command_receiver`: channel to receive commands from controller /// * `channels`: channels to communicate with other workers /// * `shared_state`: shared state with the controller @@ -71,14 +71,14 @@ impl GraphWorker { /// * `storage`: shared storage /// /// # Returns: - /// A `GraphWorker`, to interact with it use the `GraphController` + /// A `ConsensusWorker`, to interact with it use the `ConsensusController` pub fn new( - config: GraphConfig, - command_receiver: mpsc::Receiver, - shared_state: Arc>, + config: ConsensusConfig, + command_receiver: mpsc::Receiver, + shared_state: Arc>, init_graph: Option, storage: Storage, - ) -> Result { + ) -> Result { let now = MassaTime::now(config.clock_compensation_millis) .expect("Couldn't init timer consensus"); let previous_slot = get_latest_block_slot_at_timestamp( @@ -94,7 +94,7 @@ impl GraphWorker { let mut genesis_block_ids = Vec::with_capacity(config.thread_count as usize); for thread in 0u8..config.thread_count { let block = create_genesis_block(&config, thread).map_err(|err| { - GraphError::GenesisCreationError(format!("genesis error {}", err)) + ConsensusError::GenesisCreationError(format!("genesis error {}", err)) })?; let mut storage = storage.clone_without_refs(); storage.store_block(block.clone()); @@ -163,7 +163,7 @@ impl GraphWorker { )) } - let mut res_graph = GraphWorker { + let mut res_consensus = ConsensusWorker { config: config.clone(), command_receiver, shared_state, @@ -177,7 +177,7 @@ impl GraphWorker { let final_blocks: Vec<(ActiveBlock, Storage)> = final_blocks .into_iter() .map(|export_b| export_b.to_active_block(&storage, config.thread_count)) - .collect::>()?; + .collect::>()?; // compute latest_final_blocks_periods let mut latest_final_blocks_periods: Vec<(BlockId, u64)> = @@ -191,7 +191,7 @@ impl GraphWorker { } { - let mut write_shared_state = res_graph.shared_state.write(); + let mut write_shared_state = res_consensus.shared_state.write(); write_shared_state.genesis_hashes = genesis_block_ids; write_shared_state.active_index = final_blocks.iter().map(|(b, _)| b.block_id).collect(); @@ -208,14 +208,14 @@ impl GraphWorker { }, )) }) - .collect::>()?; + .collect::>()?; write_shared_state.final_block_stats = final_block_stats; } - res_graph.claim_parent_refs()?; + res_consensus.claim_parent_refs()?; } else { { - let mut write_shared_state = res_graph.shared_state.write(); + let mut write_shared_state = res_consensus.shared_state.write(); write_shared_state.active_index = genesis_block_ids.iter().copied().collect(); write_shared_state.latest_final_blocks_periods = genesis_block_ids.iter().map(|h| (*h, 0)).collect(); @@ -231,7 +231,7 @@ impl GraphWorker { // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken 
in sync // because the two modules run concurrently and out of sync. { - let mut write_shared_state = res_graph.shared_state.write(); + let mut write_shared_state = res_consensus.shared_state.write(); let mut block_storage: PreHashMap = Default::default(); let notify_finals: HashMap = write_shared_state .get_all_final_blocks() @@ -261,11 +261,11 @@ impl GraphWorker { .update_blockclique_status(notify_finals, Some(notify_blockclique), block_storage); } - Ok(res_graph) + Ok(res_consensus) } - /// Internal function used at initialization of the `GraphWorker` to link blocks with their parents - fn claim_parent_refs(&mut self) -> Result<(), GraphError> { + /// Internal function used at initialization of the `ConsensusWorker` to link blocks with their parents + fn claim_parent_refs(&mut self) -> Result<(), ConsensusError> { let mut write_shared_state = self.shared_state.write(); for (_b_id, block_status) in write_shared_state.block_statuses.iter_mut() { if let BlockStatus::Active { @@ -281,7 +281,7 @@ impl GraphWorker { if !a_block.is_final { // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals if n_claimed_parents != self.config.thread_count as usize { - return Err(GraphError::MissingBlock( + return Err(ConsensusError::MissingBlock( "block storage could not claim refs to all parent blocks".into(), )); } diff --git a/massa-graph-2-worker/src/worker/main_loop.rs b/massa-consensus-worker/src/worker/main_loop.rs similarity index 91% rename from massa-graph-2-worker/src/worker/main_loop.rs rename to massa-consensus-worker/src/worker/main_loop.rs index e605a66c870..8caac3aef9d 100644 --- a/massa-graph-2-worker/src/worker/main_loop.rs +++ b/massa-consensus-worker/src/worker/main_loop.rs @@ -1,6 +1,6 @@ use std::{sync::mpsc, time::Instant}; -use massa_graph_2_exports::error::GraphError; +use massa_consensus_exports::error::ConsensusError; use massa_models::{ slot::Slot, timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, @@ -8,9 +8,9 @@ use massa_models::{ use massa_time::MassaTime; use tracing::{info, log::warn}; -use crate::commands::GraphCommand; +use crate::commands::ConsensusCommand; -use super::GraphWorker; +use super::ConsensusWorker; enum WaitingStatus { Ended, @@ -18,7 +18,7 @@ enum WaitingStatus { Disconnected, } -impl GraphWorker { +impl ConsensusWorker { /// Execute a command received from the controller also run an update of the graph after processing the command. 
/// /// # Arguments: @@ -26,14 +26,14 @@ impl GraphWorker { /// /// # Returns: /// An error if the command failed - fn manage_command(&mut self, command: GraphCommand) -> Result<(), GraphError> { + fn manage_command(&mut self, command: ConsensusCommand) -> Result<(), ConsensusError> { let mut write_shared_state = self.shared_state.write(); match command { - GraphCommand::RegisterBlockHeader(block_id, header) => { + ConsensusCommand::RegisterBlockHeader(block_id, header) => { write_shared_state.register_block_header(block_id, header, self.previous_slot)?; write_shared_state.block_db_changed() } - GraphCommand::RegisterBlock(block_id, slot, block_storage, created) => { + ConsensusCommand::RegisterBlock(block_id, slot, block_storage, created) => { write_shared_state.register_block( block_id, slot, @@ -43,7 +43,7 @@ impl GraphWorker { )?; write_shared_state.block_db_changed() } - GraphCommand::MarkInvalidBlock(block_id, header) => { + ConsensusCommand::MarkInvalidBlock(block_id, header) => { write_shared_state.mark_invalid_block(&block_id, header); Ok(()) } @@ -61,7 +61,7 @@ impl GraphWorker { // message received => manage it Ok(command) => { if let Err(err) = self.manage_command(command) { - warn!("Error in graph: {}", err); + warn!("Error in consensus: {}", err); } WaitingStatus::Interrupted } diff --git a/massa-graph-2-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs similarity index 62% rename from massa-graph-2-worker/src/worker/mod.rs rename to massa-consensus-worker/src/worker/mod.rs index 5324c1df32f..06db5787b6b 100644 --- a/massa-graph-2-worker/src/worker/mod.rs +++ b/massa-consensus-worker/src/worker/mod.rs @@ -1,6 +1,6 @@ -use massa_graph_2_exports::{ - bootstrapable_graph::BootstrapableGraph, GraphChannels, GraphConfig, GraphController, - GraphManager, +use massa_consensus_exports::{ + bootstrapable_graph::BootstrapableGraph, ConsensusChannels, ConsensusConfig, ConsensusController, + ConsensusManager, }; use massa_models::block::BlockId; use massa_models::clique::Clique; @@ -13,19 +13,19 @@ use std::sync::{mpsc, Arc}; use std::thread; use std::time::Instant; -use crate::commands::GraphCommand; -use crate::controller::GraphControllerImpl; -use crate::manager::GraphManagerImpl; -use crate::state::GraphState; +use crate::commands::ConsensusCommand; +use crate::controller::ConsensusControllerImpl; +use crate::manager::ConsensusManagerImpl; +use crate::state::ConsensusState; -/// The graph worker structure that contains all information and tools for the graph worker thread. -pub struct GraphWorker { +/// The consensus worker structure that contains all information and tools for the consensus worker thread. +pub struct ConsensusWorker { /// Channel to receive command from the controller - command_receiver: mpsc::Receiver, - /// Configuration of the graph - config: GraphConfig, + command_receiver: mpsc::Receiver, + /// Configuration of the consensus + config: ConsensusConfig, /// State shared with the controller - shared_state: Arc>, + shared_state: Arc>, /// Previous slot. previous_slot: Option, /// Next slot @@ -37,28 +37,28 @@ pub struct GraphWorker { mod init; mod main_loop; -/// Create a new graph worker thread. +/// Create a new consensus worker thread. /// /// # Arguments: -/// * `config`: Configuration of the graph +/// * `config`: Configuration of the consensus /// * `channels`: Channels to communicate with others modules /// * `init_graph`: Optional initial graph to bootstrap the graph. if None, the graph will have only genesis blocks. 
-/// * `storage`: Storage to use for the graph +/// * `storage`: Storage to use for the consensus /// /// # Returns: -/// * The graph controller to communicate with the graph worker thread -/// * The graph manager to manage the graph worker thread -pub fn start_graph_worker( - config: GraphConfig, - channels: GraphChannels, +/// * The consensus controller to communicate with the consensus worker thread +/// * The consensus manager to manage the consensus worker thread +pub fn start_consensus_worker( + config: ConsensusConfig, + channels: ConsensusChannels, init_graph: Option, storage: Storage, -) -> (Box, Box) { +) -> (Box, Box) { let (tx, rx) = mpsc::sync_channel(10); // desync detection timespan let stats_desync_detection_timespan = config.t0.checked_mul(config.periods_per_cycle * 2).unwrap(); - let shared_state = Arc::new(RwLock::new(GraphState { + let shared_state = Arc::new(RwLock::new(ConsensusState { storage: storage.clone(), config: config.clone(), channels, @@ -97,20 +97,20 @@ pub fn start_graph_worker( })); let shared_state_cloned = shared_state.clone(); - let thread_graph = thread::Builder::new() - .name("graph worker".into()) + let consensus_thread = thread::Builder::new() + .name("consensus worker".into()) .spawn(move || { - let mut graph_worker = - GraphWorker::new(config, rx, shared_state_cloned, init_graph, storage).unwrap(); - graph_worker.run() + let mut consensus_worker = + ConsensusWorker::new(config, rx, shared_state_cloned, init_graph, storage).unwrap(); + consensus_worker.run() }) - .expect("Can't spawn thread graph."); + .expect("Can't spawn consensus thread."); - let manager = GraphManagerImpl { - thread_graph: Some((tx.clone(), thread_graph)), + let manager = ConsensusManagerImpl { + consensus_thread: Some((tx.clone(), consensus_thread)), }; - let controller = GraphControllerImpl::new(tx, shared_state); + let controller = ConsensusControllerImpl::new(tx, shared_state); (Box::new(controller), Box::new(manager)) } diff --git a/massa-factory-exports/Cargo.toml b/massa-factory-exports/Cargo.toml index ae3e8e696c8..58648d00fa4 100644 --- a/massa-factory-exports/Cargo.toml +++ b/massa-factory-exports/Cargo.toml @@ -23,7 +23,7 @@ massa_ledger_exports = { path = "../massa-ledger-exports" } massa_serialization = { path = "../massa-serialization" } massa_signature = { path = "../massa-signature" } massa_pos_exports = { path = "../massa-pos-exports" } -massa_graph_2_exports = { path = "../massa-graph-2-exports" } +massa_consensus_exports = { path = "../massa-consensus-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } massa_execution_exports = { path = "../massa-execution-exports" } diff --git a/massa-factory-exports/src/types.rs b/massa-factory-exports/src/types.rs index 56f4fd0a16a..35a2675ce6f 100644 --- a/massa-factory-exports/src/types.rs +++ b/massa-factory-exports/src/types.rs @@ -1,4 +1,4 @@ -use massa_graph_2_exports::GraphController; +use massa_consensus_exports::ConsensusController; use massa_models::block::Block; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; @@ -14,8 +14,8 @@ pub type ProductionHistory = Vec; pub struct FactoryChannels { /// selector controller to get draws pub selector: Box, - /// graph controller - pub graph: Box, + /// consensus controller + pub consensus: Box, /// pool controller pub pool: Box, /// protocol controller diff --git a/massa-factory-worker/Cargo.toml b/massa-factory-worker/Cargo.toml index eb793992fa6..dc3b8b42138 
100644 --- a/massa-factory-worker/Cargo.toml +++ b/massa-factory-worker/Cargo.toml @@ -27,7 +27,7 @@ massa_pool_exports = { path = "../massa-pool-exports" } [dev-dependencies] serial_test = "0.9" massa_protocol_exports = { path = "../massa-protocol-exports", features=["testing"] } -massa_graph_2_exports = { path = "../massa-graph-2-exports", features = ["testing"] } +massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } massa_wallet = { path = "../massa-wallet", features=["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features=["testing"] } diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index eb74b0855ac..ab0672139f6 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -143,7 +143,7 @@ impl BlockFactoryWorker { return; }; // get best parents and their periods - let parents: Vec<(BlockId, u64)> = self.channels.graph.get_best_parents(); // Vec<(parent_id, parent_period)> + let parents: Vec<(BlockId, u64)> = self.channels.consensus.get_best_parents(); // Vec<(parent_id, parent_period)> // generate the local storage object let mut block_storage = self.channels.storage.clone_without_refs(); @@ -232,7 +232,7 @@ impl BlockFactoryWorker { // send full block to consensus self.channels - .graph + .consensus .register_block(block_id, slot, block_storage, true); } diff --git a/massa-factory-worker/src/endorsement_factory.rs b/massa-factory-worker/src/endorsement_factory.rs index 40b8414f2fb..4c61cf0b062 100644 --- a/massa-factory-worker/src/endorsement_factory.rs +++ b/massa-factory-worker/src/endorsement_factory.rs @@ -164,7 +164,7 @@ impl EndorsementFactoryWorker { // get consensus block ID for that slot let endorsed_block: BlockId = self .channels - .graph + .consensus .get_latest_blockclique_block_at_slot(slot); // produce endorsements diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index b2151a39351..3505df5e2e0 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -1,5 +1,5 @@ -use massa_graph_2_exports::test_exports::{ - GraphEventReceiver, MockGraphController, MockGraphControllerMessage, +use massa_consensus_exports::test_exports::{ + ConsensusEventReceiver, MockConsensusController, MockConsensusControllerMessage, }; use parking_lot::RwLock; use std::{ @@ -36,7 +36,7 @@ use massa_wallet::test_exports::create_test_wallet; /// You can use the method `new` to build all the mocks and make the connections /// Then you can use the method `get_next_created_block` that will manage the answers from the mock to the factory depending on the parameters you gave. 
pub struct TestFactory { - graph_event_receiver: GraphEventReceiver, + consensus_event_receiver: ConsensusEventReceiver, pool_receiver: PoolEventReceiver, selector_receiver: Receiver, factory_config: FactoryConfig, @@ -55,7 +55,7 @@ impl TestFactory { /// - `TestFactory`: the structure that will be used to manage the tests pub fn new(default_keypair: &KeyPair) -> TestFactory { let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - let (graph_controller, graph_event_receiver) = MockGraphController::new_with_receiver(); + let (consensus_controller, consensus_event_receiver) = MockConsensusController::new_with_receiver(); let (pool_controller, pool_receiver) = MockPoolController::new_with_receiver(); let mut storage = Storage::create_root(); let mut factory_config = FactoryConfig::default(); @@ -82,7 +82,7 @@ impl TestFactory { Arc::new(RwLock::new(create_test_wallet(Some(accounts)))), FactoryChannels { selector: selector_controller.clone(), - graph: graph_controller, + consensus: consensus_controller, pool: pool_controller.clone(), protocol: protocol_command_sender, storage: storage.clone_without_refs(), @@ -90,7 +90,7 @@ impl TestFactory { ); TestFactory { - graph_event_receiver, + consensus_event_receiver, pool_receiver, selector_receiver, factory_config, @@ -150,9 +150,9 @@ impl TestFactory { _ => panic!("unexpected message"), } } - self.graph_event_receiver + self.consensus_event_receiver .wait_command(MassaTime::from_millis(100), |command| { - if let MockGraphControllerMessage::GetBestParents { response_tx } = command { + if let MockConsensusControllerMessage::GetBestParents { response_tx } = command { response_tx.send(self.genesis_blocks.clone()).unwrap(); Some(()) } else { @@ -202,9 +202,9 @@ impl TestFactory { _ => panic!("unexpected message"), }) .unwrap(); - self.graph_event_receiver + self.consensus_event_receiver .wait_command(MassaTime::from_millis(100), |command| { - if let MockGraphControllerMessage::RegisterBlock { + if let MockConsensusControllerMessage::RegisterBlock { block_id, block_storage, slot: _, diff --git a/massa-graph-2-worker/src/manager.rs b/massa-graph-2-worker/src/manager.rs deleted file mode 100644 index b850e3c50f8..00000000000 --- a/massa-graph-2-worker/src/manager.rs +++ /dev/null @@ -1,23 +0,0 @@ -use massa_graph_2_exports::GraphManager; -use std::{sync::mpsc::SyncSender, thread::JoinHandle}; -use tracing::log::info; - -use crate::commands::GraphCommand; - -pub struct GraphManagerImpl { - pub thread_graph: Option<(SyncSender, JoinHandle<()>)>, -} - -impl GraphManager for GraphManagerImpl { - fn stop(&mut self) { - info!("stopping graph worker..."); - // join the graph thread - if let Some((tx, join_handle)) = self.thread_graph.take() { - drop(tx); - join_handle - .join() - .expect("graph thread panicked on try to join"); - } - info!("graph worker stopped"); - } -} diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index d48496dccde..463b70dbbdf 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -48,8 +48,8 @@ massa_time = { path = "../massa-time" } massa_wallet = { path = "../massa-wallet" } massa_factory_exports = { path = "../massa-factory-exports" } massa_factory_worker = { path = "../massa-factory-worker" } -massa_graph_2_exports = { path = "../massa-graph-2-exports" } -massa_graph_2_worker = { path = "../massa-graph-2-worker" } +massa_consensus_exports = { path = "../massa-consensus-exports" } +massa_consensus_worker = { path = "../massa-consensus-worker" } # for more information on what 
are the following features used for, see the cargo.toml at workspace level [features] @@ -57,7 +57,7 @@ beta = [] deadlock_detection = [] sandbox = [ "massa_bootstrap/sandbox", - "massa_graph_2_worker/sandbox", + "massa_consensus_worker/sandbox", "massa_execution_worker/sandbox", "massa_final_state/sandbox", "massa_models/sandbox", diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 56791b6361f..0acd732deb3 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -11,14 +11,14 @@ use dialoguer::Password; use massa_api::{APIConfig, Private, Public, RpcServer, StopHandle, API}; use massa_async_pool::AsyncPoolConfig; use massa_bootstrap::{get_state, start_bootstrap_server, BootstrapConfig, BootstrapManager}; +use massa_consensus_exports::events::ConsensusEvent; +use massa_consensus_exports::{ConsensusChannels, ConsensusConfig, ConsensusManager}; +use massa_consensus_worker::start_consensus_worker; use massa_execution_exports::{ExecutionConfig, ExecutionManager, StorageCostsConstants}; use massa_execution_worker::start_execution_worker; use massa_factory_exports::{FactoryChannels, FactoryConfig, FactoryManager}; use massa_factory_worker::start_factory; use massa_final_state::{FinalState, FinalStateConfig}; -use massa_graph_2_exports::events::GraphEvent; -use massa_graph_2_exports::{GraphChannels, GraphConfig, GraphManager}; -use massa_graph_2_worker::start_graph_worker; use massa_ledger_exports::LedgerConfig; use massa_ledger_worker::FinalLedger; use massa_logging::massa_trace; @@ -73,9 +73,9 @@ mod settings; async fn launch( node_wallet: Arc>, ) -> ( - Receiver, + Receiver, Option, - Box, + Box, Box, Box, Box, @@ -336,7 +336,7 @@ async fn launch( let (protocol_command_sender, protocol_command_receiver) = mpsc::channel::(PROTOCOL_CONTROLLER_CHANNEL_SIZE); - let graph_config = GraphConfig { + let consensus_config = ConsensusConfig { genesis_timestamp: *GENESIS_TIMESTAMP, end_timestamp: *END_TIMESTAMP, thread_count: THREAD_COUNT, @@ -360,18 +360,18 @@ async fn launch( clock_compensation_millis: bootstrap_state.compensation_millis, }; - let (graph_event_sender, graph_event_receiver) = crossbeam_channel::bounded(CHANNEL_SIZE); - let graph_channels = GraphChannels { + let (consensus_event_sender, consensus_event_receiver) = crossbeam_channel::bounded(CHANNEL_SIZE); + let consensus_channels = ConsensusChannels { execution_controller: execution_controller.clone(), selector_controller: selector_controller.clone(), pool_command_sender: pool_controller.clone(), - controller_event_tx: graph_event_sender, + controller_event_tx: consensus_event_sender, protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), }; - let (graph_controller, graph_manager) = start_graph_worker( - graph_config, - graph_channels, + let (consensus_controller, consensus_manager) = start_consensus_worker( + consensus_config, + consensus_channels, bootstrap_state.graph, shared_storage.clone(), ); @@ -413,7 +413,7 @@ async fn launch( network_command_sender.clone(), network_event_receiver, protocol_command_receiver, - graph_controller.clone(), + consensus_controller.clone(), pool_controller.clone(), shared_storage.clone(), ) @@ -432,7 +432,7 @@ async fn launch( }; let factory_channels = FactoryChannels { selector: selector_controller.clone(), - graph: graph_controller.clone(), + consensus: consensus_controller.clone(), pool: pool_controller.clone(), protocol: ProtocolCommandSender(protocol_command_sender.clone()), storage: shared_storage.clone(), @@ -441,7 +441,7 @@ async fn launch( // 
launch bootstrap server let bootstrap_manager = start_bootstrap_server( - graph_controller.clone(), + consensus_controller.clone(), network_command_sender.clone(), final_state.clone(), bootstrap_config, @@ -480,7 +480,7 @@ async fn launch( // spawn public API let api_public = API::::new( - graph_controller.clone(), + consensus_controller.clone(), execution_controller.clone(), api_config, selector_controller.clone(), @@ -525,9 +525,9 @@ async fn launch( .expect("failed to spawn thread : deadlock-detection"); } ( - graph_event_receiver, + consensus_event_receiver, bootstrap_manager, - graph_manager, + consensus_manager, execution_manager, selector_manager, pool_manager, @@ -542,7 +542,7 @@ async fn launch( struct Managers { bootstrap_manager: Option, - graph_manager: Box, + consensus_manager: Box, execution_manager: Box, selector_manager: Box, pool_manager: Box, @@ -552,11 +552,11 @@ struct Managers { } async fn stop( - _graph_event_receiver: Receiver, + _consensus_event_receiver: Receiver, Managers { bootstrap_manager, mut execution_manager, - mut graph_manager, + mut consensus_manager, mut selector_manager, mut pool_manager, protocol_manager, @@ -589,8 +589,8 @@ async fn stop( .await .expect("protocol shutdown failed"); - // stop graph - graph_manager.stop(); + // stop consensus + consensus_manager.stop(); // stop pool pool_manager.stop(); @@ -695,9 +695,9 @@ async fn run(args: Args) -> anyhow::Result<()> { loop { let ( - graph_event_receiver, + consensus_event_receiver, bootstrap_manager, - graph_manager, + consensus_manager, execution_manager, selector_manager, pool_manager, @@ -719,9 +719,9 @@ async fn run(args: Args) -> anyhow::Result<()> { // loop over messages let restart = loop { massa_trace!("massa-node.main.run.select", {}); - match graph_event_receiver.try_recv() { + match consensus_event_receiver.try_recv() { Ok(evt) => match evt { - GraphEvent::NeedSync => { + ConsensusEvent::NeedSync => { warn!("in response to a desynchronization, the node is going to bootstrap again"); break true; } @@ -758,10 +758,10 @@ async fn run(args: Args) -> anyhow::Result<()> { sleep(Duration::from_millis(100)); }; stop( - graph_event_receiver, + consensus_event_receiver, Managers { bootstrap_manager, - graph_manager, + consensus_manager, execution_manager, selector_manager, pool_manager, diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index a554c52d978..2817e32c9fb 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -15,7 +15,7 @@ rayon = "1.5" massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } -massa_graph_2_exports = { path = "../massa-graph-2-exports" } +massa_consensus_exports = { path = "../massa-consensus-exports" } massa_network_exports = { path = "../massa-network-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } @@ -34,4 +34,4 @@ massa_pool_exports = { path = "../massa-pool-exports", features = ["testing"] } [features] -testing = ["massa_graph_2_exports/testing", "massa_network_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing"] \ No newline at end of file +testing = ["massa_consensus_exports/testing", "massa_network_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing"] \ No newline at end of file diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index 
be0faada554..74c7c136615 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -98,7 +98,7 @@ impl ProtocolWorker { self.note_header_from_node(&header, &source_node_id).await? { if is_new { - self.graph_controller + self.consensus_controller .register_block_header(block_id, header); } self.update_ask_block(block_ask_timer).await?; @@ -281,7 +281,7 @@ impl ProtocolWorker { /// # Ban /// Start compute the operations serialized total size with the operation we know. /// Ban the node if the operations contained in the block overflow the max size. We don't - /// forward the block to the graph in that case. + /// forward the block to the consensus in that case. /// /// # Parameters: /// - `from_node_id`: Node which sent us the information. @@ -468,7 +468,7 @@ impl ProtocolWorker { warn!("Node id {} sent us full operations for block id {} but they exceed max size.", from_node_id, block_id); let _ = self.ban_node(&from_node_id).await; self.block_wishlist.remove(&block_id); - self.graph_controller.mark_invalid_block(block_id, header); + self.consensus_controller.mark_invalid_block(block_id, header); } else { if known_operations != block_ids_set { warn!( @@ -514,8 +514,8 @@ impl ProtocolWorker { // add block to local storage and claim ref block_storage.store_block(wrapped_block); - // Send to graph - self.graph_controller + // Send to consensus + self.consensus_controller .register_block(block_id, slot, block_storage, false); } } diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index ac45094a636..07aba466e1b 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -5,7 +5,7 @@ use crate::checked_operations::CheckedOperations; use crate::sig_verifier::verify_sigs_batch; use crate::{node_info::NodeInfo, worker_operations_impl::OperationBatchBuffer}; -use massa_graph_2_exports::GraphController; +use massa_consensus_exports::ConsensusController; use massa_logging::massa_trace; use massa_models::slot::Slot; @@ -51,7 +51,7 @@ pub async fn start_protocol_controller( network_command_sender: NetworkCommandSender, network_event_receiver: NetworkEventReceiver, protocol_command_receiver: mpsc::Receiver<ProtocolCommand>, - graph_controller: Box<dyn GraphController>, + consensus_controller: Box<dyn ConsensusController>, pool_controller: Box<dyn PoolController>, storage: Storage, ) -> Result<ProtocolManager, ProtocolError> { @@ -69,7 +69,7 @@ pub async fn start_protocol_controller( controller_command_rx: protocol_command_receiver, controller_manager_rx, }, - graph_controller, + consensus_controller, pool_controller, storage, ) @@ -119,8 +119,8 @@ impl BlockInfo { pub struct ProtocolWorker { /// Protocol configuration. pub(crate) config: ProtocolConfig, - /// Graph controller - pub(crate) graph_controller: Box<dyn GraphController>, + /// Consensus controller + pub(crate) consensus_controller: Box<dyn ConsensusController>, /// Associated network command sender. pub(crate) network_command_sender: NetworkCommandSender, /// Associated network event receiver. 
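Note: `ProtocolWorker` now holds the consensus controller as a boxed trait object instead of a command sender. This is the controller/manager split the whole patch series converges on: the controller is a cheap, cloneable handle feeding a bounded channel, while the manager owns a sender and the thread handle so that `stop()` can close the channel and join the worker. A minimal, self-contained sketch under hypothetical names (`Command`, `Controller`, `Manager`; the real traits live in massa-consensus-exports and the real commands in the worker's commands.rs):

    use std::sync::mpsc::{sync_channel, SyncSender};
    use std::thread::{self, JoinHandle};

    // Hypothetical command set; the real worker uses ConsensusCommand.
    enum Command {
        Register(u64),
    }

    // Cloneable handle handed to the other workers (protocol, factory, API).
    #[derive(Clone)]
    struct Controller {
        tx: SyncSender<Command>,
    }

    impl Controller {
        fn register(&self, v: u64) {
            // Fire-and-forget, like the try_send calls in this patch:
            // a full or closed queue simply drops the message.
            let _ = self.tx.try_send(Command::Register(v));
        }
    }

    // Owns the channel and the thread; mirrors ConsensusManagerImpl::stop.
    struct Manager {
        thread: Option<(SyncSender<Command>, JoinHandle<()>)>,
    }

    impl Manager {
        fn stop(&mut self) {
            if let Some((tx, handle)) = self.thread.take() {
                drop(tx); // recv() reports disconnection once every sender is gone
                handle.join().expect("worker thread panicked");
            }
        }
    }

    fn start_worker() -> (Controller, Manager) {
        let (tx, rx) = sync_channel::<Command>(10);
        let handle = thread::spawn(move || {
            while let Ok(Command::Register(v)) = rx.recv() {
                println!("registered {v}");
            }
        });
        (
            Controller { tx: tx.clone() },
            Manager {
                thread: Some((tx, handle)),
            },
        )
    }

    fn main() {
        let (controller, mut manager) = start_worker();
        controller.register(42);
        drop(controller); // drop the last controller clone before stopping,
        manager.stop(); // otherwise recv() never disconnects and join() blocks
    }
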
@@ -181,7 +181,7 @@ impl ProtocolWorker { controller_command_rx, controller_manager_rx, }: ProtocolWorkerChannels, - graph_controller: Box, + consensus_controller: Box, pool_controller: Box, storage: Storage, ) -> ProtocolWorker { @@ -189,7 +189,7 @@ impl ProtocolWorker { config, network_command_sender, network_event_receiver, - graph_controller, + consensus_controller, pool_controller, controller_command_rx, controller_manager_rx, diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index bf2c9d096fe..e49a38481c8 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; -use massa_graph_2_exports::test_exports::MockGraphControllerMessage; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{block::BlockId, slot::Slot}; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand}; @@ -21,7 +21,7 @@ async fn test_full_ask_block_workflow() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -105,13 +105,13 @@ async fn test_full_ask_block_workflow() { ) .await; - let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { // Protocol sends expected block to consensus. loop { - match protocol_graph_event_receiver.wait_command( + match protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(100), |command| match command { - MockGraphControllerMessage::RegisterBlock { + MockConsensusControllerMessage::RegisterBlock { slot, block_id, block_storage, @@ -138,7 +138,7 @@ async fn test_full_ask_block_workflow() { } } } - return protocol_graph_event_receiver; + return protocol_consensus_event_receiver; }) .await .unwrap(); @@ -147,7 +147,7 @@ async fn test_full_ask_block_workflow() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -166,7 +166,7 @@ async fn test_empty_block() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -229,12 +229,12 @@ async fn test_empty_block() { ); // Protocol sends expected block to consensus. 
- let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { loop { - match protocol_graph_event_receiver.wait_command( + match protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(100), |command| match command { - MockGraphControllerMessage::RegisterBlock { + MockConsensusControllerMessage::RegisterBlock { slot, block_id, block_storage, @@ -261,7 +261,7 @@ async fn test_empty_block() { } } } - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -269,7 +269,7 @@ async fn test_empty_block() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -287,7 +287,7 @@ async fn test_someone_knows_it() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -318,15 +318,15 @@ async fn test_someone_knows_it() { .send_header(node_c.id, block.content.header.clone()) .await; - let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { - protocol_graph_event_receiver.wait_command( + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(100), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), _ => panic!("unexpected protocol event"), }, ); - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -378,7 +378,7 @@ async fn test_someone_knows_it() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -396,7 +396,7 @@ async fn test_dont_want_it_anymore() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -452,7 +452,7 @@ async fn test_dont_want_it_anymore() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -471,7 +471,7 @@ async fn test_no_one_has_it() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -533,7 +533,7 @@ async fn test_no_one_has_it() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -551,7 +551,7 @@ async fn test_multiple_blocks_without_a_priori() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -606,7 +606,7 @@ async fn 
test_multiple_blocks_without_a_priori() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index af9478977ec..26a34102397 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; -use massa_graph_2_exports::test_exports::MockGraphControllerMessage; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; @@ -25,7 +25,7 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { async move |mut network_controller, protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -47,23 +47,23 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // Check protocol does not send block to consensus. - let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { - protocol_graph_event_receiver.wait_command( + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlock { .. } => { + MockConsensusControllerMessage::RegisterBlock { .. } => { panic!("Protocol unexpectedly sent block.") } - MockGraphControllerMessage::RegisterBlockHeader { .. } => { + MockConsensusControllerMessage::RegisterBlockHeader { .. } => { panic!("Protocol unexpectedly sent header.") } - MockGraphControllerMessage::MarkInvalidBlock { .. } => { + MockConsensusControllerMessage::MarkInvalidBlock { .. } => { panic!("Protocol unexpectedly sent invalid block.") } _ => Some(()), }, ); - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -71,7 +71,7 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -136,7 +136,7 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. 
let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -159,15 +159,15 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .send_header(to_ban_node.id, block.content.header.clone()) .await; - let mut protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { - protocol_graph_event_receiver.wait_command( + let mut protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), _ => panic!("unexpected protocol event"), }, ); - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -214,23 +214,23 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .await; // Check protocol does not send block to consensus. - let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { - protocol_graph_event_receiver.wait_command( + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlock { .. } => { + MockConsensusControllerMessage::RegisterBlock { .. } => { panic!("Protocol unexpectedly sent block.") } - MockGraphControllerMessage::RegisterBlockHeader { .. } => { + MockConsensusControllerMessage::RegisterBlockHeader { .. } => { panic!("Protocol unexpectedly sent header.") } - MockGraphControllerMessage::MarkInvalidBlock { .. } => { + MockConsensusControllerMessage::MarkInvalidBlock { .. } => { panic!("Protocol unexpectedly sent invalid block.") } _ => Some(()), }, ); - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -238,7 +238,7 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -255,7 +255,7 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let ask_for_block_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd), @@ -275,18 +275,18 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h .await; // Check protocol sends header to consensus. 
- let (protocol_graph_event_receiver, received_hash) = + let (protocol_consensus_event_receiver, received_hash) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver + let id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("unexpected protocol event"), }) .unwrap(); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); @@ -329,7 +329,7 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -346,7 +346,7 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let send_block_or_header_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::SendBlockInfo { .. } => Some(cmd), @@ -417,7 +417,7 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -434,7 +434,7 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 4 nodes. let nodes = tools::create_and_connect_nodes(4, &mut network_controller).await; @@ -451,23 +451,23 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { .send_header(creator_node.id, block.content.header.clone()) .await; - let (old_protocol_graph_event_receiver, optional_block_id) = + let (old_protocol_consensus_event_receiver, optional_block_id) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver.wait_command( + let id = protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("unexpected protocol event"), }, ); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); - protocol_graph_event_receiver = old_protocol_graph_event_receiver; + protocol_consensus_event_receiver = old_protocol_consensus_event_receiver; // Check protocol sends header to consensus (only the 1st time: later, there is caching). 
if idx == 0 { let received_hash = optional_block_id.unwrap(); @@ -515,7 +515,7 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -532,7 +532,7 @@ async fn test_protocol_removes_banned_node_on_disconnection() { async move |mut network_controller, protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -559,18 +559,18 @@ async fn test_protocol_removes_banned_node_on_disconnection() { .await; // Check protocol sends header to consensus. - let (protocol_graph_event_receiver, received_hash) = + let (protocol_consensus_event_receiver, received_hash) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver + let id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("unexpected protocol event"), }) .unwrap(); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); @@ -582,7 +582,7 @@ async fn test_protocol_removes_banned_node_on_disconnection() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/endorsements_scenarios.rs b/massa-protocol-worker/src/tests/endorsements_scenarios.rs index e1ff633606d..588b329efd0 100644 --- a/massa-protocol-worker/src/tests/endorsements_scenarios.rs +++ b/massa-protocol-worker/src/tests/endorsements_scenarios.rs @@ -3,7 +3,7 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::protocol_test; -use massa_graph_2_exports::test_exports::MockGraphControllerMessage; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::{address::Address, slot::Slot}; use massa_network_exports::NetworkCommand; use massa_pool_exports::test_exports::MockPoolControllerMessage; @@ -23,7 +23,7 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -59,7 +59,7 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -76,7 +76,7 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. 
let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -106,7 +106,7 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -123,7 +123,7 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -173,7 +173,7 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -190,7 +190,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -249,7 +249,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -267,7 +267,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -321,7 +321,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -340,7 +340,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -395,7 +395,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -413,7 +413,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou async move |mut network_controller, protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -441,15 +441,15 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Wait for the event to be sure that the node is connected, // and noted as knowing the block and its endorsements. 
- let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { - protocol_graph_event_receiver.wait_command( + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), _ => panic!("Node isn't connected or didn't mark block as known."), }, ); - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -486,7 +486,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -503,7 +503,7 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -551,7 +551,7 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index 0497805d564..c891c9bbaf1 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::tools::{protocol_test, send_and_propagate_block}; -use massa_graph_2_exports::test_exports::MockGraphControllerMessage; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::wrapped::{Id, WrappedContent}; @@ -28,7 +28,7 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 node. let mut nodes = create_and_connect_nodes(2, &mut network_controller).await; @@ -65,20 +65,20 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { .await; // Check protocol sends block to consensus. 
- let (protocol_graph_event_receiver, expected_hash) = + let (protocol_consensus_event_receiver, expected_hash) = tokio::task::spawn_blocking(move || { - let header_id = protocol_graph_event_receiver + let header_id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("Unexpected or no protocol event."), }) .unwrap(); - let id = protocol_graph_event_receiver + let id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlock { + MockConsensusControllerMessage::RegisterBlock { block_id, slot: _, block_storage: _, @@ -88,7 +88,7 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { }) .unwrap(); assert_eq!(header_id, id); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); @@ -113,7 +113,7 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -136,7 +136,7 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -174,20 +174,20 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { .await; // Check protocol sends block to consensus. - let (new_protocol_graph_event_receiver, expected_hash) = + let (new_protocol_consensus_event_receiver, expected_hash) = tokio::task::spawn_blocking(move || { - let header_id = protocol_graph_event_receiver + let header_id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("Unexpected or no protocol event."), }) .unwrap(); - let id = protocol_graph_event_receiver + let id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlock { + MockConsensusControllerMessage::RegisterBlock { block_id, slot: _, block_storage: _, @@ -197,11 +197,11 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { }) .unwrap(); assert_eq!(header_id, id); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); - protocol_graph_event_receiver = new_protocol_graph_event_receiver; + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; assert_eq!(expected_hash, block_id); } @@ -245,23 +245,23 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { .await; // Check protocol didn't send block to consensus. 
- let (new_protocol_graph_event_receiver, optional_expected_hash) = + let (new_protocol_consensus_event_receiver, optional_expected_hash) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver.wait_command( + let id = protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => None, }, ); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); - protocol_graph_event_receiver = new_protocol_graph_event_receiver; + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; assert!( optional_expected_hash.is_none(), "Block sent to consensus but shouldn't." @@ -289,23 +289,23 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { .await; // Check protocol didn't send block to consensus. - let (new_protocol_graph_event_receiver, optional_expected_hash) = + let (new_protocol_consensus_event_receiver, optional_expected_hash) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver.wait_command( + let id = protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => None, }, ); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); - protocol_graph_event_receiver = new_protocol_graph_event_receiver; + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; assert!( optional_expected_hash.is_none(), "Block sent to consensus but shouldn't." @@ -316,7 +316,7 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 52c4e8ad2cb..9c9257cf3eb 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -3,7 +3,7 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::{protocol_test, protocol_test_with_storage}; -use massa_graph_2_exports::test_exports::MockGraphControllerMessage; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{self, address::Address, amount::Amount, block::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; @@ -23,7 +23,7 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut protocol_pool_event_receiver| { // Create 1 node. 
let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -76,7 +76,7 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -93,7 +93,7 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -122,7 +122,7 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -139,7 +139,7 @@ async fn test_protocol_propagates_operations_to_active_nodes() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. @@ -189,7 +189,7 @@ async fn test_protocol_propagates_operations_to_active_nodes() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -206,7 +206,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ async move |mut network_controller, mut protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 1 nodes. @@ -262,7 +262,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -280,7 +280,7 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver, _storage| { // Create 2 nodes. @@ -324,7 +324,7 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -342,7 +342,7 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo async move |mut network_controller, mut protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. 
@@ -401,7 +401,7 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -419,7 +419,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. @@ -440,15 +440,15 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .send_header(nodes[0].id, block.content.header.clone()) .await; - let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { - protocol_graph_event_receiver.wait_command( + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), _ => panic!("unexpected protocol event"), }, ); - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -510,7 +510,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -529,7 +529,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 3 nodes. @@ -598,15 +598,15 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .await; // Wait for the event to be sure that the node is connected. - let protocol_graph_event_receiver = tokio::task::spawn_blocking(move || { - protocol_graph_event_receiver.wait_command( + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { .. } => Some(()), + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), _ => panic!("unexpected protocol event"), }, ); - protocol_graph_event_receiver + protocol_consensus_event_receiver }) .await .unwrap(); @@ -638,7 +638,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -656,7 +656,7 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 2 nodes. 
let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -702,7 +702,7 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -719,7 +719,7 @@ async fn test_protocol_ask_operations_on_batch_received() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -754,7 +754,7 @@ async fn test_protocol_ask_operations_on_batch_received() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -771,7 +771,7 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 3 node. let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -827,7 +827,7 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -844,7 +844,7 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 3 node. let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -902,7 +902,7 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -919,7 +919,7 @@ async fn test_protocol_on_ask_operations() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. 
@@ -966,7 +966,7 @@ async fn test_protocol_on_ask_operations() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs index 4e1003021db..24021e0a38d 100644 --- a/massa-protocol-worker/src/tests/scenarios.rs +++ b/massa-protocol-worker/src/tests/scenarios.rs @@ -3,7 +3,7 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::{protocol_test, protocol_test_with_storage}; -use massa_graph_2_exports::test_exports::MockGraphControllerMessage; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::block::BlockId; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_network_exports::{AskForBlocksInfo, NetworkCommand}; @@ -26,7 +26,7 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let ask_for_block_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd), @@ -48,18 +48,18 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { .await; // Check protocol sends header to consensus. - let (protocol_graph_event_receiver, received_hash) = + let (protocol_consensus_event_receiver, received_hash) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver + let id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("unexpected protocol event"), }) .unwrap(); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); @@ -106,7 +106,7 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -123,7 +123,7 @@ async fn test_protocol_sends_blocks_when_asked_for() { async move |mut network_controller, mut protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { let send_block_info_cmd_filter = |cmd| match cmd { @@ -192,7 +192,7 @@ async fn test_protocol_sends_blocks_when_asked_for() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -209,7 +209,7 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 4 nodes. @@ -237,18 +237,18 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f // node[1] asks for that block // Check protocol sends header to consensus. 
- let (protocol_graph_event_receiver, ref_hash) = + let (protocol_consensus_event_receiver, ref_hash) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver + let id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("unexpected protocol event"), }) .unwrap(); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); @@ -307,7 +307,7 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -325,7 +325,7 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl async move |mut network_controller, mut protocol_command_sender, protocol_manager, - mut protocol_graph_event_receiver, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 4 nodes. @@ -353,18 +353,18 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl // node[1] asks for that block // Check protocol sends header to consensus. - let (protocol_graph_event_receiver, ref_hash) = + let (protocol_consensus_event_receiver, ref_hash) = tokio::task::spawn_blocking(move || { - let id = protocol_graph_event_receiver + let id = protocol_consensus_event_receiver .wait_command(MassaTime::from_millis(1000), |command| match command { - MockGraphControllerMessage::RegisterBlockHeader { + MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _, } => Some(block_id), _ => panic!("unexpected protocol event"), }) .unwrap(); - (protocol_graph_event_receiver, id) + (protocol_consensus_event_receiver, id) }) .await .unwrap(); @@ -448,7 +448,7 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -466,7 +466,7 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -484,7 +484,7 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -501,7 +501,7 @@ async fn test_protocol_block_not_found() { async move |mut network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. 
let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -539,7 +539,7 @@ async fn test_protocol_block_not_found() { network_controller, protocol_command_sender, protocol_manager, - protocol_graph_event_receiver, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index 1125abdcc3b..1f50a071204 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -1,6 +1,6 @@ use crate::start_protocol_controller; use futures::Future; -use massa_graph_2_exports::test_exports::{GraphEventReceiver, MockGraphController}; +use massa_consensus_exports::test_exports::{ConsensusEventReceiver, MockConsensusController}; use massa_models::{ block::{BlockId, WrappedBlock}, node::NodeId, @@ -22,7 +22,7 @@ where MockNetworkController, ProtocolCommandSender, ProtocolManager, - GraphEventReceiver, + ConsensusEventReceiver, PoolEventReceiver, ) -> V, V: Future< @@ -30,7 +30,7 @@ where MockNetworkController, ProtocolCommandSender, ProtocolManager, - GraphEventReceiver, + ConsensusEventReceiver, PoolEventReceiver, ), >, @@ -39,7 +39,7 @@ where MockNetworkController::new(); let (pool_controller, pool_event_receiver) = MockPoolController::new_with_receiver(); - let (graph_controller, graph_event_receiver) = MockGraphController::new_with_receiver(); + let (consensus_controller, consensus_event_receiver) = MockConsensusController::new_with_receiver(); // start protocol controller let (protocol_command_sender, protocol_command_receiver) = mpsc::channel(protocol_config.controller_channel_size); @@ -49,7 +49,7 @@ where network_command_sender, network_event_receiver, protocol_command_receiver, - graph_controller, + consensus_controller, pool_controller, Storage::create_root(), ) @@ -61,13 +61,13 @@ where _network_controller, _protocol_command_sender, protocol_manager, - _graph_event_receiver, + _consensus_event_receiver, _pool_event_receiver, ) = test( network_controller, protocol_command_sender, protocol_manager, - graph_event_receiver, + consensus_event_receiver, pool_event_receiver, ) .await; @@ -84,7 +84,7 @@ where MockNetworkController, ProtocolCommandSender, ProtocolManager, - GraphEventReceiver, + ConsensusEventReceiver, PoolEventReceiver, Storage, ) -> V, @@ -93,7 +93,7 @@ where MockNetworkController, ProtocolCommandSender, ProtocolManager, - GraphEventReceiver, + ConsensusEventReceiver, PoolEventReceiver, ), >, @@ -101,7 +101,7 @@ where let (network_controller, network_command_sender, network_event_receiver) = MockNetworkController::new(); let (pool_controller, mock_pool_receiver) = MockPoolController::new_with_receiver(); - let (graph_controller, mock_graph_receiver) = MockGraphController::new_with_receiver(); + let (consensus_controller, mock_consensus_receiver) = MockConsensusController::new_with_receiver(); let storage = Storage::create_root(); // start protocol controller let (protocol_command_sender, protocol_command_receiver) = @@ -111,7 +111,7 @@ where network_command_sender, network_event_receiver, protocol_command_receiver, - graph_controller, + consensus_controller, pool_controller, storage.clone(), ) @@ -123,13 +123,13 @@ where _network_controller, _protocol_command_sender, protocol_manager, - _graph_event_receiver, + _consensus_event_receiver, _protocol_pool_event_receiver, ) = test( network_controller, protocol_command_sender, protocol_manager, - mock_graph_receiver, + mock_consensus_receiver, mock_pool_receiver, storage, ) 
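The hunks above rename the protocol tests' graph mock to the consensus mock without changing the waiting logic, and the same idiom recurs in every scenario: `wait_command` blocks its thread, so each test moves the receiver into `tokio::task::spawn_blocking`, matches the expected `MockConsensusControllerMessage` variant, and hands the receiver back for further waits. A minimal consolidated sketch of that idiom, assuming the `massa_consensus_exports::test_exports` API shown in these diffs (`expect_registered_header` is a hypothetical helper, not part of the patch):

use massa_consensus_exports::test_exports::{ConsensusEventReceiver, MockConsensusControllerMessage};
use massa_models::block::BlockId;
use massa_time::MassaTime;

/// Wait up to one second for protocol to forward a block header to the
/// mocked consensus controller, returning the receiver together with the
/// id of the registered header, if any arrived in time.
async fn expect_registered_header(
    mut receiver: ConsensusEventReceiver,
) -> (ConsensusEventReceiver, Option<BlockId>) {
    // `wait_command` blocks, so it must run on a dedicated blocking task.
    tokio::task::spawn_blocking(move || {
        let id = receiver.wait_command(MassaTime::from_millis(1000), |command| match command {
            MockConsensusControllerMessage::RegisterBlockHeader { block_id, header: _ } => {
                Some(block_id)
            }
            // Other commands are filtered out here; tests that must fail on
            // unexpected traffic panic in this arm instead.
            _ => None,
        });
        // Return the receiver so the caller can chain further waits.
        (receiver, id)
    })
    .await
    .unwrap()
}

Returning the receiver from the blocking closure is what lets a single test chain several waits in sequence, as the scenarios above do when they first expect a RegisterBlockHeader and then a RegisterBlock for the same id.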
From f3c1328bc70ee1d1a5789c066d327cf9fe94d643 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 25 Oct 2022 14:55:07 +0200 Subject: [PATCH 35/40] Fix format and remove todos. --- massa-api/src/public.rs | 6 ++++-- massa-bootstrap/src/tests/scenarios.rs | 7 +++++-- massa-consensus-exports/src/test_exports/mock.rs | 7 ++++++- massa-consensus-worker/src/controller.rs | 14 ++++++++------ massa-consensus-worker/src/state/process.rs | 9 --------- massa-consensus-worker/src/state/stats.rs | 5 ++++- massa-consensus-worker/src/worker/main_loop.rs | 1 - massa-consensus-worker/src/worker/mod.rs | 4 ++-- massa-factory-worker/src/block_factory.rs | 2 +- massa-factory-worker/src/tests/tools.rs | 3 ++- massa-node/src/main.rs | 3 ++- massa-protocol-worker/src/protocol_network.rs | 4 +++- massa-protocol-worker/src/tests/tools.rs | 6 ++++-- 13 files changed, 41 insertions(+), 30 deletions(-) diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index d920179300a..6e4e3a3b347 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -427,7 +427,8 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = consensus_controller.get_block_statuses(&involved_blocks); + let involved_block_statuses = + consensus_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() .zip(involved_block_statuses.into_iter()) @@ -509,7 +510,8 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = consensus_controller.get_block_statuses(&involved_blocks); + let involved_block_statuses = + consensus_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() .zip(involved_block_statuses.into_iter()) diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index ee16b1ed723..019290e8cff 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -15,7 +15,9 @@ use crate::{ get_state, start_bootstrap_server, tests::tools::{assert_eq_bootstrap_graph, get_bootstrap_config}, }; -use massa_consensus_exports::test_exports::{MockConsensusController, MockConsensusControllerMessage}; +use massa_consensus_exports::test_exports::{ + MockConsensusController, MockConsensusControllerMessage, +}; use massa_final_state::{test_exports::assert_eq_final_state, FinalState, StateChanges}; use massa_models::{address::Address, slot::Slot, version::Version}; use massa_network_exports::{NetworkCommand, NetworkCommandSender}; @@ -59,7 +61,8 @@ async fn test_bootstrap_server() { }) .expect("could not start client selector controller"); - let (consensus_controller, mut consensus_event_receiver) = MockConsensusController::new_with_receiver(); + let (consensus_controller, mut consensus_event_receiver) = + MockConsensusController::new_with_receiver(); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); let final_state_bootstrap = get_random_final_state_bootstrap( PoSFinalState::new( diff --git a/massa-consensus-exports/src/test_exports/mock.rs b/massa-consensus-exports/src/test_exports/mock.rs index b7d58cb71cd..38962bb09b9 100644 --- a/massa-consensus-exports/src/test_exports/mock.rs +++ b/massa-consensus-exports/src/test_exports/mock.rs @@ -199,7 +199,12 @@ impl ConsensusController for MockConsensusController { self.0 .lock() .unwrap() - .send(MockConsensusControllerMessage::GetLatestBlockcliqueBlockAtSlot { slot, response_tx }) + .send( + 
MockConsensusControllerMessage::GetLatestBlockcliqueBlockAtSlot { + slot, + response_tx, + }, + ) .unwrap(); response_rx.recv().unwrap() } diff --git a/massa-consensus-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs index 89a5bfdf3cb..3039fc4a53b 100644 --- a/massa-consensus-worker/src/controller.rs +++ b/massa-consensus-worker/src/controller.rs @@ -162,12 +162,14 @@ impl ConsensusController for ConsensusControllerImpl { } fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { - let _ = self.command_sender.try_send(ConsensusCommand::RegisterBlock( - block_id, - slot, - block_storage, - created, - )); + let _ = self + .command_sender + .try_send(ConsensusCommand::RegisterBlock( + block_id, + slot, + block_storage, + created, + )); } fn register_block_header(&self, block_id: BlockId, header: Wrapped) { diff --git a/massa-consensus-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs index 722cec343d0..9c38e6d9164 100644 --- a/massa-consensus-worker/src/state/process.rs +++ b/massa-consensus-worker/src/state/process.rs @@ -425,7 +425,6 @@ impl ConsensusState { Ok(reprocess) } - /// TODO: Doc pub fn promote_dep_tree(&mut self, hash: BlockId) -> Result<(), ConsensusError> { let mut to_explore = vec![hash]; let mut to_promote: PreHashMap = PreHashMap::default(); @@ -868,14 +867,6 @@ impl ConsensusState { self.save_final_periods = latest_final_periods; } - /* - TODO add this again - let creator_addr = Address::from_public_key(&b_creator); - if self.staking_keys.contains_key(&creator_addr) { - warn!("block {} that was produced by our address {} at slot {} became stale. This is probably due to a temporary desynchronization.", b_id, creator_addr, b_slot); - } - */ - Ok(()) } } diff --git a/massa-consensus-worker/src/state/stats.rs b/massa-consensus-worker/src/state/stats.rs index aaea0b70489..4ab766a4ca4 100644 --- a/massa-consensus-worker/src/state/stats.rs +++ b/massa-consensus-worker/src/state/stats.rs @@ -60,7 +60,10 @@ impl ConsensusState { }) { warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); - let _ = self.channels.controller_event_tx.send(ConsensusEvent::NeedSync); + let _ = self + .channels + .controller_event_tx + .send(ConsensusEvent::NeedSync); } } // prune stats diff --git a/massa-consensus-worker/src/worker/main_loop.rs b/massa-consensus-worker/src/worker/main_loop.rs index 8caac3aef9d..7f31d6aff43 100644 --- a/massa-consensus-worker/src/worker/main_loop.rs +++ b/massa-consensus-worker/src/worker/main_loop.rs @@ -114,7 +114,6 @@ impl ConsensusWorker { /// Runs in loop forever. This loop must stop every slot to perform operations on stats and graph /// but can be stopped anytime by a command received. 
pub fn run(&mut self) { - //TODO: Add notify cs periods loop { match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { diff --git a/massa-consensus-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs index 06db5787b6b..fb0c5985a39 100644 --- a/massa-consensus-worker/src/worker/mod.rs +++ b/massa-consensus-worker/src/worker/mod.rs @@ -1,6 +1,6 @@ use massa_consensus_exports::{ - bootstrapable_graph::BootstrapableGraph, ConsensusChannels, ConsensusConfig, ConsensusController, - ConsensusManager, + bootstrapable_graph::BootstrapableGraph, ConsensusChannels, ConsensusConfig, + ConsensusController, ConsensusManager, }; use massa_models::block::BlockId; use massa_models::clique::Clique; diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index ab0672139f6..cc690b78be2 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -144,7 +144,7 @@ impl BlockFactoryWorker { }; // get best parents and their periods let parents: Vec<(BlockId, u64)> = self.channels.consensus.get_best_parents(); // Vec<(parent_id, parent_period)> - // generate the local storage object + // generate the local storage object let mut block_storage = self.channels.storage.clone_without_refs(); // claim block parents in local storage diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index 3505df5e2e0..ef2335f5874 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -55,7 +55,8 @@ impl TestFactory { /// - `TestFactory`: the structure that will be used to manage the tests pub fn new(default_keypair: &KeyPair) -> TestFactory { let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - let (consensus_controller, consensus_event_receiver) = MockConsensusController::new_with_receiver(); + let (consensus_controller, consensus_event_receiver) = + MockConsensusController::new_with_receiver(); let (pool_controller, pool_receiver) = MockPoolController::new_with_receiver(); let mut storage = Storage::create_root(); let mut factory_config = FactoryConfig::default(); diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 0acd732deb3..a10abf719e5 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -360,7 +360,8 @@ async fn launch( clock_compensation_millis: bootstrap_state.compensation_millis, }; - let (consensus_event_sender, consensus_event_receiver) = crossbeam_channel::bounded(CHANNEL_SIZE); + let (consensus_event_sender, consensus_event_receiver) = + crossbeam_channel::bounded(CHANNEL_SIZE); let consensus_channels = ConsensusChannels { execution_controller: execution_controller.clone(), selector_controller: selector_controller.clone(), diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index 74c7c136615..0d471c39b17 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -468,7 +468,8 @@ impl ProtocolWorker { warn!("Node id {} sent us full operations for block id {} but they exceed max size.", from_node_id, block_id); let _ = self.ban_node(&from_node_id).await; self.block_wishlist.remove(&block_id); - self.consensus_controller.mark_invalid_block(block_id, header); + self.consensus_controller + .mark_invalid_block(block_id, header); } else { if known_operations != block_ids_set { warn!( @@ -515,6 +516,7 @@ impl ProtocolWorker { 
                block_storage.store_block(wrapped_block);
 
                // Send to consensus
+                info!("Send to consensus block for slot: {}", slot);
                self.consensus_controller
                    .register_block(block_id, slot, block_storage, false);
            }
diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs
index 1f50a071204..afe1bdf90d3 100644
--- a/massa-protocol-worker/src/tests/tools.rs
+++ b/massa-protocol-worker/src/tests/tools.rs
@@ -39,7 +39,8 @@ where
         MockNetworkController::new();
     let (pool_controller, pool_event_receiver) = MockPoolController::new_with_receiver();
 
-    let (consensus_controller, consensus_event_receiver) = MockConsensusController::new_with_receiver();
+    let (consensus_controller, consensus_event_receiver) =
+        MockConsensusController::new_with_receiver();
     // start protocol controller
     let (protocol_command_sender, protocol_command_receiver) =
         mpsc::channel(protocol_config.controller_channel_size);
@@ -101,7 +102,8 @@ where
     let (network_controller, network_command_sender, network_event_receiver) =
         MockNetworkController::new();
     let (pool_controller, mock_pool_receiver) = MockPoolController::new_with_receiver();
-    let (consensus_controller, mock_consensus_receiver) = MockConsensusController::new_with_receiver();
+    let (consensus_controller, mock_consensus_receiver) =
+        MockConsensusController::new_with_receiver();
     let storage = Storage::create_root();
     // start protocol controller
     let (protocol_command_sender, protocol_command_receiver) =

From 58748fbe100dc688273c513234e3042ccecef0f7 Mon Sep 17 00:00:00 2001
From: AurelienFT
Date: Wed, 26 Oct 2022 13:42:26 +0200
Subject: [PATCH 36/40] Add debug prints at each slot

---
 massa-consensus-worker/src/state/tick.rs | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/massa-consensus-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs
index 3165bc82669..7392b86aa6c 100644
--- a/massa-consensus-worker/src/state/tick.rs
+++ b/massa-consensus-worker/src/state/tick.rs
@@ -3,6 +3,7 @@ use std::collections::BTreeSet;
 use massa_consensus_exports::{block_status::BlockStatus, error::ConsensusError};
 use massa_logging::massa_trace;
 use massa_models::{block::BlockId, slot::Slot};
+use tracing::info;
 
 use super::ConsensusState;
 
@@ -18,6 +19,28 @@ impl ConsensusState {
         massa_trace!("consensus.consensus_worker.slot_tick", {
             "slot": current_slot
         });
+        info!("AURELIEN: Size of gi_head {}", self.gi_head.len());
+        info!("AURELIEN: Size of cliques {}", self.max_cliques.len());
+        info!("AURELIEN: Size of active_index {}", self.active_index.len());
+        info!("AURELIEN: Size of save_final_periods {}", self.save_final_periods.len());
+        info!("AURELIEN: Size of latest_final_blocks_periods {}", self.latest_final_blocks_periods.len());
+        info!("AURELIEN: Size of best_parents {}", self.best_parents.len());
+        info!("AURELIEN: Size of block_statuses {}", self.block_statuses.len());
+        info!("AURELIEN: Size of incoming_index {}", self.incoming_index.len());
+        info!("AURELIEN: Size of waiting_for_slot_index {}", self.waiting_for_slot_index.len());
+        info!("AURELIEN: Size of waiting_for_dependencies_index {}", self.waiting_for_dependencies_index.len());
+        info!("AURELIEN: Size of discarded_index {}", self.discarded_index.len());
+        info!("AURELIEN: Size of to_propagate {}", self.to_propagate.len());
+        info!("AURELIEN: Size of attack_attempts {}", self.attack_attempts.len());
+        info!("AURELIEN: Size of new_final_blocks {}", self.new_final_blocks.len());
+        info!("AURELIEN: Size of new_stale_blocks {}", self.new_stale_blocks.len());
info!("AURELIEN: Size of final_block_stats {}", self.final_block_stats.len()); + info!("AURELIEN: Size of protocol_blocks {}", self.protocol_blocks.len()); + info!("AURELIEN: Size of stale_block_stats {}", self.stale_block_stats.len()); + info!("AURELIEN: Size of wishlist {}", self.wishlist.len()); + info!("AURELIEN: Size of prev_blockclique {}", self.prev_blockclique.len()); + info!("AURELIEN: Size of storage block length {}", self.storage.get_block_refs().len()); + info!("AURELIEN: Size of storage endos length {}", self.storage.get_endorsement_refs().len()); // list all elements for which the time has come let to_process: BTreeSet<(Slot, BlockId)> = self From 747ebce6c7b48fb1d59c9ab99367543ed15aa23c Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 27 Oct 2022 10:53:35 +0200 Subject: [PATCH 37/40] Add pruning functions. --- massa-consensus-worker/src/state/mod.rs | 1 + massa-consensus-worker/src/state/prune.rs | 348 ++++++++++++++++++ massa-consensus-worker/src/state/tick.rs | 80 +++- .../src/worker/main_loop.rs | 8 + 4 files changed, 421 insertions(+), 16 deletions(-) create mode 100644 massa-consensus-worker/src/state/prune.rs diff --git a/massa-consensus-worker/src/state/mod.rs b/massa-consensus-worker/src/state/mod.rs index 88ebbd9006c..8a8fc542205 100644 --- a/massa-consensus-worker/src/state/mod.rs +++ b/massa-consensus-worker/src/state/mod.rs @@ -21,6 +21,7 @@ use massa_time::MassaTime; mod graph; mod process; mod process_commands; +mod prune; mod stats; mod tick; mod verifications; diff --git a/massa-consensus-worker/src/state/prune.rs b/massa-consensus-worker/src/state/prune.rs new file mode 100644 index 00000000000..f7d5711a717 --- /dev/null +++ b/massa-consensus-worker/src/state/prune.rs @@ -0,0 +1,348 @@ +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{active_block::ActiveBlock, block::BlockId, prehash::PreHashMap, slot::Slot}; +use tracing::debug; + +use super::ConsensusState; + +impl ConsensusState { + /// prune active blocks and return final blocks, return discarded final blocks + fn prune_active(&mut self) -> Result, ConsensusError> { + // list required active blocks + let mut retain_active = self.list_required_active_blocks()?; + + // retain extra history according to the config + // this is useful to avoid desync on temporary connection loss + for a_block in self.active_index.iter() { + if let Some(BlockStatus::Active { + a_block: active_block, + .. 
+ }) = self.block_statuses.get(a_block) + { + let (_b_id, latest_final_period) = + self.latest_final_blocks_periods[active_block.slot.thread as usize]; + if active_block.slot.period + >= latest_final_period.saturating_sub(self.config.force_keep_final_periods) + { + retain_active.insert(*a_block); + } + } + } + + // remove unused final active blocks + let mut discarded_finals: PreHashMap = PreHashMap::default(); + let to_remove: Vec = self + .active_index + .difference(&retain_active) + .copied() + .collect(); + for discard_active_h in to_remove { + let block_slot; + let block_creator; + let block_parents; + { + let read_blocks = self.storage.read_blocks(); + let block = read_blocks.get(&discard_active_h).ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when removing unused final active blocks: {}", + discard_active_h + )) + })?; + block_slot = block.content.header.content.slot; + block_creator = block.creator_address; + block_parents = block.content.header.content.parents.clone(); + }; + + let discarded_active = if let Some(BlockStatus::Active { + a_block: discarded_active, + .. + }) = self.block_statuses.remove(&discard_active_h) + { + self.active_index.remove(&discard_active_h); + discarded_active + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and removing unused final active blocks - {} is missing", discard_active_h))); + }; + + // remove from parent's children + for (parent_h, _parent_period) in discarded_active.parents.iter() { + if let Some(BlockStatus::Active { + a_block: parent_active_block, + .. + }) = self.block_statuses.get_mut(parent_h) + { + parent_active_block.children[discarded_active.slot.thread as usize] + .remove(&discard_active_h); + } + } + + massa_trace!("consensus.block_graph.prune_active", {"hash": discard_active_h, "reason": DiscardReason::Final}); + + // mark as final + self.block_statuses.insert( + discard_active_h, + BlockStatus::Discarded { + slot: block_slot, + creator: block_creator, + parents: block_parents, + reason: DiscardReason::Final, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(discard_active_h); + + discarded_finals.insert(discard_active_h, *discarded_active); + } + + Ok(discarded_finals) + } + + fn prune_slot_waiting(&mut self) { + if self.waiting_for_slot_index.len() <= self.config.max_future_processing_blocks { + return; + } + let mut slot_waiting: Vec<(Slot, BlockId)> = self + .waiting_for_slot_index + .iter() + .filter_map(|block_id| { + if let Some(BlockStatus::WaitingForSlot(header_or_block)) = + self.block_statuses.get(block_id) + { + return Some((header_or_block.get_slot(), *block_id)); + } + None + }) + .collect(); + slot_waiting.sort_unstable(); + let len_slot_waiting = slot_waiting.len(); + (self.config.max_future_processing_blocks..len_slot_waiting).for_each(|idx| { + let (_slot, block_id) = &slot_waiting[idx]; + self.block_statuses.remove(block_id); + self.waiting_for_slot_index.remove(block_id); + }); + } + + fn prune_discarded(&mut self) -> Result<(), ConsensusError> { + if self.discarded_index.len() <= self.config.max_discarded_blocks { + return Ok(()); + } + let mut discard_hashes: Vec<(u64, BlockId)> = self + .discarded_index + .iter() + .filter_map(|block_id| { + if let Some(BlockStatus::Discarded { + sequence_number, .. 
+ }) = self.block_statuses.get(block_id) + { + return Some((*sequence_number, *block_id)); + } + None + }) + .collect(); + discard_hashes.sort_unstable(); + discard_hashes.truncate(self.discarded_index.len() - self.config.max_discarded_blocks); + for (_, block_id) in discard_hashes.iter() { + self.block_statuses.remove(block_id); + self.discarded_index.remove(block_id); + } + Ok(()) + } + + fn prune_waiting_for_dependencies(&mut self) -> Result<(), ConsensusError> { + let mut to_discard: PreHashMap> = PreHashMap::default(); + let mut to_keep: PreHashMap = PreHashMap::default(); + + // list items that are older than the latest final blocks in their threads or have deps that are discarded + { + for block_id in self.waiting_for_dependencies_index.iter() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + sequence_number, + }) = self.block_statuses.get(block_id) + { + // has already discarded dependencies => discard (choose worst reason) + let mut discard_reason = None; + let mut discarded_dep_found = false; + for dep in unsatisfied_dependencies.iter() { + if let Some(BlockStatus::Discarded { reason, .. }) = + self.block_statuses.get(dep) + { + discarded_dep_found = true; + match reason { + DiscardReason::Invalid(reason) => { + discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", block_id, reason))); + break; + } + DiscardReason::Stale => discard_reason = Some(DiscardReason::Stale), + DiscardReason::Final => discard_reason = Some(DiscardReason::Stale), + } + } + } + if discarded_dep_found { + to_discard.insert(*block_id, discard_reason); + continue; + } + + // is at least as old as the latest final block in its thread => discard as stale + let slot = header_or_block.get_slot(); + if slot.period <= self.latest_final_blocks_periods[slot.thread as usize].1 { + to_discard.insert(*block_id, Some(DiscardReason::Stale)); + continue; + } + + // otherwise, mark as to_keep + to_keep.insert(*block_id, (*sequence_number, header_or_block.get_slot())); + } + } + } + + // discard in chain and because of limited size + while !to_keep.is_empty() { + // mark entries as to_discard and remove them from to_keep + for (hash, _old_order) in to_keep.clone().into_iter() { + if let Some(BlockStatus::WaitingForDependencies { + unsatisfied_dependencies, + .. + }) = self.block_statuses.get(&hash) + { + // has dependencies that will be discarded => discard (choose worst reason) + let mut discard_reason = None; + let mut dep_to_discard_found = false; + for dep in unsatisfied_dependencies.iter() { + if let Some(reason) = to_discard.get(dep) { + dep_to_discard_found = true; + match reason { + Some(DiscardReason::Invalid(reason)) => { + discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", hash, reason))); + break; + } + Some(DiscardReason::Stale) => { + discard_reason = Some(DiscardReason::Stale) + } + Some(DiscardReason::Final) => { + discard_reason = Some(DiscardReason::Stale) + } + None => {} // leave as None + } + } + } + if dep_to_discard_found { + to_keep.remove(&hash); + to_discard.insert(hash, discard_reason); + continue; + } + } + } + + // remove worst excess element + if to_keep.len() > self.config.max_dependency_blocks { + let remove_elt = to_keep + .iter() + .filter_map(|(hash, _old_order)| { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + sequence_number, + .. 
+ }) = self.block_statuses.get(hash) + { + return Some((sequence_number, header_or_block.get_slot(), *hash)); + } + None + }) + .min(); + if let Some((_seq_num, _slot, hash)) = remove_elt { + to_keep.remove(&hash); + to_discard.insert(hash, None); + continue; + } + } + + // nothing happened: stop loop + break; + } + + // transition states to Discarded if there is a reason, otherwise just drop + for (block_id, reason_opt) in to_discard.drain() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, .. + }) = self.block_statuses.remove(&block_id) + { + self.waiting_for_dependencies_index.remove(&block_id); + let header = match header_or_block { + HeaderOrBlock::Header(h) => h, + HeaderOrBlock::Block { id: block_id, .. } => self + .storage + .read_blocks() + .get(&block_id) + .ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when pruning waiting for deps: {}", + block_id + )) + })? + .content + .header + .clone(), + }; + massa_trace!("consensus.block_graph.prune_waiting_for_dependencies", {"hash": block_id, "reason": reason_opt}); + + if let Some(reason) = reason_opt { + // add to stats if reason is Stale + if reason == DiscardReason::Stale { + self.new_stale_blocks + .insert(block_id, (header.creator_address, header.content.slot)); + } + // transition to Discarded only if there is a reason + self.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents.clone(), + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + } + } + } + + Ok(()) + } + + pub fn prune(&mut self) -> Result<(), ConsensusError> { + let before = self.max_cliques.len(); + // Step 1: discard final blocks that are not useful to the graph anymore and return them + self.prune_active()?; + + // Step 2: prune slot waiting blocks + self.prune_slot_waiting(); + + // Step 3: prune dependency waiting blocks + self.prune_waiting_for_dependencies()?; + + // Step 4: prune discarded + self.prune_discarded()?; + + let after = self.max_cliques.len(); + if before != after { + debug!( + "clique number went from {} to {} after pruning", + before, after + ); + } + + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs index 7392b86aa6c..971b3de6ca4 100644 --- a/massa-consensus-worker/src/state/tick.rs +++ b/massa-consensus-worker/src/state/tick.rs @@ -22,25 +22,73 @@ impl ConsensusState { info!("AURELIEN: Size of gi_head {}", self.gi_head.len()); info!("AURELIEN: Size of cliques {}", self.max_cliques.len()); info!("AURELIEN: Size of active_index {}", self.active_index.len()); - info!("AURELIEN: Size of save_final_periods {}", self.save_final_periods.len()); - info!("AURELIEN: Size of latest_final_blocks_periods {}", self.latest_final_blocks_periods.len()); + info!( + "AURELIEN: Size of save_final_periods {}", + self.save_final_periods.len() + ); + info!( + "AURELIEN: Size of latest_final_blocks_periods {}", + self.latest_final_blocks_periods.len() + ); info!("AURELIEN: Size of best_parents {}", self.best_parents.len()); - info!("AURELIEN: Size of block_statuses {}", self.block_statuses.len()); - info!("AURELIEN: Size of incoming_index {}", self.incoming_index.len()); - info!("AURELIEN: Size of waiting_for_slot_index {}", self.waiting_for_slot_index.len()); - info!("AURELIEN: Size of waiting_for_dependencies_index {}", 
self.waiting_for_dependencies_index.len()); - info!("AURELIEN: Size of discarded_index {}", self.discarded_index.len()); + info!( + "AURELIEN: Size of block_statuses {}", + self.block_statuses.len() + ); + info!( + "AURELIEN: Size of incoming_index {}", + self.incoming_index.len() + ); + info!( + "AURELIEN: Size of waiting_for_slot_index {}", + self.waiting_for_slot_index.len() + ); + info!( + "AURELIEN: Size of waiting_for_dependencies_index {}", + self.waiting_for_dependencies_index.len() + ); + info!( + "AURELIEN: Size of discarded_index {}", + self.discarded_index.len() + ); info!("AURELIEN: Size of to_propagate {}", self.to_propagate.len()); - info!("AURELIEN: Size of attack_attempts {}", self.attack_attempts.len()); - info!("AURELIEN: Size of new_final_blocks {}", self.new_final_blocks.len()); - info!("AURELIEN: Size of new_stale_blocks {}", self.new_stale_blocks.len()); - info!("AURELIEN: Size of final_block_stats {}", self.final_block_stats.len()); - info!("AURELIEN: Size of protocol_blocks {}", self.protocol_blocks.len()); - info!("AURELIEN: Size of stale_block_stats {}", self.stale_block_stats.len()); + info!( + "AURELIEN: Size of attack_attempts {}", + self.attack_attempts.len() + ); + info!( + "AURELIEN: Size of new_final_blocks {}", + self.new_final_blocks.len() + ); + info!( + "AURELIEN: Size of new_stale_blocks {}", + self.new_stale_blocks.len() + ); + info!( + "AURELIEN: Size of final_block_stats {}", + self.final_block_stats.len() + ); + info!( + "AURELIEN: Size of protocol_blocks {}", + self.protocol_blocks.len() + ); + info!( + "AURELIEN: Size of stale_block_stats {}", + self.stale_block_stats.len() + ); info!("AURELIEN: Size of wishlist {}", self.wishlist.len()); - info!("AURELIEN: Size of prev_blockclique {}", self.prev_blockclique.len()); - info!("AURELIEN: Size of storage block length {}", self.storage.get_block_refs().len()); - info!("AURELIEN: Size of storage endos length {}", self.storage.get_endorsement_refs().len()); + info!( + "AURELIEN: Size of prev_blockclique {}", + self.prev_blockclique.len() + ); + info!( + "AURELIEN: Size of storage block length {}", + self.storage.get_block_refs().len() + ); + info!( + "AURELIEN: Size of storage endos length {}", + self.storage.get_endorsement_refs().len() + ); // list all elements for which the time has come let to_process: BTreeSet<(Slot, BlockId)> = self diff --git a/massa-consensus-worker/src/worker/main_loop.rs b/massa-consensus-worker/src/worker/main_loop.rs index 7f31d6aff43..1a9722a2064 100644 --- a/massa-consensus-worker/src/worker/main_loop.rs +++ b/massa-consensus-worker/src/worker/main_loop.rs @@ -114,6 +114,7 @@ impl ConsensusWorker { /// Runs in loop forever. This loop must stop every slot to perform operations on stats and graph /// but can be stopped anytime by a command received. 
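// A minimal sketch of the scheduling pattern the hunk below introduces:
// remember when the block db was last pruned, and prune again once the
// configured interval has elapsed. All names here are stand-ins; the real
// loop reads the interval from `config.block_db_prune_interval` (a
// `MassaTime`) and prunes through `shared_state.write().prune()`.
fn slot_loop_with_periodic_prune(
    interval: std::time::Duration,
    mut do_slot_work: impl FnMut(),
    mut prune: impl FnMut(),
) {
    let mut last_prune = std::time::Instant::now();
    loop {
        // per-slot work (stats, graph updates, ...) happens first
        do_slot_work();
        // prune at most once per interval, however short the slots are
        if last_prune.elapsed() >= interval {
            prune();
            last_prune = std::time::Instant::now();
        }
    }
}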
pub fn run(&mut self) { + let mut last_prune = Instant::now(); loop { match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { @@ -134,6 +135,13 @@ impl ConsensusWorker { warn!("Error while processing block tick: {}", err); } }; + if last_prune.elapsed().as_millis() + > self.config.block_db_prune_interval.to_millis() as u128 + { + info!("AURELIEN: Pruning block DB"); + self.shared_state.write().prune().expect("Error while pruning"); + last_prune = Instant::now(); + } self.previous_slot = Some(self.next_slot); (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); } From 5dcf004722b8c9a0c5b874f63b4fc505b0689b55 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 27 Oct 2022 13:20:24 +0200 Subject: [PATCH 38/40] Remove debug print. --- massa-consensus-worker/src/state/tick.rs | 71 ------------------- .../src/worker/main_loop.rs | 6 +- massa-protocol-worker/src/protocol_network.rs | 1 - 3 files changed, 4 insertions(+), 74 deletions(-) diff --git a/massa-consensus-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs index 971b3de6ca4..3165bc82669 100644 --- a/massa-consensus-worker/src/state/tick.rs +++ b/massa-consensus-worker/src/state/tick.rs @@ -3,7 +3,6 @@ use std::collections::BTreeSet; use massa_consensus_exports::{block_status::BlockStatus, error::ConsensusError}; use massa_logging::massa_trace; use massa_models::{block::BlockId, slot::Slot}; -use tracing::info; use super::ConsensusState; @@ -19,76 +18,6 @@ impl ConsensusState { massa_trace!("consensus.consensus_worker.slot_tick", { "slot": current_slot }); - info!("AURELIEN: Size of gi_head {}", self.gi_head.len()); - info!("AURELIEN: Size of cliques {}", self.max_cliques.len()); - info!("AURELIEN: Size of active_index {}", self.active_index.len()); - info!( - "AURELIEN: Size of save_final_periods {}", - self.save_final_periods.len() - ); - info!( - "AURELIEN: Size of latest_final_blocks_periods {}", - self.latest_final_blocks_periods.len() - ); - info!("AURELIEN: Size of best_parents {}", self.best_parents.len()); - info!( - "AURELIEN: Size of block_statuses {}", - self.block_statuses.len() - ); - info!( - "AURELIEN: Size of incoming_index {}", - self.incoming_index.len() - ); - info!( - "AURELIEN: Size of waiting_for_slot_index {}", - self.waiting_for_slot_index.len() - ); - info!( - "AURELIEN: Size of waiting_for_dependencies_index {}", - self.waiting_for_dependencies_index.len() - ); - info!( - "AURELIEN: Size of discarded_index {}", - self.discarded_index.len() - ); - info!("AURELIEN: Size of to_propagate {}", self.to_propagate.len()); - info!( - "AURELIEN: Size of attack_attempts {}", - self.attack_attempts.len() - ); - info!( - "AURELIEN: Size of new_final_blocks {}", - self.new_final_blocks.len() - ); - info!( - "AURELIEN: Size of new_stale_blocks {}", - self.new_stale_blocks.len() - ); - info!( - "AURELIEN: Size of final_block_stats {}", - self.final_block_stats.len() - ); - info!( - "AURELIEN: Size of protocol_blocks {}", - self.protocol_blocks.len() - ); - info!( - "AURELIEN: Size of stale_block_stats {}", - self.stale_block_stats.len() - ); - info!("AURELIEN: Size of wishlist {}", self.wishlist.len()); - info!( - "AURELIEN: Size of prev_blockclique {}", - self.prev_blockclique.len() - ); - info!( - "AURELIEN: Size of storage block length {}", - self.storage.get_block_refs().len() - ); - info!( - "AURELIEN: Size of storage endos length {}", - self.storage.get_endorsement_refs().len() - ); // list all elements for which the time has come let to_process: 
BTreeSet<(Slot, BlockId)> = self diff --git a/massa-consensus-worker/src/worker/main_loop.rs b/massa-consensus-worker/src/worker/main_loop.rs index 1a9722a2064..88489e1fafc 100644 --- a/massa-consensus-worker/src/worker/main_loop.rs +++ b/massa-consensus-worker/src/worker/main_loop.rs @@ -138,8 +138,10 @@ impl ConsensusWorker { if last_prune.elapsed().as_millis() > self.config.block_db_prune_interval.to_millis() as u128 { - info!("AURELIEN: Pruning block DB"); - self.shared_state.write().prune().expect("Error while pruning"); + self.shared_state + .write() + .prune() + .expect("Error while pruning"); last_prune = Instant::now(); } self.previous_slot = Some(self.next_slot); diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index 0d471c39b17..b794d439b7b 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -566,7 +566,6 @@ impl ProtocolWorker { // Send operations to pool, // before performing the below checks, // and wait for them to have been procesed(i.e. added to storage). - println!("AURELIEN: Full ops received"); self.on_block_full_operations_received(from_node_id, block_id, operations, op_timer) .await } From 3e2ba48823de44cc8d8435ed126f19078dd2a0f4 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 15 Nov 2022 16:24:12 +0100 Subject: [PATCH 39/40] Make channels from consensus to protocol blocking. --- massa-consensus-worker/src/controller.rs | 22 ++++++++++++++----- massa-consensus-worker/src/lib.rs | 13 +++++++++++ massa-consensus-worker/src/worker/mod.rs | 3 ++- .../src/protocol_controller.rs | 10 ++++----- 4 files changed, 36 insertions(+), 12 deletions(-) diff --git a/massa-consensus-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs index 3039fc4a53b..2a622acc797 100644 --- a/massa-consensus-worker/src/controller.rs +++ b/massa-consensus-worker/src/controller.rs @@ -15,6 +15,7 @@ use massa_models::{ use massa_storage::Storage; use parking_lot::RwLock; use std::sync::{mpsc::SyncSender, Arc}; +use tracing::log::warn; use crate::{commands::ConsensusCommand, state::ConsensusState}; @@ -162,26 +163,35 @@ impl ConsensusController for ConsensusControllerImpl { } fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { - let _ = self + if let Err(err) = self .command_sender .try_send(ConsensusCommand::RegisterBlock( block_id, slot, block_storage, created, - )); + )) + { + warn!("error trying to register a block: {}", err); + } } fn register_block_header(&self, block_id: BlockId, header: Wrapped) { - let _ = self + if let Err(err) = self .command_sender - .try_send(ConsensusCommand::RegisterBlockHeader(block_id, header)); + .try_send(ConsensusCommand::RegisterBlockHeader(block_id, header)) + { + warn!("error trying to register a block header: {}", err); + } } fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { - let _ = self + if let Err(err) = self .command_sender - .try_send(ConsensusCommand::MarkInvalidBlock(block_id, header)); + .try_send(ConsensusCommand::MarkInvalidBlock(block_id, header)) + { + warn!("error trying to mark block as invalid: {}", err); + } } fn clone_box(&self) -> Box { diff --git a/massa-consensus-worker/src/lib.rs b/massa-consensus-worker/src/lib.rs index b7c05664fc0..71042385846 100644 --- a/massa-consensus-worker/src/lib.rs +++ b/massa-consensus-worker/src/lib.rs @@ -1,3 +1,16 @@ +// Copyright (c) 2022 MASSA LABS + +//! # General description +//! +//! 
The consensus worker launches a persistent thread that runs in the background.
+//! This thread has a `run` function that triggers the consensus algorithm each slot. It can be interrupted by commands
+//! that are handled on the fly. The consensus worker shares its state with a controller that the other modules can call.
+//! This avoids sending a message to the thread just to get information about the consensus.
+//!
+//! Communication with execution is blocking. Communication with protocol is blocking when sending information to protocol,
+//! but not blocking when protocol sends information to this module.
+//!
+//! This module doesn't use asynchronous code.
 #![feature(deadline_api)]
 
 mod commands;
diff --git a/massa-consensus-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs
index fb0c5985a39..58214ea7206 100644
--- a/massa-consensus-worker/src/worker/mod.rs
+++ b/massa-consensus-worker/src/worker/mod.rs
@@ -4,6 +4,7 @@ use massa_consensus_exports::{
 };
 use massa_models::block::BlockId;
 use massa_models::clique::Clique;
+use massa_models::config::CHANNEL_SIZE;
 use massa_models::prehash::PreHashSet;
 use massa_models::slot::Slot;
 use massa_storage::Storage;
@@ -54,7 +55,7 @@ pub fn start_consensus_worker(
     init_graph: Option<BootstrapableGraph>,
     storage: Storage,
 ) -> (Box<dyn ConsensusController>, Box<dyn ConsensusManager>) {
-    let (tx, rx) = mpsc::sync_channel(10);
+    let (tx, rx) = mpsc::sync_channel(CHANNEL_SIZE);
     // desync detection timespan
     let stats_desync_detection_timespan =
         config.t0.checked_mul(config.periods_per_cycle * 2).unwrap();
diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs
index 3fec37b70ac..4f615dc194c 100644
--- a/massa-protocol-exports/src/protocol_controller.rs
+++ b/massa-protocol-exports/src/protocol_controller.rs
@@ -75,7 +75,7 @@ impl ProtocolCommandSender {
             "block_id": block_id
         });
         self.0
-            .try_send(ProtocolCommand::IntegratedBlock { block_id, storage })
+            .blocking_send(ProtocolCommand::IntegratedBlock { block_id, storage })
             .map_err(|_| ProtocolError::ChannelError("block_integrated command send error".into()))
     }
@@ -85,7 +85,7 @@
             "block_id": block_id
         });
         self.0
-            .try_send(ProtocolCommand::AttackBlockDetected(block_id))
+            .blocking_send(ProtocolCommand::AttackBlockDetected(block_id))
             .map_err(|_| {
                 ProtocolError::ChannelError("notify_block_attack command send error".into())
             })
@@ -99,7 +99,7 @@
     ) -> Result<(), ProtocolError> {
         massa_trace!("protocol.command_sender.send_wishlist_delta", { "new": new, "remove": remove });
         self.0
-            .try_send(ProtocolCommand::WishlistDelta { new, remove })
+            .blocking_send(ProtocolCommand::WishlistDelta { new, remove })
             .map_err(|_| {
                 ProtocolError::ChannelError("send_wishlist_delta command send error".into())
             })
@@ -113,7 +113,7 @@
             "operations": operations.get_op_refs()
         });
         self.0
-            .try_send(ProtocolCommand::PropagateOperations(operations))
+            .blocking_send(ProtocolCommand::PropagateOperations(operations))
             .map_err(|_| {
                 ProtocolError::ChannelError("propagate_operation command send error".into())
             })
@@ -125,7 +125,7 @@
             "endorsements": endorsements.get_endorsement_refs()
         });
         self.0
-            .try_send(ProtocolCommand::PropagateEndorsements(endorsements))
+            .blocking_send(ProtocolCommand::PropagateEndorsements(endorsements))
             .map_err(|_| {
                 ProtocolError::ChannelError("propagate_endorsements command send error".into())
             })

From dd57ceb76c1bcc19f1b69d28720fb5971aafaf62 Mon Sep 17 00:00:00 2001
From: AurelienFT Date: Tue, 15 Nov 2022 17:15:23 +0100 Subject: [PATCH 40/40] Update tests. --- .../src/tests/ask_block_scenarios.rs | 141 +++++++++++------- .../src/tests/ban_nodes_scenarios.rs | 53 ++++--- .../src/tests/operations_scenarios.rs | 65 +++++--- massa-protocol-worker/src/tests/scenarios.rs | 54 ++++--- massa-protocol-worker/src/tests/tools.rs | 21 ++- 5 files changed, 216 insertions(+), 118 deletions(-) diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index e49a38481c8..cad0469fe5d 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -55,14 +55,18 @@ async fn test_full_ask_block_workflow() { .await; // Send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + let header = block.content.header.clone(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(header))].into_iter().collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A, then B assert_hash_asked_to_node(block.id, node_a.id, &mut network_controller).await; @@ -192,14 +196,18 @@ async fn test_empty_block() { .await; // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + let header = block.content.header.clone(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(header))].into_iter().collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A, then B assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; @@ -332,14 +340,19 @@ async fn test_someone_knows_it() { .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); assert_hash_asked_to_node(hash_1, node_c.id, &mut network_controller).await; @@ -417,22 +430,32 @@ async fn test_dont_want_it_anymore() { // end set up // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; // we don't want it anymore - protocol_command_sender - .send_wishlist_delta(Default::default(), vec![hash_1].into_iter().collect()) - .unwrap(); + 
protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta(Default::default(), vec![hash_1].into_iter().collect()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 7. Make sure protocol did not send additional ask for block commands. let ask_for_block_cmd_filter = |cmd| match cmd { @@ -492,14 +515,19 @@ async fn test_no_one_has_it() { // end set up // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; @@ -579,17 +607,22 @@ async fn test_multiple_blocks_without_a_priori() { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![ - (hash_1, Some(block_1.content.header.clone())), - (hash_2, Some(block_2.content.header.clone())), - ] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![ + (hash_1, Some(block_1.content.header.clone())), + (hash_2, Some(block_2.content.header.clone())), + ] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); let list = asked_list(&mut network_controller).await; for (node_id, set) in list.into_iter() { diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index 26a34102397..55f91790adb 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -173,14 +173,19 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); tools::assert_hash_asked_to_node(block.id, to_ban_node.id, &mut network_controller) .await; @@ -307,14 +312,19 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // 5. Ask for block. 
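// Every hunk in this patch applies one pattern: the previous commit switched
// `ProtocolCommandSender` from `try_send` to `blocking_send`, and tokio's
// `blocking_send` panics if called from within an async execution context,
// so the async test bodies now route each call through
// `tokio::task::spawn_blocking`, which runs the closure on a dedicated
// blocking thread. A generic sketch of the wrapper (the helper itself is
// hypothetical, not part of the patch), with `S` standing in for the sender:
async fn call_blocking<S, F>(sender: S, call: F) -> S
where
    S: Send + 'static,
    F: FnOnce(&mut S) + Send + 'static,
{
    tokio::task::spawn_blocking(move || {
        let mut sender = sender;
        call(&mut sender); // safe to block here, off the async worker threads
        sender // hand the sender back so the test can reuse it after `.await`
    })
    .await
    .expect("blocking task panicked")
}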
- protocol_command_sender - .send_wishlist_delta( - vec![(expected_hash, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .expect("Failed to ask for block."); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(expected_hash, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // 6. Make sure protocol did not ask for the block from the banned node. let got_more_commands = network_controller @@ -489,9 +499,14 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { tokio::time::sleep(Duration::from_millis(250)).await; // Simulate consensus notifying an attack attempt. - protocol_command_sender - .notify_block_attack(expected_hash) - .expect("Failed to ask for block."); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .notify_block_attack(expected_hash) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // Make sure all initial nodes are banned. let node_ids = nodes.into_iter().map(|node_info| node_info.id).collect(); diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 9c9257cf3eb..b0ee9cc8fa1 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -164,9 +164,14 @@ async fn test_protocol_propagates_operations_to_active_nodes() { let expected_operation_id = operation.id; storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); loop { match network_controller @@ -237,9 +242,14 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ // send endorsement to protocol // it should be propagated only to the node that doesn't know about it storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); loop { match network_controller @@ -369,9 +379,14 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo // Send it via the API. 
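// Design note on ownership in these tests: most call sites move the sender
// into the closure and have the closure return it, so the test keeps using
// the same value after `.await`. Since the sender is `Clone` (tools.rs at
// the end of this patch clones it instead), moving a clone in works just as
// well. A sketch of both variants on a plain tokio channel (channel type and
// payloads are illustrative only):
async fn move_vs_clone(tx: tokio::sync::mpsc::Sender<u32>) {
    // Variant 1: move the sender in and hand it back.
    let tx = tokio::task::spawn_blocking(move || {
        tx.blocking_send(1).expect("receiver dropped");
        tx
    })
    .await
    .unwrap();
    // Variant 2: move a clone in; the original `tx` stays usable here.
    let tx2 = tx.clone();
    tokio::task::spawn_blocking(move || tx2.blocking_send(2).expect("receiver dropped"))
        .await
        .unwrap();
}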
storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); let expected_operation_id_2 = operation.id; @@ -454,14 +469,19 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); assert_hash_asked_to_node(block.id, nodes[0].id, &mut network_controller).await; @@ -483,9 +503,14 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ // it should not propagate to the node that already knows about it // because of the previously received header. storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); match network_controller .wait_command(1000.into(), |cmd| match cmd { diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs index 24021e0a38d..143ca55aa42 100644 --- a/massa-protocol-worker/src/tests/scenarios.rs +++ b/massa-protocol-worker/src/tests/scenarios.rs @@ -69,14 +69,19 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { assert_eq!(expected_hash, received_hash); // 5. Ask for block. - protocol_command_sender - .send_wishlist_delta( - vec![(expected_hash, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .expect("Failed to ask for block."); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(expected_hash, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // 6. Check that protocol asks the node for the full block. match network_controller @@ -144,9 +149,14 @@ async fn test_protocol_sends_blocks_when_asked_for() { // Add to storage, integrate. storage.store_block(block.clone()); - protocol_command_sender - .integrated_block(expected_hash, storage.clone()) - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .integrated_block(expected_hash, storage.clone()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 3. Simulate two nodes asking for a block. for node in nodes.iter().take(2) { @@ -262,9 +272,14 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f // 5. Propagate header. 
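// `integrated_block` now waits for channel capacity instead of failing fast,
// which is why `storage` is moved into the blocking task below. The two
// tokio mpsc send flavours used across these patches differ as sketched here
// (toy channel, all names illustrative; `blocking_send` must run outside the
// async runtime, e.g. inside `spawn_blocking`):
fn send_both_ways(tx: &tokio::sync::mpsc::Sender<String>) {
    // try_send: returns Err(TrySendError::Full) immediately on a full buffer;
    // with the old `let _ = ...try_send(...)` style the command was dropped.
    if let Err(e) = tx.try_send("fast".into()) {
        eprintln!("try_send failed: {}", e);
    }
    // blocking_send: parks the current thread until capacity frees up or the
    // receiver is dropped; panics if called from an async execution context.
    tx.blocking_send("reliable".into()).expect("receiver dropped");
}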
let _op_ids = ref_block.content.operations.clone(); - protocol_command_sender - .integrated_block(ref_hash, storage) - .expect("Failed to ask for block."); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .integrated_block(ref_hash, storage.clone()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 6. Check that protocol propagates the header to the right nodes. // node_a created the block and should receive nothing @@ -372,9 +387,14 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl storage.store_block(ref_block.clone()); // 5. Propagate header. let _op_ids = ref_block.content.operations.clone(); - protocol_command_sender - .integrated_block(ref_hash, storage) - .expect("Failed to ask for block."); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .integrated_block(ref_hash, storage.clone()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 6. Check that protocol propagates the header to the right nodes. // node_a created the block and should receive nothing diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index afe1bdf90d3..7917116c4a1 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -155,14 +155,19 @@ pub async fn send_and_propagate_block( .send_header(source_node_id, block.content.header.clone()) .await; - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .unwrap(); + let mut protocol_sender = protocol_command_sender.clone(); + tokio::task::spawn_blocking(move || { + protocol_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + }) + .await + .unwrap(); // Send block info to protocol. let info = vec![(