diff --git a/Cargo.lock b/Cargo.lock index a8b2df56a45..3e42343a864 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -908,6 +908,26 @@ dependencies = [ "instant", ] +[[package]] +name = "fix-hidden-lifetime-bug" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4ae9c2016a663983d4e40a9ff967d6dcac59819672f0b47f2b17574e99c33c8" +dependencies = [ + "fix-hidden-lifetime-bug-proc_macros", +] + +[[package]] +name = "fix-hidden-lifetime-bug-proc_macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1575,6 +1595,7 @@ name = "massa-node" version = "0.1.0" dependencies = [ "anyhow", + "crossbeam-channel", "dialoguer", "enum-map", "lazy_static", @@ -1648,7 +1669,6 @@ dependencies = [ "jsonrpc-http-server", "massa_consensus_exports", "massa_execution_exports", - "massa_graph", "massa_hash", "massa_models", "massa_network_exports", @@ -1698,13 +1718,13 @@ dependencies = [ "async-speed-limit", "bitvec", "displaydoc", + "fix-hidden-lifetime-bug", "futures 0.3.24", "lazy_static", "massa_async_pool", "massa_consensus_exports", "massa_executed_ops", "massa_final_state", - "massa_graph", "massa_hash", "massa_ledger_exports", "massa_ledger_worker", @@ -1749,46 +1769,40 @@ dependencies = [ name = "massa_consensus_exports" version = "0.1.0" dependencies = [ + "crossbeam-channel", "displaydoc", - "massa_cipher", "massa_execution_exports", - "massa_graph", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", "massa_protocol_exports", + "massa_serialization", "massa_signature", "massa_storage", "massa_time", + "nom 7.1.1", + "serde 1.0.145", "serde_json", - "tempfile", "thiserror", - "tokio", ] [[package]] name = "massa_consensus_worker" version = "0.1.0" dependencies = [ - "massa_cipher", + "displaydoc", "massa_consensus_exports", - "massa_execution_exports", - "massa_graph", "massa_hash", "massa_logging", "massa_models", - "massa_pool_exports", - "massa_pos_exports", - "massa_pos_worker", - "massa_protocol_exports", - "massa_serialization", "massa_signature", "massa_storage", "massa_time", + "num", "parking_lot", + "serde 1.0.145", "serde_json", - "serial_test 0.9.0", - "tokio", "tracing", ] @@ -1915,27 +1929,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "massa_graph" -version = "0.1.0" -dependencies = [ - "displaydoc", - "massa_execution_exports", - "massa_hash", - "massa_logging", - "massa_models", - "massa_pos_exports", - "massa_serialization", - "massa_signature", - "massa_storage", - "nom 7.1.1", - "num", - "serde 1.0.145", - "serde_json", - "thiserror", - "tracing", -] - [[package]] name = "massa_hash" version = "0.1.0" @@ -2155,6 +2148,7 @@ version = "0.1.0" dependencies = [ "futures 0.3.24", "lazy_static", + "massa_consensus_exports", "massa_hash", "massa_logging", "massa_models", diff --git a/Cargo.toml b/Cargo.toml index a6a0a6344cf..4c01e663b8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,8 @@ members = [ "massa-execution-worker", "massa-factory-exports", "massa-factory-worker", - "massa-graph", + "massa-consensus-exports", + "massa-consensus-worker", "massa-hash", "massa-logging", "massa-models", diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index faca8e0bef0..51c1035c758 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -18,7 +18,6 @@ itertools = "0.10" 
parking_lot = { version = "0.12", features = ["deadlock_detection"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_graph = { path = "../massa-graph" } massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_network_exports = { path = "../massa-network-exports" } diff --git a/massa-api/src/config.rs b/massa-api/src/config.rs index 796dc3bfe43..3762ecd7ba0 100644 --- a/massa-api/src/config.rs +++ b/massa-api/src/config.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS use jsonrpc_core::serde::Deserialize; +use massa_time::MassaTime; use std::net::SocketAddr; use std::path::PathBuf; @@ -30,4 +31,12 @@ pub struct APIConfig { pub max_function_name_length: u16, /// max parameter size pub max_parameter_size: u32, + /// thread count + pub thread_count: u8, + /// `genesis_timestamp` + pub genesis_timestamp: MassaTime, + /// t0 + pub t0: MassaTime, + /// periods per cycle + pub periods_per_cycle: u64, } diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index df948f67404..205dfeca3c9 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -20,9 +20,9 @@ pub enum ApiError { ReceiveChannelError(String), /// `massa_hash` error: {0} MassaHashError(#[from] MassaHashError), - /// Consensus error: {0} - ConsensusError(#[from] Box), - /// Execution error: {0} + /// consensus error: {0} + ConsensusError(#[from] ConsensusError), + /// execution error: {0} ExecutionError(#[from] ExecutionError), /// Network error: {0} NetworkError(#[from] NetworkError), @@ -79,9 +79,3 @@ impl From for jsonrpc_core::Error { } } } - -impl std::convert::From for ApiError { - fn from(err: ConsensusError) -> Self { - ApiError::ConsensusError(Box::new(err)) - } -} diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index 8179b133ff1..54eb7c3c50e 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -8,7 +8,7 @@ use error::ApiError; use jsonrpc_core::{serde_json, BoxFuture, IoHandler, Value}; use jsonrpc_derive::rpc; use jsonrpc_http_server::{CloseHandle, ServerBuilder}; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; +use massa_consensus_exports::ConsensusController; use massa_execution_exports::ExecutionController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, @@ -52,7 +52,7 @@ pub use config::APIConfig; /// Public API component pub struct Public { /// link to the consensus component - pub consensus_command_sender: ConsensusCommandSender, + pub consensus_controller: Box, /// link to the execution component pub execution_controller: Box, /// link to the selector component @@ -63,8 +63,6 @@ pub struct Public { pub protocol_command_sender: ProtocolCommandSender, /// Massa storage pub storage: Storage, - /// consensus configuration (TODO: remove it, can be retrieved via an endpoint) - pub consensus_config: ConsensusConfig, /// API settings pub api_settings: APIConfig, /// network setting @@ -81,14 +79,10 @@ pub struct Public { /// Private API content pub struct Private { - /// link to the consensus component - pub consensus_command_sender: ConsensusCommandSender, /// link to the network component pub network_command_sender: NetworkCommandSender, /// link to the execution component pub execution_controller: Box, - /// consensus configuration (TODO: remove it, can be retrieved via an endpoint) - pub consensus_config: ConsensusConfig, /// API settings pub api_settings: APIConfig, /// stop channel diff --git a/massa-api/src/private.rs 
b/massa-api/src/private.rs index 07fab1453a0..64a974e28e1 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -7,7 +7,6 @@ use crate::{Endpoints, Private, RpcServer, StopHandle, Value, API}; use jsonrpc_core::BoxFuture; use jsonrpc_http_server::tokio::sync::mpsc; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; use massa_execution_exports::ExecutionController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, @@ -39,20 +38,16 @@ use std::sync::Arc; impl API { /// generate a new private API pub fn new( - consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, execution_controller: Box, api_settings: APIConfig, - consensus_settings: ConsensusConfig, node_wallet: Arc>, ) -> (Self, mpsc::Receiver<()>) { let (stop_node_channel, rx) = mpsc::channel(1); ( API(Private { - consensus_command_sender, network_command_sender, execution_controller, - consensus_config: consensus_settings, api_settings, stop_node_channel, node_wallet, diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 11bad9f707d..a9d9cfc07b7 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -5,11 +5,11 @@ use crate::config::APIConfig; use crate::error::ApiError; use crate::{serde_json, Endpoints, Public, RpcServer, StopHandle, Value, API}; use jsonrpc_core::BoxFuture; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; +use massa_consensus_exports::block_status::DiscardReason; +use massa_consensus_exports::ConsensusController; use massa_execution_exports::{ ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; -use massa_graph::DiscardReason; use massa_models::api::{ BlockGraphStatus, DatastoreEntryInput, DatastoreEntryOutput, OperationInput, ReadOnlyBytecodeExecution, ReadOnlyCall, SlotAmount, @@ -57,11 +57,10 @@ use std::net::{IpAddr, SocketAddr}; impl API { /// generate a new public API pub fn new( - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, execution_controller: Box, api_settings: APIConfig, selector_controller: Box, - consensus_settings: ConsensusConfig, pool_command_sender: Box, protocol_command_sender: ProtocolCommandSender, network_settings: NetworkConfig, @@ -72,8 +71,7 @@ impl API { storage: Storage, ) -> Self { API(Public { - consensus_command_sender, - consensus_config: consensus_settings, + consensus_controller, api_settings, pool_command_sender, network_settings, @@ -299,28 +297,28 @@ impl Endpoints for API { fn get_status(&self) -> BoxFuture> { let execution_controller = self.0.execution_controller.clone(); - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let network_command_sender = self.0.network_command_sender.clone(); let network_config = self.0.network_settings.clone(); let version = self.0.version; - let consensus_settings = self.0.consensus_config.clone(); let compensation_millis = self.0.compensation_millis; let pool_command_sender = self.0.pool_command_sender.clone(); let node_id = self.0.node_id; let config = CompactConfig::default(); + let api_config = self.0.api_settings.clone(); let closure = async move || { let now = MassaTime::now(compensation_millis)?; let last_slot = get_latest_block_slot_at_timestamp( - consensus_settings.thread_count, - consensus_settings.t0, - consensus_settings.genesis_timestamp, + api_config.thread_count, + 
api_config.t0, + api_config.genesis_timestamp, now, )?; let execution_stats = execution_controller.get_stats(); + let consensus_stats = consensus_controller.get_stats()?; - let (consensus_stats, network_stats, peers) = tokio::join!( - consensus_command_sender.get_stats(), + let (network_stats, peers) = tokio::join!( network_command_sender.get_network_stats(), network_command_sender.get_peers() ); @@ -347,40 +345,40 @@ impl Endpoints for API { last_slot, next_slot: last_slot .unwrap_or_else(|| Slot::new(0, 0)) - .get_next_slot(consensus_settings.thread_count)?, + .get_next_slot(api_config.thread_count)?, execution_stats, - consensus_stats: consensus_stats?, + consensus_stats, network_stats: network_stats?, pool_stats, config, current_cycle: last_slot .unwrap_or_else(|| Slot::new(0, 0)) - .get_cycle(consensus_settings.periods_per_cycle), + .get_cycle(api_config.periods_per_cycle), }) }; Box::pin(closure()) } fn get_cliques(&self) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); - let closure = async move || Ok(consensus_command_sender.get_cliques().await?); + let consensus_controller = self.0.consensus_controller.clone(); + let closure = async move || Ok(consensus_controller.get_cliques()); Box::pin(closure()) } fn get_stakers(&self) -> BoxFuture, ApiError>> { let execution_controller = self.0.execution_controller.clone(); - let cfg = self.0.consensus_config.clone(); + let api_config = self.0.api_settings.clone(); let compensation_millis = self.0.compensation_millis; let closure = async move || { let curr_cycle = get_latest_block_slot_at_timestamp( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, + api_config.thread_count, + api_config.t0, + api_config.genesis_timestamp, MassaTime::now(compensation_millis)?, )? 
.unwrap_or_else(|| Slot::new(0, 0)) - .get_cycle(cfg.periods_per_cycle); + .get_cycle(api_config.periods_per_cycle); let mut staker_vec = execution_controller .get_cycle_active_rolls(curr_cycle) .into_iter() @@ -423,7 +421,7 @@ impl Endpoints for API { let in_pool = self.0.pool_command_sender.contains_operations(&ops); let api_cfg = self.0.api_settings.clone(); - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let closure = async move || { if ops.len() as u64 > api_cfg.max_arguments { return Err(ApiError::BadRequest("too many arguments".into())); @@ -437,9 +435,8 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = consensus_command_sender - .get_block_statuses(&involved_blocks) - .await?; + let involved_block_statuses = + consensus_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() .zip(involved_block_statuses.into_iter()) @@ -506,7 +503,7 @@ impl Endpoints for API { // ask pool whether it carries the operations let in_pool = self.0.pool_command_sender.contains_endorsements(&eds); - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let api_cfg = self.0.api_settings.clone(); let closure = async move || { if eds.len() as u64 > api_cfg.max_arguments { @@ -521,9 +518,8 @@ impl Endpoints for API { .unique() .cloned() .collect(); - let involved_block_statuses = consensus_command_sender - .get_block_statuses(&involved_blocks) - .await?; + let involved_block_statuses = + consensus_controller.get_block_statuses(&involved_blocks); let block_statuses: PreHashMap = involved_blocks .into_iter() .zip(involved_block_statuses.into_iter()) @@ -564,7 +560,7 @@ impl Endpoints for API { /// gets a block. Returns None if not found /// only active blocks are returned fn get_block(&self, id: BlockId) -> BoxFuture> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let storage = self.0.storage.clone_without_refs(); let closure = async move || { let block = match storage.read_blocks().get(&id).cloned() { @@ -574,9 +570,8 @@ impl Endpoints for API { } }; - let graph_status = consensus_command_sender + let graph_status = consensus_controller .get_block_statuses(&[id]) - .await? .into_iter() .next() .expect("expected get_block_statuses to return one element"); @@ -605,13 +600,10 @@ impl Endpoints for API { &self, slot: Slot, ) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let storage = self.0.storage.clone_without_refs(); let closure = async move || { - let block_id_result = consensus_command_sender - .get_blockclique_block_at_slot(slot) - .await; - let block_id = match block_id_result? 
{ + let block_id = match consensus_controller.get_blockclique_block_at_slot(slot) { Some(id) => id, None => return Ok(None), }; @@ -630,20 +622,18 @@ impl Endpoints for API { &self, time: TimeInterval, ) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); - let consensus_settings = self.0.consensus_config.clone(); + let consensus_controller = self.0.consensus_controller.clone(); + let api_config = self.0.api_settings.clone(); let closure = async move || { // filter blocks from graph_export let (start_slot, end_slot) = time_range_to_slot_range( - consensus_settings.thread_count, - consensus_settings.t0, - consensus_settings.genesis_timestamp, + api_config.thread_count, + api_config.t0, + api_config.genesis_timestamp, time.start, time.end, )?; - let graph = consensus_command_sender - .get_block_graph_status(start_slot, end_slot) - .await?; + let graph = consensus_controller.get_block_graph_status(start_slot, end_slot)?; let mut res = Vec::with_capacity(graph.active_blocks.len()); let blockclique = graph .max_cliques @@ -751,9 +741,9 @@ impl Endpoints for API { // get future draws from selector let selection_draws = { let cur_slot = timeslots::get_current_latest_block_slot( - self.0.consensus_config.thread_count, - self.0.consensus_config.t0, - self.0.consensus_config.genesis_timestamp, + self.0.api_settings.thread_count, + self.0.api_settings.t0, + self.0.api_settings.genesis_timestamp, self.0.compensation_millis, ) .expect("could not get latest current slot") @@ -797,7 +787,7 @@ impl Endpoints for API { res.push(AddressInfo { // general address info address, - thread: address.get_thread(self.0.consensus_config.thread_count), + thread: address.get_thread(self.0.api_settings.thread_count), // final execution info final_balance: execution_infos.final_balance, @@ -891,7 +881,7 @@ impl Endpoints for API { to_send.store_operations(verified_ops.clone()); let ids: Vec = verified_ops.iter().map(|op| op.id).collect(); cmd_sender.add_operations(to_send.clone()); - protocol_sender.propagate_operations(to_send).await?; + protocol_sender.propagate_operations(to_send)?; Ok(ids) }; Box::pin(closure()) diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index 2cdcea04c2d..90c4db6eeb2 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -11,6 +11,7 @@ async-speed-limit = { git = "https://github.com/adrien-zinger/async-speed-limit" "default", "tokio", ] } +fix-hidden-lifetime-bug = "0.2.5" displaydoc = "0.2" futures = "0.3" num_enum = "0.5" @@ -28,7 +29,6 @@ massa_async_pool = { path = "../massa-async-pool" } massa_consensus_exports = { path = "../massa-consensus-exports" } massa_executed_ops = { path = "../massa-executed-ops" } massa_final_state = { path = "../massa-final-state" } -massa_graph = { path = "../massa-graph" } massa_hash = { path = "../massa-hash" } massa_ledger_exports = { path = "../massa-ledger-exports" } massa_logging = { path = "../massa-logging" } @@ -58,11 +58,11 @@ tempfile = "3.3" testing = [ "massa_final_state/testing", "massa_ledger_worker/testing", + "massa_consensus_exports/testing", "massa_async_pool/testing", ] sandbox = [ "massa_async_pool/sandbox", - "massa_consensus_exports/sandbox", "massa_final_state/sandbox", "massa_models/sandbox", ] diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index d072dccb0f6..5783ccce87a 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -30,12 +30,12 @@ pub enum BootstrapError { UnexpectedConnectionDrop, /// 
`massa_hash` error: {0} MassaHashError(#[from] MassaHashError), + /// `massa_consensus` error: {0} + MassaConsensusError(#[from] ConsensusError), /// `massa_signature` error {0} MassaSignatureError(#[from] massa_signature::MassaSignatureError), /// time error: {0} TimeError(#[from] TimeError), - /// consensus error: {0} - ConsensusError(#[from] ConsensusError), /// network error: {0} NetworkError(#[from] NetworkError), /// final state error: {0} diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index 248d23fadbe..9fe4a2ac427 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -13,9 +13,12 @@ #![feature(ip)] #![feature(let_chains)] +#[macro_use] +extern crate fix_hidden_lifetime_bug; + pub use establisher::types::Establisher; +use massa_consensus_exports::bootstrapable_graph::BootstrapableGraph; use massa_final_state::FinalState; -use massa_graph::BootstrapableGraph; use massa_network_exports::BootstrapPeers; use parking_lot::RwLock; use std::sync::Arc; diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index a169e9d0571..40139054753 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -4,11 +4,11 @@ use massa_async_pool::{ AsyncMessage, AsyncMessageId, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, AsyncPoolDeserializer, AsyncPoolSerializer, }; -use massa_executed_ops::{ExecutedOpsDeserializer, ExecutedOpsSerializer}; -use massa_final_state::{StateChanges, StateChangesDeserializer, StateChangesSerializer}; -use massa_graph::{ +use massa_consensus_exports::bootstrapable_graph::{ BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, }; +use massa_executed_ops::{ExecutedOpsDeserializer, ExecutedOpsSerializer}; +use massa_final_state::{StateChanges, StateChangesDeserializer, StateChangesSerializer}; use massa_ledger_exports::{KeyDeserializer, KeySerializer}; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index 18f2b86f0f2..b67574d4a05 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -1,7 +1,7 @@ use futures::stream::FuturesUnordered; use futures::StreamExt; use massa_async_pool::AsyncMessageId; -use massa_consensus_exports::ConsensusCommandSender; +use massa_consensus_exports::ConsensusController; use massa_final_state::FinalState; use massa_logging::massa_trace; use massa_models::{slot::Slot, streaming_step::StreamingStep, version::Version}; @@ -50,7 +50,7 @@ impl BootstrapManager { /// start a bootstrap server. /// Once your node will be ready, you may want other to bootstrap from you. 
pub async fn start_bootstrap_server( - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, bootstrap_config: BootstrapConfig, @@ -101,7 +101,7 @@ pub async fn start_bootstrap_server( let join_handle = tokio::spawn(async move { BootstrapServer { - consensus_command_sender, + consensus_controller, network_command_sender, final_state, establisher, @@ -128,7 +128,7 @@ pub async fn start_bootstrap_server( } struct BootstrapServer { - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, establisher: Establisher, @@ -253,7 +253,7 @@ impl BootstrapServer { let compensation_millis = self.compensation_millis; let version = self.version; let data_execution = self.final_state.clone(); - let consensus_command_sender = self.consensus_command_sender.clone(); + let consensus_command_sender = self.consensus_controller.clone(); let network_command_sender = self.network_command_sender.clone(); let keypair = self.keypair.clone(); let config = self.bootstrap_config.clone(); @@ -429,14 +429,16 @@ pub async fn send_final_state_stream( Ok(()) } +#[allow(clippy::manual_async_fn)] #[allow(clippy::too_many_arguments)] +#[fix_hidden_lifetime_bug] async fn manage_bootstrap( bootstrap_config: &BootstrapConfig, server: &mut BootstrapServerBinder, final_state: Arc>, compensation_millis: i64, version: Version, - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, network_command_sender: NetworkCommandSender, ) -> Result<(), BootstrapError> { massa_trace!("bootstrap.lib.manage_bootstrap", {}); @@ -539,7 +541,7 @@ async fn manage_bootstrap( match tokio::time::timeout( write_timeout, server.send(BootstrapServerMessage::ConsensusState { - graph: consensus_command_sender.get_bootstrap_state().await?, + graph: consensus_controller.get_bootstrap_graph()?, }), ) .await diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 6fa8c695dc0..48be7c097aa 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -4,7 +4,7 @@ use super::{ mock_establisher, tools::{ bridge_mock_streams, get_boot_state, get_peers, get_random_final_state_bootstrap, - get_random_ledger_changes, wait_consensus_command, wait_network_command, + get_random_ledger_changes, wait_network_command, }, }; use crate::tests::tools::{ @@ -16,20 +16,18 @@ use crate::{ tests::tools::{assert_eq_bootstrap_graph, get_bootstrap_config}, }; use massa_async_pool::AsyncPoolConfig; -use massa_consensus_exports::{commands::ConsensusCommand, ConsensusCommandSender}; +use massa_consensus_exports::test_exports::{ + MockConsensusController, MockConsensusControllerMessage, +}; use massa_executed_ops::ExecutedOpsConfig; use massa_final_state::{ test_exports::assert_eq_final_state, FinalState, FinalStateConfig, StateChanges, }; use massa_ledger_exports::LedgerConfig; -use massa_models::{ - address::Address, - config::{ - MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_DATASTORE_KEY_LENGTH, POS_SAVED_CYCLES, - }, - slot::Slot, - version::Version, +use massa_models::config::{ + MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_DATASTORE_KEY_LENGTH, POS_SAVED_CYCLES, }; +use massa_models::{address::Address, slot::Slot, version::Version}; use massa_network_exports::{NetworkCommand, NetworkCommandSender}; use massa_pos_exports::{ test_exports::assert_eq_pos_selection, PoSConfig, PoSFinalState, 
SelectorConfig, @@ -59,8 +57,8 @@ async fn test_bootstrap_server() { let rolls_path = PathBuf::from_str("../massa-node/base_config/initial_rolls.json").unwrap(); let genesis_address = Address::from_public_key(&KeyPair::generate().get_public_key()); - // init the communication channels - let (consensus_cmd_tx, mut consensus_cmd_rx) = mpsc::channel::(5); + let (consensus_controller, mut consensus_event_receiver) = + MockConsensusController::new_with_receiver(); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); // setup final state local config @@ -139,7 +137,7 @@ async fn test_bootstrap_server() { // start bootstrap server let (bootstrap_establisher, bootstrap_interface) = mock_establisher::new(); let bootstrap_manager = start_bootstrap_server( - ConsensusCommandSender(consensus_cmd_tx), + consensus_controller, NetworkCommandSender(network_cmd_tx), final_state_server.clone(), bootstrap_config.clone(), @@ -216,23 +214,6 @@ async fn test_bootstrap_server() { sent_peers }; - // wait for bootstrap to ask consensus for bootstrap graph, send it - let wait_graph = async move || { - let response = - match wait_consensus_command(&mut consensus_cmd_rx, 1000.into(), |cmd| match cmd { - ConsensusCommand::GetBootstrapState(resp) => Some(resp), - _ => None, - }) - .await - { - Some(resp) => resp, - None => panic!("timeout waiting for get boot graph consensus command"), - }; - let sent_graph = get_boot_state(); - response.send(Box::new(sent_graph.clone())).await.unwrap(); - sent_graph - }; - // launch the modifier thread let list_changes: Arc>> = Arc::new(RwLock::new(Vec::new())); let list_changes_clone = list_changes.clone(); @@ -256,9 +237,26 @@ async fn test_bootstrap_server() { } }); - // wait for peers and graph let sent_peers = wait_peers().await; - let sent_graph = wait_graph().await; + + // wait for peers and graph + let sent_graph = tokio::task::spawn_blocking(move || { + let response = + consensus_event_receiver.wait_command(MassaTime::from_millis(10000), |cmd| match cmd { + MockConsensusControllerMessage::GetBootstrapableGraph { response_tx } => { + let sent_graph = get_boot_state(); + response_tx.send(Ok(sent_graph.clone())).unwrap(); + Some(sent_graph) + } + _ => panic!("bad command for get boot graph consensus command"), + }); + match response { + Some(graph) => graph, + None => panic!("error waiting for get boot graph consensus command"), + } + }) + .await + .unwrap(); // wait for get_state let bootstrap_res = get_state_h diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 00e505acae0..8e485eadd53 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -5,13 +5,15 @@ use crate::settings::BootstrapConfig; use bitvec::vec::BitVec; use massa_async_pool::test_exports::{create_async_pool, get_random_message}; use massa_async_pool::{AsyncPoolChanges, Change}; -use massa_consensus_exports::commands::ConsensusCommand; +use massa_consensus_exports::{ + bootstrapable_graph::{ + BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, + }, + export_active_block::{ExportActiveBlock, ExportActiveBlockSerializer}, +}; use massa_executed_ops::{ExecutedOps, ExecutedOpsConfig}; use massa_final_state::test_exports::create_final_state; use massa_final_state::{FinalState, FinalStateConfig}; -use massa_graph::export_active_block::ExportActiveBlockSerializer; -use massa_graph::{export_active_block::ExportActiveBlock, BootstrapableGraph}; -use massa_graph::{BootstrapableGraphDeserializer, 
BootstrapableGraphSerializer}; use massa_hash::Hash; use massa_ledger_exports::{LedgerChanges, LedgerEntry, SetUpdateOrDelete}; use massa_ledger_worker::test_exports::create_final_ledger; @@ -327,27 +329,6 @@ pub fn get_bootstrap_config(bootstrap_public_key: PublicKey) -> BootstrapConfig } } -pub async fn wait_consensus_command( - consensus_command_receiver: &mut Receiver, - timeout: MassaTime, - filter_map: F, -) -> Option -where - F: Fn(ConsensusCommand) -> Option, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - cmd = consensus_command_receiver.recv() => match cmd { - Some(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => panic!("network event channel died") - }, - _ = &mut timer => return None - } - } -} - pub async fn wait_network_command( network_command_receiver: &mut Receiver, timeout: MassaTime, diff --git a/massa-client/base_config/config.toml b/massa-client/base_config/config.toml index 089474105d7..f1055375ceb 100644 --- a/massa-client/base_config/config.toml +++ b/massa-client/base_config/config.toml @@ -3,7 +3,6 @@ history_file_path = "config/.massa_history" timeout = 1000 [default_node] -#ip = "145.239.66.206" ip = "127.0.0.1" private_port = 33034 public_port = 33035 \ No newline at end of file diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml index abb6f5e3d83..7286e9bd634 100644 --- a/massa-consensus-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -7,34 +7,23 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +crossbeam-channel = "0.5.6" displaydoc = "0.2" +nom = "7.1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.21", features = ["full"] } -tempfile = { version = "3.3", optional = true } # use with testing feature -serde_json = { version = "1.0", optional = true } # use with testing feature -# custom modules -massa_cipher = { path = "../massa-cipher" } +#custom modules +massa_hash = { path = "../massa-hash"} massa_execution_exports = { path = "../massa-execution-exports" } -massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } massa_pool_exports = { path = "../massa-pool-exports" } massa_pos_exports = { path = "../massa-pos-exports" } -massa_protocol_exports = { path = "../massa-protocol-exports" } -massa_signature = { path = "../massa-signature" } -massa_time = { path = "../massa-time" } +massa_protocol_exports ={ path = "../massa-protocol-exports" } massa_storage = { path = "../massa-storage" } +massa_serialization = { path = "../massa-serialization" } +massa_time = { path = "../massa-time" } +massa_signature = { path = "../massa-signature" } -[dev-dependencies] -massa_models = { path = "../massa-models", features = ["testing"] } - -# for more information on what are the following features used for, see the cargo.toml at workspace level [features] -sandbox = [ "massa_protocol_exports/sandbox" ] -testing = [ - "massa_models/testing", - "massa_execution_exports/testing", - "massa_pool_exports/testing", - "massa_protocol_exports/testing", - "tempfile", - "serde_json" -] +testing = ["massa_models/testing", "massa_execution_exports/testing", "massa_pool_exports/testing", "massa_pos_exports/testing", "massa_protocol_exports/testing", "massa_storage/testing"] \ No newline at end of file diff --git a/massa-consensus-exports/src/block_graph_export.rs 
b/massa-consensus-exports/src/block_graph_export.rs new file mode 100644 index 00000000000..bd8f5d27069 --- /dev/null +++ b/massa-consensus-exports/src/block_graph_export.rs @@ -0,0 +1,29 @@ +use massa_models::{ + address::Address, + block::BlockId, + clique::Clique, + prehash::{PreHashMap, PreHashSet}, + slot::Slot, +}; + +use crate::block_status::{DiscardReason, ExportCompiledBlock}; + +/// Bootstrap compatible version of the block graph +#[derive(Debug, Clone)] +#[allow(clippy::type_complexity)] +pub struct BlockGraphExport { + /// Genesis blocks. + pub genesis_blocks: Vec<BlockId>, + /// Map of active blocks, where blocks are in their exported version. + pub active_blocks: PreHashMap<BlockId, ExportCompiledBlock>, + /// Finite cache of discarded blocks, in exported version `(slot, creator_address, parents)`. + pub discarded_blocks: PreHashMap<BlockId, (DiscardReason, (Slot, Address, Vec<BlockId>))>, + /// Best parents hashes in each thread. + pub best_parents: Vec<(BlockId, u64)>, + /// Latest final period and block hash in each thread. + pub latest_final_blocks_periods: Vec<(BlockId, u64)>, + /// Head of the incompatibility graph. + pub gi_head: PreHashMap<BlockId, PreHashSet<BlockId>>, + /// List of maximal cliques of compatible blocks. + pub max_cliques: Vec<Clique>, +} diff --git a/massa-consensus-exports/src/block_status.rs b/massa-consensus-exports/src/block_status.rs new file mode 100644 index 00000000000..2138a969489 --- /dev/null +++ b/massa-consensus-exports/src/block_status.rs @@ -0,0 +1,120 @@ +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::{Block, BlockId, WrappedHeader}, + prehash::PreHashSet, + slot::Slot, +}; +use massa_storage::Storage; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone)] +#[allow(clippy::large_enum_variant)] +pub enum HeaderOrBlock { + Header(WrappedHeader), + Block { + id: BlockId, + slot: Slot, + storage: Storage, + }, +} + +impl HeaderOrBlock { + /// Gets slot for that header or block + pub fn get_slot(&self) -> Slot { + match self { + HeaderOrBlock::Header(header) => header.content.slot, + HeaderOrBlock::Block { slot, .. } => *slot, + } + } +} + +/// Something can be discarded +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum DiscardReason { + /// Block is invalid, either structurally, or because of some incompatibility. The String contains the reason for info or debugging. + Invalid(String), + /// Block is incompatible with a final block. + Stale, + /// Block has enough fitness. + Final, +} + +/// Enum used in `BlockGraph`'s state machine +#[derive(Debug, Clone)] +pub enum BlockStatus { + /// The block/header has reached consensus but no consensus-level check has been performed. + /// It will be processed during the next iteration + Incoming(HeaderOrBlock), + /// The block's or header's slot is too much in the future.
+ /// It will be processed at the block/header slot + WaitingForSlot(HeaderOrBlock), + /// The block references an unknown Block id + WaitingForDependencies { + /// Given header/block + header_or_block: HeaderOrBlock, + /// includes self if it's only a header + unsatisfied_dependencies: PreHashSet<BlockId>, + /// Used to limit and sort the number of blocks/headers waiting for dependencies + sequence_number: u64, + }, + /// The block was checked and included in the blockgraph + Active { + a_block: Box<ActiveBlock>, + storage: Storage, + }, + /// The block was discarded and is kept to avoid reprocessing it + Discarded { + /// Just the slot of that block + slot: Slot, + /// Address of the creator of the block + creator: Address, + /// Ids of parents blocks + parents: Vec<BlockId>, + /// why it was discarded + reason: DiscardReason, + /// Used to limit and sort the number of blocks/headers waiting for dependencies + sequence_number: u64, + }, +} + +/// Block status in the graph that can be exported. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum ExportBlockStatus { + /// received but not yet graph processed + Incoming, + /// waiting for its slot + WaitingForSlot, + /// waiting for a missing dependency + WaitingForDependencies, + /// valid and not yet final + Active(Block), + /// immutable + Final(Block), + /// not part of the graph + Discarded(DiscardReason), +} + +/// The block version that can be exported. +/// Note that the detailed list of operation is not exported +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExportCompiledBlock { + /// Header of the corresponding block. + pub header: WrappedHeader, + /// For (i, set) in children, + /// set contains the headers' hashes + /// of blocks referencing exported block as a parent, + /// in thread i. + pub children: Vec<PreHashSet<BlockId>>, + /// Active or final + pub is_final: bool, +} + +/// Status +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum Status { + /// without enough fitness to be part of immutable history + Active, + /// with enough fitness to be part of immutable history + Final, +} diff --git a/massa-graph/src/bootstrapable_graph.rs b/massa-consensus-exports/src/bootstrapable_graph.rs similarity index 95% rename from massa-graph/src/bootstrapable_graph.rs rename to massa-consensus-exports/src/bootstrapable_graph.rs index a03e518596d..9f2f0f32a64 100644 --- a/massa-graph/src/bootstrapable_graph.rs +++ b/massa-consensus-exports/src/bootstrapable_graph.rs @@ -36,7 +36,7 @@ impl BootstrapableGraphSerializer { impl Serializer<BootstrapableGraph> for BootstrapableGraphSerializer { /// ## Example /// ```rust - /// use massa_graph::{BootstrapableGraph, BootstrapableGraphSerializer}; + /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphSerializer}; /// use massa_serialization::Serializer; /// use massa_hash::Hash; /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; @@ -115,7 +115,7 @@ impl BootstrapableGraphDeserializer { impl Deserializer<BootstrapableGraph> for BootstrapableGraphDeserializer { /// ## Example /// ```rust - /// use massa_graph::{BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; + /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; /// use massa_serialization::{Deserializer, Serializer, DeserializeError}; /// use massa_hash::Hash; /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; diff --git a/massa-consensus-exports/src/channels.rs
b/massa-consensus-exports/src/channels.rs new file mode 100644 index 00000000000..a895b7cfc97 --- /dev/null +++ b/massa-consensus-exports/src/channels.rs @@ -0,0 +1,18 @@ +use crossbeam_channel::Sender; +use massa_execution_exports::ExecutionController; +use massa_pool_exports::PoolController; +use massa_pos_exports::SelectorController; +use massa_protocol_exports::ProtocolCommandSender; + +use crate::events::ConsensusEvent; + +/// Contains a reference to the pool, selector and execution controller +/// Contains a channel to send info to protocol +#[derive(Clone)] +pub struct ConsensusChannels { + pub execution_controller: Box<dyn ExecutionController>, + pub selector_controller: Box<dyn SelectorController>, + pub pool_command_sender: Box<dyn PoolController>, + pub controller_event_tx: Sender<ConsensusEvent>, + pub protocol_command_sender: ProtocolCommandSender, +} diff --git a/massa-consensus-exports/src/commands.rs b/massa-consensus-exports/src/commands.rs deleted file mode 100644 index f43c900b9ed..00000000000 --- a/massa-consensus-exports/src/commands.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! Contains definitions of commands used by the controller -use massa_graph::{BlockGraphExport, BootstrapableGraph}; -use massa_models::api::BlockGraphStatus; -use massa_models::{block::BlockId, slot::Slot}; -use massa_models::{clique::Clique, stats::ConsensusStats}; -use massa_storage::Storage; -use tokio::sync::{mpsc, oneshot}; - -/// Commands that can be processed by consensus. -#[derive(Debug)] -pub enum ConsensusCommand { - /// Returns through a channel current blockgraph without block operations. - GetBlockGraphStatus { - /// optional start slot - slot_start: Option<Slot>, - /// optional end slot - slot_end: Option<Slot>, - /// response channel - response_tx: oneshot::Sender<BlockGraphExport>, - }, - /// Returns through a channel the graph statuses of a batch of blocks - GetBlockStatuses { - /// wanted block IDs - ids: Vec<BlockId>, - /// response channel - response_tx: oneshot::Sender<Vec<BlockGraphStatus>>, - }, - /// Returns the bootstrap state - GetBootstrapState(mpsc::Sender<Box<BootstrapableGraph>>), - /// get current stats on consensus - GetStats(oneshot::Sender<ConsensusStats>), - /// Get a block at a given slot in a blockclique - GetBlockcliqueBlockAtSlot { - /// wanted slot - slot: Slot, - /// response channel - response_tx: oneshot::Sender<Option<BlockId>>, - }, - /// Get a block at a given slot in a blockclique - GetLatestBlockcliqueBlockAtSlot { - /// wanted slot - slot: Slot, - /// response channel - response_tx: oneshot::Sender<BlockId>, - }, - /// Get the best parents and their period - GetBestParents { - /// response channel - response_tx: oneshot::Sender<Vec<(BlockId, u64)>>, - }, - /// Send a block - SendBlock { - /// block id - block_id: BlockId, - /// block slot - slot: Slot, - /// All the objects for the block - block_storage: Storage, - /// response channel - response_tx: oneshot::Sender<()>, - }, - /// Get cliques - GetCliques(oneshot::Sender<Vec<Clique>>), -} - -/// Events that are emitted by consensus. -#[derive(Debug, Clone)] -pub enum ConsensusManagementCommand {} diff --git a/massa-consensus-exports/src/consensus_controller.rs b/massa-consensus-exports/src/consensus_controller.rs deleted file mode 100644 index 6657e1d17b9..00000000000 --- a/massa-consensus-exports/src/consensus_controller.rs +++ /dev/null @@ -1,269 +0,0 @@ -//!
Copyright (c) 2022 MASSA LABS - -use massa_graph::{BlockGraphExport, BootstrapableGraph}; -use massa_models::api::BlockGraphStatus; -use massa_models::{block::BlockId, slot::Slot}; -use massa_models::{clique::Clique, stats::ConsensusStats}; -use massa_protocol_exports::ProtocolEventReceiver; -use massa_storage::Storage; -use std::collections::VecDeque; - -use tokio::{ - sync::{mpsc, oneshot}, - task::JoinHandle, -}; - -use crate::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - error::ConsensusResult as Result, - events::ConsensusEvent, - ConsensusError, -}; - -/// Consensus commands sender -/// TODO Make private -#[derive(Clone)] -pub struct ConsensusCommandSender(pub mpsc::Sender); - -impl ConsensusCommandSender { - /// Gets all the available information on the block graph returning a `BlockGraphExport`. - /// - /// # Arguments - /// * `slot_start`: optional slot start for slot-based filtering (included). - /// * `slot_end`: optional slot end for slot-based filtering (excluded). - pub async fn get_block_graph_status( - &self, - slot_start: Option, - slot_end: Option, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel::(); - self.0 - .send(ConsensusCommand::GetBlockGraphStatus { - slot_start, - slot_end, - response_tx, - }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_block_graph_status".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_block_graph_status response read error".to_string(), - ) - }) - } - - /// Gets all cliques. - /// - pub async fn get_cliques(&self) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .send(ConsensusCommand::GetCliques(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_cliques".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_cliques response read error".to_string(), - ) - }) - } - - /// Gets the graph statuses of a batch of blocks. - /// - /// # Arguments - /// * ids: array of block IDs - pub async fn get_block_statuses( - &self, - ids: &[BlockId], - ) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .send(ConsensusCommand::GetBlockStatuses { - ids: ids.to_vec(), - response_tx, - }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_block_statuses".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_block_statuses response read error".to_string(), - ) - }) - } - - /// get bootstrap snapshot - pub async fn get_bootstrap_state(&self) -> Result { - let (response_tx, mut response_rx) = mpsc::channel::>(10); - self.0 - .send(ConsensusCommand::GetBootstrapState(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_bootstrap_state".into(), - ) - })?; - Ok(*response_rx.recv().await.ok_or_else(|| { - ConsensusError::ReceiveChannelError( - "consensus command get_bootstrap_state response read error".to_string(), - ) - })?) 
- } - - /// get best parents - pub fn get_best_parents(&self) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .blocking_send(ConsensusCommand::GetBestParents { response_tx }) - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_best_parents".into(), - ) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_best_parents response read error".to_string(), - ) - }) - } - - /// get block id of a slot in a blockclique - pub async fn get_blockclique_block_at_slot( - &self, - slot: Slot, - ) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ConsensusCommand::GetBlockcliqueBlockAtSlot { slot, response_tx }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_blockclique_block_at_slot".into(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_blockclique_block_at_slot response read error".to_string(), - ) - }) - } - - /// get latest block id of a slot in a blockclique - pub fn get_latest_blockclique_block_at_slot( - &self, - slot: Slot, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .blocking_send(ConsensusCommand::GetLatestBlockcliqueBlockAtSlot { slot, response_tx }) - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_blockclique_block_at_slot".into(), - ) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_blockclique_block_at_slot response read error".to_string(), - ) - }) - } - - /// get current consensus stats - pub async fn get_stats(&self) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ConsensusCommand::GetStats(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_stats".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_stats response read error".to_string(), - ) - }) - } - - ///send block - pub fn send_block( - &self, - block_id: BlockId, - slot: Slot, - block_storage: Storage, - ) -> Result<(), ConsensusError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .blocking_send(ConsensusCommand::SendBlock { - block_id, - slot, - block_storage, - response_tx, - }) - .map_err(|_| { - ConsensusError::SendChannelError("send error consensus command send_block".into()) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command send_block response read error".to_string(), - ) - }) - } -} - -/// channel to receive consensus events -pub struct ConsensusEventReceiver(pub mpsc::Receiver); - -impl ConsensusEventReceiver { - /// wait for the next event - pub async fn wait_event(&mut self) -> Result { - self.0 - .recv() - .await - .ok_or(ConsensusError::ControllerEventError) - } - - /// drains remaining events and returns them in a `VecDeque` - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque { - let mut remaining_events: VecDeque = VecDeque::new(); - - while let Some(evt) = self.0.recv().await { - remaining_events.push_back(evt); - } - remaining_events - } -} - -/// Consensus manager -pub struct ConsensusManager { - /// protocol handler - pub join_handle: JoinHandle>, - /// consensus management 
sender - pub manager_tx: mpsc::Sender<ConsensusManagementCommand>, -} - -impl ConsensusManager { - /// stop consensus - pub async fn stop( - self, - consensus_event_receiver: ConsensusEventReceiver, - ) -> Result<ProtocolEventReceiver, ConsensusError> { - drop(self.manager_tx); - let _remaining_events = consensus_event_receiver.drain().await; - let protocol_event_receiver = self.join_handle.await??; - - Ok(protocol_event_receiver) - } -} diff --git a/massa-consensus-exports/src/controller_trait.rs b/massa-consensus-exports/src/controller_trait.rs new file mode 100644 index 00000000000..a8aa52b675a --- /dev/null +++ b/massa-consensus-exports/src/controller_trait.rs @@ -0,0 +1,123 @@ +use crate::block_graph_export::BlockGraphExport; +use crate::{bootstrapable_graph::BootstrapableGraph, error::ConsensusError}; +use massa_models::{ + api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + slot::Slot, + stats::ConsensusStats, + wrapped::Wrapped, +}; +use massa_storage::Storage; + +/// interface that communicates with the graph worker thread +pub trait ConsensusController: Send + Sync { + /// Get an export of a part of the graph + /// + /// # Arguments + /// * `start_slot`: the slot to start the export from, if None, the export starts from the genesis + /// * `end_slot`: the slot to end the export at, if None, the export ends at the current slot + /// + /// # Returns + /// The export of the graph + fn get_block_graph_status( + &self, + start_slot: Option<Slot>, + end_slot: Option<Slot>, + ) -> Result<BlockGraphExport, ConsensusError>; + + /// Get statuses of a list of blocks + /// + /// # Arguments + /// * `block_ids`: the list of block ids to get the status of + /// + /// # Returns + /// The statuses of the blocks sorted by the order of the input list + fn get_block_statuses(&self, ids: &[BlockId]) -> Vec<BlockGraphStatus>; + + /// Get all the cliques of the graph + /// + /// # Returns + /// The list of cliques + fn get_cliques(&self) -> Vec<Clique>; + + /// Get a graph to bootstrap from + /// + /// # Returns + /// The graph to bootstrap from + fn get_bootstrap_graph(&self) -> Result<BootstrapableGraph, ConsensusError>; + + /// Get the stats of the consensus + /// + /// # Returns + /// The stats of the consensus + fn get_stats(&self) -> Result<ConsensusStats, ConsensusError>; + + /// Get the best parents for the next block to be produced + /// + /// # Returns + /// The id of best parents for the next block to be produced along with their period + fn get_best_parents(&self) -> Vec<(BlockId, u64)>; + + /// Get the block id of the block at a specific slot in the blockclique + /// + /// # Arguments + /// * `slot`: the slot to get the block id of + /// + /// # Returns + /// The block id of the block at the specified slot if exists + fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option<BlockId>; + + /// Get the latest block, that is in the blockclique, in the thread of the given slot and before this `slot`. + /// + /// # Arguments: + /// * `slot`: the slot that will give us the thread and the upper bound + /// + /// # Returns: + /// The block id of the latest block in the thread of the given slot and before this slot + fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId; + + /// Register a block in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to register + /// * `slot`: the slot of the block + /// * `block_storage`: the storage that contains all the objects of the block + /// * `created`: is the block created by our node ?
+ fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool); + + /// Register a block header in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to register + /// * `header`: the header of the block to register + fn register_block_header(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>); + + /// Mark a block as invalid in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to mark as invalid + /// * `header`: the header of the block to mark as invalid + fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped<BlockHeader, BlockId>); + + /// Returns a boxed clone of self. + /// Useful to allow cloning `Box<dyn ConsensusController>`. + fn clone_box(&self) -> Box<dyn ConsensusController>; +} + +/// Allow cloning `Box<dyn ConsensusController>` +/// Uses `ConsensusController::clone_box` internally +impl Clone for Box<dyn ConsensusController> { + fn clone(&self) -> Box<dyn ConsensusController> { + self.clone_box() + } +} + +/// Consensus manager used to stop the consensus thread +pub trait ConsensusManager { + /// Stop the consensus thread + /// Note that we do not take self by value to consume it + /// because it is not allowed to move out of Box<dyn ConsensusManager> + /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. + fn stop(&mut self); +} diff --git a/massa-consensus-exports/src/error.rs b/massa-consensus-exports/src/error.rs index f74321ae503..417a1d6ce9d 100644 --- a/massa-consensus-exports/src/error.rs +++ b/massa-consensus-exports/src/error.rs @@ -1,15 +1,47 @@ // Copyright (c) 2022 MASSA LABS use displaydoc::Display; use massa_execution_exports::ExecutionError; -use massa_graph::error::GraphError; use massa_models::error::ModelsError; use massa_protocol_exports::ProtocolError; +use massa_time::TimeError; +use std::array::TryFromSliceError; use thiserror::Error; -use crate::events::ConsensusEvent; - -/// Consensus -pub type ConsensusResult<T, E = ConsensusError> = core::result::Result<T, E>; +/// Consensus error +#[non_exhaustive] +#[derive(Display, Error, Debug)] +pub enum ConsensusError { + /// execution error: {0} + ExecutionError(#[from] ExecutionError), + /// models error: {0} + ModelsError(#[from] ModelsError), + /// Could not create genesis block {0} + GenesisCreationError(String), + /// missing block {0} + MissingBlock(String), + /// missing operation {0} + MissingOperation(String), + /// there was an inconsistency between containers {0} + ContainerInconsistency(String), + /// fitness overflow + FitnessOverflow, + /// invalid ledger change: {0} + InvalidLedgerChange(String), + /// io error {0} + IOError(#[from] std::io::Error), + /// serde error + SerdeError(#[from] serde_json::Error), + /// Proof of stake cycle unavailable {0} + PosCycleUnavailable(String), + /// Ledger error {0} + LedgerError(#[from] LedgerError), + /// Massa time error {0} + MassaTimeError(#[from] TimeError), + /// transaction error {0} + TransactionError(String), + /// Protocol error {0} + ProtocolError(#[from] ProtocolError), +} /// Internal error #[non_exhaustive] @@ -19,50 +51,20 @@ pub enum InternalError { TransactionError(String), } -/// Consensus errors +/// Ledger error #[non_exhaustive] #[derive(Display, Error, Debug)] -pub enum ConsensusError { - /// execution error: {0} - ExecutionError(#[from] ExecutionError), +pub enum LedgerError { + /// amount overflow + AmountOverflowError, + /// ledger inconsistency error {0} + LedgerInconsistency(String), /// models error: {0} ModelsError(#[from] ModelsError), - /// configuration error: {0} - ConfigError(String), - /// Protocol error {0} - ProtocolError(#[from] Box<ProtocolError>), - /// failed retrieving consensus controller event -
ControllerEventError, - /// Join error {0} - JoinError(#[from] tokio::task::JoinError), - /// Time error {0} - TimeError(#[from] massa_time::TimeError), - /// there was an inconsistency between containers {0} - ContainerInconsistency(String), - /// Send channel error : {0} - SendChannelError(String), - /// Receive channel error : {0} - ReceiveChannelError(String), + /// try from slice error {0} + TryFromSliceError(#[from] TryFromSliceError), /// io error {0} IOError(#[from] std::io::Error), - /// missing block {0} - MissingBlock(String), - /// block creation error {0} - BlockCreationError(String), - /// error sending consensus event: {0} - TokioSendError(#[from] tokio::sync::mpsc::error::SendError), - /// channel error: {0} - ChannelError(String), - /// Graph error: {0} - GraphError(#[from] GraphError), - /// slot overflow - SlotOverflowError, - /// `MassaCipher` error: {0} - MassaCipherError(#[from] massa_cipher::CipherError), -} - -impl std::convert::From for ConsensusError { - fn from(err: massa_protocol_exports::ProtocolError) -> Self { - ConsensusError::ProtocolError(Box::new(err)) - } + /// serde error + SerdeError(#[from] serde_json::Error), } diff --git a/massa-graph/src/export_active_block.rs b/massa-consensus-exports/src/export_active_block.rs similarity index 97% rename from massa-graph/src/export_active_block.rs rename to massa-consensus-exports/src/export_active_block.rs index dbf93c88f14..05b8e49ca13 100644 --- a/massa-graph/src/export_active_block.rs +++ b/massa-consensus-exports/src/export_active_block.rs @@ -1,4 +1,4 @@ -use crate::error::{GraphError, GraphResult as Result}; +use crate::error::ConsensusError; use massa_hash::HashDeserializer; use massa_models::{ active_block::ActiveBlock, @@ -78,7 +78,7 @@ impl ExportActiveBlock { self, ref_storage: &Storage, thread_count: u8, - ) -> Result<(ActiveBlock, Storage), GraphError> { + ) -> Result<(ActiveBlock, Storage), ConsensusError> { // create resulting storage let mut storage = ref_storage.clone_without_refs(); @@ -95,7 +95,7 @@ impl ExportActiveBlock { .cloned() .collect::>() { - return Err(GraphError::MissingOperation( + return Err(ConsensusError::MissingOperation( "operation list mismatch on active block conversion".into(), )); } @@ -233,7 +233,7 @@ impl ExportActiveBlockDeserializer { impl Deserializer for ExportActiveBlockDeserializer { /// ## Example: /// ```rust - /// use massa_graph::export_active_block::{ExportActiveBlock, ExportActiveBlockDeserializer, ExportActiveBlockSerializer}; + /// use massa_consensus_exports::export_active_block::{ExportActiveBlock, ExportActiveBlockDeserializer, ExportActiveBlockSerializer}; /// use massa_models::{ledger_models::LedgerChanges, config::THREAD_COUNT, rolls::RollUpdates, block::{BlockId, Block, BlockSerializer, BlockHeader, BlockHeaderSerializer}, prehash::PreHashSet, endorsement::{Endorsement, EndorsementSerializerLW}, slot::Slot, wrapped::WrappedContent}; /// use massa_hash::Hash; /// use std::collections::HashSet; diff --git a/massa-consensus-exports/src/lib.rs b/massa-consensus-exports/src/lib.rs index c25fef460ae..13eb8124690 100644 --- a/massa-consensus-exports/src/lib.rs +++ b/massa-consensus-exports/src/lib.rs @@ -1,29 +1,22 @@ // Copyright (c) 2022 MASSA LABS -//! Consensus exports -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] +//! Definition and exports of the graph types and errors. 
-pub use consensus_controller::{ConsensusCommandSender, ConsensusEventReceiver, ConsensusManager}; -pub use error::ConsensusError; -pub use settings::ConsensusConfig; - -mod consensus_controller; +mod channels; +mod controller_trait; +mod settings; -/// consensus errors +pub mod block_graph_export; +pub mod block_status; +pub mod bootstrapable_graph; pub mod error; - -/// consensus settings -pub mod settings; - -/// consensus commands -pub mod commands; - -/// consensus events pub mod events; +pub mod export_active_block; + +pub use channels::ConsensusChannels; +pub use controller_trait::{ConsensusController, ConsensusManager}; +pub use settings::ConsensusConfig; -/// consensus test tools +/// Test utils #[cfg(feature = "testing")] +/// Exports related to tests as Mocks and configurations pub mod test_exports; diff --git a/massa-consensus-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs index f581026dfc3..bbaab35c25e 100644 --- a/massa-consensus-exports/src/settings.rs +++ b/massa-consensus-exports/src/settings.rs @@ -1,218 +1,49 @@ -// Copyright (c) 2022 MASSA LABS -#![allow(clippy::assertions_on_constants)] -//! Definition & Implementation of the consensus settings -//! ----------------------------------------------------- -//! -//! # Configurations -//! -//! * `setting`: read from user settings file -//! * `config`: merge of settings and hard-coded configuration that shouldn't be -//! modified by user. -//! -//! This file is allowed to use a lot of constants from `massa-models` as all -//! other files named `settings.rs` or `config.rs`. -//! -//! The `ConsensusSettings` is the most basic and complete configuration in the -//! node. You can get almost every configuration from that one. -//! -//! `From impl *`: -//! - `ConsensusConfig`: Create a configuration merging user settings and hard-coded values -//! (see `/massa-models/node_configuration/*`) -//! -//! `From<&ConsensusConfig> impl *`: -//! - `GraphConfig` -//! - `LedgerConfig` -//! - `ProofOfStakeConfig` -//! -//! > Development note: We clone the values on getting a configuration from another. -//! -//! # Usage of constants -//! -//! The default configuration is loaded from the `massa-models` crate. You shouldn't -//! write an hard-coded value in the following file but create a new value in -//! `default.rs` and the testing default equivalent value in `default_testing.rs`. See -//! `/node_configuration/mod.rs` documentation in `massa-models` sources for more -//! information. -//! -//! # Channels -//! -//! The following file contains the definition of the Channels structures used in -//! the current module. -//! -//! # Testing feature -//! -//! In unit test your allowed to use the `testing` feature flag that will -//! use the default values from `/node_configuration/default_testing.rs` in the -//! `massa-models` crate sources. -use massa_execution_exports::ExecutionController; -use massa_graph::settings::GraphConfig; -use massa_pool_exports::PoolController; -use massa_pos_exports::SelectorController; -use massa_protocol_exports::{ProtocolCommandSender, ProtocolEventReceiver}; use massa_signature::KeyPair; use massa_time::MassaTime; -use tokio::sync::mpsc; +use serde::{Deserialize, Serialize}; -use crate::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - events::ConsensusEvent, -}; - -/// Consensus full configuration (static + user defined) -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct ConsensusConfig { - /// Time in milliseconds when the blockclique started. 
+ /// Clock compensation + pub clock_compensation_millis: i64, + /// Genesis timestamp pub genesis_timestamp: MassaTime, - /// TESTNET: time when the blockclique is ended. - pub end_timestamp: Option, + /// Delta time between two period + pub t0: MassaTime, /// Number of threads pub thread_count: u8, - /// Time between the periods in the same thread. - pub t0: MassaTime, - /// `KeyPair` to sign genesis blocks. + /// Keypair to sign genesis blocks. pub genesis_key: KeyPair, /// Maximum number of blocks allowed in discarded blocks. pub max_discarded_blocks: usize, - /// If a block is `future_block_processing_max_periods` periods in the future, it is just discarded. + /// If a block `is future_block_processing_max_periods` periods in the future, it is just discarded. pub future_block_processing_max_periods: u64, /// Maximum number of blocks allowed in `FutureIncomingBlocks`. pub max_future_processing_blocks: usize, /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. pub max_dependency_blocks: usize, + /// max event send wait + pub max_send_wait: MassaTime, + /// old blocks are pruned every `block_db_prune_interval` + pub block_db_prune_interval: MassaTime, + /// max number of items returned while querying + pub max_item_return_count: usize, + /// Max gas per block for the execution configuration + pub max_gas_per_block: u64, /// Threshold for fitness. pub delta_f0: u64, /// Maximum operation validity period count pub operation_validity_periods: u64, /// cycle duration in periods pub periods_per_cycle: u64, - /// stats time span - pub stats_timespan: MassaTime, - /// max event send wait - pub max_send_wait: MassaTime, /// force keep at least this number of final periods in RAM for each thread pub force_keep_final_periods: u64, /// target number of endorsement per block pub endorsement_count: u32, - /// old blocks are pruned every `block_db_prune_interval` - pub block_db_prune_interval: MassaTime, - /// max number of items returned while querying - pub max_item_return_count: usize, - /// Max gas per block for the execution configuration - pub max_gas_per_block: u64, + /// TESTNET: time when the blockclique is ended. + pub end_timestamp: Option, + /// stats time span + pub stats_timespan: MassaTime, /// channel size pub channel_size: usize, } - -impl From<&ConsensusConfig> for GraphConfig { - fn from(cfg: &ConsensusConfig) -> Self { - GraphConfig { - thread_count: cfg.thread_count, - genesis_key: cfg.genesis_key.clone(), - max_discarded_blocks: cfg.max_discarded_blocks, - future_block_processing_max_periods: cfg.future_block_processing_max_periods, - max_future_processing_blocks: cfg.max_future_processing_blocks, - max_dependency_blocks: cfg.max_dependency_blocks, - delta_f0: cfg.delta_f0, - operation_validity_periods: cfg.operation_validity_periods, - periods_per_cycle: cfg.periods_per_cycle, - force_keep_final_periods: cfg.force_keep_final_periods, - endorsement_count: cfg.endorsement_count, - max_item_return_count: cfg.max_item_return_count, - } - } -} - -/// Communication asynchronous channels for the consensus worker -/// Contains consensus channels associated (protocol & execution) -/// Contains also controller asynchronous channels (command, manager receivers and event sender) -/// Contains a sender to the pool worker commands -pub struct ConsensusWorkerChannels { - /// Associated protocol command sender. - pub protocol_command_sender: ProtocolCommandSender, - /// Associated protocol event listener. 
- pub protocol_event_receiver: ProtocolEventReceiver, - /// Execution command sender. - pub execution_controller: Box, - /// Associated Pool command sender. - pub pool_command_sender: Box, - /// Selector controller - pub selector_controller: Box, - /// Channel receiving consensus commands. - pub controller_command_rx: mpsc::Receiver, - /// Channel sending out consensus events. - pub controller_event_tx: mpsc::Sender, - /// Channel receiving consensus management commands. - pub controller_manager_rx: mpsc::Receiver, -} - -/// Public channels associated to the consensus module. -/// Execution & Protocol Sender/Receiver -pub struct ConsensusChannels { - /// outgoing link to execution component - pub execution_controller: Box, - /// outgoing link to protocol component - pub protocol_command_sender: ProtocolCommandSender, - /// incoming link to protocol component - pub protocol_event_receiver: ProtocolEventReceiver, - /// outgoing link to pool component - pub pool_command_sender: Box, - /// selector controller - pub selector_controller: Box, -} - -#[cfg(feature = "testing")] -/// -/// Create the default value of `ConsensusConfig`. -/// -/// Configuration has default values described in crate `massa-models`. -/// The most of `ConsensusConfig` values have in test mode a default value. -/// -/// You can create a `ConsensusConfig` with classic default values and redefining -/// dynamically the values of desired parameters: -/// -/// ```ignore -/// let cfg = ConsensusConfig { -/// max_discarded_blocks: 25, -/// ..Default::default() -/// }; -/// ``` -/// -/// You can also look at the divers `default()` implementation bellow. For example that -/// one is used to initialize the _default paths_ : -/// -/// ```ignore -/// let cfg = ConsensusConfig { -/// max_discarded_blocks: 21, -/// ..ConsensusConfig::default_with_paths(), -/// }; -/// ``` -/// -impl Default for ConsensusConfig { - fn default() -> Self { - use massa_models::config::*; - Self { - // reset genesis timestamp because we are in test mode that can take a while to process - genesis_timestamp: MassaTime::now(0) - .expect("Impossible to reset the timestamp in test"), - end_timestamp: *END_TIMESTAMP, - thread_count: THREAD_COUNT, - t0: T0, - genesis_key: GENESIS_KEY.clone(), - max_discarded_blocks: 100, - future_block_processing_max_periods: 2, - max_future_processing_blocks: 10, - max_dependency_blocks: 100, - delta_f0: DELTA_F0, - operation_validity_periods: OPERATION_VALIDITY_PERIODS, - periods_per_cycle: PERIODS_PER_CYCLE, - stats_timespan: MassaTime::from_millis(1000), - max_send_wait: MassaTime::from_millis(1000), - force_keep_final_periods: 20, - endorsement_count: ENDORSEMENT_COUNT, - block_db_prune_interval: MassaTime::from_millis(1000), - max_item_return_count: 100, - max_gas_per_block: MAX_GAS_PER_BLOCK, - channel_size: CHANNEL_SIZE, - } - } -} diff --git a/massa-consensus-exports/src/test_exports/config.rs b/massa-consensus-exports/src/test_exports/config.rs new file mode 100644 index 00000000000..5520031b9a4 --- /dev/null +++ b/massa-consensus-exports/src/test_exports/config.rs @@ -0,0 +1,35 @@ +use massa_models::config::constants::{ + CHANNEL_SIZE, DELTA_F0, ENDORSEMENT_COUNT, GENESIS_KEY, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, + OPERATION_VALIDITY_PERIODS, PERIODS_PER_CYCLE, T0, THREAD_COUNT, +}; +use massa_time::MassaTime; + +use crate::ConsensusConfig; + +impl Default for ConsensusConfig { + fn default() -> Self { + Self { + clock_compensation_millis: 0, + genesis_timestamp: *GENESIS_TIMESTAMP, + t0: T0, + thread_count: 
THREAD_COUNT, + genesis_key: GENESIS_KEY.clone(), + max_discarded_blocks: 10000, + future_block_processing_max_periods: 100, + max_future_processing_blocks: 100, + max_dependency_blocks: 2048, + max_send_wait: MassaTime::from_millis(100), + block_db_prune_interval: MassaTime::from_millis(5000), + max_item_return_count: 100, + max_gas_per_block: MAX_GAS_PER_BLOCK, + delta_f0: DELTA_F0, + operation_validity_periods: OPERATION_VALIDITY_PERIODS, + periods_per_cycle: PERIODS_PER_CYCLE, + force_keep_final_periods: 20, + endorsement_count: ENDORSEMENT_COUNT, + end_timestamp: None, + stats_timespan: MassaTime::from_millis(60000), + channel_size: CHANNEL_SIZE, + } + } +} diff --git a/massa-consensus-exports/src/test_exports/mock.rs b/massa-consensus-exports/src/test_exports/mock.rs index 8be461ec5cc..38962bb09b9 100644 --- a/massa-consensus-exports/src/test_exports/mock.rs +++ b/massa-consensus-exports/src/test_exports/mock.rs @@ -1,55 +1,244 @@ // Copyright (c) 2022 MASSA LABS -use massa_models::config::CHANNEL_SIZE; +use std::sync::{ + mpsc::{self, Receiver}, + Arc, Mutex, +}; + +use massa_models::{ + api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + slot::Slot, + stats::ConsensusStats, + wrapped::Wrapped, +}; +use massa_storage::Storage; use massa_time::MassaTime; -use tokio::{sync::mpsc, time::sleep}; use crate::{ - commands::ConsensusCommand, events::ConsensusEvent, ConsensusCommandSender, - ConsensusEventReceiver, + block_graph_export::BlockGraphExport, bootstrapable_graph::BootstrapableGraph, + error::ConsensusError, ConsensusController, }; -/// Mock for the consensus controller. -/// We will receive the commands in this mock and accept callback functions depending of the command in `wait_command`. -/// We will also send the events that can be received by listening to the `ConsensusEventReceiver`. -pub struct MockConsensusController { - /// Command receiver - pub consensus_command_rx: mpsc::Receiver, - _consensus_event_tx: mpsc::Sender, +/// Test tool to mock graph controller responses +pub struct ConsensusEventReceiver(pub Receiver); + +/// List of possible messages you can receive from the mock +/// Each variant corresponds to a unique method in `ConsensusController`, +/// Some variants wait for a response on their `response_tx` field, if present. +/// See the documentation of `ConsensusController` for details on parameters and return values. +#[derive(Clone, Debug)] +pub enum MockConsensusControllerMessage { + GetBlockStatuses { + block_ids: Vec, + response_tx: mpsc::Sender>, + }, + GetBlockGraphStatuses { + start_slot: Option, + end_slot: Option, + response_tx: mpsc::Sender>, + }, + GetCliques { + response_tx: mpsc::Sender>, + }, + GetBootstrapableGraph { + response_tx: mpsc::Sender>, + }, + GetStats { + response_tx: mpsc::Sender>, + }, + GetBestParents { + response_tx: mpsc::Sender>, + }, + GetBlockcliqueBlockAtSlot { + slot: Slot, + response_tx: mpsc::Sender>, + }, + GetLatestBlockcliqueBlockAtSlot { + slot: Slot, + response_tx: mpsc::Sender, + }, + MarkInvalidBlock { + block_id: BlockId, + header: Wrapped, + }, + RegisterBlock { + block_id: BlockId, + slot: Slot, + block_storage: Storage, + created: bool, + }, + RegisterBlockHeader { + block_id: BlockId, + header: Wrapped, + }, } +/// A mocked graph controller that will intercept calls on its methods +/// and emit corresponding `MockConsensusControllerMessage` messages through a MPSC in a thread-safe way. 
+/// For messages with a `response_tx` field, the mock will await a response through their `response_tx` channel +/// in order to simulate returning this value at the end of the call. +#[derive(Clone)] +pub struct MockConsensusController(Arc>>); + impl MockConsensusController { - /// Create a new mock consensus controller. - pub fn new_with_receiver() -> (Self, ConsensusCommandSender, ConsensusEventReceiver) { - let (consensus_command_tx, consensus_command_rx) = - mpsc::channel::(CHANNEL_SIZE); - let (consensus_event_tx, consensus_event_rx) = - mpsc::channel::(CHANNEL_SIZE); + /// Create a new pair (mock graph controller, mpsc receiver for emitted messages) + /// Note that unbounded mpsc channels are used + pub fn new_with_receiver() -> (Box, ConsensusEventReceiver) { + let (tx, rx) = mpsc::channel(); ( - MockConsensusController { - consensus_command_rx, - _consensus_event_tx: consensus_event_tx, - }, - ConsensusCommandSender(consensus_command_tx), - ConsensusEventReceiver(consensus_event_rx), + Box::new(MockConsensusController(Arc::new(Mutex::new(tx)))), + ConsensusEventReceiver(rx), ) } +} +impl ConsensusEventReceiver { /// wait command - pub async fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option + pub fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option where - F: Fn(ConsensusCommand) -> Option, + F: Fn(MockConsensusControllerMessage) -> Option, { - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - cmd_opt = self.consensus_command_rx.recv() => match cmd_opt { - Some(orig_cmd) => if let Some(res_cmd) = filter_map(orig_cmd) { return Some(res_cmd); }, - None => panic!("Unexpected closure of network command channel."), - }, - _ = &mut timer => return None - } + match self.0.recv_timeout(timeout.into()) { + Ok(msg) => filter_map(msg), + Err(_) => None, } } } + +/// Implements all the methods of the `ConsensusController` trait, +/// but simply make them emit a `MockConsensusControllerMessage`. +/// If the message contains a `response_tx`, +/// a response from that channel is read and returned as return value. +/// See the documentation of `ConsensusController` for details on each function. 
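Editor's note: as a usage illustration (not part of this diff), a test could drive the mock as sketched below: the code under test calls the boxed controller, while a helper thread answers the intercepted message through its `response_tx`. This assumes the crate's `testing` feature is enabled and that `ConsensusStats` still has the fields used by the removed worker code elsewhere in this diff; local variable names are illustrative only.

```rust
// Hypothetical test sketch: answer a `GetStats` call made through the mock.
use std::thread;

use massa_consensus_exports::test_exports::{
    MockConsensusController, MockConsensusControllerMessage,
};
use massa_models::stats::ConsensusStats;
use massa_time::MassaTime;

fn main() {
    let (controller, event_receiver) = MockConsensusController::new_with_receiver();

    // Answer the next intercepted call from a helper thread so that the
    // blocking `get_stats()` call below can complete.
    let answerer = thread::spawn(move || {
        let mut event_receiver = event_receiver;
        event_receiver.wait_command(MassaTime::from_millis(1000), |msg| match msg {
            MockConsensusControllerMessage::GetStats { response_tx } => {
                // Field names taken from the stats built by the removed worker code.
                let stats = ConsensusStats {
                    final_block_count: 0,
                    stale_block_count: 0,
                    clique_count: 1,
                    start_timespan: MassaTime::from_millis(0),
                    end_timespan: MassaTime::from_millis(0),
                };
                response_tx.send(Ok(stats)).unwrap();
                Some(())
            }
            _ => None,
        })
    });

    // The mock forwards the call as a message and blocks until the answer arrives.
    let stats = controller.get_stats().expect("mocked stats");
    assert_eq!(stats.clique_count, 1);
    answerer.join().unwrap().expect("no GetStats call intercepted");
}
```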
+impl ConsensusController for MockConsensusController { + fn get_block_graph_status( + &self, + start_slot: Option, + end_slot: Option, + ) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBlockGraphStatuses { + start_slot, + end_slot, + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_block_statuses(&self, ids: &[BlockId]) -> Vec { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBlockStatuses { + block_ids: ids.to_vec(), + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_cliques(&self) -> Vec { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetCliques { response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_bootstrap_graph(&self) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBootstrapableGraph { response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_stats(&self) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetStats { response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_best_parents(&self) -> Vec<(BlockId, u64)> { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBestParents { response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBlockcliqueBlockAtSlot { slot, response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send( + MockConsensusControllerMessage::GetLatestBlockcliqueBlockAtSlot { + slot, + response_tx, + }, + ) + .unwrap(); + response_rx.recv().unwrap() + } + + fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::MarkInvalidBlock { block_id, header }) + .unwrap(); + } + + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::RegisterBlock { + block_id, + slot, + block_storage, + created, + }) + .unwrap(); + } + + fn register_block_header(&self, block_id: BlockId, header: Wrapped) { + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::RegisterBlockHeader { block_id, header }) + .unwrap(); + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } +} diff --git a/massa-consensus-exports/src/test_exports/mod.rs b/massa-consensus-exports/src/test_exports/mod.rs index a2f80855a21..aeddfb526b0 100644 --- a/massa-consensus-exports/src/test_exports/mod.rs +++ b/massa-consensus-exports/src/test_exports/mod.rs @@ -1,8 +1,7 @@ -//! 
Copyright (c) 2022 MASSA LABS +// Copyright (c) 2022 MASSA LABS -/// Mock of the execution module +mod config; mod mock; -/// Tooling to make test using a consensus -mod tools; + +pub use config::*; pub use mock::*; -pub use tools::*; diff --git a/massa-consensus-exports/src/test_exports/tools.rs b/massa-consensus-exports/src/test_exports/tools.rs deleted file mode 100644 index d7d6861ced3..00000000000 --- a/massa-consensus-exports/src/test_exports/tools.rs +++ /dev/null @@ -1,70 +0,0 @@ -use std::collections::HashMap; - -use massa_cipher::encrypt; -use massa_models::{ - address::Address, - ledger_models::LedgerData, - rolls::{RollCounts, RollUpdate, RollUpdates}, -}; -use massa_signature::KeyPair; -use tempfile::NamedTempFile; - -/// Password used for encryption in tests -pub const TEST_PASSWORD: &str = "PASSWORD"; - -/// generate a named temporary JSON ledger file -pub fn generate_ledger_file(ledger_vec: &HashMap) -> NamedTempFile { - use std::io::prelude::*; - let ledger_file_named = NamedTempFile::new().expect("cannot create temp file"); - serde_json::to_writer_pretty(ledger_file_named.as_file(), &ledger_vec) - .expect("unable to write ledger file"); - ledger_file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - ledger_file_named -} - -/// generate staking key temp file from array of keypair -pub fn generate_staking_keys_file(staking_keys: &[KeyPair]) -> NamedTempFile { - use std::io::prelude::*; - let file_named = NamedTempFile::new().expect("cannot create temp file"); - let json = serde_json::to_string(&staking_keys).expect("json serialization failed"); - let encrypted_data = encrypt(TEST_PASSWORD, json.as_bytes()).expect("encryption failed"); - std::fs::write(file_named.as_ref(), encrypted_data).expect("data writing failed"); - file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - file_named -} - -/// generate a named temporary JSON initial rolls file -pub fn generate_roll_counts_file(roll_counts: &RollCounts) -> NamedTempFile { - use std::io::prelude::*; - let roll_counts_file_named = NamedTempFile::new().expect("cannot create temp file"); - serde_json::to_writer_pretty(roll_counts_file_named.as_file(), &roll_counts.0) - .expect("unable to write ledger file"); - roll_counts_file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - roll_counts_file_named -} - -/// generate a default named temporary JSON initial rolls file, -/// assuming two threads. 
-pub fn generate_default_roll_counts_file(stakers: Vec) -> NamedTempFile { - let mut roll_counts = RollCounts::default(); - for key in stakers.iter() { - let address = Address::from_public_key(&key.get_public_key()); - let update = RollUpdate { - roll_purchases: 1, - roll_sales: 0, - }; - let mut updates = RollUpdates::default(); - updates.apply(&address, &update).unwrap(); - roll_counts.apply_updates(&updates).unwrap(); - } - generate_roll_counts_file(&roll_counts) -} diff --git a/massa-consensus-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml index 679d1ae75b8..a1cc2dd58ed 100644 --- a/massa-consensus-worker/Cargo.toml +++ b/massa-consensus-worker/Cargo.toml @@ -7,49 +7,21 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde_json = "1.0" -tokio = { version = "1.21", features = ["full"] } +displaydoc = "0.2" +num = { version = "0.4", features = ["serde"] } tracing = "0.1" -# custom modules +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +parking_lot = { version = "0.12", features = ["deadlock_detection"] } +#custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_graph = { path = "../massa-graph" } -massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } -massa_protocol_exports = { path = "../massa-protocol-exports" } +massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } - -[dev-dependencies] -serial_test = "0.9" -#tempfile = "3.3" -parking_lot = { version = "0.12", features = ["deadlock_detection"] } -massa_models = { path = "../massa-models", features = ["testing"] } -massa_execution_exports = { path = "../massa-execution-exports", features = [ - "testing", -] } -massa_consensus_exports = { path = "../massa-consensus-exports", features = [ - "testing", -] } -massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"]} -massa_pos_worker = { path = "../massa-pos-worker" } -massa_pool_exports = { path = "../massa-pool-exports" } -massa_serialization = { path = "../massa-serialization"} massa_hash = { path = "../massa-hash" } -massa_signature = { path = "../massa-signature" } -massa_cipher = { path = "../massa-cipher" } -massa_storage = { path = "../massa-storage" } -#num = { version = "0.4", features = ["serde"] } -#rand = "0.8" -#futures = "0.3" - +massa_logging = { path = "../massa-logging" } -# for more information on what are the following features used for, see the cargo.toml at workspace level [features] -sandbox = ["massa_consensus_exports/sandbox", "massa_protocol_exports/sandbox" ] -testing = [ - "massa_consensus_exports/testing", - "massa_execution_exports/testing", - "massa_models/testing", - "massa_pool_exports/testing", - "massa_protocol_exports/testing" -] \ No newline at end of file + +sandbox = [] \ No newline at end of file diff --git a/massa-consensus-worker/src/commands.rs b/massa-consensus-worker/src/commands.rs new file mode 100644 index 00000000000..4ca74d79f94 --- /dev/null +++ b/massa-consensus-worker/src/commands.rs @@ -0,0 +1,13 @@ +use massa_models::{ + block::{BlockHeader, BlockId}, + slot::Slot, + wrapped::Wrapped, +}; +use massa_storage::Storage; + +#[allow(clippy::large_enum_variant)] +pub enum ConsensusCommand { + RegisterBlock(BlockId, Slot, Storage, bool), + RegisterBlockHeader(BlockId, Wrapped), + MarkInvalidBlock(BlockId, Wrapped), +} diff --git 
a/massa-consensus-worker/src/consensus_worker.rs b/massa-consensus-worker/src/consensus_worker.rs deleted file mode 100644 index 02382805693..00000000000 --- a/massa-consensus-worker/src/consensus_worker.rs +++ /dev/null @@ -1,788 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use massa_consensus_exports::{ - commands::ConsensusCommand, - error::{ConsensusError, ConsensusResult as Result}, - settings::ConsensusWorkerChannels, - ConsensusConfig, -}; -use massa_graph::{BlockGraph, BlockGraphExport}; -use massa_models::timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}; -use massa_models::{address::Address, block::BlockId, slot::Slot}; -use massa_models::{block::WrappedHeader, prehash::PreHashMap}; -use massa_models::{prehash::PreHashSet, stats::ConsensusStats}; -use massa_protocol_exports::{ProtocolEvent, ProtocolEventReceiver}; -use massa_storage::Storage; -use massa_time::MassaTime; -use std::{ - cmp::max, - collections::{HashMap, VecDeque}, -}; -use tokio::time::{sleep, sleep_until, Sleep}; -use tracing::{info, warn}; - -#[cfg(not(feature = "sandbox"))] -use massa_consensus_exports::events::ConsensusEvent; -#[cfg(not(feature = "sandbox"))] -use tokio::sync::mpsc::error::SendTimeoutError; -#[cfg(not(feature = "sandbox"))] -use tracing::debug; - -/// Manages consensus. -pub struct ConsensusWorker { - /// Consensus Configuration - cfg: ConsensusConfig, - /// Associated channels, sender and receivers - channels: ConsensusWorkerChannels, - /// Database containing all information about blocks, the `BlockGraph` and cliques. - block_db: BlockGraph, - /// Previous slot. - previous_slot: Option, - /// Next slot - next_slot: Slot, - /// blocks we want - wishlist: PreHashMap>, - /// latest final periods - latest_final_periods: Vec, - /// clock compensation - clock_compensation: i64, - /// Final block stats `(time, creator, is_from_protocol)` - final_block_stats: VecDeque<(MassaTime, Address, bool)>, - /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` - protocol_blocks: VecDeque<(MassaTime, BlockId)>, - /// Stale block timestamp - stale_block_stats: VecDeque, - /// the time span considered for stats - stats_history_timespan: MassaTime, - /// the time span considered for desynchronization detection - #[allow(dead_code)] - stats_desync_detection_timespan: MassaTime, - /// time at which the node was launched (used for desynchronization detection) - launch_time: MassaTime, - /// previous blockclique notified to Execution - prev_blockclique: PreHashMap, -} - -impl ConsensusWorker { - /// Creates a new consensus controller. - /// Initiates the random selector. - /// - /// # Arguments - /// * `cfg`: consensus configuration. - /// * `protocol_command_sender`: associated protocol controller - /// * `block_db`: Database containing all information about blocks, the blockgraph and cliques. - /// * `controller_command_rx`: Channel receiving consensus commands. - /// * `controller_event_tx`: Channel sending out consensus events. - /// * `controller_manager_rx`: Channel receiving consensus management commands. 
- pub(crate) async fn new( - cfg: ConsensusConfig, - channels: ConsensusWorkerChannels, - block_db: BlockGraph, - clock_compensation: i64, - ) -> Result { - let now = MassaTime::now(clock_compensation)?; - let previous_slot = get_latest_block_slot_at_timestamp( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - now, - )?; - let next_slot = previous_slot.map_or(Ok(Slot::new(0u64, 0u8)), |s| { - s.get_next_slot(cfg.thread_count) - })?; - let latest_final_periods: Vec = block_db - .get_latest_final_blocks_periods() - .iter() - .map(|(_block_id, period)| *period) - .collect(); - info!( - "Started node at time {}, cycle {}, period {}, thread {}", - now.to_utc_string(), - next_slot.get_cycle(cfg.periods_per_cycle), - next_slot.period, - next_slot.thread, - ); - if cfg.genesis_timestamp > now { - let (days, hours, mins, secs) = cfg - .genesis_timestamp - .saturating_sub(now) - .days_hours_mins_secs()?; - info!( - "{} days, {} hours, {} minutes, {} seconds remaining to genesis", - days, hours, mins, secs, - ) - } - massa_trace!("consensus.consensus_worker.new", {}); - - // desync detection timespan - let stats_desync_detection_timespan = cfg.t0.checked_mul(cfg.periods_per_cycle * 2)?; - - // Notify execution module of current blockclique and all final blocks. - // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync - // because the two modules run concurrently and out of sync. - let mut block_storage: PreHashMap = Default::default(); - let notify_finals: HashMap = block_db - .get_all_final_blocks() - .into_iter() - .map(|(b_id, slot)| { - let (_a_block, storage) = block_db - .get_active_block(&b_id) - .expect("active block missing from block_db"); - block_storage.insert(b_id, storage.clone()); - (slot, b_id) - }) - .collect(); - let notify_blockclique: HashMap = block_db - .get_blockclique() - .iter() - .map(|b_id| { - let (a_block, storage) = block_db - .get_active_block(b_id) - .expect("active block missing from block_db"); - let slot = a_block.slot; - block_storage.insert(*b_id, storage.clone()); - (slot, *b_id) - }) - .collect(); - let prev_blockclique: PreHashMap = - notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect(); - channels.execution_controller.update_blockclique_status( - notify_finals, - Some(notify_blockclique), - block_storage, - ); - - Ok(ConsensusWorker { - block_db, - previous_slot, - next_slot, - wishlist: Default::default(), - latest_final_periods, - clock_compensation, - channels, - final_block_stats: Default::default(), - protocol_blocks: Default::default(), - stale_block_stats: VecDeque::new(), - stats_desync_detection_timespan, - stats_history_timespan: max(stats_desync_detection_timespan, cfg.stats_timespan), - cfg, - launch_time: MassaTime::now(clock_compensation)?, - prev_blockclique, - }) - } - - /// Consensus work is managed here. - /// It's mostly a tokio::select within a loop. - pub async fn run_loop(mut self) -> Result { - // signal initial state to pool - self.channels - .pool_command_sender - .notify_final_cs_periods(&self.latest_final_periods); - - // set slot timer - let slot_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? 
- .estimate_instant(self.clock_compensation)?; - let next_slot_timer = sleep_until(tokio::time::Instant::from(slot_deadline)); - - tokio::pin!(next_slot_timer); - - // set prune timer - let prune_timer = sleep(self.cfg.block_db_prune_interval.to_duration()); - tokio::pin!(prune_timer); - - loop { - massa_trace!("consensus.consensus_worker.run_loop.select", {}); - /* - select! without the "biased" modifier will randomly select the 1st branch to check, - then will check the next ones in the order they are written. - We choose this order: - * manager commands: low freq, avoid having to wait to stop - * consensus commands (low to medium freq): respond quickly - * slot timer (low freq, timing is important but does not have to be perfect either) - * prune timer: low freq, timing not important but should not wait too long - * receive protocol events (high freq) - */ - tokio::select! { - // listen to manager commands - cmd = self.channels.controller_manager_rx.recv() => { - massa_trace!("consensus.consensus_worker.run_loop.select.manager", {}); - match cmd { - None => break, - Some(_) => {} - }} - - // listen consensus commands - Some(cmd) = self.channels.controller_command_rx.recv() => { - massa_trace!("consensus.consensus_worker.run_loop.consensus_command", {}); - self.process_consensus_command(cmd).await? - }, - - // slot timer - _ = &mut next_slot_timer => { - massa_trace!("consensus.consensus_worker.run_loop.select.slot_tick", {}); - if let Some(end) = self.cfg.end_timestamp { - if MassaTime::now(self.clock_compensation)? > end { - info!("This episode has come to an end, please get the latest testnet node version to continue"); - break; - } - } - self.slot_tick(&mut next_slot_timer).await?; - }, - - // prune timer - _ = &mut prune_timer=> { - massa_trace!("consensus.consensus_worker.run_loop.prune_timer", {}); - // prune block db - let _discarded_final_blocks = self.block_db.prune()?; - - // reset timer - prune_timer.set(sleep( self.cfg.block_db_prune_interval.to_duration())) - } - - // receive protocol controller events - evt = self.channels.protocol_event_receiver.wait_event() =>{ - massa_trace!("consensus.consensus_worker.run_loop.select.protocol_event", {}); - match evt { - Ok(event) => self.process_protocol_event(event).await?, - Err(err) => return Err(ConsensusError::ProtocolError(Box::new(err))) - } - }, - } - } - // after this curly brace you can find the end of the loop - Ok(self.channels.protocol_event_receiver) - } - - /// this function is called around every slot tick - /// it checks for cycle increment - /// detects desynchronization - /// produce quite more logs than actual stuff - async fn slot_tick(&mut self, next_slot_timer: &mut std::pin::Pin<&mut Sleep>) -> Result<()> { - let now = MassaTime::now(self.clock_compensation)?; - let observed_slot = get_latest_block_slot_at_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - now, - )?; - - if observed_slot < Some(self.next_slot) { - // reset timer for next slot - let sleep_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? 
- .estimate_instant(self.clock_compensation)?; - next_slot_timer.set(sleep_until(tokio::time::Instant::from(sleep_deadline))); - return Ok(()); - } - - let observed_slot = observed_slot.unwrap(); // does not panic, checked above - - massa_trace!("consensus.consensus_worker.slot_tick", { - "slot": observed_slot - }); - - let previous_cycle = self - .previous_slot - .map(|s| s.get_cycle(self.cfg.periods_per_cycle)); - let observed_cycle = observed_slot.get_cycle(self.cfg.periods_per_cycle); - if previous_cycle.is_none() { - // first cycle observed - info!("Massa network has started ! 🎉") - } - if previous_cycle < Some(observed_cycle) { - info!("Started cycle {}", observed_cycle); - } - - // check if there are any final blocks is coming from protocol - // if none => we are probably desync - #[cfg(not(feature = "sandbox"))] - if now - > max(self.cfg.genesis_timestamp, self.launch_time) - .saturating_add(self.stats_desync_detection_timespan) - && !self - .final_block_stats - .iter() - .any(|(time, _, is_from_protocol)| { - time > &now.saturating_sub(self.stats_desync_detection_timespan) - && *is_from_protocol - }) - { - warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); - let _ = self.send_consensus_event(ConsensusEvent::NeedSync).await; - } - - self.previous_slot = Some(observed_slot); - self.next_slot = observed_slot.get_next_slot(self.cfg.thread_count)?; - - // signal tick to block graph - self.block_db.slot_tick(Some(observed_slot))?; - - // take care of block db changes - self.block_db_changed().await?; - - // reset timer for next slot - let sleep_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? - .estimate_instant(self.clock_compensation)?; - next_slot_timer.set(sleep_until(tokio::time::Instant::from(sleep_deadline))); - - // prune stats - self.prune_stats()?; - - Ok(()) - } - - /// Manages given consensus command. - /// They can come from the API or the bootstrap server - /// Please refactor me - /// - /// # Argument - /// * `cmd`: consensus command to process - async fn process_consensus_command(&mut self, cmd: ConsensusCommand) -> Result<()> { - match cmd { - ConsensusCommand::GetBlockGraphStatus { - slot_start, - slot_end, - response_tx, - } => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_block_graph_status", - {} - ); - if response_tx - .send(BlockGraphExport::extract_from( - &self.block_db, - slot_start, - slot_end, - )?) 
- .is_err() - { - warn!("consensus: could not send GetBlockGraphStatus answer"); - } - Ok(()) - } - // gets the graph status of a batch of blocks - ConsensusCommand::GetBlockStatuses { ids, response_tx } => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_block_statuses", - {} - ); - let res: Vec<_> = ids - .iter() - .map(|id| self.block_db.get_block_status(id)) - .collect(); - if response_tx.send(res).is_err() { - warn!("consensus: could not send get_block_statuses answer"); - } - Ok(()) - } - ConsensusCommand::GetCliques(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_cliques", - {} - ); - if response_tx.send(self.block_db.get_cliques()).is_err() { - warn!("consensus: could not send GetSelectionDraws response"); - } - Ok(()) - } - ConsensusCommand::GetBootstrapState(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_bootstrap_state", - {} - ); - let resp = self.block_db.export_bootstrap_graph()?; - if response_tx.send(Box::new(resp)).await.is_err() { - warn!("consensus: could not send GetBootstrapState answer"); - } - Ok(()) - } - ConsensusCommand::GetStats(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_stats", - {} - ); - let res = self.get_stats()?; - if response_tx.send(res).is_err() { - warn!("consensus: could not send get_stats response"); - } - Ok(()) - } - ConsensusCommand::GetBestParents { response_tx } => { - if response_tx - .send(self.block_db.get_best_parents().clone()) - .is_err() - { - warn!("consensus: could not send get best parents response"); - } - Ok(()) - } - ConsensusCommand::GetBlockcliqueBlockAtSlot { slot, response_tx } => { - let res = self.block_db.get_blockclique_block_at_slot(&slot); - if response_tx.send(res).is_err() { - warn!("consensus: could not send get block clique block at slot response"); - } - Ok(()) - } - ConsensusCommand::GetLatestBlockcliqueBlockAtSlot { slot, response_tx } => { - let res = self.block_db.get_latest_blockclique_block_at_slot(&slot); - if response_tx.send(res).is_err() { - warn!( - "consensus: could not send get latest block clique block at slot response" - ); - } - Ok(()) - } - ConsensusCommand::SendBlock { - block_id, - slot, - block_storage, - response_tx, - } => { - self.block_db - .incoming_block(block_id, slot, self.previous_slot, block_storage)?; - - if response_tx.send(()).is_err() { - warn!("consensus: could not send get block clique block at slot response"); - } - Ok(()) - } - } - } - - /// retrieve stats - /// Used in response to a API request - fn get_stats(&mut self) -> Result { - let timespan_end = max(self.launch_time, MassaTime::now(self.clock_compensation)?); - let timespan_start = max( - timespan_end.saturating_sub(self.cfg.stats_timespan), - self.launch_time, - ); - let final_block_count = self - .final_block_stats - .iter() - .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end) - .count() as u64; - let stale_block_count = self - .stale_block_stats - .iter() - .filter(|t| **t >= timespan_start && **t < timespan_end) - .count() as u64; - let clique_count = self.block_db.get_clique_count() as u64; - Ok(ConsensusStats { - final_block_count, - stale_block_count, - clique_count, - start_timespan: timespan_start, - end_timespan: timespan_end, - }) - } - - /// Manages received protocol events. - /// - /// # Arguments - /// * `event`: event type to process. 
- async fn process_protocol_event(&mut self, event: ProtocolEvent) -> Result<()> { - match event { - ProtocolEvent::ReceivedBlock { - block_id, - slot, - storage, - } => { - massa_trace!( - "consensus.consensus_worker.process_protocol_event.received_block", - { "block_id": block_id } - ); - self.block_db - .incoming_block(block_id, slot, self.previous_slot, storage)?; - let now = MassaTime::now(self.clock_compensation)?; - self.protocol_blocks.push_back((now, block_id)); - self.block_db_changed().await?; - } - ProtocolEvent::ReceivedBlockHeader { block_id, header } => { - massa_trace!("consensus.consensus_worker.process_protocol_event.received_header", { "block_id": block_id, "header": header }); - self.block_db - .incoming_header(block_id, header, self.previous_slot)?; - self.block_db_changed().await?; - } - ProtocolEvent::InvalidBlock { block_id, header } => { - massa_trace!( - "consensus.consensus_worker.process_protocol_event.invalid_block", - { "block_id": block_id } - ); - self.block_db.invalid_block(&block_id, header)?; - // Say it to consensus - } - } - Ok(()) - } - - /// prune statistics according to the stats span - fn prune_stats(&mut self) -> Result<()> { - let start_time = - MassaTime::now(self.clock_compensation)?.saturating_sub(self.stats_history_timespan); - while let Some((t, _, _)) = self.final_block_stats.front() { - if t < &start_time { - self.final_block_stats.pop_front(); - } else { - break; - } - } - while let Some(t) = self.stale_block_stats.front() { - if t < &start_time { - self.stale_block_stats.pop_front(); - } else { - break; - } - } - while let Some((t, _)) = self.protocol_blocks.front() { - if t < &start_time { - self.protocol_blocks.pop_front(); - } else { - break; - } - } - Ok(()) - } - - /// Notify execution about blockclique changes and finalized blocks. - fn notify_execution(&mut self, finalized_blocks: HashMap) { - // List new block storage instances that Execution doesn't know about. - // That's blocks that have not been sent to execution before, ie. in the previous blockclique). - let mut new_blocks_storage: PreHashMap = finalized_blocks - .iter() - .filter_map(|(_slot, b_id)| { - if self.prev_blockclique.contains_key(b_id) { - // was previously sent as a blockclique element - return None; - } - let (_a_block, storage) = self - .block_db - .get_active_block(b_id) - .expect("final block not found in active blocks"); - Some((*b_id, storage.clone())) - }) - .collect(); - - // Get new blockclique block list with slots. - let mut blockclique_changed = false; - let new_blockclique: PreHashMap = self - .block_db - .get_blockclique() - .iter() - .map(|b_id| { - if let Some(slot) = self.prev_blockclique.remove(b_id) { - // The block was already sent in the previous blockclique: - // the slot can be gathered from there without locking Storage. - // Note: the block is removed from self.prev_blockclique. - (*b_id, slot) - } else { - // The block was not present in the previous blockclique: - // the blockclique has changed => get the block's slot by querying Storage. - blockclique_changed = true; - let (a_block, storage) = self - .block_db - .get_active_block(b_id) - .expect("blockclique block not found in active blocks"); - new_blocks_storage.insert(*b_id, storage.clone()); - (*b_id, a_block.slot) - } - }) - .collect(); - if !self.prev_blockclique.is_empty() { - // All elements present in the new blockclique have been removed from `prev_blockclique` above. 
- // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore. - // In that case, we mark the blockclique as having changed. - blockclique_changed = true; - } - // Overwrite previous blockclique. - // Should still be done even if unchanged because elements were removed from it above. - self.prev_blockclique = new_blockclique.clone(); - - if finalized_blocks.is_empty() && !blockclique_changed { - // There are no changes (neither block finalizations not blockclique changes) to send to execution. - return; - } - - // Notify execution of block finalizations and blockclique changes - self.channels - .execution_controller - .update_blockclique_status( - finalized_blocks, - if blockclique_changed { - Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect()) - } else { - None - }, - new_blocks_storage, - ); - } - - /// call me if the block database changed - /// Processing of final blocks, pruning. - /// - /// 1. propagate blocks - /// 2. Notify of attack attempts - /// 3. get new final blocks - /// 4. get blockclique - /// 5. notify Execution - /// 6. Process new final blocks - /// 7. Notify pool of new final ops - /// 8. Notify PoS of final blocks - /// 9. notify protocol of block wish list - /// 10. note new latest final periods (prune graph if changed) - /// 11. add stale blocks to stats - async fn block_db_changed(&mut self) -> Result<()> { - massa_trace!("consensus.consensus_worker.block_db_changed", {}); - - // Propagate new blocks - for (block_id, storage) in self.block_db.get_blocks_to_propagate().into_iter() { - massa_trace!("consensus.consensus_worker.block_db_changed.integrated", { - "block_id": block_id - }); - self.channels - .protocol_command_sender - .integrated_block(block_id, storage) - .await?; - } - - // Notify protocol of attack attempts. 
- for hash in self.block_db.get_attack_attempts().into_iter() { - self.channels - .protocol_command_sender - .notify_block_attack(hash) - .await?; - massa_trace!("consensus.consensus_worker.block_db_changed.attack", { - "hash": hash - }); - } - - // manage finalized blocks - let timestamp = MassaTime::now(self.clock_compensation)?; - let finalized_blocks = self.block_db.get_new_final_blocks(); - let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); - for b_id in finalized_blocks { - if let Some((a_block, _block_store)) = self.block_db.get_active_block(&b_id) { - // add to final blocks to notify execution - final_block_slots.insert(a_block.slot, b_id); - - // add to stats - let block_is_from_protocol = self - .protocol_blocks - .iter() - .any(|(_, block_id)| block_id == &b_id); - self.final_block_stats.push_back(( - timestamp, - a_block.creator_address, - block_is_from_protocol, - )); - } - } - - // notify execution - self.notify_execution(final_block_slots); - - // notify protocol of block wishlist - let new_wishlist = self.block_db.get_block_wishlist()?; - let new_blocks: PreHashMap> = new_wishlist - .iter() - .filter_map(|(id, header)| { - if !self.wishlist.contains_key(id) { - Some((*id, header.clone())) - } else { - None - } - }) - .collect(); - let remove_blocks: PreHashSet = self - .wishlist - .iter() - .filter_map(|(id, _)| { - if !new_wishlist.contains_key(id) { - Some(*id) - } else { - None - } - }) - .collect(); - if !new_blocks.is_empty() || !remove_blocks.is_empty() { - massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); - self.channels - .protocol_command_sender - .send_wishlist_delta(new_blocks, remove_blocks) - .await?; - self.wishlist = new_wishlist; - } - - // note new latest final periods - let latest_final_periods: Vec = self - .block_db - .get_latest_final_blocks_periods() - .iter() - .map(|(_block_id, period)| *period) - .collect(); - // if changed... - if self.latest_final_periods != latest_final_periods { - // signal new last final periods to pool - self.channels - .pool_command_sender - .notify_final_cs_periods(&latest_final_periods); - // update final periods - self.latest_final_periods = latest_final_periods; - } - - // add stale blocks to stats - let new_stale_block_ids_creators_slots = self.block_db.get_new_stale_blocks(); - let timestamp = MassaTime::now(self.clock_compensation)?; - for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { - self.stale_block_stats.push_back(timestamp); - - /* - TODO add this again - let creator_addr = Address::from_public_key(&b_creator); - if self.staking_keys.contains_key(&creator_addr) { - warn!("block {} that was produced by our address {} at slot {} became stale. 
This is probably due to a temporary desynchronization.", b_id, creator_addr, b_slot); - } - */ - } - - Ok(()) - } - - /// Channel management stuff - /// todo delete - /// or at least introduce some generic - #[cfg(not(feature = "sandbox"))] - async fn send_consensus_event(&self, event: ConsensusEvent) -> Result<()> { - let result = self - .channels - .controller_event_tx - .send_timeout(event, self.cfg.max_send_wait.to_duration()) - .await; - match result { - Ok(()) => return Ok(()), - Err(SendTimeoutError::Closed(event)) => { - debug!( - "failed to send ConsensusEvent due to channel closure: {:?}", - event - ); - } - Err(SendTimeoutError::Timeout(event)) => { - debug!("failed to send ConsensusEvent due to timeout: {:?}", event); - } - } - Err(ConsensusError::ChannelError("failed to send event".into())) - } -} diff --git a/massa-consensus-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs new file mode 100644 index 00000000000..2a622acc797 --- /dev/null +++ b/massa-consensus-worker/src/controller.rs @@ -0,0 +1,200 @@ +use massa_consensus_exports::{ + block_graph_export::BlockGraphExport, block_status::BlockStatus, + bootstrapable_graph::BootstrapableGraph, error::ConsensusError, + export_active_block::ExportActiveBlock, ConsensusController, +}; +use massa_models::{ + api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + prehash::PreHashSet, + slot::Slot, + stats::ConsensusStats, + wrapped::Wrapped, +}; +use massa_storage::Storage; +use parking_lot::RwLock; +use std::sync::{mpsc::SyncSender, Arc}; +use tracing::log::warn; + +use crate::{commands::ConsensusCommand, state::ConsensusState}; + +/// The retrieval of data is made using a shared state and modifications are asked by sending message to a channel. +/// This is done mostly to be able to: +/// +/// - send commands through the channel without waiting for them to be processed from the point of view of the sending thread, and channels are very much optimal for that (much faster than locks) +/// - still be able to read the current state of the graph as processed so far (for this we need a shared state) +/// +/// Note that sending commands and reading the state is done from different, mutually-asynchronous tasks and they can have data that are not sync yet. +#[derive(Clone)] +pub struct ConsensusControllerImpl { + command_sender: SyncSender, + shared_state: Arc>, +} + +impl ConsensusControllerImpl { + pub fn new( + command_sender: SyncSender, + shared_state: Arc>, + ) -> Self { + Self { + command_sender, + shared_state, + } + } +} + +impl ConsensusController for ConsensusControllerImpl { + /// Get a block graph export in a given period. + /// + /// # Arguments: + /// * `start_slot`: the start slot + /// * `end_slot`: the end slot + /// + /// # Returns: + /// An export of the block graph in this period + fn get_block_graph_status( + &self, + start_slot: Option, + end_slot: Option, + ) -> Result { + self.shared_state + .read() + .extract_block_graph_part(start_slot, end_slot) + } + + /// Get statuses of blocks present in the graph + /// + /// # Arguments: + /// * `block_ids`: the block ids to get the status of + /// + /// # Returns: + /// A vector of statuses sorted by the order of the block ids + fn get_block_statuses(&self, ids: &[BlockId]) -> Vec { + let read_shared_state = self.shared_state.read(); + ids.iter() + .map(|id| read_shared_state.get_block_status(id)) + .collect() + } + + /// Get all the cliques possible in the block graph. 
+ /// + /// # Returns: + /// A vector of cliques + fn get_cliques(&self) -> Vec { + self.shared_state.read().max_cliques.clone() + } + + /// Get a part of the graph to send to a node so that he can setup his graph. + /// Used for bootstrap. + /// + /// # Returns: + /// A portion of the graph + fn get_bootstrap_graph(&self) -> Result { + let read_shared_state = self.shared_state.read(); + let mut required_final_blocks: PreHashSet<_> = + read_shared_state.list_required_active_blocks()?; + required_final_blocks.retain(|b_id| { + if let Some(BlockStatus::Active { a_block, .. }) = + read_shared_state.block_statuses.get(b_id) + { + if a_block.is_final { + // filter only final actives + return true; + } + } + false + }); + let mut final_blocks: Vec = + Vec::with_capacity(required_final_blocks.len()); + for b_id in &required_final_blocks { + if let Some(BlockStatus::Active { a_block, storage }) = + read_shared_state.block_statuses.get(b_id) + { + final_blocks.push(ExportActiveBlock::from_active_block(a_block, storage)); + } else { + return Err(ConsensusError::ContainerInconsistency(format!( + "block {} was expected to be active but wasn't on bootstrap graph export", + b_id + ))); + } + } + + Ok(BootstrapableGraph { final_blocks }) + } + + /// Get the stats of the consensus + fn get_stats(&self) -> Result { + self.shared_state.read().get_stats() + } + + /// Get the current best parents for a block creation + /// + /// # Returns: + /// A block id and a period for each thread of the graph + fn get_best_parents(&self) -> Vec<(BlockId, u64)> { + self.shared_state.read().best_parents.clone() + } + + /// Get the block, that is in the blockclique, at a given slot. + /// + /// # Arguments: + /// * `slot`: the slot to get the block at + /// + /// # Returns: + /// The block id of the block at the given slot if exists + fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option { + self.shared_state + .read() + .get_blockclique_block_at_slot(&slot) + } + + /// Get the latest block, that is in the blockclique, in the thread of the given slot and before this `slot`. 
+ /// + /// # Arguments: + /// * `slot`: the slot that will give us the thread and the upper bound + /// + /// # Returns: + /// The block id of the latest block in the thread of the given slot and before this slot + fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { + self.shared_state + .read() + .get_latest_blockclique_block_at_slot(&slot) + } + + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { + if let Err(err) = self + .command_sender + .try_send(ConsensusCommand::RegisterBlock( + block_id, + slot, + block_storage, + created, + )) + { + warn!("error trying to register a block: {}", err); + } + } + + fn register_block_header(&self, block_id: BlockId, header: Wrapped) { + if let Err(err) = self + .command_sender + .try_send(ConsensusCommand::RegisterBlockHeader(block_id, header)) + { + warn!("error trying to register a block header: {}", err); + } + } + + fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { + if let Err(err) = self + .command_sender + .try_send(ConsensusCommand::MarkInvalidBlock(block_id, header)) + { + warn!("error trying to mark block as invalid: {}", err); + } + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } +} diff --git a/massa-consensus-worker/src/lib.rs b/massa-consensus-worker/src/lib.rs index 5d590c1d394..71042385846 100644 --- a/massa-consensus-worker/src/lib.rs +++ b/massa-consensus-worker/src/lib.rs @@ -1,16 +1,22 @@ -//! Copyright (c) 2022 MASSA LABS +// Copyright (c) 2022 MASSA LABS -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] -#[macro_use] -extern crate massa_logging; +//! # General description +//! +//! The consensus worker launches a persistent thread that will run in the background. +//! This thread has a `run` function that triggers the consensus algorithm each slot. It can be interrupted by commands +//! that are managed on the fly. The consensus worker share a state with a controller. This controller can be called by the others modules. +//! It avoid sending message to the thread just for getting informations on the consensus. +//! +//! Communications with execution is blocking. Communications with protocol blocks on sending information to protocol but not blocking +//! when protocol sends informations to this module. +//! +//! This module doesn't use asynchronous code. 
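Editor's note: to make the threading model described in this module documentation concrete, here is a generic, self-contained sketch of the same shape (hypothetical `State`, `Command`, `Controller` and `Manager` types, not the actual Massa ones): reads go straight through a shared `RwLock`-protected state, writes are commands pushed to the worker thread over a bounded channel, and shutdown works by dropping every sender and joining the thread.

```rust
// Generic sketch of the controller/worker split (hypothetical types, not Massa's).
use std::sync::{
    mpsc::{sync_channel, SyncSender},
    Arc, RwLock,
};
use std::thread::{self, JoinHandle};

#[derive(Default)]
struct State {
    processed: u64,
}

enum Command {
    Bump,
}

#[derive(Clone)]
struct Controller {
    command_sender: SyncSender<Command>,
    shared_state: Arc<RwLock<State>>,
}

impl Controller {
    /// Read path: no round-trip to the worker, just a shared read lock.
    fn processed(&self) -> u64 {
        self.shared_state.read().unwrap().processed
    }
    /// Write path: fire-and-forget command; the worker applies it later,
    /// so reads may briefly lag behind writes.
    fn bump(&self) {
        if let Err(err) = self.command_sender.try_send(Command::Bump) {
            eprintln!("error sending command: {}", err);
        }
    }
}

struct Manager {
    command_sender: SyncSender<Command>,
    worker_handle: JoinHandle<()>,
}

impl Manager {
    /// The worker exits once every `SyncSender` is dropped and `recv` fails.
    fn stop(self) {
        drop(self.command_sender);
        self.worker_handle.join().expect("worker panicked");
    }
}

fn start_worker() -> (Controller, Manager) {
    let (tx, rx) = sync_channel::<Command>(16);
    let shared_state = Arc::new(RwLock::new(State::default()));
    let worker_state = shared_state.clone();
    let worker_handle = thread::spawn(move || {
        // The worker owns the receiver and is the only writer of the state.
        while let Ok(Command::Bump) = rx.recv() {
            worker_state.write().unwrap().processed += 1;
        }
    });
    let controller = Controller {
        command_sender: tx.clone(),
        shared_state,
    };
    let manager = Manager {
        command_sender: tx,
        worker_handle,
    };
    (controller, manager)
}

fn main() {
    let (controller, manager) = start_worker();
    controller.bump();
    // Spin until the worker has processed the command (illustration only).
    while controller.processed() == 0 {
        thread::yield_now();
    }
    // Every sender (the controller's included) must be dropped before joining.
    drop(controller);
    manager.stop();
}
```

In the PR itself the same roles are played by `ConsensusControllerImpl`, `ConsensusManagerImpl` and the `ConsensusState` behind a `parking_lot::RwLock`, with `ConsensusCommand` as the message type.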
+#![feature(deadline_api)]
-mod consensus_worker;
-mod tools;
-pub use tools::start_consensus_controller;
+mod commands;
+mod controller;
+mod manager;
+mod state;
+mod worker;
-#[cfg(test)]
-mod tests;
+pub use worker::start_consensus_worker;
diff --git a/massa-consensus-worker/src/manager.rs b/massa-consensus-worker/src/manager.rs
new file mode 100644
index 00000000000..d2ef67e5272
--- /dev/null
+++ b/massa-consensus-worker/src/manager.rs
@@ -0,0 +1,23 @@
+use massa_consensus_exports::ConsensusManager;
+use std::{sync::mpsc::SyncSender, thread::JoinHandle};
+use tracing::log::info;
+
+use crate::commands::ConsensusCommand;
+
+pub struct ConsensusManagerImpl {
+    pub consensus_thread: Option<(SyncSender<ConsensusCommand>, JoinHandle<()>)>,
+}
+
+impl ConsensusManager for ConsensusManagerImpl {
+    fn stop(&mut self) {
+        info!("stopping consensus worker...");
+        // join the consensus thread
+        if let Some((tx, join_handle)) = self.consensus_thread.take() {
+            drop(tx);
+            join_handle
+                .join()
+                .expect("consensus thread panicked on try to join");
+        }
+        info!("consensus worker stopped");
+    }
+}
diff --git a/massa-consensus-worker/src/state/graph.rs b/massa-consensus-worker/src/state/graph.rs
new file mode 100644
index 00000000000..b2c08e5c6d9
--- /dev/null
+++ b/massa-consensus-worker/src/state/graph.rs
@@ -0,0 +1,362 @@
+use std::collections::VecDeque;
+
+use massa_consensus_exports::{
+    block_status::{BlockStatus, DiscardReason},
+    error::ConsensusError,
+};
+use massa_logging::massa_trace;
+use massa_models::{block::BlockId, clique::Clique, prehash::PreHashSet, slot::Slot};
+
+use super::ConsensusState;
+
+impl ConsensusState {
+    pub fn insert_parents_descendants(
+        &mut self,
+        add_block_id: BlockId,
+        add_block_slot: Slot,
+        parents_hash: Vec<BlockId>,
+    ) {
+        // add as child to parents
+        for parent_h in parents_hash.iter() {
+            if let Some(BlockStatus::Active {
+                a_block: a_parent, ..
+            }) = self.block_statuses.get_mut(parent_h)
+            {
+                a_parent.children[add_block_slot.thread as usize]
+                    .insert(add_block_id, add_block_slot.period);
+            }
+        }
+
+        // add as descendant to ancestors. Note: descendants are never removed.
+        let mut ancestors: VecDeque<BlockId> = parents_hash.iter().copied().collect();
+        let mut visited = PreHashSet::<BlockId>::default();
+        while let Some(ancestor_h) = ancestors.pop_back() {
+            if !visited.insert(ancestor_h) {
+                continue;
+            }
+            if let Some(BlockStatus::Active { a_block: ab, ..
}) = + self.block_statuses.get_mut(&ancestor_h) + { + ab.descendants.insert(add_block_id); + for (ancestor_parent_h, _) in ab.parents.iter() { + ancestors.push_front(*ancestor_parent_h); + } + } + } + } + + pub fn compute_fitness_find_blockclique( + &mut self, + add_block_id: &BlockId, + ) -> Result { + let mut blockclique_i = 0usize; + let mut max_clique_fitness = (0u64, num::BigInt::default()); + for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { + clique.fitness = 0; + clique.is_blockclique = false; + let mut sum_hash = num::BigInt::default(); + for block_h in clique.block_ids.iter() { + let fitness = match self.block_statuses.get(block_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => a_block.fitness, + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))), + }; + clique.fitness = clique + .fitness + .checked_add(fitness) + .ok_or(ConsensusError::FitnessOverflow)?; + sum_hash -= num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes()); + } + let cur_fit = (clique.fitness, sum_hash); + if cur_fit > max_clique_fitness { + blockclique_i = clique_i; + max_clique_fitness = cur_fit; + } + } + self.max_cliques[blockclique_i].is_blockclique = true; + Ok(blockclique_i) + } + + pub fn list_stale_blocks(&self, fitness_threshold: u64) -> PreHashSet { + // iterate from largest to smallest to minimize reallocations + let mut indices: Vec = (0..self.max_cliques.len()).collect(); + indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); + let mut high_set = PreHashSet::::default(); + let mut low_set = PreHashSet::::default(); + for clique_i in indices.into_iter() { + if self.max_cliques[clique_i].fitness >= fitness_threshold { + high_set.extend(&self.max_cliques[clique_i].block_ids); + } else { + low_set.extend(&self.max_cliques[clique_i].block_ids); + } + } + &low_set - &high_set + } + + pub fn remove_block( + &mut self, + add_block_id: &BlockId, + block_id: &BlockId, + ) -> Result<(), ConsensusError> { + if let Some(BlockStatus::Active { + a_block: active_block, + storage: _storage, + }) = self.block_statuses.remove(block_id) + { + self.active_index.remove(block_id); + if active_block.is_final { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, block_id))); + } + + // remove from gi_head + if let Some(other_incomps) = self.gi_head.remove(block_id) { + for other_incomp in other_incomps.into_iter() { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { + other_incomp_lst.remove(block_id); + } + } + } + + // remove from cliques + let stale_block_fitness = active_block.fitness; + self.max_cliques.iter_mut().for_each(|c| { + if c.block_ids.remove(block_id) { + c.fitness -= stale_block_fitness; + } + }); + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { + // make sure at least one clique remains + self.max_cliques = vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }]; + } + + // remove from parent's children + for (parent_h, _parent_period) in active_block.parents.iter() { + if let Some(BlockStatus::Active { + a_block: parent_active_block, + .. 
+ }) = self.block_statuses.get_mut(parent_h) + { + parent_active_block.children[active_block.slot.thread as usize] + .remove(block_id); + } + } + + massa_trace!("consensus.block_graph.add_block_to_graph.stale", { + "hash": block_id + }); + + // mark as stale + self.new_stale_blocks + .insert(*block_id, (active_block.creator_address, active_block.slot)); + self.block_statuses.insert( + *block_id, + BlockStatus::Discarded { + slot: active_block.slot, + creator: active_block.creator_address, + parents: active_block.parents.iter().map(|(h, _)| *h).collect(), + reason: DiscardReason::Stale, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(*block_id); + Ok(()) + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, block_id))); + } + } + + pub fn list_final_blocks(&self) -> Result, ConsensusError> { + // short-circuiting intersection of cliques from smallest to largest + let mut indices: Vec = (0..self.max_cliques.len()).collect(); + indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); + let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); + for i in 1..indices.len() { + final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); + if final_candidates.is_empty() { + break; + } + } + + // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest) + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", + {} + ); + indices.retain(|&i| self.max_cliques[i].fitness > self.config.delta_f0); + indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); + + let mut final_blocks = PreHashSet::::default(); + for clique_i in indices.into_iter() { + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks.loop", + { "clique_i": clique_i } + ); + // check in cliques from highest to lowest fitness + if final_candidates.is_empty() { + // no more final candidates + break; + } + let clique = &self.max_cliques[clique_i]; + + // compute the total fitness of all the descendants of the candidate within the clique + let loc_candidates = final_candidates.clone(); + for candidate_h in loc_candidates.into_iter() { + let descendants = match self.block_statuses.get(&candidate_h) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => &a_block.descendants, + _ => { + return Err(ConsensusError::MissingBlock(format!( + "missing block when computing total fitness of descendants: {}", + candidate_h + ))) + } + }; + let desc_fit: u64 = descendants + .intersection(&clique.block_ids) + .map(|h| { + if let Some(BlockStatus::Active { a_block: ab, .. 
}) = + self.block_statuses.get(h) + { + return ab.fitness; + } + 0 + }) + .sum(); + if desc_fit > self.config.delta_f0 { + // candidate is final + final_candidates.remove(&candidate_h); + final_blocks.insert(candidate_h); + } + } + } + Ok(final_blocks) + } + + /// Computes max cliques of compatible blocks + pub fn compute_max_cliques(&self) -> Vec> { + let mut max_cliques: Vec> = Vec::new(); + + // algorithm adapted from IK_GPX as summarized in: + // Cazals et al., "A note on the problem of reporting maximal cliques" + // Theoretical Computer Science, 2008 + // https://doi.org/10.1016/j.tcs.2008.05.010 + + // stack: r, p, x + let mut stack: Vec<( + PreHashSet, + PreHashSet, + PreHashSet, + )> = vec![( + PreHashSet::::default(), + self.gi_head.keys().cloned().collect(), + PreHashSet::::default(), + )]; + while let Some((r, mut p, mut x)) = stack.pop() { + if p.is_empty() && x.is_empty() { + max_cliques.push(r); + continue; + } + // choose the pivot vertex following the GPX scheme: + // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) + let &u_p = p + .union(&x) + .max_by_key(|&u| { + p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) + .count() + }) + .unwrap(); // p was checked to be non-empty before + + // iterate over u_set = (p /\ Neighbors(u_p, GI)) + let u_set: PreHashSet = + &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); + for u_i in u_set.into_iter() { + p.remove(&u_i); + let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); + let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; + stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); + x.insert(u_i); + } + } + if max_cliques.is_empty() { + // make sure at least one clique remains + max_cliques = vec![PreHashSet::::default()]; + } + max_cliques + } + + /// get the clique of higher fitness + pub fn get_blockclique(&self) -> PreHashSet { + self.max_cliques + .iter() + .find(|c| c.is_blockclique) + .expect("blockclique missing") + .block_ids + .clone() + } + + pub fn mark_final_blocks( + &mut self, + add_block_id: &BlockId, + final_blocks: PreHashSet, + ) -> Result<(), ConsensusError> { + for block_id in final_blocks.into_iter() { + // remove from gi_head + if let Some(other_incomps) = self.gi_head.remove(&block_id) { + for other_incomp in other_incomps.into_iter() { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { + other_incomp_lst.remove(&block_id); + } + } + } + + // mark as final and update latest_final_blocks_periods + if let Some(BlockStatus::Active { + a_block: final_block, + .. 
+ }) = self.block_statuses.get_mut(&block_id) + { + massa_trace!("consensus.block_graph.add_block_to_graph.final", { + "hash": block_id + }); + final_block.is_final = true; + // remove from cliques + let final_block_fitness = final_block.fitness; + self.max_cliques.iter_mut().for_each(|c| { + if c.block_ids.remove(&block_id) { + c.fitness -= final_block_fitness; + } + }); + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { + // make sure at least one clique remains + self.max_cliques = vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }]; + } + // update latest final blocks + if final_block.slot.period + > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 + { + self.latest_final_blocks_periods[final_block.slot.thread as usize] = + (block_id, final_block.slot.period); + } + // update new final blocks list + self.new_final_blocks.insert(block_id); + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, block_id))); + } + } + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/mod.rs b/massa-consensus-worker/src/state/mod.rs new file mode 100644 index 00000000000..8a8fc542205 --- /dev/null +++ b/massa-consensus-worker/src/state/mod.rs @@ -0,0 +1,474 @@ +use std::collections::{HashMap, VecDeque}; + +use massa_consensus_exports::{ + block_graph_export::BlockGraphExport, + block_status::{BlockStatus, ExportCompiledBlock, HeaderOrBlock}, + error::ConsensusError, + ConsensusChannels, ConsensusConfig, +}; +use massa_models::{ + active_block::ActiveBlock, + address::Address, + api::BlockGraphStatus, + block::{BlockId, WrappedHeader}, + clique::Clique, + prehash::{CapacityAllocator, PreHashMap, PreHashSet}, + slot::Slot, +}; +use massa_storage::Storage; +use massa_time::MassaTime; + +mod graph; +mod process; +mod process_commands; +mod prune; +mod stats; +mod tick; +mod verifications; + +#[derive(Clone)] +pub struct ConsensusState { + /// Configuration + pub config: ConsensusConfig, + /// Channels to communicate with other modules + pub channels: ConsensusChannels, + /// Storage + pub storage: Storage, + /// Block ids of genesis blocks + pub genesis_hashes: Vec, + /// Incompatibility graph: maps a block id to the block ids it is incompatible with + /// One entry per Active Block + pub gi_head: PreHashMap>, + /// All the cliques + pub max_cliques: Vec, + /// ids of active blocks + pub active_index: PreHashSet, + /// Save of latest periods + pub save_final_periods: Vec, + /// One (block id, period) per thread + pub latest_final_blocks_periods: Vec<(BlockId, u64)>, + /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` + pub best_parents: Vec<(BlockId, u64)>, + /// Every block we know about + pub block_statuses: PreHashMap, + /// Ids of incoming blocks/headers + pub incoming_index: PreHashSet, + /// Used to limit the number of waiting and discarded blocks + pub sequence_counter: u64, + /// ids of waiting for slot blocks/headers + pub waiting_for_slot_index: PreHashSet, + /// ids of waiting for dependencies blocks/headers + pub waiting_for_dependencies_index: PreHashSet, + /// ids of discarded blocks + pub discarded_index: PreHashSet, + /// Blocks that need to be propagated + pub to_propagate: PreHashMap, + /// List of block ids we think are attack attempts + pub attack_attempts: Vec, + /// 
Newly final blocks + pub new_final_blocks: PreHashSet, + /// Newly stale block mapped to creator and slot + pub new_stale_blocks: PreHashMap, + /// time at which the node was launched (used for desynchronization detection) + pub launch_time: MassaTime, + /// Final block stats `(time, creator, is_from_protocol)` + pub final_block_stats: VecDeque<(MassaTime, Address, bool)>, + /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` + pub protocol_blocks: VecDeque<(MassaTime, BlockId)>, + /// Stale block timestamp + pub stale_block_stats: VecDeque, + /// the time span considered for stats + pub stats_history_timespan: MassaTime, + /// the time span considered for desynchronization detection + pub stats_desync_detection_timespan: MassaTime, + /// blocks we want + pub wishlist: PreHashMap>, + /// previous blockclique notified to Execution + pub prev_blockclique: PreHashMap, +} + +impl ConsensusState { + /// Get a full active block + pub fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { + match self.block_statuses.get(block_id) { + Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), + _ => None, + } + } + + pub fn get_clique_count(&self) -> usize { + self.max_cliques.len() + } + + /// get the blockclique (or final) block ID at a given slot, if any + pub fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option { + // List all blocks at this slot. + // The list should be small: make a copy of it to avoid holding the storage lock. + let blocks_at_slot = { + let storage_read = self.storage.read_blocks(); + let returned = match storage_read.get_blocks_by_slot(slot) { + Some(v) => v.clone(), + None => return None, + }; + returned + }; + + // search for the block in the blockclique + let search_in_blockclique = blocks_at_slot + .intersection( + &self + .max_cliques + .iter() + .find(|c| c.is_blockclique) + .expect("expected one clique to be the blockclique") + .block_ids, + ) + .next(); + if let Some(found_id) = search_in_blockclique { + return Some(*found_id); + } + + // block not found in the blockclique: search in the final blocks + blocks_at_slot + .into_iter() + .find(|b_id| match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, .. }) => a_block.is_final, + _ => false, + }) + } + + /// get the latest blockclique (or final) block ID at a given slot, if any + pub fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId { + let (mut best_block_id, mut best_block_period) = self + .latest_final_blocks_periods + .get(slot.thread as usize) + .unwrap_or_else(|| panic!("unexpected not found latest final block period")); + + self.get_blockclique() + .iter() + .for_each(|id| match self.block_statuses.get(id) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => { + if a_block.is_final { + panic!( + "unexpected final block on getting latest blockclique block at slot" + ); + } + if a_block.slot.thread == slot.thread + && a_block.slot.period < slot.period + && a_block.slot.period > best_block_period + { + best_block_period = a_block.slot.period; + best_block_id = *id; + } + } + _ => { + panic!("expected to find only active block but found another status") + } + }); + best_block_id + } + + pub fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus { + match self.block_statuses.get(block_id) { + None => BlockGraphStatus::NotFound, + Some(BlockStatus::Active { a_block, .. 
}) => { + if a_block.is_final { + BlockGraphStatus::Final + } else if self + .max_cliques + .iter() + .find(|clique| clique.is_blockclique) + .expect("blockclique absent") + .block_ids + .contains(block_id) + { + BlockGraphStatus::ActiveInBlockclique + } else { + BlockGraphStatus::ActiveInAlternativeCliques + } + } + Some(BlockStatus::Discarded { .. }) => BlockGraphStatus::Discarded, + Some(BlockStatus::Incoming(_)) => BlockGraphStatus::Incoming, + Some(BlockStatus::WaitingForDependencies { .. }) => { + BlockGraphStatus::WaitingForDependencies + } + Some(BlockStatus::WaitingForSlot(_)) => BlockGraphStatus::WaitingForSlot, + } + } + + pub fn list_required_active_blocks(&self) -> Result, ConsensusError> { + // list all active blocks + let mut retain_active: PreHashSet = + PreHashSet::::with_capacity(self.active_index.len()); + + let latest_final_blocks: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(hash, _)| *hash) + .collect(); + + // retain all non-final active blocks, + // the current "best parents", + // and the dependencies for both. + for block_id in self.active_index.iter() { + if let Some(BlockStatus::Active { + a_block: active_block, + .. + }) = self.block_statuses.get(block_id) + { + if !active_block.is_final + || self.best_parents.iter().any(|(b, _p)| b == block_id) + || latest_final_blocks.contains(block_id) + { + retain_active.extend(active_block.parents.iter().map(|(p, _)| *p)); + retain_active.insert(*block_id); + } + } + } + + // retain best parents + retain_active.extend(self.best_parents.iter().map(|(b, _p)| *b)); + + // retain last final blocks + retain_active.extend(self.latest_final_blocks_periods.iter().map(|(h, _)| *h)); + + for (thread, id) in latest_final_blocks.iter().enumerate() { + let mut current_block_id = *id; + while let Some((current_block, _)) = self.get_full_active_block(¤t_block_id) { + let parent_id = { + if !current_block.parents.is_empty() { + Some(current_block.parents[thread].0) + } else { + None + } + }; + + // retain block + retain_active.insert(current_block_id); + + // stop traversing when reaching a block with period number low enough + // so that any of its operations will have their validity period expired at the latest final block in thread + // note: one more is kept because of the way we iterate + if current_block.slot.period + < self.latest_final_blocks_periods[thread] + .1 + .saturating_sub(self.config.operation_validity_periods) + { + break; + } + + // if not genesis, traverse parent + match parent_id { + Some(p_id) => current_block_id = p_id, + None => break, + } + } + } + + // grow with parents & fill thread holes twice + for _ in 0..2 { + // retain the parents of the selected blocks + let retain_clone = retain_active.clone(); + + for retain_h in retain_clone.into_iter() { + retain_active.extend( + self.get_full_active_block(&retain_h) + .ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? 
+ .0.parents + .iter() + .map(|(b_id, _p)| *b_id), + ) + } + + // find earliest kept slots in each thread + let mut earliest_retained_periods: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(_, p)| *p) + .collect(); + for retain_h in retain_active.iter() { + let retain_slot = &self + .get_full_active_block(retain_h) + .ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? + .0.slot; + earliest_retained_periods[retain_slot.thread as usize] = std::cmp::min( + earliest_retained_periods[retain_slot.thread as usize], + retain_slot.period, + ); + } + + // fill up from the latest final block back to the earliest for each thread + for thread in 0..self.config.thread_count { + let mut cursor = self.latest_final_blocks_periods[thread as usize].0; // hash of tha latest final in that thread + while let Some((c_block, _)) = self.get_full_active_block(&cursor) { + if c_block.slot.period < earliest_retained_periods[thread as usize] { + break; + } + retain_active.insert(cursor); + if c_block.parents.is_empty() { + // genesis + break; + } + cursor = c_block.parents[thread as usize].0; + } + } + } + + Ok(retain_active) + } + + pub fn extract_block_graph_part( + &self, + slot_start: Option, + slot_end: Option, + ) -> Result { + let mut export = BlockGraphExport { + genesis_blocks: self.genesis_hashes.clone(), + active_blocks: PreHashMap::with_capacity(self.block_statuses.len()), + discarded_blocks: PreHashMap::with_capacity(self.block_statuses.len()), + best_parents: self.best_parents.clone(), + latest_final_blocks_periods: self.latest_final_blocks_periods.clone(), + gi_head: self.gi_head.clone(), + max_cliques: self.max_cliques.clone(), + }; + + let filter = |&s| { + if let Some(s_start) = slot_start { + if s < s_start { + return false; + } + } + if let Some(s_end) = slot_end { + if s >= s_end { + return false; + } + } + true + }; + + for (hash, block) in self.block_statuses.iter() { + match block { + BlockStatus::Discarded { + slot, + creator, + parents, + reason, + .. + } => { + if filter(slot) { + export + .discarded_blocks + .insert(*hash, (reason.clone(), (*slot, *creator, parents.clone()))); + } + } + BlockStatus::Active { a_block, storage } => { + if filter(&a_block.slot) { + let stored_block = + storage.read_blocks().get(hash).cloned().ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block in BlockGraphExport::extract_from: {}", + hash + )) + })?; + export.active_blocks.insert( + *hash, + ExportCompiledBlock { + header: stored_block.content.header, + children: a_block + .children + .iter() + .map(|thread| { + thread.keys().copied().collect::>() + }) + .collect(), + is_final: a_block.is_final, + }, + ); + } + } + _ => continue, + } + } + + Ok(export) + } + + /// Gets all stored final blocks, not only the still-useful ones + /// This is used when initializing Execution from Consensus. + /// Since the Execution bootstrap snapshot is older than the Consensus snapshot, + /// we might need to signal older final blocks for Execution to catch up. + pub fn get_all_final_blocks(&self) -> HashMap { + self.active_index + .iter() + .map(|b_id| { + let block_infos = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, storage }) => { + (a_block.slot, storage.clone()) + } + _ => panic!("active block missing"), + }; + (*b_id, block_infos) + }) + .collect() + } + + /// get the current block wish list, including the operations hash. 
+ pub fn get_block_wishlist( + &self, + ) -> Result>, ConsensusError> { + let mut wishlist = PreHashMap::>::default(); + for block_id in self.waiting_for_dependencies_index.iter() { + if let Some(BlockStatus::WaitingForDependencies { + unsatisfied_dependencies, + .. + }) = self.block_statuses.get(block_id) + { + for unsatisfied_h in unsatisfied_dependencies.iter() { + match self.block_statuses.get(unsatisfied_h) { + Some(BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + .. + }) => { + wishlist.insert(header.id, Some(header.clone())); + } + None => { + wishlist.insert(*unsatisfied_h, None); + } + _ => {} + } + } + } + } + + Ok(wishlist) + } + + /// Gets a block and all its descendants + /// + /// # Argument + /// * hash : hash of the given block + pub fn get_active_block_and_descendants( + &self, + block_id: &BlockId, + ) -> Result, ConsensusError> { + let mut to_visit = vec![*block_id]; + let mut result = PreHashSet::::default(); + while let Some(visit_h) = to_visit.pop() { + if !result.insert(visit_h) { + continue; // already visited + } + match self.block_statuses.get(&visit_h) { + Some(BlockStatus::Active { a_block, .. }) => { + a_block.as_ref() + .children.iter() + .for_each(|thread_children| to_visit.extend(thread_children.keys())) + }, + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h))), + } + } + Ok(result) + } +} diff --git a/massa-consensus-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs new file mode 100644 index 00000000000..9c38e6d9164 --- /dev/null +++ b/massa-consensus-worker/src/state/process.rs @@ -0,0 +1,872 @@ +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + mem, +}; + +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::{BlockId, WrappedHeader}, + clique::Clique, + prehash::{PreHashMap, PreHashSet}, + slot::Slot, +}; +use massa_signature::PublicKey; +use massa_storage::Storage; +use massa_time::MassaTime; +use tracing::log::{debug, info}; + +use crate::state::verifications::HeaderCheckOutcome; + +use super::ConsensusState; + +impl ConsensusState { + /// Acknowledge a set of items recursively and process them + /// + /// # Arguments: + /// * `to_ack`: the set of items to acknowledge and process + /// * `current_slot`: the current slot when this function is called + /// + /// # Returns: + /// Success or error if an error happened during the processing of items + pub fn rec_process( + &mut self, + mut to_ack: BTreeSet<(Slot, BlockId)>, + current_slot: Option, + ) -> Result<(), ConsensusError> { + // order processing by (slot, hash) + while let Some((_slot, hash)) = to_ack.pop_first() { + to_ack.extend(self.process(hash, current_slot)?) 
+ } + Ok(()) + } + + /// Acknowledge a single item, return a set of items to re-ack + /// + /// # Arguments: + /// * `block_id`: the id of the block to acknowledge + /// * `current_slot`: the current slot when this function is called + /// + /// # Returns: + /// A list of items to re-ack and process or an error if the process of an item failed + pub fn process( + &mut self, + block_id: BlockId, + current_slot: Option, + ) -> Result, ConsensusError> { + // list items to reprocess + let mut reprocess = BTreeSet::new(); + + massa_trace!("consensus.block_graph.process", { "block_id": block_id }); + // control all the waiting states and try to get a valid block + let ( + valid_block_creator, + valid_block_slot, + valid_block_parents_hash_period, + valid_block_incomp, + valid_block_inherited_incomp_count, + valid_block_storage, + valid_block_fitness, + ) = match self.block_statuses.get(&block_id) { + None => return Ok(BTreeSet::new()), // disappeared before being processed: do nothing + + // discarded: do nothing + Some(BlockStatus::Discarded { .. }) => { + massa_trace!("consensus.block_graph.process.discarded", { + "block_id": block_id + }); + return Ok(BTreeSet::new()); + } + + // already active: do nothing + Some(BlockStatus::Active { .. }) => { + massa_trace!("consensus.block_graph.process.active", { + "block_id": block_id + }); + return Ok(BTreeSet::new()); + } + + // incoming header + Some(BlockStatus::Incoming(HeaderOrBlock::Header(_))) => { + massa_trace!("consensus.block_graph.process.incoming_header", { + "block_id": block_id + }); + // remove header + let header = if let Some(BlockStatus::Incoming(HeaderOrBlock::Header(header))) = + self.block_statuses.remove(&block_id) + { + self.incoming_index.remove(&block_id); + header + } else { + return Err(ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses removing incoming header {}", + block_id + ))); + }; + match self.check_header(&block_id, &header, current_slot, self)? { + HeaderCheckOutcome::Proceed { .. 
} => { + // set as waiting dependencies + let mut dependencies = PreHashSet::::default(); + dependencies.insert(block_id); // add self as unsatisfied + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id)?; + + massa_trace!( + "consensus.block_graph.process.incoming_header.waiting_for_self", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForDependencies(mut dependencies) => { + // set as waiting dependencies + dependencies.insert(block_id); // add self as unsatisfied + massa_trace!("consensus.block_graph.process.incoming_header.waiting_for_dependencies", {"block_id": block_id, "dependencies": dependencies}); + + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id)?; + + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForSlot => { + // make it wait for slot + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForSlot(HeaderOrBlock::Header(header)), + ); + self.waiting_for_slot_index.insert(block_id); + + massa_trace!( + "consensus.block_graph.process.incoming_header.waiting_for_slot", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::Discard(reason) => { + self.maybe_note_attack_attempt(&reason, &block_id); + massa_trace!("consensus.block_graph.process.incoming_header.discarded", {"block_id": block_id, "reason": reason}); + // count stales + if reason == DiscardReason::Stale { + self.new_stale_blocks + .insert(block_id, (header.creator_address, header.content.slot)); + } + // discard + self.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents, + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + + return Ok(BTreeSet::new()); + } + } + } + + // incoming block + Some(BlockStatus::Incoming(HeaderOrBlock::Block { id: block_id, .. })) => { + let block_id = *block_id; + massa_trace!("consensus.block_graph.process.incoming_block", { + "block_id": block_id + }); + let (slot, storage) = + if let Some(BlockStatus::Incoming(HeaderOrBlock::Block { + slot, storage, .. + })) = self.block_statuses.remove(&block_id) + { + self.incoming_index.remove(&block_id); + (slot, storage) + } else { + return Err(ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses removing incoming block {}", + block_id + ))); + }; + let stored_block = storage + .read_blocks() + .get(&block_id) + .cloned() + .expect("incoming block not found in storage"); + + match self.check_header( + &block_id, + &stored_block.content.header, + current_slot, + self, + )? 
{ + HeaderCheckOutcome::Proceed { + parents_hash_period, + incompatibilities, + inherited_incompatibilities_count, + fitness, + } => { + // block is valid: remove it from Incoming and return it + massa_trace!("consensus.block_graph.process.incoming_block.valid", { + "block_id": block_id + }); + ( + stored_block.content.header.creator_public_key, + slot, + parents_hash_period, + incompatibilities, + inherited_incompatibilities_count, + storage, + fitness, + ) + } + HeaderCheckOutcome::WaitForDependencies(dependencies) => { + // set as waiting dependencies + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }, + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id)?; + massa_trace!( + "consensus.block_graph.process.incoming_block.waiting_for_dependencies", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForSlot => { + // set as waiting for slot + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForSlot(HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }), + ); + self.waiting_for_slot_index.insert(block_id); + + massa_trace!( + "consensus.block_graph.process.incoming_block.waiting_for_slot", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::Discard(reason) => { + self.maybe_note_attack_attempt(&reason, &block_id); + massa_trace!("consensus.block_graph.process.incoming_block.discarded", {"block_id": block_id, "reason": reason}); + // count stales + if reason == DiscardReason::Stale { + self.new_stale_blocks.insert( + block_id, + ( + stored_block.content.header.creator_address, + stored_block.content.header.content.slot, + ), + ); + } + // add to discard + self.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: stored_block.content.header.content.slot, + creator: stored_block.creator_address, + parents: stored_block.content.header.content.parents.clone(), + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + + return Ok(BTreeSet::new()); + } + } + } + + Some(BlockStatus::WaitingForSlot(header_or_block)) => { + massa_trace!("consensus.block_graph.process.waiting_for_slot", { + "block_id": block_id + }); + let slot = header_or_block.get_slot(); + if Some(slot) > current_slot { + massa_trace!( + "consensus.block_graph.process.waiting_for_slot.in_the_future", + { "block_id": block_id } + ); + // in the future: ignore + return Ok(BTreeSet::new()); + } + // send back as incoming and ask for reprocess + if let Some(BlockStatus::WaitingForSlot(header_or_block)) = + self.block_statuses.remove(&block_id) + { + self.waiting_for_slot_index.remove(&block_id); + self.block_statuses + .insert(block_id, BlockStatus::Incoming(header_or_block)); + self.incoming_index.insert(block_id); + reprocess.insert((slot, block_id)); + massa_trace!( + "consensus.block_graph.process.waiting_for_slot.reprocess", + { "block_id": block_id } + ); + return Ok(reprocess); + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); + }; + } + + Some(BlockStatus::WaitingForDependencies { + unsatisfied_dependencies, + .. 
+ }) => { + massa_trace!("consensus.block_graph.process.waiting_for_dependencies", { + "block_id": block_id + }); + if !unsatisfied_dependencies.is_empty() { + // still has unsatisfied dependencies: ignore + return Ok(BTreeSet::new()); + } + // send back as incoming and ask for reprocess + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, .. + }) = self.block_statuses.remove(&block_id) + { + self.waiting_for_dependencies_index.remove(&block_id); + reprocess.insert((header_or_block.get_slot(), block_id)); + self.block_statuses + .insert(block_id, BlockStatus::Incoming(header_or_block)); + self.incoming_index.insert(block_id); + massa_trace!( + "consensus.block_graph.process.waiting_for_dependencies.reprocess", + { "block_id": block_id } + ); + return Ok(reprocess); + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot header or block {}", block_id))); + } + } + }; + + // add block to graph + self.add_block_to_graph( + block_id, + valid_block_parents_hash_period, + valid_block_creator, + valid_block_slot, + valid_block_incomp, + valid_block_inherited_incomp_count, + valid_block_fitness, + valid_block_storage, + )?; + + // if the block was added, update linked dependencies and mark satisfied ones for recheck + if let Some(BlockStatus::Active { storage, .. }) = self.block_statuses.get(&block_id) { + massa_trace!("consensus.block_graph.process.is_active", { + "block_id": block_id + }); + self.to_propagate.insert(block_id, storage.clone()); + for itm_block_id in self.waiting_for_dependencies_index.iter() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + .. + }) = self.block_statuses.get_mut(itm_block_id) + { + if unsatisfied_dependencies.remove(&block_id) { + // a dependency was satisfied: retry + reprocess.insert((header_or_block.get_slot(), *itm_block_id)); + } + } + } + } + + Ok(reprocess) + } + + pub fn promote_dep_tree(&mut self, hash: BlockId) -> Result<(), ConsensusError> { + let mut to_explore = vec![hash]; + let mut to_promote: PreHashMap = PreHashMap::default(); + while let Some(h) = to_explore.pop() { + if to_promote.contains_key(&h) { + continue; + } + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + sequence_number, + .. + }) = self.block_statuses.get(&h) + { + // promote current block + to_promote.insert(h, (header_or_block.get_slot(), *sequence_number)); + // register dependencies for exploration + to_explore.extend(unsatisfied_dependencies); + } + } + + let mut to_promote: Vec<(Slot, u64, BlockId)> = to_promote + .into_iter() + .map(|(h, (slot, seq))| (slot, seq, h)) + .collect(); + to_promote.sort_unstable(); // last ones should have the highest seq number + for (_slot, _seq, h) in to_promote.into_iter() { + if let Some(BlockStatus::WaitingForDependencies { + sequence_number, .. 
+ }) = self.block_statuses.get_mut(&h) + { + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + } + Ok(()) + } + + /// Add a block to the graph and update the cliques, the graph dependencies and incompatibilities + /// + /// # Arguments: + /// * `add_block_id`: Block id of the block to add + /// * `parents_hash_period`: Ids and periods of the parents of the block to add + /// * `add_block_creator`: Creator of the block to add + /// * `add_block_slot`: Slot of the block to add + /// * `incomp`: Block ids of the blocks incompatible with the block to add + /// * `fitness`: Fitness of the block to add + /// * `storage`: Storage containing all the data of the block to add + /// + /// # Returns: + /// Success or error if any steps failed + #[allow(clippy::too_many_arguments)] + fn add_block_to_graph( + &mut self, + add_block_id: BlockId, + parents_hash_period: Vec<(BlockId, u64)>, + add_block_creator: PublicKey, + add_block_slot: Slot, + incomp: PreHashSet, + inherited_incomp_count: usize, + fitness: u64, + mut storage: Storage, + ) -> Result<(), ConsensusError> { + massa_trace!("consensus.block_graph.add_block_to_graph", { + "block_id": add_block_id + }); + + // Ensure block parents are claimed by the block's storage. + // Note that operations and endorsements should already be there (claimed in Protocol). + storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect()); + + // add block to status structure + self.block_statuses.insert( + add_block_id, + BlockStatus::Active { + a_block: Box::new(ActiveBlock { + creator_address: Address::from_public_key(&add_block_creator), + parents: parents_hash_period.clone(), + descendants: PreHashSet::::default(), + block_id: add_block_id, + children: vec![Default::default(); self.config.thread_count as usize], + is_final: false, + slot: add_block_slot, + fitness, + }), + storage, + }, + ); + self.active_index.insert(add_block_id); + + // add as child to parents + // add as descendant to ancestors. Note: descendants are never removed. + self.insert_parents_descendants( + add_block_id, + add_block_slot, + parents_hash_period.iter().map(|(p_id, _)| *p_id).collect(), + ); + + // add incompatibilities to gi_head + massa_trace!( + "consensus.block_graph.add_block_to_graph.add_incompatibilities", + {} + ); + for incomp_h in incomp.iter() { + self.gi_head + .get_mut(incomp_h) + .ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when adding incomp to gi_head: {}", + incomp_h + )) + })? 
+ .insert(add_block_id); + } + self.gi_head.insert(add_block_id, incomp.clone()); + + // max cliques update + massa_trace!( + "consensus.block_graph.add_block_to_graph.max_cliques_update", + {} + ); + if incomp.len() == inherited_incomp_count { + // clique optimization routine: + // the block only has incompatibilities inherited from its parents + // therefore it is not forking and can simply be added to the cliques it is compatible with + self.max_cliques + .iter_mut() + .filter(|c| incomp.is_disjoint(&c.block_ids)) + .for_each(|c| { + c.block_ids.insert(add_block_id); + }); + } else { + // fully recompute max cliques + massa_trace!( + "consensus.block_graph.add_block_to_graph.clique_full_computing", + { "hash": add_block_id } + ); + let before = self.max_cliques.len(); + self.max_cliques = self + .compute_max_cliques() + .into_iter() + .map(|c| Clique { + block_ids: c, + fitness: 0, + is_blockclique: false, + }) + .collect(); + let after = self.max_cliques.len(); + if before != after { + massa_trace!( + "consensus.block_graph.add_block_to_graph.clique_full_computing more than one clique", + { "cliques": self.max_cliques, "gi_head": self.gi_head } + ); + // gi_head + debug!( + "clique number went from {} to {} after adding {}", + before, after, add_block_id + ); + } + } + + // compute clique fitnesses and find blockclique + massa_trace!("consensus.block_graph.add_block_to_graph.compute_clique_fitnesses_and_find_blockclique", {}); + // note: clique_fitnesses is pair (fitness, -hash_sum) where the second parameter is negative for sorting + let position_blockclique = self.compute_fitness_find_blockclique(&add_block_id)?; + + // update best parents + massa_trace!( + "consensus.block_graph.add_block_to_graph.update_best_parents", + {} + ); + { + let blockclique = &self.max_cliques[position_blockclique]; + + // init best parents as latest_final_blocks_periods + self.best_parents = self.latest_final_blocks_periods.clone(); + // for each blockclique block, set it as best_parent in its own thread + // if its period is higher than the current best_parent in that thread + for block_h in blockclique.block_ids.iter() { + let b_slot = match self.block_statuses.get(block_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => a_block.slot, + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h))), + }; + if b_slot.period > self.best_parents[b_slot.thread as usize].1 { + self.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); + } + } + } + + // list stale blocks + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_stale_blocks", + {} + ); + let fitness_threshold = self.max_cliques[position_blockclique] + .fitness + .saturating_sub(self.config.delta_f0); + let stale_blocks = self.list_stale_blocks(fitness_threshold); + self.max_cliques.retain(|c| c.fitness >= fitness_threshold); + // mark stale blocks + massa_trace!( + "consensus.block_graph.add_block_to_graph.mark_stale_blocks", + {} + ); + for stale_block_hash in stale_blocks.into_iter() { + self.remove_block(&add_block_id, &stale_block_hash)?; + } + + // list final blocks + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks", + {} + ); + let final_blocks = self.list_final_blocks()?; + + // mark final blocks and update latest_final_blocks_periods + massa_trace!( + "consensus.block_graph.add_block_to_graph.mark_final_blocks", + {} + ); + self.mark_final_blocks(&add_block_id, 
final_blocks)?; + + massa_trace!("consensus.block_graph.add_block_to_graph.end", {}); + Ok(()) + } + + /// Note an attack attempt if the discard reason indicates one. + pub fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { + massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason}); + // If invalid, note the attack attempt. + if let DiscardReason::Invalid(reason) = reason { + info!( + "consensus.block_graph.maybe_note_attack_attempt DiscardReason::Invalid:{}", + reason + ); + self.attack_attempts.push(*hash); + } + } + + /// Notify execution about blockclique changes and finalized blocks. + /// + /// # Arguments: + /// * `finalized_blocks`: Block that became final and need to be send to execution + fn notify_execution(&mut self, finalized_blocks: HashMap) { + // List new block storage instances that Execution doesn't know about. + // That's blocks that have not been sent to execution before, ie. in the previous blockclique). + let mut new_blocks_storage: PreHashMap = finalized_blocks + .iter() + .filter_map(|(_slot, b_id)| { + if self.prev_blockclique.contains_key(b_id) { + // was previously sent as a blockclique element + return None; + } + let storage = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { + a_block: _, + storage, + }) => storage, + _ => panic!("final block not found in active blocks"), + }; + Some((*b_id, storage.clone())) + }) + .collect(); + + // Get new blockclique block list with slots. + let mut blockclique_changed = false; + let new_blockclique: PreHashMap = self + .get_blockclique() + .iter() + .map(|b_id| { + if let Some(slot) = self.prev_blockclique.remove(b_id) { + // The block was already sent in the previous blockclique: + // the slot can be gathered from there without locking Storage. + // Note: the block is removed from self.prev_blockclique. + (*b_id, slot) + } else { + // The block was not present in the previous blockclique: + // the blockclique has changed => get the block's slot by querying Storage. + blockclique_changed = true; + let (slot, storage) = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, storage }) => (a_block.slot, storage), + _ => panic!("blockclique block not found in active blocks"), + }; + new_blocks_storage.insert(*b_id, storage.clone()); + (*b_id, slot) + } + }) + .collect(); + if !self.prev_blockclique.is_empty() { + // All elements present in the new blockclique have been removed from `prev_blockclique` above. + // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore. + // In that case, we mark the blockclique as having changed. + blockclique_changed = true; + } + // Overwrite previous blockclique. + // Should still be done even if unchanged because elements were removed from it above. + self.prev_blockclique = new_blockclique.clone(); + + if finalized_blocks.is_empty() && !blockclique_changed { + // There are no changes (neither block finalizations not blockclique changes) to send to execution. + return; + } + + // Notify execution of block finalizations and blockclique changes + self.channels + .execution_controller + .update_blockclique_status( + finalized_blocks, + if blockclique_changed { + Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect()) + } else { + None + }, + new_blocks_storage, + ); + } + + /// call me if the block database changed + /// Processing of final blocks, pruning. + /// + /// 1. propagate blocks + /// 2. 
Notify of attack attempts + /// 3. get new final blocks + /// 4. get blockclique + /// 5. notify Execution + /// 6. Process new final blocks + /// 7. Notify pool of new final ops + /// 8. Notify PoS of final blocks + /// 9. notify protocol of block wish list + /// 10. note new latest final periods (prune graph if changed) + /// 11. add stale blocks to stats + pub fn block_db_changed(&mut self) -> Result<(), ConsensusError> { + let final_block_slots = { + massa_trace!("consensus.consensus_worker.block_db_changed", {}); + + // Propagate new blocks + for (block_id, storage) in mem::take(&mut self.to_propagate).into_iter() { + massa_trace!("consensus.consensus_worker.block_db_changed.integrated", { + "block_id": block_id + }); + self.channels + .protocol_command_sender + .integrated_block(block_id, storage)?; + } + + // Notify protocol of attack attempts. + for hash in mem::take(&mut self.attack_attempts).into_iter() { + self.channels + .protocol_command_sender + .notify_block_attack(hash)?; + massa_trace!("consensus.consensus_worker.block_db_changed.attack", { + "hash": hash + }); + } + + // manage finalized blocks + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + let finalized_blocks = mem::take(&mut self.new_final_blocks); + let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); + let mut final_block_stats = VecDeque::with_capacity(finalized_blocks.len()); + for b_id in finalized_blocks { + if let Some(BlockStatus::Active { + a_block, + storage: _, + }) = self.block_statuses.get(&b_id) + { + // add to final blocks to notify execution + final_block_slots.insert(a_block.slot, b_id); + + // add to stats + let block_is_from_protocol = self + .protocol_blocks + .iter() + .any(|(_, block_id)| block_id == &b_id); + final_block_stats.push_back(( + timestamp, + a_block.creator_address, + block_is_from_protocol, + )); + } + } + self.final_block_stats.extend(final_block_stats); + + // add stale blocks to stats + let new_stale_block_ids_creators_slots = mem::take(&mut self.new_stale_blocks); + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { + self.stale_block_stats.push_back(timestamp); + } + final_block_slots + }; + + // notify execution + self.notify_execution(final_block_slots); + + // notify protocol of block wishlist + let new_wishlist = self.get_block_wishlist()?; + let new_blocks: PreHashMap> = new_wishlist + .iter() + .filter_map(|(id, header)| { + if !self.wishlist.contains_key(id) { + Some((*id, header.clone())) + } else { + None + } + }) + .collect(); + let remove_blocks: PreHashSet = self + .wishlist + .iter() + .filter_map(|(id, _)| { + if !new_wishlist.contains_key(id) { + Some(*id) + } else { + None + } + }) + .collect(); + if !new_blocks.is_empty() || !remove_blocks.is_empty() { + massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); + self.channels + .protocol_command_sender + .send_wishlist_delta(new_blocks, remove_blocks)?; + self.wishlist = new_wishlist; + } + + // note new latest final periods + let latest_final_periods: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(_block_id, period)| *period) + .collect(); + // if changed... 
+ if self.save_final_periods != latest_final_periods { + // signal new last final periods to pool + self.channels + .pool_command_sender + .notify_final_cs_periods(&latest_final_periods); + // update final periods + self.save_final_periods = latest_final_periods; + } + + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/process_commands.rs b/massa-consensus-worker/src/state/process_commands.rs new file mode 100644 index 00000000000..fd923648f10 --- /dev/null +++ b/massa-consensus-worker/src/state/process_commands.rs @@ -0,0 +1,188 @@ +use std::collections::{hash_map::Entry, BTreeSet}; + +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{ + block::{BlockId, WrappedHeader}, + slot::Slot, +}; +use massa_storage::Storage; +use massa_time::MassaTime; +use tracing::debug; + +use super::ConsensusState; + +impl ConsensusState { + /// Register a block header in the graph. Ignore genesis hashes. + /// + /// # Arguments: + /// * `block_id`: the block id + /// * `header`: the header to register + /// * `current_slot`: the slot when this function is called + /// + /// # Returns: + /// Success or error if the header is invalid or too old + pub fn register_block_header( + &mut self, + block_id: BlockId, + header: WrappedHeader, + current_slot: Option, + ) -> Result<(), ConsensusError> { + // ignore genesis blocks + if self.genesis_hashes.contains(&block_id) { + return Ok(()); + } + + debug!( + "received header {} for slot {}", + block_id, header.content.slot + ); + massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); + let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); + match self.block_statuses.entry(block_id) { + // if absent => add as Incoming, call rec_ack on it + Entry::Vacant(vac) => { + to_ack.insert((header.content.slot, block_id)); + vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); + self.incoming_index.insert(block_id); + } + Entry::Occupied(mut occ) => match occ.get_mut() { + BlockStatus::Discarded { + sequence_number, .. + } => { + // promote if discarded + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + BlockStatus::WaitingForDependencies { .. } => { + // promote in dependencies + self.promote_dep_tree(block_id)?; + } + _ => {} + }, + } + + // process + self.rec_process(to_ack, current_slot)?; + + Ok(()) + } + + /// Register a new full block in the graph. Ignore genesis hashes. 
+ /// + /// # Arguments: + /// * `block_id`: the block id + /// * `slot`: the slot of the block + /// * `current_slot`: the slot when this function is called + /// * `storage`: Storage containing the whole content of the block + /// * `created`: is the block created by the node or received from the network + /// + /// # Returns: + /// Success or error if the block is invalid or too old + pub fn register_block( + &mut self, + block_id: BlockId, + slot: Slot, + current_slot: Option, + storage: Storage, + created: bool, + ) -> Result<(), ConsensusError> { + // ignore genesis blocks + if self.genesis_hashes.contains(&block_id) { + return Ok(()); + } + + // Block is coming from protocol mark it for desync calculation + if !created { + let now = MassaTime::now(self.config.clock_compensation_millis)?; + self.protocol_blocks.push_back((now, block_id)); + } + + debug!("received block {} for slot {}", block_id, slot); + + let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); + match self.block_statuses.entry(block_id) { + // if absent => add as Incoming, call rec_ack on it + Entry::Vacant(vac) => { + to_ack.insert((slot, block_id)); + vac.insert(BlockStatus::Incoming(HeaderOrBlock::Block { + id: block_id, + slot, + storage, + })); + self.incoming_index.insert(block_id); + } + Entry::Occupied(mut occ) => match occ.get_mut() { + BlockStatus::Discarded { + sequence_number, .. + } => { + // promote if discarded + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + BlockStatus::WaitingForSlot(header_or_block) => { + // promote to full block + *header_or_block = HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }; + } + BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + .. + } => { + // promote to full block and satisfy self-dependency + if unsatisfied_dependencies.remove(&block_id) { + // a dependency was satisfied: process + to_ack.insert((slot, block_id)); + } + *header_or_block = HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }; + // promote in dependencies + self.promote_dep_tree(block_id)?; + } + _ => return Ok(()), + }, + } + + // process + self.rec_process(to_ack, current_slot)?; + + Ok(()) + } + + /// Mark a block that is in the graph as invalid. 
+ /// + /// # Arguments: + /// * `block_id`: Block id of the block to mark as invalid + /// * `header`: Header of the block to mark as invalid + pub fn mark_invalid_block(&mut self, block_id: &BlockId, header: WrappedHeader) { + let reason = DiscardReason::Invalid("invalid".to_string()); + self.maybe_note_attack_attempt(&reason, block_id); + massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); + + // add to discard + self.block_statuses.insert( + *block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents, + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(*block_id); + } +} diff --git a/massa-consensus-worker/src/state/prune.rs b/massa-consensus-worker/src/state/prune.rs new file mode 100644 index 00000000000..f7d5711a717 --- /dev/null +++ b/massa-consensus-worker/src/state/prune.rs @@ -0,0 +1,348 @@ +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{active_block::ActiveBlock, block::BlockId, prehash::PreHashMap, slot::Slot}; +use tracing::debug; + +use super::ConsensusState; + +impl ConsensusState { + /// prune active blocks and return final blocks, return discarded final blocks + fn prune_active(&mut self) -> Result, ConsensusError> { + // list required active blocks + let mut retain_active = self.list_required_active_blocks()?; + + // retain extra history according to the config + // this is useful to avoid desync on temporary connection loss + for a_block in self.active_index.iter() { + if let Some(BlockStatus::Active { + a_block: active_block, + .. + }) = self.block_statuses.get(a_block) + { + let (_b_id, latest_final_period) = + self.latest_final_blocks_periods[active_block.slot.thread as usize]; + if active_block.slot.period + >= latest_final_period.saturating_sub(self.config.force_keep_final_periods) + { + retain_active.insert(*a_block); + } + } + } + + // remove unused final active blocks + let mut discarded_finals: PreHashMap = PreHashMap::default(); + let to_remove: Vec = self + .active_index + .difference(&retain_active) + .copied() + .collect(); + for discard_active_h in to_remove { + let block_slot; + let block_creator; + let block_parents; + { + let read_blocks = self.storage.read_blocks(); + let block = read_blocks.get(&discard_active_h).ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when removing unused final active blocks: {}", + discard_active_h + )) + })?; + block_slot = block.content.header.content.slot; + block_creator = block.creator_address; + block_parents = block.content.header.content.parents.clone(); + }; + + let discarded_active = if let Some(BlockStatus::Active { + a_block: discarded_active, + .. + }) = self.block_statuses.remove(&discard_active_h) + { + self.active_index.remove(&discard_active_h); + discarded_active + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and removing unused final active blocks - {} is missing", discard_active_h))); + }; + + // remove from parent's children + for (parent_h, _parent_period) in discarded_active.parents.iter() { + if let Some(BlockStatus::Active { + a_block: parent_active_block, + .. 
+ }) = self.block_statuses.get_mut(parent_h) + { + parent_active_block.children[discarded_active.slot.thread as usize] + .remove(&discard_active_h); + } + } + + massa_trace!("consensus.block_graph.prune_active", {"hash": discard_active_h, "reason": DiscardReason::Final}); + + // mark as final + self.block_statuses.insert( + discard_active_h, + BlockStatus::Discarded { + slot: block_slot, + creator: block_creator, + parents: block_parents, + reason: DiscardReason::Final, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(discard_active_h); + + discarded_finals.insert(discard_active_h, *discarded_active); + } + + Ok(discarded_finals) + } + + fn prune_slot_waiting(&mut self) { + if self.waiting_for_slot_index.len() <= self.config.max_future_processing_blocks { + return; + } + let mut slot_waiting: Vec<(Slot, BlockId)> = self + .waiting_for_slot_index + .iter() + .filter_map(|block_id| { + if let Some(BlockStatus::WaitingForSlot(header_or_block)) = + self.block_statuses.get(block_id) + { + return Some((header_or_block.get_slot(), *block_id)); + } + None + }) + .collect(); + slot_waiting.sort_unstable(); + let len_slot_waiting = slot_waiting.len(); + (self.config.max_future_processing_blocks..len_slot_waiting).for_each(|idx| { + let (_slot, block_id) = &slot_waiting[idx]; + self.block_statuses.remove(block_id); + self.waiting_for_slot_index.remove(block_id); + }); + } + + fn prune_discarded(&mut self) -> Result<(), ConsensusError> { + if self.discarded_index.len() <= self.config.max_discarded_blocks { + return Ok(()); + } + let mut discard_hashes: Vec<(u64, BlockId)> = self + .discarded_index + .iter() + .filter_map(|block_id| { + if let Some(BlockStatus::Discarded { + sequence_number, .. + }) = self.block_statuses.get(block_id) + { + return Some((*sequence_number, *block_id)); + } + None + }) + .collect(); + discard_hashes.sort_unstable(); + discard_hashes.truncate(self.discarded_index.len() - self.config.max_discarded_blocks); + for (_, block_id) in discard_hashes.iter() { + self.block_statuses.remove(block_id); + self.discarded_index.remove(block_id); + } + Ok(()) + } + + fn prune_waiting_for_dependencies(&mut self) -> Result<(), ConsensusError> { + let mut to_discard: PreHashMap> = PreHashMap::default(); + let mut to_keep: PreHashMap = PreHashMap::default(); + + // list items that are older than the latest final blocks in their threads or have deps that are discarded + { + for block_id in self.waiting_for_dependencies_index.iter() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + sequence_number, + }) = self.block_statuses.get(block_id) + { + // has already discarded dependencies => discard (choose worst reason) + let mut discard_reason = None; + let mut discarded_dep_found = false; + for dep in unsatisfied_dependencies.iter() { + if let Some(BlockStatus::Discarded { reason, .. 
}) = + self.block_statuses.get(dep) + { + discarded_dep_found = true; + match reason { + DiscardReason::Invalid(reason) => { + discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", block_id, reason))); + break; + } + DiscardReason::Stale => discard_reason = Some(DiscardReason::Stale), + DiscardReason::Final => discard_reason = Some(DiscardReason::Stale), + } + } + } + if discarded_dep_found { + to_discard.insert(*block_id, discard_reason); + continue; + } + + // is at least as old as the latest final block in its thread => discard as stale + let slot = header_or_block.get_slot(); + if slot.period <= self.latest_final_blocks_periods[slot.thread as usize].1 { + to_discard.insert(*block_id, Some(DiscardReason::Stale)); + continue; + } + + // otherwise, mark as to_keep + to_keep.insert(*block_id, (*sequence_number, header_or_block.get_slot())); + } + } + } + + // discard in chain and because of limited size + while !to_keep.is_empty() { + // mark entries as to_discard and remove them from to_keep + for (hash, _old_order) in to_keep.clone().into_iter() { + if let Some(BlockStatus::WaitingForDependencies { + unsatisfied_dependencies, + .. + }) = self.block_statuses.get(&hash) + { + // has dependencies that will be discarded => discard (choose worst reason) + let mut discard_reason = None; + let mut dep_to_discard_found = false; + for dep in unsatisfied_dependencies.iter() { + if let Some(reason) = to_discard.get(dep) { + dep_to_discard_found = true; + match reason { + Some(DiscardReason::Invalid(reason)) => { + discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", hash, reason))); + break; + } + Some(DiscardReason::Stale) => { + discard_reason = Some(DiscardReason::Stale) + } + Some(DiscardReason::Final) => { + discard_reason = Some(DiscardReason::Stale) + } + None => {} // leave as None + } + } + } + if dep_to_discard_found { + to_keep.remove(&hash); + to_discard.insert(hash, discard_reason); + continue; + } + } + } + + // remove worst excess element + if to_keep.len() > self.config.max_dependency_blocks { + let remove_elt = to_keep + .iter() + .filter_map(|(hash, _old_order)| { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + sequence_number, + .. + }) = self.block_statuses.get(hash) + { + return Some((sequence_number, header_or_block.get_slot(), *hash)); + } + None + }) + .min(); + if let Some((_seq_num, _slot, hash)) = remove_elt { + to_keep.remove(&hash); + to_discard.insert(hash, None); + continue; + } + } + + // nothing happened: stop loop + break; + } + + // transition states to Discarded if there is a reason, otherwise just drop + for (block_id, reason_opt) in to_discard.drain() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, .. + }) = self.block_statuses.remove(&block_id) + { + self.waiting_for_dependencies_index.remove(&block_id); + let header = match header_or_block { + HeaderOrBlock::Header(h) => h, + HeaderOrBlock::Block { id: block_id, .. } => self + .storage + .read_blocks() + .get(&block_id) + .ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when pruning waiting for deps: {}", + block_id + )) + })? 
+ .content + .header + .clone(), + }; + massa_trace!("consensus.block_graph.prune_waiting_for_dependencies", {"hash": block_id, "reason": reason_opt}); + + if let Some(reason) = reason_opt { + // add to stats if reason is Stale + if reason == DiscardReason::Stale { + self.new_stale_blocks + .insert(block_id, (header.creator_address, header.content.slot)); + } + // transition to Discarded only if there is a reason + self.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents.clone(), + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + } + } + } + + Ok(()) + } + + pub fn prune(&mut self) -> Result<(), ConsensusError> { + let before = self.max_cliques.len(); + // Step 1: discard final blocks that are not useful to the graph anymore and return them + self.prune_active()?; + + // Step 2: prune slot waiting blocks + self.prune_slot_waiting(); + + // Step 3: prune dependency waiting blocks + self.prune_waiting_for_dependencies()?; + + // Step 4: prune discarded + self.prune_discarded()?; + + let after = self.max_cliques.len(); + if before != after { + debug!( + "clique number went from {} to {} after pruning", + before, after + ); + } + + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/stats.rs b/massa-consensus-worker/src/state/stats.rs new file mode 100644 index 00000000000..4ab766a4ca4 --- /dev/null +++ b/massa-consensus-worker/src/state/stats.rs @@ -0,0 +1,101 @@ +use super::ConsensusState; +use massa_consensus_exports::error::ConsensusError; +use massa_models::stats::ConsensusStats; +use massa_time::MassaTime; +use std::cmp::max; + +#[cfg(not(feature = "sandbox"))] +use tracing::log::warn; + +#[cfg(not(feature = "sandbox"))] +use massa_consensus_exports::events::ConsensusEvent; + +impl ConsensusState { + /// Calculate and return stats about consensus + pub fn get_stats(&self) -> Result { + let timespan_end = max( + self.launch_time, + MassaTime::now(self.config.clock_compensation_millis)?, + ); + let timespan_start = max( + timespan_end.saturating_sub(self.config.stats_timespan), + self.launch_time, + ); + let final_block_count = self + .final_block_stats + .iter() + .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end) + .count() as u64; + let stale_block_count = self + .stale_block_stats + .iter() + .filter(|t| **t >= timespan_start && **t < timespan_end) + .count() as u64; + let clique_count = self.get_clique_count() as u64; + Ok(ConsensusStats { + final_block_count, + stale_block_count, + clique_count, + start_timespan: timespan_start, + end_timespan: timespan_end, + }) + } + + /// Must be called each tick to update stats. 
Will detect if a desynchronization happened
+    pub fn stats_tick(&mut self) -> Result<(), ConsensusError> {
+        // check if any final blocks recently came from protocol
+        // if none => we are probably desynchronized
+        #[cfg(not(feature = "sandbox"))]
+        {
+            let now = MassaTime::now(self.config.clock_compensation_millis)?;
+            if now
+                > max(self.config.genesis_timestamp, self.launch_time)
+                    .saturating_add(self.stats_desync_detection_timespan)
+                && !self
+                    .final_block_stats
+                    .iter()
+                    .any(|(time, _, is_from_protocol)| {
+                        time > &now.saturating_sub(self.stats_desync_detection_timespan)
+                            && *is_from_protocol
+                    })
+            {
+                warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node");
+                let _ = self
+                    .channels
+                    .controller_event_tx
+                    .send(ConsensusEvent::NeedSync);
+            }
+        }
+        // prune stats
+        self.prune_stats()?;
+        Ok(())
+    }
+
+    /// Remove old stats from consensus storage
+    pub fn prune_stats(&mut self) -> Result<(), ConsensusError> {
+        let start_time = MassaTime::now(self.config.clock_compensation_millis)?
+            .saturating_sub(self.stats_history_timespan);
+        while let Some((t, _, _)) = self.final_block_stats.front() {
+            if t < &start_time {
+                self.final_block_stats.pop_front();
+            } else {
+                break;
+            }
+        }
+        while let Some(t) = self.stale_block_stats.front() {
+            if t < &start_time {
+                self.stale_block_stats.pop_front();
+            } else {
+                break;
+            }
+        }
+        while let Some((t, _)) = self.protocol_blocks.front() {
+            if t < &start_time {
+                self.protocol_blocks.pop_front();
+            } else {
+                break;
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/massa-consensus-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs
new file mode 100644
index 00000000000..3165bc82669
--- /dev/null
+++ b/massa-consensus-worker/src/state/tick.rs
@@ -0,0 +1,49 @@
+use std::collections::BTreeSet;
+
+use massa_consensus_exports::{block_status::BlockStatus, error::ConsensusError};
+use massa_logging::massa_trace;
+use massa_models::{block::BlockId, slot::Slot};
+
+use super::ConsensusState;
+
+impl ConsensusState {
+    /// Should be called at each tick. Checks whether a block in the graph is ready to be
+    /// processed at this slot and, if so, processes it.
+    ///
+    /// # Arguments:
+    /// * `current_slot`: the current slot
+    ///
+    /// # Returns:
+    /// Error if processing a block returned an error.
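// Illustrative sketch (not from this patch): `slot_tick` is intended to be driven by the
// worker once per slot; `current_slot` is assumed to be tracked by the caller's clock:
//
//     state.slot_tick(current_slot)?;
//
// One call processes the blocks whose slot has been reached, updates the stats used for
// desynchronization detection, and handles the resulting block db changes.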
+    pub fn slot_tick(&mut self, current_slot: Slot) -> Result<(), ConsensusError> {
+        massa_trace!("consensus.consensus_worker.slot_tick", {
+            "slot": current_slot
+        });
+
+        // list all elements for which the time has come
+        let to_process: BTreeSet<(Slot, BlockId)> = self
+            .waiting_for_slot_index
+            .iter()
+            .filter_map(|b_id| match self.block_statuses.get(b_id) {
+                Some(BlockStatus::WaitingForSlot(header_or_block)) => {
+                    let slot = header_or_block.get_slot();
+                    if slot <= current_slot {
+                        Some((slot, *b_id))
+                    } else {
+                        None
+                    }
+                }
+                _ => None,
+            })
+            .collect();
+
+        massa_trace!("consensus.block_graph.slot_tick", {});
+        // process those elements
+        self.rec_process(to_process, Some(current_slot))?;
+
+        self.stats_tick()?;
+        // take care of block db changes
+        self.block_db_changed()?;
+
+        Ok(())
+    }
+}
diff --git a/massa-consensus-worker/src/state/verifications.rs b/massa-consensus-worker/src/state/verifications.rs
new file mode 100644
index 00000000000..9fc6dc11be8
--- /dev/null
+++ b/massa-consensus-worker/src/state/verifications.rs
@@ -0,0 +1,411 @@
+use super::ConsensusState;
+
+use massa_consensus_exports::{
+    block_status::{BlockStatus, DiscardReason},
+    error::ConsensusError,
+};
+use massa_logging::massa_trace;
+use massa_models::{
+    block::{BlockId, WrappedHeader},
+    prehash::PreHashSet,
+    slot::Slot,
+};
+
+/// Possible output of a header check
+#[derive(Debug)]
+pub enum HeaderCheckOutcome {
+    /// it's ok and here are some useful values
+    Proceed {
+        /// one (parent block id, parent's period) per thread
+        parents_hash_period: Vec<(BlockId, u64)>,
+        /// blocks that the header is incompatible with
+        incompatibilities: PreHashSet<BlockId>,
+        /// number of incompatibilities that are inherited from the parents
+        inherited_incompatibilities_count: usize,
+        /// fitness
+        fitness: u64,
+    },
+    /// there is something wrong with that header
+    Discard(DiscardReason),
+    /// it must wait for its slot to be fully processed
+    WaitForSlot,
+    /// it must wait for these block ids to be fully processed
+    WaitForDependencies(PreHashSet<BlockId>),
+}
+
+/// Possible outcomes of an endorsements check
+#[derive(Debug)]
+pub enum EndorsementsCheckOutcome {
+    /// Everything is ok
+    Proceed,
+    /// There is something wrong with that endorsement
+    Discard(DiscardReason),
+    /// It must wait for its slot to be fully processed
+    WaitForSlot,
+}
+
+impl ConsensusState {
+    /// Process an incoming header.
+    ///
+    /// Checks performed:
+    /// - Number of parents matches the thread count.
+    /// - Slot is above 0.
+    /// - Thread is valid.
+    /// - Check that the block is more recent than the latest final block in its thread.
+    /// - Check that the block slot is not too far into the future,
+    ///   as determined by the configuration `future_block_processing_max_periods`.
+    /// - Check if it was the creator's turn to create this block.
+    /// - TODO: check for double staking.
+    /// - Check parents are present.
+    /// - Check the topological consistency of the parents.
+    /// - Check endorsements.
+    /// - Check thread incompatibility test.
+    /// - Check grandpa incompatibility test.
+    /// - Check if the block is incompatible with a parent.
+    /// - Check if the block is incompatible with a final block.
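// Illustrative sketch (not from this patch): how a caller might consume the result of
// `check_header`. The variants come from `HeaderCheckOutcome` above; passing the same
// state for `read_shared_state` is an assumption about the caller:
//
//     match state.check_header(&block_id, &header, Some(current_slot), &state)? {
//         HeaderCheckOutcome::Proceed { .. } => { /* turn the header into an active block */ }
//         HeaderCheckOutcome::Discard(reason) => { /* record the block as Discarded */ }
//         HeaderCheckOutcome::WaitForSlot => { /* keep it queued until its slot */ }
//         HeaderCheckOutcome::WaitForDependencies(deps) => { /* wait for missing blocks */ }
//     }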
+ pub fn check_header( + &self, + block_id: &BlockId, + header: &WrappedHeader, + current_slot: Option, + read_shared_state: &ConsensusState, + ) -> Result { + massa_trace!("consensus.block_graph.check_header", { + "block_id": block_id + }); + let mut parents: Vec<(BlockId, u64)> = + Vec::with_capacity(self.config.thread_count as usize); + let mut incomp = PreHashSet::::default(); + let mut missing_deps = PreHashSet::::default(); + let creator_addr = header.creator_address; + + // check that is older than the latest final block in that thread + // Note: this excludes genesis blocks + if header.content.slot.period + <= read_shared_state.latest_final_blocks_periods[header.content.slot.thread as usize].1 + { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + + // check if block slot is too much in the future + if let Some(cur_slot) = current_slot { + if header.content.slot.period + > cur_slot + .period + .saturating_add(self.config.future_block_processing_max_periods) + { + return Ok(HeaderCheckOutcome::WaitForSlot); + } + } + + // check if it was the creator's turn to create this block + // (step 1 in consensus/pos.md) + let slot_draw_address = match self + .channels + .selector_controller + .get_producer(header.content.slot) + { + Ok(draw) => draw, + Err(_) => return Ok(HeaderCheckOutcome::WaitForSlot), // TODO properly handle PoS errors + }; + if creator_addr != slot_draw_address { + // it was not the creator's turn to create a block for this slot + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + format!("Bad creator turn for the slot:{}", header.content.slot), + ))); + } + + // check if block is in the future: queue it + // note: do it after testing signature + draw to prevent queue flooding/DoS + // note: Some(x) > None + if Some(header.content.slot) > current_slot { + return Ok(HeaderCheckOutcome::WaitForSlot); + } + + // Note: here we will check if we already have a block for that slot + // and if someone double staked, they will be denounced + + // list parents and ensure they are present + let parent_set: PreHashSet = header.content.parents.iter().copied().collect(); + for parent_thread in 0u8..self.config.thread_count { + let parent_hash = header.content.parents[parent_thread as usize]; + match read_shared_state.block_statuses.get(&parent_hash) { + Some(BlockStatus::Discarded { reason, .. }) => { + // parent is discarded + return Ok(HeaderCheckOutcome::Discard(match reason { + DiscardReason::Invalid(invalid_reason) => DiscardReason::Invalid(format!( + "discarded because a parent was discarded for the following reason: {}", + invalid_reason + )), + r => r.clone(), + })); + } + Some(BlockStatus::Active { + a_block: parent, .. 
+ }) => { + // parent is active + + // check that the parent is from an earlier slot in the right thread + if parent.slot.thread != parent_thread || parent.slot >= header.content.slot { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + format!( + "Bad parent {} in thread:{} or slot:{} for {}.", + parent_hash, parent_thread, parent.slot, header.content.slot + ), + ))); + } + + // inherit parent incompatibilities + // and ensure parents are mutually compatible + if let Some(p_incomp) = read_shared_state.gi_head.get(&parent_hash) { + if !p_incomp.is_disjoint(&parent_set) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "Parent not mutually compatible".to_string(), + ))); + } + incomp.extend(p_incomp); + } + + parents.push((parent_hash, parent.slot.period)); + } + _ => { + // parent is missing or queued + if read_shared_state.genesis_hashes.contains(&parent_hash) { + // forbid depending on discarded genesis block + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + missing_deps.insert(parent_hash); + } + } + } + if !missing_deps.is_empty() { + return Ok(HeaderCheckOutcome::WaitForDependencies(missing_deps)); + } + let inherited_incomp_count = incomp.len(); + + // check the topological consistency of the parents + { + let mut gp_max_slots = vec![0u64; self.config.thread_count as usize]; + for parent_i in 0..self.config.thread_count { + let (parent_h, parent_period) = parents[parent_i as usize]; + let parent = match read_shared_state.block_statuses.get(&parent_h) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => a_block, + _ => { + return Err(ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses searching parent {} of block {}", + parent_h, block_id + ))) + } + }; + if parent_period < gp_max_slots[parent_i as usize] { + // a parent is earlier than a block known by another parent in that thread + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "a parent is earlier than a block known by another parent in that thread" + .to_string(), + ))); + } + gp_max_slots[parent_i as usize] = parent_period; + if parent_period == 0 { + // genesis + continue; + } + for gp_i in 0..self.config.thread_count { + if gp_i == parent_i { + continue; + } + let gp_h = parent.parents[gp_i as usize].0; + match read_shared_state.block_statuses.get(&gp_h) { + // this grandpa is discarded + Some(BlockStatus::Discarded { reason, .. }) => { + return Ok(HeaderCheckOutcome::Discard(reason.clone())); + } + // this grandpa is active + Some(BlockStatus::Active { a_block: gp, .. }) => { + if gp.slot.period > gp_max_slots[gp_i as usize] { + if gp_i < parent_i { + return Ok(HeaderCheckOutcome::Discard( + DiscardReason::Invalid( + "grandpa error: gp_i < parent_i".to_string(), + ), + )); + } + gp_max_slots[gp_i as usize] = gp.slot.period; + } + } + // this grandpa is missing, assume stale + _ => return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)), + } + } + } + } + + // get parent in own thread + let parent_in_own_thread = match read_shared_state + .block_statuses + .get(&parents[header.content.slot.thread as usize].0) + { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => Some(a_block), + _ => None, + } + .ok_or_else(|| { + ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses searching parent {} in own thread of block {}", + parents[header.content.slot.thread as usize].0, block_id + )) + })?; + + // check endorsements + match self.check_endorsements(header)? 
{ + EndorsementsCheckOutcome::Proceed => {} + EndorsementsCheckOutcome::Discard(reason) => { + return Ok(HeaderCheckOutcome::Discard(reason)) + } + EndorsementsCheckOutcome::WaitForSlot => return Ok(HeaderCheckOutcome::WaitForSlot), + } + + // thread incompatibility test + parent_in_own_thread.children[header.content.slot.thread as usize] + .keys() + .filter(|&sibling_h| sibling_h != block_id) + .try_for_each(|&sibling_h| { + incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); + Result::<(), ConsensusError>::Ok(()) + })?; + + // grandpa incompatibility test + for tau in (0u8..self.config.thread_count).filter(|&t| t != header.content.slot.thread) { + // for each parent in a different thread tau + // traverse parent's descendants in tau + let mut to_explore = vec![(0usize, header.content.parents[tau as usize])]; + while let Some((cur_gen, cur_h)) = to_explore.pop() { + let cur_b = match read_shared_state.block_statuses.get(&cur_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + _ => None, + }.ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?; + + // traverse but do not check up to generation 1 + if cur_gen <= 1 { + to_explore.extend( + cur_b.children[tau as usize] + .keys() + .map(|&c_h| (cur_gen + 1, c_h)), + ); + continue; + } + + let parent_id = { + self.storage + .read_blocks() + .get(&cur_b.block_id) + .ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block in grandpa incomp test: {}", + cur_b.block_id + )) + })? + .content + .header + .content + .parents[header.content.slot.thread as usize] + }; + + // check if the parent in tauB has a strictly lower period number than B's parent in tauB + // note: cur_b cannot be genesis at gen > 1 + let parent_period = match read_shared_state.block_statuses.get(&parent_id) { + Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + _ => None, + }.ok_or_else(|| + ConsensusError::ContainerInconsistency( + format!("inconsistency inside block statuses searching {} check if the parent in tauB has a strictly lower period number than B's parent in tauB while checking grandpa incompatibility of block {}", + parent_id, + block_id) + ))?.slot.period; + if parent_period < parent_in_own_thread.slot.period { + // GPI detected + incomp.extend(self.get_active_block_and_descendants(&cur_h)?); + } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse + } + } + + // check if the block is incompatible with a parent + if !incomp.is_disjoint(&parents.iter().map(|(h, _p)| *h).collect()) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "Block incompatible with a parent".to_string(), + ))); + } + + // check if the block is incompatible with a final block + if !incomp.is_disjoint( + &read_shared_state + .active_index + .iter() + .filter_map(|h| { + if let Some(BlockStatus::Active { a_block: a, .. 
}) = + read_shared_state.block_statuses.get(h) + { + if a.is_final { + return Some(*h); + } + } + None + }) + .collect(), + ) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + massa_trace!("consensus.block_graph.check_header.ok", { + "block_id": block_id + }); + + Ok(HeaderCheckOutcome::Proceed { + parents_hash_period: parents, + incompatibilities: incomp, + inherited_incompatibilities_count: inherited_incomp_count, + fitness: header.get_fitness(), + }) + } + + /// check endorsements: + /// * endorser was selected for that (slot, index) + /// * endorsed slot is `parent_in_own_thread` slot + pub fn check_endorsements( + &self, + header: &WrappedHeader, + ) -> Result { + // check endorsements + let endorsement_draws = match self + .channels + .selector_controller + .get_selection(header.content.slot) + { + Ok(sel) => sel.endorsements, + Err(_) => return Ok(EndorsementsCheckOutcome::WaitForSlot), + }; + for endorsement in header.content.endorsements.iter() { + // check that the draw is correct + if endorsement.creator_address != endorsement_draws[endorsement.content.index as usize] + { + return Ok(EndorsementsCheckOutcome::Discard(DiscardReason::Invalid( + format!( + "endorser draw mismatch for header in slot: {}", + header.content.slot + ), + ))); + } + + // note that the following aspects are checked in protocol + // * signature + // * index reuse + // * slot matching the block's + // * the endorsed block is the containing block's parent + } + + Ok(EndorsementsCheckOutcome::Proceed) + } +} diff --git a/massa-consensus-worker/src/tests/block_factory.rs b/massa-consensus-worker/src/tests/block_factory.rs deleted file mode 100644 index 0a415dde219..00000000000 --- a/massa-consensus-worker/src/tests/block_factory.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! This is a factory that can be used in consensus test -//! but at it was introduced quite late in the development process -//! 
it has only be used in scenarios basic - -use super::tools::{validate_notpropagate_block, validate_propagate_block}; -use massa_hash::Hash; -use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, - endorsement::WrappedEndorsement, - operation::WrappedOperation, - slot::Slot, - wrapped::{Id, WrappedContent}, -}; -use massa_protocol_exports::test_exports::MockProtocolController; -use massa_signature::KeyPair; -use massa_storage::Storage; - -pub struct BlockFactory { - pub best_parents: Vec, - pub creator_keypair: KeyPair, - pub slot: Slot, - pub endorsements: Vec, - pub operations: Vec, - pub protocol_controller: MockProtocolController, -} - -impl BlockFactory { - pub fn start_block_factory( - genesis: Vec, - protocol_controller: MockProtocolController, - ) -> BlockFactory { - BlockFactory { - best_parents: genesis, - creator_keypair: KeyPair::generate(), - slot: Slot::new(1, 0), - endorsements: Vec::new(), - operations: Vec::new(), - protocol_controller, - } - } - - pub async fn create_and_receive_block(&mut self, valid: bool) -> WrappedBlock { - let header = BlockHeader::new_wrapped( - BlockHeader { - slot: self.slot, - parents: self.best_parents.clone(), - operation_merkle_root: Hash::compute_from( - &self - .operations - .iter() - .flat_map(|op| op.id.get_hash().into_bytes()) - .collect::>()[..], - ), - endorsements: self.endorsements.clone(), - }, - BlockHeaderSerializer::new(), - &self.creator_keypair, - ) - .unwrap(); - - let block = Block::new_wrapped( - Block { - header, - operations: self - .operations - .clone() - .into_iter() - .map(|op| op.id) - .collect(), - }, - BlockSerializer::new(), - &self.creator_keypair, - ) - .unwrap(); - - let mut storage = Storage::create_root(); - let id = block.id; - let slot = block.content.header.content.slot; - storage.store_block(block.clone()); - - self.protocol_controller - .receive_block(id, slot, storage) - .await; - if valid { - // Assert that the block is propagated. - validate_propagate_block(&mut self.protocol_controller, id, 2000).await; - } else { - // Assert that the the block is not propagated. - validate_notpropagate_block(&mut self.protocol_controller, id, 500).await; - } - block - } - - pub fn sign_header(&self, header: BlockHeader) -> WrappedBlock { - let header = - BlockHeader::new_wrapped(header, BlockHeaderSerializer::new(), &self.creator_keypair) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: self - .operations - .clone() - .into_iter() - .map(|op| op.id) - .collect(), - }, - BlockSerializer::new(), - &self.creator_keypair, - ) - .unwrap() - } - - pub async fn receive_block( - &mut self, - valid: bool, - block_id: BlockId, - slot: Slot, - storage: Storage, - ) { - self.protocol_controller - .receive_block(block_id, slot, storage) - .await; - if valid { - // Assert that the block is propagated. - validate_propagate_block(&mut self.protocol_controller, block_id, 2000).await; - } else { - // Assert that the the block is not propagated. - validate_notpropagate_block(&mut self.protocol_controller, block_id, 500).await; - } - } - - pub fn take_protocol_controller(self) -> MockProtocolController { - self.protocol_controller - } -} diff --git a/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs b/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs deleted file mode 100644 index d9b7da9b817..00000000000 --- a/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs +++ /dev/null @@ -1,199 +0,0 @@ -//! 
Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_time::MassaTime; -use serial_test::serial; -use std::{collections::HashSet, str::FromStr}; - -/// # Context -/// -/// Regression test for `https://github.com/massalabs/massa/pull/2433` -/// -/// When we have the following block sequence -/// ``` -/// 1 thread, periods_per_cycle = 2, delta_f0 = 1, 1 endorsement per block -/// -/// cycle 0 | cycle 1 | cycle 2 -/// G - B1 - B2 - B3 - B4 -/// where G is the genesis block -/// and B4 contains a roll sell operation -/// ``` -/// -/// And the block `B1` is received AFTER `B4`, blocks will be processed recursively: -/// ``` -/// * B1 is received and included -/// * B2 is processed -/// * B1 becomes final in the graph -/// * B3 is processed -/// * B2 becomes final in the graph -/// * B4 is processed -/// * B3 becomes final in the graph -/// * PoS is told about all finalized blocks -/// ``` -/// -/// The problem we had is that in order to check rolls to verify `B4`'s roll sell, -/// the final roll registry was assumed to be attached to the last final block known by the graph, -/// but that was inaccurate because PoS was the one holding the final roll registry, -/// and PoS was not yet aware of the blocks that finalized during recursion, -/// so it was actually still attached to G when `B4` was checked. -/// -/// The correction involved taking the point of view of PoS on where the final roll registry is attached. -/// This test ensures non-regression by making sure `B4` is propagated when `B1` is received. -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -#[serial] -async fn test_inter_cycle_batch_finalization() { - let t0: MassaTime = 1000.into(); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let warmup_time: MassaTime = 1000.into(); - let margin_time: MassaTime = 300.into(); - let cfg = ConsensusConfig { - periods_per_cycle: 2, - delta_f0: 1, - thread_count: 1, - endorsement_count: 1, - max_future_processing_blocks: 10, - max_dependency_blocks: 10, - future_block_processing_max_periods: 10, - t0, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(warmup_time), - ..ConsensusConfig::default() - }; - - consensus_pool_test_with_storage( - cfg.clone(), - None, - async move |pool_controller, - mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - mut storage, - selector_controller| { - // wait for consensus warmup time - tokio::time::sleep(warmup_time.to_duration()).await; - - let genesis_blocks: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // create B1 but DO NOT SEND IT - tokio::time::sleep(t0.to_duration()).await; - let b1_block = - create_block(&cfg, Slot::new(1, 0), genesis_blocks.clone(), &staking_key); - - // create and send B2 - tokio::time::sleep(t0.to_duration()).await; - let b2_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(2, 0), - &vec![b1_block.id], - &staking_key, - vec![], - vec![create_endorsement( - &staking_key, - Slot::new(1, 0), - b1_block.id, - 0, - )], - ); - let b2_block_id = b2_block.id; - let b2_block_slot = b2_block.content.header.content.slot; - storage.store_block(b2_block); - protocol_controller - .receive_block(b2_block_id, b2_block_slot, storage.clone()) 
- .await; - - // create and send B3 - tokio::time::sleep(t0.to_duration()).await; - let b3_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(3, 0), - &vec![b2_block_id], - &staking_key, - vec![], - vec![create_endorsement( - &staking_key, - Slot::new(2, 0), - b2_block_id, - 0, - )], - ); - let b3_block_id = b3_block.id; - let b3_block_slot = b3_block.content.header.content.slot; - storage.store_block(b3_block); - protocol_controller - .receive_block(b3_block_id, b3_block_slot, storage.clone()) - .await; - - // create and send B4 - tokio::time::sleep(t0.to_duration()).await; - let roll_sell = create_roll_sell(&staking_key, 1, 4, 0); - storage.store_operations(vec![roll_sell.clone()]); - let b4_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(4, 0), - &vec![b3_block_id], - &staking_key, - vec![roll_sell], - vec![create_endorsement( - &staking_key, - Slot::new(3, 0), - b3_block_id, - 0, - )], - ); - let b4_block_id = b4_block.id; - let b4_block_slot = b4_block.content.header.content.slot; - storage.store_block(b4_block); - protocol_controller - .receive_block(b4_block_id, b4_block_slot, storage.clone()) - .await; - - // wait for the slot after B4 - tokio::time::sleep(t0.saturating_mul(5).to_duration()).await; - - // send B1 - let b1_block_id = b1_block.id; - let b1_block_slot = b1_block.content.header.content.slot; - storage.store_block(b1_block); - protocol_controller - .receive_block(b1_block_id, b1_block_slot, storage.clone()) - .await; - - approve_producer_and_selector_for_staker(&staking_key, &selector_controller); - - // wait for the propagation of B1, B2, B3 and B4 (unordered) - let mut to_propagate: HashSet<_> = - vec![b1_block_id, b2_block_id, b3_block_id, b4_block_id] - .into_iter() - .collect(); - for _ in 0u8..4 { - to_propagate.remove( - &validate_propagate_block_in_list( - &mut protocol_controller, - &to_propagate.clone().into_iter().collect(), - margin_time.to_millis(), - ) - .await, - ); - } - - ( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/mod.rs b/massa-consensus-worker/src/tests/mod.rs deleted file mode 100644 index 5d62262b27a..00000000000 --- a/massa-consensus-worker/src/tests/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -mod block_factory; -// mod inter_cycle_batch_finalization; /* TODO repair this test https://github.com/massalabs/massa/issues/3099 -mod scenario_block_creation; -mod scenario_roll; -mod scenarios106; -mod scenarios91_1; -mod scenarios91_2; -mod scenarios_basic; -mod scenarios_endorsements; -mod scenarios_get_operations; -mod scenarios_get_selection_draws; -mod scenarios_header_check; -mod scenarios_incompatibilities; -mod scenarios_note_attack_attempt; -mod scenarios_operations_check; -mod scenarios_parents; -mod scenarios_pool_commands; -mod scenarios_pruning; -mod scenarios_reward_split; -mod scenarios_send_block; -mod scenarios_wishlist; -mod test_block_graph; -pub mod tools; diff --git a/massa-consensus-worker/src/tests/scenario_block_creation.rs b/massa-consensus-worker/src/tests/scenario_block_creation.rs deleted file mode 100644 index 709a78a78a6..00000000000 --- a/massa-consensus-worker/src/tests/scenario_block_creation.rs +++ /dev/null @@ -1,849 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::random_address_on_thread; -use crate::tests::tools; -use massa_consensus_exports::ConsensusConfig; -use 
massa_models::ledger_models::LedgerData; -use massa_models::rolls::{RollCounts, RollUpdate, RollUpdates}; -use massa_models::{amount::Amount, slot::Slot}; -use massa_protocol_exports::ProtocolCommand; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashMap; -use tokio::time::sleep_until; - -// #[tokio::test] -// #[serial] -// async fn test_genesis_block_creation() { -// // define addresses use for the test -// // addresses a and b both in thread 0 -// // addr 1 has 1 roll and 0 coins -// // addr 2 is in consensus and has 0 roll and 1000 coins -// let thread_count = 2; -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("1000").unwrap()), -// ); -// let mut cfg = ConsensusConfig { -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .saturating_sub(MassaTime::from(30000)), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&[keypair_1, keypair_2], &ledger) -// }; -// // init roll count -// let mut roll_counts = RollCounts::default(); -// let update = RollUpdate { -// roll_purchases: 1, -// roll_sales: 0, -// }; -// let mut updates = RollUpdates::default(); -// updates.apply(&address_1, &update).unwrap(); -// roll_counts.apply_updates(&updates).unwrap(); - -// let initial_rolls_file = generate_roll_counts_file(&roll_counts); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// tools::consensus_without_pool_test( -// cfg.clone(), -// async move |protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let _genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// /// /// See the test removed at https://gitlab.com/massalabs/massa-network/-/merge_requests/381/diffs#a5bee3b1b5cc9d8157b6feee0ac3e775aa457a33_544_539 -// /// -// /// **NOTE: that test is expected to fail 1 / 1000 times** -// /// -// /// -// /// ### Context -// /// ``` -// /// * price per roll = 1000 -// /// * periods per cycle = 30 000 -// /// * t0 = 500ms -// /// * look-back = 2 -// /// * thread count = 2 -// /// * delta f0 = 3 -// /// * genesis timestamp = now - t0 * periods per cycle * 3 - 1000 -// /// * block reward = 0 -// /// * fee = 0 for every operation -// /// * address 1 has 1 roll and 0 coins -// /// * address 2 is in consensus and has 0 roll and 1000 coins -// /// ``` -// /// ### Initialization -// /// Following blocks are sent through a protocol event to consensus right at the beginning. They all have best parents as parents. 
-// /// * block at slot(1,0) with operation address 2 buys 1 roll -// /// * block at slot( period per cycle, 0) -// /// * block at slot( period per cycle, 1) -// /// * block at slot( period per cycle + 1, 0) -// /// * block at slot( period per cycle + 1, 1) -// /// * block at slot( period per cycle + 2, 0) -// /// * block at slot( period per cycle + 2, 0) -// /// -// /// ### Scenario -// /// -// /// * start consensus -// /// * blocks previously described are sent to consensus through a protocol event -// /// * assert they are propagated -// /// * ```let draws = get_selection_draws( (3*periods_per cycle, 0), (4*periods_per cycle, 0)``` -// /// * assert -// /// ```math -// /// abs(1/2 - \frac{TimesAddr1WasDrawn}{ThreadCount * PeriodsPerCycle}) < 0.01 -// /// ``` -// /// (see [the math](https://en.wikipedia.org/wiki/Checking_whether_a_coin_is_fair)) -// /// * wait for cycle 3 beginning -// /// * for the 10 first slots of cycle 3 -// /// * if address 2 was selected assert consensus created and propagated a block -// /// * if address 1 was selected assert nothing is propagated -// #[tokio::test] -// #[serial] -// //#[ignore] -// async fn test_block_creation_with_draw() { -// let thread_count = 2; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// // addr 1 has 1 roll and 0 coins -// // addr 2 is in consensus and has 0 roll and 1000 coins -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); - -// let staking_keys = vec![keypair_1.clone(), keypair_2.clone()]; - -// // init address_2 with 1000 coins -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("1000").unwrap()), -// ); - -// // finally create the configuration -// let t0 = MassaTime::from(1000); -// let periods_per_cycle = 1000; -// let mut cfg = ConsensusConfig { -// block_reward: Amount::default(), -// delta_f0: 3, -// max_operations_per_block: 50, -// operation_validity_periods: 100, -// periods_per_cycle, -// roll_price: Amount::from_str("1000").unwrap(), -// t0, -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .checked_sub((t0.to_millis() * periods_per_cycle * 3).into()) -// .unwrap() -// .checked_add(2000.into()) -// .unwrap(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// // init roll count -// let mut roll_counts = RollCounts::default(); -// let update = RollUpdate { -// roll_purchases: 1, -// roll_sales: 0, -// }; -// let mut updates = RollUpdates::default(); -// updates.apply(&address_1, &update).unwrap(); -// roll_counts.apply_updates(&updates).unwrap(); -// let initial_rolls_file = generate_roll_counts_file(&roll_counts); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// let operation_fee = 0; -// tools::consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // initial block: addr2 buys 1 roll -// let op1 = create_roll_transaction(&keypair_2, 1, true, 10, operation_fee); -// storage.store_operations(vec![op1.clone()]); -// let block = tools::create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// 
&staking_keys[0], -// vec![op1], -// ); - -// tools::propagate_block(&mut protocol_controller, block.clone(), true, 1000).await; - -// // make cycle 0 final/finished by sending enough blocks in each thread in cycle 1 -// // note that blocks in cycle 3 may be created during this, so make sure that their clique is overrun by sending a large amount of blocks -// let mut cur_parents = vec![block.id, genesis_ids[1]]; -// for delta_period in 0u64..10 { -// for thread in 0..cfg.thread_count { -// let res_block_id = tools::create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(cfg.periods_per_cycle + delta_period, thread), -// cur_parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// cur_parents[thread as usize] = res_block_id; -// } -// } - -// // get draws for cycle 3 (lookback = cycle 0) -// let mut draws: HashMap = HashMap::default(); -// for i in (3 * cfg.periods_per_cycle)..(4 * cfg.periods_per_cycle) { -// let slot = Slot::new(i, 0); -// draws.insert( -// slot, -// selector_controller.get_selection(slot).unwrap().producer, -// ); -// } -// let nb_address1_draws = draws.iter().filter(|(_, addr)| **addr == address_1).count(); -// // fair coin test. See https://en.wikipedia.org/wiki/Checking_whether_a_coin_is_fair -// // note: this is a statistical test. It may fail in rare occasions. -// assert!( -// (0.5 - ((nb_address1_draws as f32) -// / ((cfg.thread_count as u64 * cfg.periods_per_cycle) as f32))) -// .abs() -// < 0.15 -// ); - -// // check 10 draws -// let draws: HashMap = draws.into_iter().collect(); -// let mut cur_slot = Slot::new(cfg.periods_per_cycle * 3, 0); -// for _ in 0..10 { -// // wait block propagation -// let block_creator = protocol_controller -// .wait_command(3500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// if stored_block.content.header.content.slot == cur_slot { -// Some(stored_block.creator_public_key) -// } else { -// None -// } -// } -// _ => None, -// }) -// .await -// .expect("block did not propagate in time"); -// assert_eq!( -// draws[&cur_slot], -// Address::from_public_key(&block_creator), -// "wrong block creator" -// ); -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// } - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -/// https://gitlab.com/massalabs/massa/-/issues/301 -/// -/// Block creation reception mix test -/// -/// see https://gitlab.com/massalabs/massa/-/issues/295#note_693561778 -/// -/// -/// two staking keys. Only key a is registered in consensus -/// start before genesis timestamp -/// retrieve next draws -/// for a few slots: -/// if it's key b time to create a block create it and send it to consensus -/// if key a created a block, assert it has chosen as parents expected blocks (no misses), and that it was sent to protocol around the time it was expected. 
-#[tokio::test] -#[serial] -#[ignore] -async fn test_interleaving_block_creation_with_reception() { - let thread_count = 1; - // define addresses use for the test - // addresses a and b both in thread 0 - let (address_1, _) = random_address_on_thread(0, thread_count).into(); - let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); - - let mut ledger = HashMap::new(); - ledger.insert( - address_2, - LedgerData::new(Amount::from_mantissa_scale(1000, 0)), - ); - let cfg = ConsensusConfig { - thread_count, - t0: 1000.into(), - genesis_timestamp: MassaTime::now(0).unwrap().checked_add(1000.into()).unwrap(), - ..ConsensusConfig::default() - }; - // init roll count - let mut roll_counts = RollCounts::default(); - let update = RollUpdate { - roll_purchases: 1, - roll_sales: 0, - }; - let mut updates = RollUpdates::default(); - updates.apply(&address_1, &update).unwrap(); - updates.apply(&address_2, &update).unwrap(); - roll_counts.apply_updates(&updates).unwrap(); - - tools::consensus_without_pool_with_storage_test( - cfg.clone(), - async move |mut storage, - mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let mut parents = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - sleep_until(tokio::time::Instant::from_std( - cfg.genesis_timestamp - .saturating_add(cfg.t0) - .saturating_sub(150.into()) - .estimate_instant(0) - .expect("could not estimate instant for genesis timestamps"), - )) - .await; - - // check 10 draws - // Key1 and key2 can be drawn to produce block, - // but the local node only has key1, - // so when key2 is selected a block must be produced remotly - // and sent to the local node through protocol - for i in 1..11 { - let cur_slot = Slot::new(i, 0); - let creator = &selector_controller - .get_selection(cur_slot) - .expect("missing slot in drawss") - .producer; - - let block_id = if *creator == address_1 { - // wait block propagation - let (header, id) = protocol_controller - .wait_command(cfg.t0.saturating_add(300.into()), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { block_id, storage } => { - let block = storage - .read_blocks() - .get(&block_id) - .unwrap_or_else(|| { - panic!("Block id : {} not found in storage", block_id) - }) - .clone(); - if block.content.header.content.slot == cur_slot { - Some((block.content.header, block_id)) - } else { - None - } - } - _ => None, - }) - .await - .expect("block did not propagate in time"); - assert_eq!(*creator, header.creator_address, "wrong block creator"); - id - } else if *creator == address_2 { - // create block and propagate it - let block = tools::create_block_with_operations( - &cfg, - cur_slot, - &parents, - &keypair_2, - vec![], - ); - storage.store_block(block.clone()); - tools::propagate_block( - &mut protocol_controller, - block.id, - block.content.header.content.slot, - storage.clone(), - true, - cfg.t0.to_millis() + 300, - ) - .await; - block.id - } else { - panic!("unexpected block creator"); - }; - parents[0] = block_id; - } - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -// /// https://gitlab.com/massalabs/massa-network-archive/-/issues/343 -// /// Test block creation with operations -// /// -// /// Test consensus block creation with an initial graph and simulated pool -// /// -// /// In all tests, once it has started there is only one block creator, so 
we expect consensus to create blocks at every slots after initialization. -// /// -// /// context -// /// -// /// ``` -// /// initial ledger: A:100 -// /// op1 : A -> B : 5, fee 1 -// /// op2 : A -> B : 50, fee 10 -// /// op3 : B -> A : 10, fee 15 -// /// ``` -// /// -// /// --- -// /// -// /// ``` -// /// create block at (0,1) -// /// operations should be [op2, op1] -// /// ``` -// #[tokio::test] -// #[serial] -// async fn test_order_of_inclusion() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// // Increase timestamp a bit to avoid missing the first slot. -// let init_time: MassaTime = 1000.into(); -// let mut cfg = ConsensusConfig { -// genesis_timestamp: MassaTime::now(0).unwrap().checked_add(init_time).unwrap(), -// max_operations_per_block: 50, -// operation_validity_periods: 10, -// t0: 1000.into(), -// ..ConsensusConfig::default() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_a, LedgerData::new(Amount::from_str("100").unwrap())); -// let initial_ledger_file = generate_ledger_file(&ledger); // don't drop the `NamedTempFile` -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); - -// let op1 = create_transaction(&keypair_a, address_b, 5, 10, 1); -// let op2 = create_transaction(&keypair_a, address_b, 50, 10, 10); -// let op3 = create_transaction(&keypair_b, address_a, 10, 10, 15); - -// // there is only one node so it should be drawn at every slot - -// tools::consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// storage, -// selector_controller| { -// //TODO: Replace -// // wait for first slot -// // pool_controller -// // .wait_command( -// // cfg.t0.saturating_mul(2).saturating_add(init_time), -// // |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(1, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }, -// // ) -// // .await -// // .expect("timeout while waiting for slot"); -// // -// // respond to first pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx -// // .send(vec![ -// // (op3.clone(), 50), -// // (op2.clone(), 50), -// // (op1.clone(), 50), -// // ]) -// // .unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // // respond to second pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // exclude, -// // .. -// // } => { -// // assert!(!exclude.is_empty()); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 2nd operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(300.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 0)); -// let expected = vec![op2.clone(), op1.clone()]; -// let res = block.content.operations.clone(); -// assert_eq!(block.content.operations.len(), 2); -// for i in 0..2 { -// assert!(res.contains(&expected[i].id)); -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// /// https://gitlab.com/massalabs/massa-network-archive/-/issues/343 -// /// Test block creation with operations -// /// -// /// Test consensus block creation with an initial graph and simulated pool -// /// -// /// In all tests, once it has started there is only one block creator, so we expect consensus to create blocks at every slots after initialization. -// /// -// /// context -// /// -// /// ```` -// /// initial ledger A = 1 000 000 -// /// max_block_size = 500 -// /// max_operations_per_block = 10 000 -// /// op_i = A -> B : 10, 1, signed for the i-th time -// /// ``` -// /// -// /// --- -// /// ``` -// /// let block_size = size of dummy block at (1,0) without any operation -// /// let op_size = size of an operation -// /// while consensus is asking for operations send next ops -// /// assert created_block_size is max_block_size +/- one op_size -// /// assert created_block_size = block_size +`op_size * op_count -// /// ``` -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_block_filling() { -// let thread_count = 2; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, thread_count).into(); -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_a, -// LedgerData::new(Amount::from_str("1000000000").unwrap()), -// ); -// let cfg = ConsensusConfig { -// endorsement_count: 10, -// max_block_size: 2000, -// max_operations_per_block: 5000, -// operation_validity_periods: 10, -// periods_per_cycle: 3, -// t0: 1000.into(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger( -// &[keypair_a.clone(), keypair_b.clone()], -// &ledger, -// ) -// }; - -// let mut ops = vec![create_executesc( -// &keypair_a, -// 10, -// 10, -// vec![1; 200], // dummy bytes as here we do not test the content -// 1_000, -// 0, -// 1, -// )]; // this operation has an higher rentability than any other - -// for _ in 0..500 { -// ops.push(create_transaction(&keypair_a, address_a, 5, 10, 1)) -// } - -// tools::consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// storage, -// selector_controller| { -// let op_size = 10; - -// // wait for slot -// //let mut prev_blocks 
= Vec::new(); -// for cur_slot in [Slot::new(1, 0), Slot::new(1, 1)] { -// //TODO: Replace -// // pool_controller -// // .wait_command(cfg.t0.checked_mul(2).unwrap(), |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == cur_slot { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); -// // // respond to pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx.send(Default::default()).unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for operation batch request"); -// // // wait for block -// // let block = protocol_controller -// // .wait_command(500.into(), |cmd| match cmd { -// // ProtocolCommand::IntegratedBlock { block_id } => { -// // let block = storage -// // .retrieve_block(&block_id) -// // .expect(&format!("Block id : {} not found in storage", block_id)); -// // let stored_block = block.read(); -// // Some(stored_block.clone()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for block"); -// // assert_eq!(block.content.header.content.slot, cur_slot); -// // prev_blocks.push(block.id); -// // } -// } - -// // // wait for slot p2t0 -// // pool_controller -// // .wait_command(cfg.t0, |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(2, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); - -// // // respond to endorsement command -// // let eds = pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetEndorsements { -// // target_slot, -// // parent, -// // creators, -// // response_tx, -// // .. -// // } => { -// // assert_eq!(Slot::new(1, 0), target_slot); -// // assert_eq!(parent, prev_blocks[0]); -// // let mut eds: Vec = Vec::new(); -// // for (index, creator) in creators.iter().enumerate() { -// // let ed = if *creator == address_a { -// // create_endorsement(&keypair_a, target_slot, parent, index as u32) -// // } else if *creator == address_b { -// // create_endorsement(&keypair_b, target_slot, parent, index as u32) -// // } else { -// // panic!("invalid endorser choice"); -// // }; -// // eds.push(ed); -// // } -// // response_tx.send(eds.clone()).unwrap(); -// // Some(eds) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for endorsement request"); -// // assert_eq!(eds.len() as u32, cfg.endorsement_count); - -// // respond to first pool batch command -// //TODO: Replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx -// // .send(ops.iter().map(|op| (op.clone(), op_size)).collect()) -// // .unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // respond to second pool batch command -// //TODO: Replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // exclude, -// // .. 
-// // } => { -// // assert!(!exclude.is_empty()); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 2nd operation batch request"); - -// let eds: Vec = Vec::new(); -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(2, 0)); - -// // assert it includes the sent endorsements -// assert_eq!(block.content.header.content.endorsements.len(), eds.len()); -// for (e_found, e_expected) in block -// .content -// .header -// .content -// .endorsements -// .iter() -// .zip(eds.iter()) -// { -// assert_eq!(e_found.id, e_expected.id); -// assert_eq!(e_expected.id, e_expected.id); -// } - -// // create empty block -// let header = BlockHeader::new_wrapped( -// BlockHeader { -// slot: block.content.header.content.slot, -// parents: block.content.header.content.parents.clone(), -// operation_merkle_root: Hash::compute_from(&Vec::new()[..]), -// endorsements: eds, -// }, -// BlockHeaderSerializer::new(), -// &keypair_a, -// ) -// .unwrap(); -// let empty: WrappedBlock = Block::new_wrapped( -// Block { -// header, -// operations: Default::default(), -// }, -// BlockSerializer::new(), -// &keypair_a, -// ) -// .unwrap(); -// let remaining_block_space = (cfg.max_block_size as usize) -// .checked_sub(empty.serialized_data.len() as usize) -// .unwrap(); - -// let nb = remaining_block_space / (op_size as usize); -// assert_eq!(block.content.operations.len(), nb); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenario_roll.rs b/massa-consensus-worker/src/tests/scenario_roll.rs deleted file mode 100644 index 20c30933c3d..00000000000 --- a/massa-consensus-worker/src/tests/scenario_roll.rs +++ /dev/null @@ -1,974 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use massa_consensus_exports::{ -// settings::ConsensusChannels, -// test_exports::{ -// generate_default_roll_counts_file, generate_ledger_file, generate_staking_keys_file, -// }, -// ConsensusConfig, -// }; -// use massa_execution_exports::test_exports::MockExecutionController; -// use massa_models::{Address, Amount, BlockId, Slot}; -// use massa_pos_exports::SelectorConfig; -// use massa_pos_worker::start_selector_worker; -// use massa_protocol_exports::ProtocolCommand; -// use massa_storage::Storage; -// use massa_time::MassaTime; -// use num::rational::Ratio; -// use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; -// use serial_test::serial; -// use std::collections::{HashMap, VecDeque}; -// use std::str::FromStr; - -// use crate::{ -// start_consensus_controller, -// tests::{ -// mock_pool_controller::MockPoolController, -// mock_protocol_controller::MockProtocolController, -// tools::{ -// consensus_pool_test_with_storage, create_block, create_block_with_operations, -// create_roll_buy, create_roll_sell, get_creator_for_draw, propagate_block, -// random_address_on_thread, wait_pool_slot, -// 
}, -// }, -// }; -// use massa_models::ledger_models::LedgerData; -// use massa_models::prehash::Set; - -// #[tokio::test] -// #[serial] -// async fn test_roll() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(2) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let init_time: MassaTime = 1000.into(); -// let mut cfg = ConsensusConfig { -// t0: 500.into(), -// periods_per_cycle: 2, -// delta_f0: 3, -// block_reward: Amount::default(), -// roll_price: Amount::from_str("1000").unwrap(), -// operation_validity_periods: 100, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(init_time), -// ..Default::default() -// }; -// // define addresses use for the test -// // addresses 1 and 2 both in thread 0 -// let (address_1, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("10000").unwrap()), -// ); -// let initial_ledger_file = generate_ledger_file(&ledger); -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); - -// let staking_keys_file = generate_staking_keys_file(&[keypair_2.clone()]); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); - -// let initial_rolls_file = generate_default_roll_counts_file(vec![keypair_1.clone()]); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |mut pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// mut storage, -// selector_controller| { -// let mut parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// // operations -// let rb_a1_r1_err = create_roll_buy(&keypair_1, 1, 90, 0); -// let rs_a2_r1_err = create_roll_sell(&keypair_2, 1, 90, 0); -// let rb_a2_r1 = create_roll_buy(&keypair_2, 1, 90, 0); -// let rs_a2_r1 = create_roll_sell(&keypair_2, 1, 90, 0); -// let rb_a2_r2 = create_roll_buy(&keypair_2, 2, 90, 0); -// let rs_a2_r2 = create_roll_sell(&keypair_2, 2, 90, 0); - -// // Store operations to make them accessible to the consensus worker. -// storage.store_operations(vec![ -// rb_a1_r1_err.clone(), -// rs_a2_r1_err.clone(), -// rb_a2_r1.clone(), -// rs_a2_r1.clone(), -// rb_a2_r2.clone(), -// rs_a2_r2.clone(), -// ]); - -// let mut addresses = Set::
::default(); -// addresses.insert(address_2); -// let addresses = addresses; - -// // cycle 0 -// let block1_err1 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rb_a1_r1_err], -// ); -// tokio::time::sleep(init_time.to_duration()).await; -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 0).await; -// // invalid because a1 has not enough coins to buy a roll -// propagate_block(&mut protocol_controller, block1_err1, false, 150).await; - -// let block1_err2 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r1_err], -// ); -// // invalid because a2 does not have enough rolls to sell -// propagate_block(&mut protocol_controller, block1_err2, false, 150).await; - -// let block1 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rb_a2_r1], -// ); - -// // valid -// propagate_block(&mut protocol_controller, block1.clone(), true, 150).await; -// parents[0] = block1.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 1); -// assert_eq!( -// addr_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("9000").unwrap() -// ); - -// let block1t1 = -// create_block_with_operations(&cfg, Slot::new(1, 1), &parents, &keypair_1, vec![]); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block1t1.clone(), true, 150).await; -// parents[1] = block1t1.id; - -// // cycle 1 - -// let block2 = create_block_with_operations( -// &cfg, -// Slot::new(2, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r1], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 2, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block2.clone(), true, 150).await; -// parents[0] = block2.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// let block2t2 = -// create_block_with_operations(&cfg, Slot::new(2, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 2, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block2t2.clone(), true, 150).await; -// parents[1] = block2t2.id; - -// // miss block 3 in thread 0 - -// // block 3 in thread 1 -// let block3t1 = -// create_block_with_operations(&cfg, Slot::new(3, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 3, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block3t1.clone(), true, 150).await; -// parents[1] = block3t1.id; - -// // cycle 2 - -// // miss block 4 - -// let block4t1 = -// create_block_with_operations(&cfg, Slot::new(4, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 4, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block4t1.clone(), true, 150).await; -// parents[1] = block4t1.id; - -// let 
block5 = -// create_block_with_operations(&cfg, Slot::new(5, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 5, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block5.clone(), true, 150).await; -// parents[0] = block5.id; - -// let block5t1 = -// create_block_with_operations(&cfg, Slot::new(5, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 5, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block5t1.clone(), true, 150).await; -// parents[1] = block5t1.id; - -// let other_addr = -// if selector_controller.get_producer(Slot::new(6, 0)).unwrap() == address_1 { -// address_2 -// } else { -// address_1 -// }; - -// let block6_err = create_block_with_operations( -// &cfg, -// Slot::new(6, 0), -// &parents, -// &get_creator_for_draw(&other_addr, &vec![keypair_1.clone(), keypair_2.clone()]), -// vec![], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 6, 0).await; -// // invalid: other_addr wasn't drawn for that block creation -// propagate_block(&mut protocol_controller, block6_err, false, 150).await; - -// let block6 = create_block_with_operations( -// &cfg, -// Slot::new(6, 0), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// // valid -// propagate_block(&mut protocol_controller, block6.clone(), true, 150).await; -// parents[0] = block6.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 1); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let block6t1 = create_block_with_operations( -// &cfg, -// Slot::new(6, 1), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 6, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block6t1.clone(), true, 150).await; -// parents[1] = block6t1.id; - -// let block7 = create_block_with_operations( -// &cfg, -// Slot::new(7, 0), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 7, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block7.clone(), true, 150).await; -// parents[0] = block7.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 1); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let block7t1 = create_block_with_operations( -// &cfg, -// Slot::new(7, 1), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 7, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block7t1.clone(), true, 150).await; -// parents[1] = block7t1.id; - -// // cycle 4 - -// let block8 = 
create_block_with_operations( -// &cfg, -// Slot::new(8, 0), -// &parents, -// &keypair_1, -// vec![rb_a2_r2], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 8, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block8.clone(), true, 150).await; -// parents[0] = block8.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 2); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("7000").unwrap()); - -// let block8t1 = -// create_block_with_operations(&cfg, Slot::new(8, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 8, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block8t1.clone(), true, 150).await; -// parents[1] = block8t1.id; - -// let block9 = create_block_with_operations( -// &cfg, -// Slot::new(9, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r2], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 9, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block9.clone(), true, 150).await; -// parents[0] = block9.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// let block9t1 = -// create_block_with_operations(&cfg, Slot::new(9, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 9, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block9t1.clone(), true, 150).await; -// parents[1] = block9t1.id; - -// // cycle 5 - -// let block10 = -// create_block_with_operations(&cfg, Slot::new(10, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 10, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block10.clone(), true, 150).await; -// parents[0] = block10.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 2); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let balance = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info -// .balance; -// assert_eq!(balance, Amount::from_str("10000").unwrap()); - -// let block10t1 = -// create_block_with_operations(&cfg, Slot::new(10, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 10, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block10t1.clone(), true, 150).await; -// parents[1] = block10t1.id; - -// let block11 = -// create_block_with_operations(&cfg, Slot::new(11, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 11, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block11.clone(), 
true, 150).await; -// parents[0] = block11.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_roll_block_creation() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(4) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let mut cfg = ConsensusConfig { -// block_reward: Amount::default(), -// delta_f0: 3, -// operation_validity_periods: 10, -// max_block_size: 500, -// max_operations_per_block: 5000, -// periods_per_cycle: 2, -// roll_price: Amount::from_str("1000").unwrap(), -// t0: 500.into(), -// ..Default::default() -// }; -// // define addresses use for the test -// // addresses 1 and 2 both in thread 0 -// let (_, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("10000").unwrap()), -// ); -// let initial_ledger_file = generate_ledger_file(&ledger); -// let staking_keys_file = generate_staking_keys_file(&[keypair_1.clone()]); -// let initial_rolls_file = generate_default_roll_counts_file(vec![keypair_1.clone()]); -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); -// // mock protocol & pool -// let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = -// MockProtocolController::new(); -// let mut pool_controller = MockPoolController::new(); -// let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - -// let init_time: MassaTime = 1000.into(); -// cfg.genesis_timestamp = MassaTime::now(0).unwrap().saturating_add(init_time); -// let storage: Storage = Storage::create_root(); -// // launch consensus controller -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// ..Default::default() -// }; -// let (_selector_manager, selector_controller) = -// start_selector_worker(selector_config, VecDeque::new()).unwrap(); -// let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = -// start_consensus_controller( -// cfg.clone(), -// ConsensusChannels { -// execution_controller, -// protocol_command_sender: protocol_command_sender.clone(), -// protocol_event_receiver, -// pool_command_sender: Box::new(pool_controller.clone()), -// selector_controller, -// }, -// None, -// storage.clone(), -// 0, -// ) -// .await -// .expect("could not start consensus controller"); - -// // operations -// let rb_a2_r1 = create_roll_buy(&keypair_2, 1, 90, 0); -// let rs_a2_r1 = create_roll_sell(&keypair_2, 1, 90, 0); - -// let mut addresses = Set::
::default(); -// addresses.insert(address_2); -// let addresses = addresses; - -// // wait for first slot -// // TODO: Replace ?? -// // pool_controller -// // .wait_command( -// // cfg.t0.saturating_mul(2).saturating_add(init_time), -// // |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(1, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }, -// // ) -// // .await -// // .expect("timeout while waiting for slot"); - -// // // cycle 0 -// // println!("Test"); -// // // respond to first pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(1, 0)); -// // response_tx.send(vec![(rb_a2_r1.clone(), 10)]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// println!("Integrated block"); -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 0)); -// assert_eq!(block.content.operations.len(), 1); -// assert!(block.content.operations.contains(&rb_a2_r1.id)); - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 1); - -// let balance = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info -// .balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 1).await; -// // TODO: Replace ?? -// // slot 1,1 -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(1, 1)); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 1)); -// assert!(block.content.operations.is_empty()); - -// // cycle 1 - -// //TODO: replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(2, 0)); -// // response_tx.send(vec![(rs_a2_r1.clone(), 10)]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(2, 0)); -// assert_eq!(block.content.operations.len(), 1); -// assert!(block.content.operations.contains(&rs_a2_r1.id)); - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); -// } - -// #[tokio::test] -// #[serial] -// async fn test_roll_deactivation() { -// /* -// Scenario: -// * deactivation threshold at 50% -// * thread_count = 10 -// * lookback_cycles = 2 -// * periods_per_cycle = 10 -// * delta_f0 = 2 -// * all addresses have 1 roll initially -// * in cycle 0: -// * an address A0 in thread 0 produces 20% of its blocks -// * an address B0 in thread 0 produces 80% of its blocks -// * an address A1 in thread 1 produces 20% of its blocks -// * an address B1 in thread 1 produces 80% of its blocks -// * at the next cycles, all addresses produce all their blocks -// * at the 1st block of thread 0 in cycle 2: -// * address A0 has (0 candidate, 1 final, 1 active) rolls -// * address B0 has (1 candidate, 1 final, 1 active) rolls -// * address A1 has (1 candidate, 1 final, 1 active) rolls -// * address B1 has (1 candidate, 1 final, 1 active) rolls -// * at the 1st block of thread 1 in cycle 2: -// * address A0 has (0 candidate, 1 final, 1 active) rolls -// * address B0 has (1 candidate, 1 final, 1 active) rolls -// 
* address A1 has (0 candidate, 1 final, 1 active) rolls -// * address B1 has (1 candidate, 1 final, 1 active) rolls -// */ -// let mut cfg = ConsensusConfig { -// delta_f0: 2, -// thread_count: 4, -// periods_per_cycle: 5, -// pos_lookback_cycles: 1, -// t0: 400.into(), -// roll_price: Amount::from_mantissa_scale(10, 0), -// pos_miss_rate_deactivation_threshold: Ratio::new(50, 100), -// ..Default::default() -// }; -// let storage: Storage = Storage::create_root(); - -// // setup addresses -// let (address_a0, keypair_a0) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b0, keypair_b0) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_a1, keypair_a1) = random_address_on_thread(1, cfg.thread_count).into(); -// let (address_b1, keypair_b1) = random_address_on_thread(1, cfg.thread_count).into(); - -// let initial_ledger_file = generate_ledger_file(&HashMap::new()); -// let staking_keys_file = generate_staking_keys_file(&[]); -// let initial_rolls_file = generate_default_roll_counts_file(vec![ -// keypair_a0.clone(), -// keypair_a1.clone(), -// keypair_b0.clone(), -// keypair_b1.clone(), -// ]); - -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// // mock protocol & pool -// let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = -// MockProtocolController::new(); -// let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); -// let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// ..Default::default() -// }; -// let (_selector_manager, selector_controller) = -// start_selector_worker(selector_config, VecDeque::new()).unwrap(); -// cfg.genesis_timestamp = MassaTime::now(0).unwrap().saturating_add(300.into()); - -// // launch consensus controller -// let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = -// start_consensus_controller( -// cfg.clone(), -// ConsensusChannels { -// execution_controller, -// protocol_command_sender: protocol_command_sender.clone(), -// protocol_event_receiver, -// pool_command_sender: pool_controller, -// selector_controller: selector_controller.clone(), -// }, -// None, -// storage, -// 0, -// ) -// .await -// .expect("could not start consensus controller"); - -// let mut cur_slot = Slot::new(0, 0); -// let mut best_parents = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .genesis_blocks; -// let mut cycle_draws = HashMap::new(); -// let mut draws_cycle = None; -// 'outer: loop { -// // wait for slot info -// // let latest_slot = pool_controller -// // .wait_command(cfg.t0.checked_mul(2).unwrap(), |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => Some(s), -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); -// let latest_slot = Slot::new(0, 0); -// // apply all slots in-between -// while cur_slot <= latest_slot { -// // skip genesis -// if cur_slot.period == 0 { -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// continue; -// } -// let cur_cycle = cur_slot.get_cycle(cfg.periods_per_cycle); - -// // get draws -// if draws_cycle != Some(cur_cycle) { -// for i in std::cmp::max(cur_cycle * cfg.periods_per_cycle, 1)..(cur_cycle + 
1) { -// let slot = Slot::new(i, 0); -// cycle_draws.insert( -// slot, -// Some(selector_controller.get_selection(slot).unwrap().producer), -// ); -// } -// if cur_cycle == 0 { -// // controlled misses in cycle 0 -// for address in [address_a0, address_a1, address_b0, address_b1] { -// let mut address_draws: Vec = cycle_draws -// .iter() -// .filter_map(|(s, opt_a)| { -// if let Some(a) = opt_a { -// if *a == address { -// return Some(*s); -// } -// } -// None -// }) -// .collect(); -// assert!( -// !address_draws.is_empty(), -// "unlucky seed: address has no draws in cycle 0, cannot perform test" -// ); -// address_draws.shuffle(&mut StdRng::from_entropy()); -// let produce_count: usize = if address == address_a0 || address == address_a1 -// { -// // produce less than 20% -// 20 * address_draws.len() / 100 -// } else { -// // produce more than 80% -// std::cmp::min(address_draws.len(), (80 * address_draws.len() / 100) + 1) -// }; -// address_draws.truncate(produce_count); -// for (slt, opt_addr) in cycle_draws.iter_mut() { -// if *opt_addr == Some(address) && !address_draws.contains(slt) { -// *opt_addr = None; -// } -// } -// } -// } -// draws_cycle = Some(cur_cycle); -// } -// let cur_draw = cycle_draws[&cur_slot]; - -// // create and propagate block -// if let Some(addr) = cur_draw { -// let creator_privkey = if addr == address_a0 { -// keypair_a0.clone() -// } else if addr == address_a1 { -// keypair_a1.clone() -// } else if addr == address_b0 { -// keypair_b0.clone() -// } else if addr == address_b1 { -// keypair_b1.clone() -// } else { -// panic!("invalid address selected"); -// }; -// let block_id = propagate_block( -// &mut protocol_controller, -// create_block(&cfg, cur_slot, best_parents.clone(), &creator_privkey), -// true, -// 500, -// ) -// .await; - -// // update best parents -// best_parents[cur_slot.thread as usize] = block_id; -// } - -// // check candidate rolls -// let addrs_info = consensus_command_sender -// .get_addresses_info( -// vec![address_a0, address_a1, address_b0, address_b1] -// .into_iter() -// .collect(), -// ) -// .await -// .unwrap() -// .clone(); -// if cur_slot.period == (1 + cfg.pos_lookback_cycles) * cfg.periods_per_cycle { -// if cur_slot.thread == 0 { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } else if cur_slot.thread == 1 { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } else { -// break 'outer; -// } -// } else { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } - -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// } -// } -// } diff --git a/massa-consensus-worker/src/tests/scenarios106.rs b/massa-consensus-worker/src/tests/scenarios106.rs deleted file mode 100644 index 38ccc690cf9..00000000000 --- a/massa-consensus-worker/src/tests/scenarios106.rs +++ /dev/null @@ -1,869 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture 
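The roll-deactivation scenario deleted above hinges on comparing an address's miss rate over the look-back cycle against `pos_miss_rate_deactivation_threshold`. A rough sketch of that comparison, done in integers via cross-multiplication (a hypothetical helper, not the consensus worker's actual code):

```rust
// An address that missed at least `threshold` of its block-production draws in the
// look-back cycle loses its candidate rolls; the threshold is a ratio (e.g. 50/100).
fn rolls_deactivated(produced: u64, selected: u64, threshold_num: u64, threshold_den: u64) -> bool {
    assert!(selected > 0, "address had no draws in the cycle");
    let missed = selected - produced;
    // missed / selected >= threshold_num / threshold_den, without floating point
    missed * threshold_den >= threshold_num * selected
}

fn main() {
    // 50% threshold, as in the scenario: producing ~20% of the draws deactivates
    // the address, producing ~80% keeps its rolls.
    assert!(rolls_deactivated(2, 10, 50, 100));  // A0 / A1
    assert!(!rolls_deactivated(8, 10, 50, 100)); // B0 / B1
}
```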
- -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::prehash::PreHashSet; -use massa_models::timeslots; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashSet; -use std::time::Duration; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_unsorted_block() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 1000.into(), - future_block_processing_max_periods: 50, - max_future_processing_blocks: 10, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_period = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - // create test blocks - - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_period, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1 + start_period, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2 + start_period, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2 + start_period, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3 + start_period, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3 + start_period, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - let t0s4 = create_block( - &cfg, - Slot::new(4 + start_period, 0), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - let t1s4 = create_block( - &cfg, - Slot::new(4 + start_period, 1), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - - // send blocks t0s1, t1s1, - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; - // send blocks t0s3, t1s4, t0s4, t0s2, t1s3, t1s2 - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s4.clone()); - protocol_controller - .receive_block(t1s4.id, t1s4.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s4.clone()); - protocol_controller - .receive_block(t0s4.id, t0s4.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - - // block t0s1 and t1s1 are propagated - let hash_list = vec![t0s1.id, t1s1.id]; - validate_propagate_block_in_list( - &mut 
protocol_controller, - &hash_list, - 3000 + start_period * 1000, - ) - .await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s2 and t1s2 are propagated - let hash_list = vec![t0s2.id, t1s2.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s3 and t1s3 are propagated - let hash_list = vec![t0s3.id, t1s3.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s4 and t1s4 are propagated - let hash_list = vec![t0s4.id, t1s4.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 4000).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -//test future_incoming_blocks block in the future with max_future_processing_blocks. -#[tokio::test] -#[serial] -#[ignore] -async fn test_unsorted_block_with_to_much_in_the_future() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 1000.into(), - // slot 1 is in the past - genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(2000.into()), - future_block_processing_max_periods: 3, - max_future_processing_blocks: 5, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - // create test blocks - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // a block in the past must be propagated - let block1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block1.clone()); - protocol_controller - .receive_block( - block1.id, - block1.content.header.content.slot, - storage.clone(), - ) - .await; - validate_propagate_block(&mut protocol_controller, block1.id, 2500).await; - - // this block is slightly in the future: will wait for it - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - let block2 = create_block( - &cfg, - Slot::new(slot.period + 2, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block2.clone()); - protocol_controller - .receive_block( - block2.id, - block2.content.header.content.slot, - storage.clone(), - ) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block2.id, 500).await); - validate_propagate_block(&mut protocol_controller, block2.id, 2500).await; - - // this block is too much in the future: do not process - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - let block3 = create_block( - &cfg, - Slot::new(slot.period + 1000, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block3.clone()); - protocol_controller - 
.receive_block( - block3.id, - block3.content.header.content.slot, - storage.clone(), - ) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block3.id, 2500).await); - - // Check that the block has been silently dropped and not discarded for being too much in the future. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - assert!(!block_graph.active_blocks.contains_key(&block3.id)); - assert!(!block_graph.discarded_blocks.contains_key(&block3.id)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_too_many_blocks_in_the_future() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - delta_f0: 1000, - future_block_processing_max_periods: 100, - // slot 1 is in the past - genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(2000.into()), - max_future_processing_blocks: 2, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - // get genesis block hashes - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // generate 5 blocks but there is only space for 2 in the waiting line - let mut expected_block_hashes: HashSet = HashSet::new(); - let mut max_period = 0; - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - for period in 0..5 { - max_period = slot.period + 2 + period; - let block = create_block( - &cfg, - Slot::new(max_period, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block.clone()); - protocol_controller - .receive_block(block.id, block.content.header.content.slot, storage.clone()) - .await; - if period < 2 { - expected_block_hashes.insert(block.id); - } - } - // wait for the 2 waiting blocks to propagate - let mut expected_clone = expected_block_hashes.clone(); - while !expected_block_hashes.is_empty() { - assert!( - expected_block_hashes.remove( - &validate_propagate_block_in_list( - &mut protocol_controller, - &expected_block_hashes.iter().copied().collect(), - 2500 - ) - .await - ), - "unexpected block propagated" - ); - } - // wait until we reach the slot of the last block - while timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap() - < Slot::new(max_period + 1, 0) - {} - // ensure that the graph contains only what we expect - let graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - expected_clone.extend(graph.genesis_blocks); - assert_eq!( - expected_clone, - graph - .active_blocks - .keys() - .copied() - .collect::>(), - "unexpected block graph" - ); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_dep_in_back_order() { - /*stderrlog::new() - .verbosity(4) - 
.timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - let t0s4 = create_block( - &cfg, - Slot::new(4, 0), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - let t1s4 = create_block( - &cfg, - Slot::new(4, 1), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - - // send blocks t0s2, t1s3, t0s1, t0s4, t1s4, t1s1, t0s3, t1s2 - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; // not propagated and update wishlist - validate_wishlist( - &mut protocol_controller, - vec![t0s1.id, t1s1.id].into_iter().collect(), - PreHashSet::::default(), - 500, - ) - .await; - validate_notpropagate_block(&mut protocol_controller, t0s2.id, 500).await; - - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t1s3.id, 500).await; - - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; // we have its parents so it should be integrated right now and update wishlist - - validate_propagate_block(&mut protocol_controller, t0s1.id, 500).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t0s1.id].into_iter().collect(), - 500, - ) - .await; - - storage.store_block(t0s4.clone()); - protocol_controller - .receive_block(t0s4.id, t0s4.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t0s4.id, 500).await; - - storage.store_block(t1s4.clone()); - protocol_controller - .receive_block(t1s4.id, t1s4.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t1s4.id, 500).await; - - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; // assert t1s1 is integrated and t0s2 is integrated and wishlist updated - 
validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t0s2.id], - 500, - ) - .await; - - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t0s2.id], - 500, - ) - .await; - validate_wishlist( - &mut protocol_controller, - vec![].into_iter().collect(), - vec![t1s1.id].into_iter().collect(), - 500, - ) - .await; - - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t0s3.id, 500).await; - - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - - // All remaining blocks are propagated - let integrated = vec![t1s2.id, t0s3.id, t1s3.id, t0s4.id, t1s4.id]; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t1s2.id].into_iter().collect(), - 500, - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_dep_in_back_order_with_max_dependency_blocks() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - max_dependency_blocks: 2, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - tokio::time::sleep(Duration::from_millis(1000)).await; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - // send blocks t0s2, t1s3, t0s1, t0s4, t1s4, t1s1, t0s3, t1s2 - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; - validate_wishlist( - &mut protocol_controller, - vec![t0s1.id, t1s1.id].into_iter().collect(), - PreHashSet::::default(), - 
500, - ) - .await; - validate_notpropagate_block(&mut protocol_controller, t0s2.id, 500).await; - - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; - validate_notpropagate_block(&mut protocol_controller, t1s3.id, 500).await; - - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - validate_propagate_block(&mut protocol_controller, t0s1.id, 500).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t0s1.id].into_iter().collect(), - 500, - ) - .await; - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; - validate_notpropagate_block(&mut protocol_controller, t0s3.id, 500).await; - - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - validate_notpropagate_block(&mut protocol_controller, t1s2.id, 500).await; - - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t1s2.id], - 500, - ) - .await; - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t1s2.id], - 500, - ) - .await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t1s1.id].into_iter().collect(), - 500, - ) - .await; - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_add_block_that_depends_on_invalid_block() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - max_dependency_blocks: 7, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - // blocks t3s2 with wrong thread and (t0s1, t1s1) parents. - let t3s2 = create_block( - &cfg, - Slot::new(2, 3), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - // blocks t0s3 and t1s3 with (t3s2, t1s2) parents. 
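The two dependency tests above rely on the same parking behaviour: a block whose parents are unknown is neither propagated nor discarded, its missing parents go on a wishlist sent to protocol, and it is released once the last missing parent arrives. A rough sketch of such a tracker (hypothetical types and names, not the consensus worker's own structures):

```rust
use std::collections::{HashMap, HashSet};

type BlockId = u64; // placeholder id type for the sketch

#[derive(Default)]
struct DependencyTracker {
    known: HashSet<BlockId>,
    // parked block -> parents it is still missing
    parked: HashMap<BlockId, HashSet<BlockId>>,
    // missing parent -> parked blocks waiting on it
    waiting_on: HashMap<BlockId, Vec<BlockId>>,
    // parents to request from the network
    wishlist: HashSet<BlockId>,
}

impl DependencyTracker {
    /// Feed a received block and get back every block that became ready to integrate.
    fn receive(&mut self, block: BlockId, parents: &[BlockId]) -> Vec<BlockId> {
        let missing: HashSet<BlockId> =
            parents.iter().copied().filter(|p| !self.known.contains(p)).collect();
        if !missing.is_empty() {
            for &p in &missing {
                self.waiting_on.entry(p).or_default().push(block);
                self.wishlist.insert(p);
            }
            self.parked.insert(block, missing);
            return Vec::new(); // parked, not propagated yet
        }
        self.mark_known(block)
    }

    fn mark_known(&mut self, block: BlockId) -> Vec<BlockId> {
        self.known.insert(block);
        self.wishlist.remove(&block);
        let mut ready = vec![block];
        for child in self.waiting_on.remove(&block).unwrap_or_default() {
            if let Some(missing) = self.parked.get_mut(&child) {
                missing.remove(&block);
                if missing.is_empty() {
                    self.parked.remove(&child);
                    ready.extend(self.mark_known(child)); // child became ready too
                }
            }
        }
        ready
    }
}

fn main() {
    // t0s2 arrives before its parents t0s1 / t1s1: it is parked and both parents
    // land on the wishlist; it only becomes ready once both have been received.
    let (t0s1, t1s1, t0s2) = (1, 2, 3);
    let mut dt = DependencyTracker::default();
    assert!(dt.receive(t0s2, &[t0s1, t1s1]).is_empty());
    assert_eq!(dt.wishlist.len(), 2);
    assert_eq!(dt.receive(t0s1, &[]), vec![t0s1]);
    assert_eq!(dt.receive(t1s1, &[]), vec![t1s1, t0s2]);
}
```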
- let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t3s2.id, t1s1.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t3s2.id, t1s1.id], - &staking_keys[0], - ); - - // add blocks in this order: t0s1, t1s1, t0s3, t1s3, t3s2 - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t3s2.clone()); - protocol_controller - .receive_block(t3s2.id, t3s2.content.header.content.slot, storage.clone()) - .await; - - // blocks t0s1 and t1s1 are propagated - let hash_list = vec![t0s1.id, t1s1.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - - // blocks t0s3 and t1s3 are not propagated - let hash_list = vec![t0s3.id, t1s3.id]; - assert!( - !validate_notpropagate_block_in_list(&mut protocol_controller, &hash_list, 2000) - .await - ); - assert!( - !validate_notpropagate_block_in_list(&mut protocol_controller, &hash_list, 2000) - .await - ); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios91_1.rs b/massa-consensus-worker/src/tests/scenarios91_1.rs deleted file mode 100644 index 4d3c0d4a858..00000000000 --- a/massa-consensus-worker/src/tests/scenarios91_1.rs +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test test_block_validity -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_hash::Hash; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -//use time::MassaTime; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_ti() { - /* stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); */ - - let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - // to avoid timing problems for blocks in the future - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create a valid block for thread 0 - let valid_hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, -
&staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let valid_hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // one clique with 2 compatible blocks - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block1_clic = get_cliques(&block_graph, valid_hasht0s1); - let block2_clic = get_cliques(&block_graph, valid_hasht1s1); - assert_eq!(1, block1_clic.len()); - assert_eq!(1, block2_clic.len()); - assert_eq!(block1_clic, block2_clic); - - // create a block T0S2 that forms another clique - let fork_block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("Other hash!".as_bytes()), - Slot::new(2, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - storage.store_block(fork_block.clone()); - protocol_controller - .receive_block( - fork_block.id, - fork_block.content.header.content.slot, - storage.clone(), - ) - .await; - validate_propagate_block(&mut protocol_controller, fork_block.id, 1000).await; - // two cliques: one with valid_hasht0s1 and valid_hasht1s1, the other with fork_block and valid_hasht1s1 - // test that the first clique hasn't changed. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block1_clic = get_cliques(&block_graph, valid_hasht0s1); - let block2_clic = get_cliques(&block_graph, valid_hasht1s1); - assert_eq!(1, block1_clic.len()); - assert_eq!(2, block2_clic.len()); - assert!(block2_clic.intersection(&block1_clic).next().is_some()); - // test the new clique - let fork_clic = get_cliques(&block_graph, fork_block.id); - assert_eq!(1, fork_clic.len()); - assert!(fork_clic.intersection(&block1_clic).next().is_none()); - assert!(fork_clic.intersection(&block2_clic).next().is_some()); - - // extend the first clique - let mut parentt0sn_hash = valid_hasht0s1; - for period in 3..=35 { - let block_hash = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(period, 0), - vec![parentt0sn_hash, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - // validate that the added block isn't in the forked block's clique. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block_clic = get_cliques(&block_graph, block_hash); - let fork_clic = get_cliques(&block_graph, fork_block.id); - assert!(fork_clic.intersection(&block_clic).next().is_none()); - - parentt0sn_hash = block_hash; - } - - // create a new block in the other clique - let block = create_block( - &cfg, - Slot::new(2, 1), - vec![fork_block.id, valid_hasht1s1], - &staking_keys[0], - ); - storage.store_block(block.clone()); - protocol_controller - .receive_block(block.id, block.content.header.content.slot, storage.clone()) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block.id, 1000,).await); - // verify that the clique has been pruned.
- let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let fork_clic = get_cliques(&block_graph, fork_block.id); - assert_eq!(0, fork_clic.len()); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_gpi() { - // // setup logging - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - - let staking_keys: Vec<KeyPair> = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 1 normal block in each thread (t0s1 and t1s1) with genesis parents - // create a valid block for thread 0 - let valid_hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let valid_hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // one clique with 2 compatible blocks - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block1_clic = get_cliques(&block_graph, valid_hasht0s1); - let block2_clic = get_cliques(&block_graph, valid_hasht1s1); - assert_eq!(1, block1_clic.len()); - assert_eq!(1, block2_clic.len()); - assert_eq!(block1_clic, block2_clic); - - // create 2 cliques - // * create 1 block in t0s2 with parents of slots (t0s1, t1s0) - let valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - vec![valid_hasht0s1, genesis_hashes[1]], - true, - false, - &staking_keys[0], - ) - .await; - // * create 1 block in t1s2 with parents of slots (t0s0, t1s1) - let valid_hasht1s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 1), - vec![genesis_hashes[0], valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - - // * after processing the block in t1s2, the block of t0s2 is incompatible with the block of t1s2 (link in gi) - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let blockt1s2_clic = get_cliques(&block_graph, valid_hasht1s2); - let blockt0s2_clic = get_cliques(&block_graph, valid_hasht0s2); - assert!(blockt1s2_clic - .intersection(&blockt0s2_clic) - .next() - .is_none()); - // * after processing the block in t1s2, there are 2 cliques, one with the block of t0s2 and one with the block of t1s2; the best parents use the clique of minimum hash sum, i.e. whichever of t0s2 and t1s2 has the smaller hash - assert_eq!(1, blockt1s2_clic.len()); - assert_eq!(1, blockt0s2_clic.len()); - let parents: Vec<BlockId> = block_graph.best_parents.iter().map(|(b, _p)| *b).collect(); - if valid_hasht1s2 > valid_hasht0s2 { - assert_eq!(parents[0],
valid_hasht0s2) - } else { - assert_eq!(parents[1], valid_hasht1s2) - } - - // * continue with 33 additional blocks in thread 0, that extend the clique of the block in t0s2: - // - a block in slot t0sX has parents (t0sX-1, t1s1), for X from 3 to 35 - let mut parentt0sn_hash = valid_hasht0s2; - for period in 3..=35 { - let block_hash = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(period, 0), - vec![parentt0sn_hash, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - parentt0sn_hash = block_hash; - } - // * create 1 block in t1s2 with the genesis blocks as parents - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 1), - vec![valid_hasht0s1, valid_hasht1s2], - false, - false, - &staking_keys[0], - ) - .await; - - // * after processing the 33 blocks, one clique is removed (too late), - // the block of minimum hash becomes final, the one of maximum hash becomes stale - // verify that the clique has been pruned. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let fork_clic = get_cliques(&block_graph, valid_hasht1s2); - assert_eq!(0, fork_clic.len()); - assert!(block_graph.discarded_blocks.contains_key(&valid_hasht1s2)); - assert!(block_graph.active_blocks.contains_key(&valid_hasht0s2)); - assert!(!block_graph.active_blocks.contains_key(&valid_hasht1s2)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_old_stale() { - // // setup logging - // stderrlog::new() - // .verbosity(4) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // create 1 block in thread 0 slot 1 with genesis parents - let _valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios91_2.rs b/massa-consensus-worker/src/tests/scenarios91_2.rs deleted file mode 100644 index 2ba975e4f3d..00000000000 --- a/massa-consensus-worker/src/tests/scenarios91_2.rs +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_hash::Hash; -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_queueing() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 30 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 29 other blocks - for i in 0..29 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - let missed_block = create_block( - &cfg, - Slot::new(32, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - - // create 1 block in thread 0 slot 33 with missed block as parent - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(33, 0), - vec![missed_block.id, valid_hasht1], - false, - false, - &staking_keys[0], - ) - .await; - - // and loop again for the 99 other blocks - for i in 0..30 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 34, 0), - vec![valid_hasht0, valid_hasht1], - false, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 34, 1), - vec![valid_hasht0, valid_hasht1], - false, - false, - &staking_keys[0], - ) - .await; - } - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_doubles() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // create 1 block in thread 0 slot 41 with missed block as parent - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_double_staking() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // same creator same slot, different block - let operation_merkle_root = Hash::compute_from("42".as_bytes()); - let block_1 = create_block_with_merkle_root( - &cfg, - operation_merkle_root, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - storage.store_block(block_1.clone()); - propagate_block( - &mut protocol_controller, - block_1.id, - block_1.content.header.content.slot, - storage.clone(), - true, - 150, - ) - .await; - - let operation_merkle_root = - Hash::compute_from("so long and thanks for all the fish".as_bytes()); - let block_2 = create_block_with_merkle_root( - &cfg, - operation_merkle_root, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - storage.store_block(block_2.clone()); - propagate_block( - &mut protocol_controller, - block_2.id, - block_2.content.header.content.slot, - storage.clone(), - true, - 150, - ) - .await; - - let graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let cliques_1 = get_cliques(&graph, block_1.id); - let cliques_2 = get_cliques(&graph, block_2.id); - assert!(cliques_1.is_disjoint(&cliques_2)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_test_parents() { - // // setup logging - // stderrlog::new() - // .verbosity(4) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 2 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let valid_hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let valid_hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block for slot 2 - let valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - vec![valid_hasht0s1, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 1), - vec![valid_hasht0s1, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - - // * create 1 block in t0s3 with parents (t0s2, t1s0) - // create a valid block for slot 2 - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 0), - vec![valid_hasht0s2, genesis_hashes[1usize]], - false, - false, - &staking_keys[0], - ) - .await; - - // * create 1 block in t1s3 with parents (t0s0, t0s0) - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 1), - vec![genesis_hashes[0usize], genesis_hashes[0usize]], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_basic.rs b/massa-consensus-worker/src/tests/scenarios_basic.rs deleted file mode 100644 index a0e773f60af..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_basic.rs +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools; -use crate::tests::block_factory::BlockFactory; -use massa_consensus_exports::ConsensusConfig; -use massa_hash::Hash; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_old_stale_not_propagated_and_discarded() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(1, 0); - - let block_1 = block_factory.create_and_receive_block(true).await; - - block_factory.slot = Slot::new(1, 1); - block_factory.create_and_receive_block(true).await; - - block_factory.slot = Slot::new(1, 0); - block_factory.best_parents = vec![block_1.id, parents[0]]; - let block_3 = block_factory.create_and_receive_block(false).await; - - // Old stale block was discarded. 
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 1); - assert!(status.discarded_blocks.get(&block_3.id).is_some()); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_block_not_processed_multiple_times() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 500.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(1, 0); - let block_1 = block_factory.create_and_receive_block(true).await; - - // Send it again, it should not be propagated. - storage.store_block(block_1.clone()); - block_factory - .receive_block( - false, - block_1.id, - block_1.content.header.content.slot, - storage.clone(), - ) - .await; - - // Send it again, it should not be propagated. - block_factory - .receive_block( - false, - block_1.id, - block_1.content.header.content.slot, - storage.clone(), - ) - .await; - - // Block was not discarded. - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 0); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_queuing() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(3, 0); - - let block_1 = block_factory.create_and_receive_block(false).await; - - block_factory.slot = Slot::new(4, 0); - block_factory.best_parents = vec![block_1.id, parents[1]]; - - block_factory.create_and_receive_block(false).await; - - // Blocks were queued, not discarded. 
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 0); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_double_staking_does_not_propagate() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(1, 0); - let mut block_1 = block_factory.create_and_receive_block(true).await; - - // Same creator, same slot, different block - block_1.content.header.content.operation_merkle_root = - Hash::compute_from("hello world".as_bytes()); - let block = block_factory.sign_header(block_1.content.header.content); - - // Note: currently does propagate, see #190. - storage.store_block(block.clone()); - block_factory - .receive_block( - true, - block.id, - block.content.header.content.slot, - storage.clone(), - ) - .await; - - // Block was not discarded. 
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 0); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_endorsements.rs b/massa-consensus-worker/src/tests/scenarios_endorsements.rs deleted file mode 100644 index 0900906ba9e..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_endorsements.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use massa_models::{ - block::BlockId, - endorsement::{Endorsement, EndorsementSerializer}, - slot::Slot, - wrapped::WrappedContent, -}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_endorsement_check() { - // setup logging - /* - stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); - */ - let cfg = ConsensusConfig { - delta_f0: 3, - endorsement_count: 1, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(300.into()), - operation_validity_periods: 100, - periods_per_cycle: 2, - t0: 500.into(), - ..ConsensusConfig::default() - }; - // define addresses use for the test - // addresses 1 and 2 both in thread 0 - - let (address_1, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); - let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - assert_eq!(0, address_2.get_thread(cfg.thread_count)); - - let mut storage = Storage::create_root(); - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let address_a = selector_controller - .get_selection(Slot::new(1, 0)) - .unwrap() - .producer; - let address_b = selector_controller - .get_selection(Slot::new(1, 0)) - .unwrap() - .endorsements[0]; - let address_c = selector_controller - .get_selection(Slot::new(1, 1)) - .unwrap() - .endorsements[1]; - - let keypair_a = if address_a == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - let keypair_b = if address_b == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - let keypair_c = if address_c == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap() - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - - // create an otherwise valid endorsement with another address, include it in valid block(1,0), assert it is not propagated - let sender_keypair = KeyPair::generate(); - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[0], - }; - let ed = Endorsement::new_wrapped( - content.clone(), - EndorsementSerializer::new(), - &sender_keypair, - ) - .unwrap(); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create an otherwise valid endorsement at slot (1,1), include it in valid 
block(1,0), assert it is not propagated - let content = Endorsement { - slot: Slot::new(1, 1), - index: 0, - endorsed_block: parents[1], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_c) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create an otherwise valid endorsement with genesis 1 as endorsed block, include it in valid block(1,0), assert it is not propagated - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[1], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_b) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create a valid endorsement, include it in valid block(1,1), assert it is propagated - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[0], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_b) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_get_operations.rs b/massa-consensus-worker/src/tests/scenarios_get_operations.rs deleted file mode 100644 index a2b53535c8c..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_get_operations.rs +++ /dev/null @@ -1,201 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_graph::BootstrapableGraph; -// use massa_models::WrappedOperation; -// use massa_models::{ -// clique::Clique, BlockId, OperationSearchResult, OperationSearchResultStatus, Slot, -// }; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_get_operation() { -// // // setup logging -// // stderrlog::new() -// // .verbosity(4) -// // .timestamp(stderrlog::Timestamp::Millisecond) -// // .init() -// // .unwrap(); -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// operation_validity_periods: 10, -// max_operations_per_block: 50, -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .saturating_sub(MassaTime::from(32000).checked_mul(4).unwrap()) -// .saturating_add(300.into()), -// ..ConsensusConfig::default() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (_address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = 
random_address_on_thread(0, cfg.thread_count).into(); -// // to avoid timing pb for block in the future - -// let op1 = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let op2 = create_transaction(&keypair_a, address_b, 2, 10, 1); -// let op3 = create_transaction(&keypair_a, address_b, 3, 10, 1); -// let op4 = create_transaction(&keypair_a, address_b, 4, 10, 1); -// let op5 = create_transaction(&keypair_a, address_b, 5, 10, 1); - -// let ops = [ -// op1.clone(), -// op2.clone(), -// op3.clone(), -// op4.clone(), -// op5.clone(), -// ]; - -// let (boot_graph, b1, b2) = get_bootgraph(vec![op2.clone(), op3.clone()]); -// // there is only one node so it should be drawn at every slot - -// consensus_pool_test( -// cfg.clone(), -// Some(boot_graph), -// async move |pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// let (ops, _storage) = consensus_command_sender -// .get_operations(ops.iter().map(|op| op.id).collect()) -// .await -// .unwrap(); - -// let mut expected = HashMap::new(); - -// expected.insert( -// op2.id, -// OperationSearchResult { -// status: OperationSearchResultStatus::Pending, -// op: op2, -// in_pool: false, -// in_blocks: vec![(b1, (0, true))].into_iter().collect(), -// }, -// ); -// expected.insert( -// op3.id, -// OperationSearchResult { -// status: OperationSearchResultStatus::Pending, -// op: op3, -// in_pool: false, -// in_blocks: vec![(b2, (0, false))].into_iter().collect(), -// }, -// ); - -// assert_eq!(ops.len(), expected.len()); - -// for ( -// id, -// OperationSearchResult { -// op, -// in_blocks, -// in_pool, -// .. -// }, -// ) in ops.iter() -// { -// assert!(expected.contains_key(id)); -// let OperationSearchResult { -// op: ex_op, -// in_pool: ex_pool, -// in_blocks: ex_blocks, -// .. -// } = expected.get(id).unwrap(); -// assert_eq!(op.id, ex_op.id); -// assert_eq!(in_pool, ex_pool); -// assert_eq!(in_blocks.len(), ex_blocks.len()); -// for (b_id, val) in in_blocks.iter() { -// assert!(ex_blocks.contains_key(b_id)); -// assert_eq!(ex_blocks.get(b_id).unwrap(), val); -// } -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// fn get_bootgraph(operations: Vec) -> (BootstrapableGraph, BlockId, BlockId) { -// let genesis_0 = get_export_active_test_block(vec![], vec![], Slot::new(0, 0), true); -// let genesis_1 = get_export_active_test_block(vec![], vec![], Slot::new(0, 1), true); -// let p1t0 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![operations[0].clone()], -// Slot::new(1, 0), -// true, -// ); -// let p1t1 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![], -// Slot::new(1, 1), -// false, -// ); -// let p2t0 = get_export_active_test_block( -// vec![(p1t0.block_id, 1), (p1t1.block_id, 1)], -// vec![operations[1].clone()], -// Slot::new(2, 0), -// false, -// ); -// ( -// BootstrapableGraph { -// /// Map of active blocks, where blocks are in their exported version. -// final_blocks: vec![ -// (genesis_0.block_id, genesis_0.clone()), -// (genesis_1.block_id, genesis_1.clone()), -// (p1t0.block_id, p1t0.clone()), -// (p1t1.block_id, p1t1.clone()), -// (p2t0.block_id, p2t0.clone()), -// ] -// .into_iter() -// .collect(), -// /// Best parents hashes in each thread. -// best_parents: vec![(p2t0.block_id, 2), (p1t1.block_id, 1)], -// /// Latest final period and block hash in each thread. 
-// latest_final_blocks_periods: vec![ -// (genesis_0.block_id, 0u64), -// (genesis_1.block_id, 0u64), -// ], -// /// Head of the incompatibility graph. -// gi_head: vec![ -// (genesis_0.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (p2t0.block_id, Default::default()), -// (genesis_1.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (p2t0.block_id, Default::default()), -// ] -// .into_iter() -// .collect(), - -// /// List of maximal cliques of compatible blocks. -// max_cliques: vec![Clique { -// block_ids: vec![ -// genesis_0.block_id, -// p1t0.block_id, -// genesis_1.block_id, -// p1t1.block_id, -// p2t0.block_id, -// ] -// .into_iter() -// .collect(), -// fitness: 123, -// is_blockclique: true, -// }], -// }, -// p1t0.block_id, -// p2t0.block_id, -// ) -// } diff --git a/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs b/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs deleted file mode 100644 index dba75bcde76..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::ledger_models::LedgerData; -use massa_models::{amount::Amount, slot::Slot}; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashMap; -use std::str::FromStr; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_get_selection_draws_high_end_slot() { - // setup logging - /* - stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); - */ - let cfg = ConsensusConfig { - periods_per_cycle: 2, - t0: 500.into(), - delta_f0: 3, - operation_validity_periods: 100, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(300.into()), - ..Default::default() - }; - // define addresses use for the test - // addresses 1 and 2 both in thread 0 - //let addr_1 = random_address_on_thread(0, cfg.thread_count); - let addr_2 = random_address_on_thread(0, cfg.thread_count); - - let mut ledger = HashMap::new(); - ledger.insert( - addr_2.address, - LedgerData::new(Amount::from_str("10000").unwrap()), - ); - - consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let draws = selector_controller.get_selection(Slot::new(1, 0)); - assert!(draws.is_ok()); - - // Too high end selection should return an error. 
- let too_high_draws = selector_controller.get_selection(Slot::new(200, 0)); - assert!(too_high_draws.is_err()); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_header_check.rs b/massa-consensus-worker/src/tests/scenarios_header_check.rs deleted file mode 100644 index 98554102664..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_header_check.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_asks_for_block() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 500.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - validate_ask_for_block(&mut protocol_controller, t0s1.id, 1000).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_does_not_ask_for_block() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - let header = t0s1.content.header.clone(); - let id = t0s1.id; - // Send the actual block. - storage.store_block(t0s1); - protocol_controller - .receive_block(header.id, header.content.slot, storage.clone()) - .await; - - // block t0s1 is propagated - let hash_list = vec![id]; - validate_propagate_block_in_list( - &mut protocol_controller, - &hash_list, - 3000 + start_slot * 1000, - ) - .await; - - // Send the hash - protocol_controller.receive_header(header).await; - - // Consensus should not ask for the block, so the time-out should be hit. 
- validate_does_not_ask_for_block(&mut protocol_controller, &id, 10).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs b/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs deleted file mode 100644 index 422557c0a68..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_models::{BlockId, Slot}; -// use massa_signature::KeyPair; -// use serial_test::serial; -// use std::collections::{HashSet, VecDeque}; - -// #[tokio::test] -// #[serial] -// async fn test_thread_incompatibility() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 200.into(), -// future_block_processing_max_periods: 50, -// ..ConsensusConfig::default() -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// let hash_1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_2 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_3 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// if hash_1 > hash_3 { -// assert_eq!(status.best_parents[0].0, hash_3); -// } else { -// assert_eq!(status.best_parents[0].0, hash_1); -// } -// assert_eq!(status.best_parents[1].0, hash_2); - -// assert!(if let Some(h) = status.gi_head.get(&hash_3) { -// h.contains(&hash_1) -// } else { -// panic!("missing hash in gi_head") -// }); - -// assert_eq!(status.max_cliques.len(), 2); - -// for clique in status.max_cliques.clone() { -// if clique.block_ids.contains(&hash_1) && clique.block_ids.contains(&hash_3) { -// panic!("incompatible blocks in the same clique") -// } -// } - -// let mut current_period = 3; -// let mut parents = vec![hash_1, hash_2]; -// for _ in 0..3 { -// let hash = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(current_period, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// current_period += 1; -// parents[0] = hash; -// } - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert!(if let Some(h) = status.gi_head.get(&hash_3) { -// h.contains(&status.best_parents[0].0) -// } else { -// panic!("missing block in clique") -// }); - -// let mut parents = vec![status.best_parents[0].0, hash_2]; -// let mut current_period = 8; -// for _ in 0..30 { -// let b = create_block( -// &cfg, -// 
Slot::new(current_period, 0), -// parents.clone(), -// &staking_keys[0], -// ); -// current_period += 1; -// parents[0] = b.id; -// protocol_controller.receive_block(b.clone()).await; - -// // Note: higher timeout required. -// validate_propagate_block_in_list(&mut protocol_controller, &vec![b.id], 5000).await; -// } - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert_eq!(status.max_cliques.len(), 1); - -// // clique should have been deleted by now -// let parents = vec![hash_3, hash_2]; -// let _ = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(40, 0), -// parents.clone(), -// false, -// false, -// &staking_keys[0], -// ) -// .await; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_grandpa_incompatibility() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 200.into(), -// future_block_processing_max_periods: 50, -// ..ConsensusConfig::default() -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// let hash_1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 0), -// vec![genesis[0], genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_2 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// vec![genesis[0], genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_3 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// vec![hash_1, genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_4 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 1), -// vec![genesis[0], hash_2], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert!(if let Some(h) = status.gi_head.get(&hash_4) { -// h.contains(&hash_3) -// } else { -// panic!("missing block in gi_head") -// }); - -// assert_eq!(status.max_cliques.len(), 2); - -// for clique in status.max_cliques.clone() { -// if clique.block_ids.contains(&hash_3) && clique.block_ids.contains(&hash_4) { -// panic!("incompatible blocks in the same clique") -// } -// } - -// let parents: Vec = status.best_parents.iter().map(|(b, _p)| *b).collect(); -// if hash_4 > hash_3 { -// assert_eq!(parents[0], hash_3) -// } else { -// assert_eq!(parents[1], hash_4) -// } - -// let mut latest_extra_blocks = VecDeque::new(); -// for extend_i in 0..33 { -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); -// let hash = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(3 + extend_i, 0), -// status.best_parents.iter().map(|(b, _p)| *b).collect(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// 
latest_extra_blocks.push_back(hash); -// while latest_extra_blocks.len() > cfg.delta_f0 as usize + 1 { -// latest_extra_blocks.pop_front(); -// } -// } - -// let latest_extra_blocks: HashSet = latest_extra_blocks.into_iter().collect(); -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); -// assert_eq!(status.max_cliques.len(), 1, "wrong cliques (len)"); -// assert_eq!( -// status.max_cliques[0] -// .block_ids -// .iter() -// .cloned() -// .collect::>(), -// latest_extra_blocks, -// "wrong cliques" -// ); - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs deleted file mode 100644 index ccabf914f68..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use crate::start_consensus_controller; -use massa_pool_exports::test_exports::MockPoolController; - -use massa_consensus_exports::settings::ConsensusChannels; -use massa_consensus_exports::ConsensusConfig; -use massa_execution_exports::test_exports::MockExecutionController; -use massa_hash::Hash; -use massa_models::{address::Address, block::BlockId, slot::Slot}; -use massa_pos_exports::SelectorConfig; -use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::test_exports::MockProtocolController; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_invalid_block_notified_as_attack_attempt() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - let storage: Storage = Storage::create_root(); - - // mock protocol & pool - let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let selector_config = SelectorConfig { - thread_count: 2, - periods_per_cycle: 100, - genesis_address: Address::from_public_key(&staking_keys[0].get_public_key()), - endorsement_count: 0, - max_draw_cache: 10, - channel_size: 256, - }; - let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - // launch consensus controller - let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller, - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Block for a non-existent thread. 
- let block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("different".as_bytes()), - Slot::new(1, cfg.thread_count + 1), - parents.clone(), - &staking_keys[0], - ); - let block_id = block.id; - let slot = block.content.header.content.slot; - protocol_controller - .receive_block(block_id, slot, storage.clone()) - .await; - - validate_notify_block_attack_attempt(&mut protocol_controller, block_id, 1000).await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_invalid_header_notified_as_attack_attempt() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - // mock protocol & pool - let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let selector_config = SelectorConfig { - thread_count: 2, - periods_per_cycle: 100, - genesis_address: Address::from_public_key(&staking_keys[0].get_public_key()), - endorsement_count: 0, - max_draw_cache: 10, - channel_size: 256, - }; - let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - let storage: Storage = Storage::create_root(); - // launch consensus controller - let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller, - }, - None, - storage, - 0, - ) - .await - .expect("could not start consensus controller"); - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Block for a non-existent thread. 
- let block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("different".as_bytes()), - Slot::new(1, cfg.thread_count + 1), - parents.clone(), - &staking_keys[0], - ); - protocol_controller - .receive_header(block.content.header) - .await; - - validate_notify_block_attack_attempt(&mut protocol_controller, block.id, 1000).await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_operations_check.rs b/massa-consensus-worker/src/tests/scenarios_operations_check.rs deleted file mode 100644 index 8d41645e1b7..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_operations_check.rs +++ /dev/null @@ -1,203 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_models::ledger_models::LedgerData; -// use massa_models::prehash::Set; -// use massa_models::{Address, Amount, Slot}; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_operations_check() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(4) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let thread_count = 2; - -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(1, thread_count).into(); - -// assert_eq!(1, address_2.get_thread(thread_count)); -// let mut ledger = HashMap::new(); -// ledger.insert(address_1, LedgerData::new(Amount::from_str("5").unwrap())); - -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// future_block_processing_max_periods: 50, -// operation_validity_periods: 10, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(10000.into()), -// endorsement_count: 0, -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&[keypair_1.clone()], &ledger) -// }; - -// consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // Valid block A sending 5 from addr1 to addr2 + reward 1 to addr1 -// let operation_1 = create_transaction(&keypair_1, address_2, 5, 5, 1); -// storage.store_operations(vec![operation_1.clone()]); -// let block_a = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_a.clone(), true, 150).await; - -// // assert address 1 has 1 coin at blocks (A, genesis_ids[1]) (see #269) -// let mut set = Set::
<Address>::default(); -// set.insert(address_1); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_1) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("1").unwrap()); - -// // receive block b with invalid operation (not enough coins) -// let operation_2 = create_transaction(&keypair_2, address_1, 10, 8, 1); -// storage.store_operations(vec![operation_2.clone()]); -// let block_2b = create_block_with_operations( -// &cfg, -// Slot::new(1, 1), -// &vec![block_a.id, genesis_ids[1]], -// &keypair_2, -// vec![operation_2], -// ); -// propagate_block(&mut protocol_controller, block_2b, false, 1000).await; - -// // receive empty block b -// let block_b = create_block_with_operations( -// &cfg, -// Slot::new(1, 1), -// &vec![block_a.id, genesis_ids[1]], -// &keypair_1, -// vec![], -// ); -// propagate_block(&mut protocol_controller, block_b.clone(), true, 150).await; - -// // assert address 2 has 5 coins at block B -// let mut set = Set::<Address>
::default(); -// set.insert(address_2); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("5").unwrap()); - -// // receive block with reused operation -// let block_1c = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &vec![block_a.id, block_b.id], -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_1c.clone(), false, 1000).await; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_execution_check() { -// let (address_1, keypair_1) = random_address().into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_1, LedgerData::new(Amount::from_str("5").unwrap())); - -// let staking_keys: Vec<KeyPair> = vec![keypair_1.clone()]; -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// future_block_processing_max_periods: 50, -// operation_validity_periods: 10, -// genesis_key: keypair_1.clone(), -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(10000.into()), -// endorsement_count: 0, -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // Valid block A executing some bytecode and spending 2 coins. -// let operation_1 = create_executesc(&keypair_1, 5, 5, Default::default(), 1, 2, 1); -// storage.store_operations(vec![operation_1.clone()]); -// let block_a = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_a, true, 150).await; - -// // assert the `coins` argument has been deducted from the balance of address 1. -// let mut set = Set::<Address>
::default(); -// set.insert(address_1); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_1) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("3").unwrap()); - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_parents.rs b/massa-consensus-worker/src/tests/scenarios_parents.rs deleted file mode 100644 index 1dd8edfcabb..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_parents.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parent_in_the_future() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // Parent, in the future. - let t0s1 = create_block( - &cfg, - Slot::new(4, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(5, 0), - vec![t0s1.id], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parents() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // generate two normal blocks in each thread - let hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 0), - vec![hasht1s1, genesis_hashes[0]], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parents_in_incompatible_cliques() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - 
cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - let hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // from that point we have two incompatible clique - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - vec![hasht0s1, genesis_hashes[1]], - true, - false, - &staking_keys[0], - ) - .await; - - // Block with incompatible parents. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 1), - vec![hasht0s1, hasht0s2], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_pool_commands.rs b/massa-consensus-worker/src/tests/scenarios_pool_commands.rs deleted file mode 100644 index 46b08bd7d85..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_pool_commands.rs +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//TODO: Still needed ? -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_graph::BootstrapableGraph; -// use massa_models::clique::Clique; -// use massa_models::ledger_models::LedgerData; -// use massa_models::{Amount, BlockId, Slot, WrappedOperation}; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_update_current_slot_cmd_notification() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// thread_count: 1, -// genesis_timestamp: MassaTime::now(0).unwrap().checked_add(1000.into()).unwrap(), -// ..ConsensusConfig::default_with_paths() -// }; - -// let timeout = 150; - -// consensus_pool_test( -// cfg.clone(), -// None, -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// let slot_notification_filter = |cmd| match cmd { -// massa_pool::PoolCommand::UpdateCurrentSlot(slot) => { -// Some((slot, MassaTime::now(0).unwrap())) -// } -// _ => None, -// }; - -// // wait for UpdateCurrentSlot pool command -// if let Some((slot_cmd, rec_time)) = pool_controller -// .wait_command(1500.into(), slot_notification_filter) -// .await -// { -// assert_eq!(slot_cmd, Slot::new(0, 0)); -// if rec_time > cfg.genesis_timestamp { -// assert!( -// rec_time.saturating_sub(cfg.genesis_timestamp) < MassaTime::from(timeout) -// ) -// } else { -// assert!( -// cfg.genesis_timestamp.saturating_sub(rec_time) < MassaTime::from(timeout) -// ) -// } -// } - -// // wait for next UpdateCurrentSlot pool command -// if let Some((slot_cmd, rec_time)) = pool_controller -// .wait_command(500.into(), slot_notification_filter) -// .await -// { -// assert_eq!(slot_cmd, Slot::new(0, 1)); -// if rec_time > cfg.genesis_timestamp { -// assert!( -// rec_time.saturating_sub(cfg.genesis_timestamp.saturating_add(cfg.t0)) -// < MassaTime::from(timeout) -// ); -// } else { -// assert!( -// 
cfg.genesis_timestamp -// .saturating_add(cfg.t0) -// .saturating_sub(rec_time) -// < MassaTime::from(timeout) -// ); -// } -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_update_latest_final_block_cmd_notification() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; - -// consensus_pool_test( -// cfg.clone(), -// None, -// None, -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // UpdateLatestFinalPeriods pool command filter -// let update_final_notification_filter = |cmd| match cmd { -// massa_pool::PoolCommand::UpdateLatestFinalPeriods(periods) => Some(periods), -// PoolCommand::GetOperationBatch { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; -// // wait for initial final periods notification -// let final_periods = pool_controller -// .wait_command(1000.into(), update_final_notification_filter) -// .await; -// assert_eq!(final_periods, Some(vec![0, 0])); - -// // wait for next final periods notification -// let final_periods = pool_controller -// .wait_command( -// (cfg.t0.to_millis() * 3).into(), -// update_final_notification_filter, -// ) -// .await; -// assert_eq!(final_periods, Some(vec![1, 0])); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_new_final_ops() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// delta_f0: 2, -// genesis_timestamp: MassaTime::now(0).unwrap(), -// ..ConsensusConfig::default() -// }; - -// // define addresses use for the test -// // addresses a and b both in thread 0 - -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, mut p0, mut p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// p1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// p0 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 1), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// // UpdateLatestFinalPeriods pool command filter -// let new_final_ops_filter = |cmd| match cmd { -// PoolCommand::FinalOperations(ops) => Some(ops), -// _ => None, -// }; - -// // wait for initial 
final periods notification -// let final_ops = pool_controller -// .wait_command(300.into(), new_final_ops_filter) -// .await; -// if let Some(finals) = final_ops { -// assert!(finals.contains_key(&op.id)); -// assert_eq!(finals.get(&op.id), Some(&(10, 0))) -// } else { -// panic!("no final ops") -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_max_attempts_get_operations() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// genesis_timestamp: MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, _p0, _p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // Test that consensus keeps trying to fill the block, -// // until the max number of attempts has been reached. -// let mut attempts = 0; -// let mut slot = None; -// while attempts != cfg.max_operations_fill_attempts { -// let get_operations_batch_filter = |cmd| match cmd { -// PoolCommand::GetOperationBatch { -// response_tx, -// target_slot, -// .. -// } => Some((response_tx, target_slot)), -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// let (response_tx, target_slot) = pool_controller -// .wait_command(1000.into(), get_operations_batch_filter) -// .await -// .expect("No response chan and target slot."); - -// // Test that the batch requests are still for the same slot. -// if let Some(slot) = slot { -// assert_eq!(slot, target_slot); -// } else { -// slot = Some(target_slot); -// } - -// // Send a full batch back. -// response_tx -// .send(vec![(op.clone(), 10)]) -// .expect("Failed to send empty batch."); -// attempts += 1; -// } - -// // The next command should be a slot update. -// let slot_filter = |cmd| match cmd { -// PoolCommand::UpdateCurrentSlot(slot) => Some(slot), -// PoolCommand::GetEndorsements { response_tx, .. 
} => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// pool_controller.wait_command(3000.into(), slot_filter).await; -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_max_batch_size_get_operations() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// genesis_timestamp: MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, _p0, _p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // Test that consensus stops trying to fill the block, -// // once a non-full batch has been received. -// let get_operations_batch_filter = |cmd| match cmd { -// PoolCommand::GetOperationBatch { -// response_tx, -// target_slot, -// .. -// } => Some((response_tx, target_slot)), -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// let (response_tx, target_slot) = pool_controller -// .wait_command(1000.into(), get_operations_batch_filter) -// .await -// .expect("No response chan and target slot."); - -// // Send a non-full batch back. -// response_tx -// .send(vec![(op.clone(), 10)]) -// .expect("Failed to send non-full batch."); - -// // The next command should be a slot update. -// let slot_filter = |cmd| match cmd { -// PoolCommand::UpdateCurrentSlot(slot) => Some(slot), -// _ => None, -// }; - -// let slot_update = pool_controller -// .wait_command(3000.into(), slot_filter) -// .await -// .expect("Not slot update received."); - -// // Test that the update is for the slot -// // after the one for the just created block. -// assert_eq!(slot_update.period, target_slot.period + 1); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// fn get_bootgraph( -// operation: WrappedOperation, -// ledger: ConsensusLedgerSubset, -// ) -> (BootstrapableGraph, BlockId, BlockId) { -// let genesis_0 = get_export_active_test_block(vec![], vec![], Slot::new(0, 0), true); -// let genesis_1 = get_export_active_test_block(vec![], vec![], Slot::new(0, 1), true); -// let p1t0 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![operation], -// Slot::new(1, 0), -// false, -// ); -// ( -// BootstrapableGraph { -// /// Map of active blocks, where blocks are in their exported version. -// active_blocks: vec![ -// (genesis_0.block_id, genesis_0.clone()), -// (genesis_1.block_id, genesis_1.clone()), -// (p1t0.block_id, p1t0.clone()), -// ] -// .into_iter() -// .collect(), -// /// Best parents hash in each thread. 
-// best_parents: vec![(p1t0.block_id, 1), (genesis_1.block_id, 0)], -// /// Latest final period and block hash in each thread. -// latest_final_blocks_periods: vec![ -// (genesis_0.block_id, 0u64), -// (genesis_1.block_id, 0u64), -// ], -// /// Head of the incompatibility graph. -// gi_head: vec![ -// (genesis_0.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (genesis_1.block_id, Default::default()), -// ] -// .into_iter() -// .collect(), - -// /// List of maximal cliques of compatible blocks. -// max_cliques: vec![Clique { -// block_ids: vec![genesis_0.block_id, p1t0.block_id, genesis_1.block_id] -// .into_iter() -// .collect(), -// fitness: 1111, -// is_blockclique: true, -// }], -// ledger, -// }, -// p1t0.block_id, -// genesis_1.block_id, -// ) -// } diff --git a/massa-consensus-worker/src/tests/scenarios_pruning.rs b/massa-consensus-worker/src/tests/scenarios_pruning.rs deleted file mode 100644 index 943d35d387a..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_pruning.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_discarded_blocks() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Send more bad blocks than the max number of cached discarded. - for i in 0..(cfg.max_discarded_blocks + 5) as u64 { - // Too far into the future. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(100000000 + i, 0), - parents.clone(), - false, - false, - &staking_keys[0], - ) - .await; - } - - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert!(status.discarded_blocks.len() <= cfg.max_discarded_blocks); - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_awaiting_slot_blocks() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Send more blocks in the future than the max number of future processing blocks. - for i in 0..(cfg.max_future_processing_blocks + 5) as u64 { - // Too far into the future. 
- let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(10 + i, 0), - parents.clone(), - false, - false, - &staking_keys[0], - ) - .await; - } - - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert!(status.discarded_blocks.len() <= cfg.max_future_processing_blocks); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_awaiting_dependencies_blocks_with_discarded_dependency() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 200.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Too far into the future. - let bad_block = - create_block(&cfg, Slot::new(10000, 0), parents.clone(), &staking_keys[0]); - - for i in 1..4 { - // Sent several headers with the bad parent as dependency. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i, 0), - vec![bad_block.id, parents.clone()[0]], - false, - false, - &staking_keys[0], - ) - .await; - } - - // Now, send the bad parent. - protocol_controller - .receive_header(bad_block.content.header) - .await; - validate_notpropagate_block_in_list(&mut protocol_controller, &vec![bad_block.id], 10) - .await; - - // Eventually, all blocks will be discarded due to their bad parent. - // Note the parent too much in the future will not be discarded, but ignored. 
- loop { - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - if status.discarded_blocks.len() == 3 { - break; - } - } - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_reward_split.rs b/massa-consensus-worker/src/tests/scenarios_reward_split.rs deleted file mode 100644 index 8fe0f8313e0..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_reward_split.rs +++ /dev/null @@ -1,295 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; -// use massa_models::ledger_models::LedgerData; -// use massa_models::wrapped::WrappedContent; -// use massa_models::{Address, Amount, BlockId, Endorsement, EndorsementSerializer, Slot}; -// use massa_pos_exports::Selection; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_reward_split() { -// // setup logging -// // stderrlog::new() -// // .verbosity(2) -// // .timestamp(stderrlog::Timestamp::Millisecond) -// // .init() -// // .unwrap(); -// let thread_count = 2; - -// // Create addresses -// let (address_a, keypair_a) = random_address_on_thread(0, thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_a, LedgerData::new(Amount::from_str("10").unwrap())); -// ledger.insert(address_b, LedgerData::new(Amount::from_str("10").unwrap())); -// let staking_keys = vec![keypair_a.clone(), keypair_b.clone()]; -// let init_time: MassaTime = 1000.into(); -// let cfg = ConsensusConfig { -// endorsement_count: 5, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(init_time), -// max_block_size: 2000, -// max_operations_per_block: 5000, -// operation_validity_periods: 10, -// periods_per_cycle: 3, -// t0: 500.into(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// // Check initial balances. -// let addresses_state = consensus_command_sender -// .get_addresses_info(vec![address_a, address_b].into_iter().collect()) -// .await -// .unwrap(); - -// let addresse_a_state = addresses_state.get(&address_a).unwrap(); -// assert_eq!( -// addresse_a_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("10").unwrap() -// ); - -// let addresse_b_state = addresses_state.get(&address_b).unwrap(); -// assert_eq!( -// addresse_b_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("10").unwrap() -// ); - -// let draws: Selection = selector_controller.get_selection(Slot::new(1, 0)).unwrap(); - -// let slot_one_block_addr = draws.producer; -// let slot_one_endorsements_addrs = draws.endorsements; - -// let slot_one_keypair = if slot_one_block_addr == address_a { -// keypair_a.clone() -// } else { -// keypair_b.clone() -// }; - -// // Create, and propagate, block 1. 
-// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// let b1 = create_block(&cfg, Slot::new(1, 0), parents, &slot_one_keypair); - -// propagate_block( -// &mut protocol_controller, -// b1.clone(), -// true, -// init_time -// .saturating_add(cfg.t0.saturating_mul(2)) -// .to_millis(), -// ) -// .await; - -// let slot_two_block_addr = selector_controller.get_producer(Slot::new(2, 0)).unwrap(); - -// let slot_two_keypair = if slot_two_block_addr == address_a { -// keypair_a.clone() -// } else { -// keypair_b.clone() -// }; - -// // Create, and propagate, block 2. -// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); -// assert!(parents.contains(&b1.id)); - -// let mut b2 = create_block(&cfg, Slot::new(2, 0), parents, &slot_two_keypair); - -// // Endorsements in block 2. - -// // Creator of second block endorses the first. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_two_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_1 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_two_keypair, -// ) -// .unwrap(); - -// // Creator of first block endorses the first. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_one_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_2 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_one_keypair, -// ) -// .unwrap(); - -// // Creator of second block endorses the first, again. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_two_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_3 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_two_keypair, -// ) -// .unwrap(); - -// // Add endorsements to block. -// b2.content.header.content.endorsements = vec![ed_1, ed_2, ed_3]; - -// // Propagate block. -// tokio::time::sleep(cfg.t0.to_duration()).await; -// propagate_block(&mut protocol_controller, b2, true, 300).await; - -// // Check balances after second block. 
-// let addresses_state = consensus_command_sender -// .get_addresses_info(vec![address_a, address_b].into_iter().collect()) -// .await -// .unwrap(); - -// let third = cfg -// .block_reward -// .checked_div_u64((3 * (1 + cfg.endorsement_count)).into()) -// .unwrap(); - -// let expected_a = Amount::from_str("10") -// .unwrap() // initial ledger -// .saturating_add(if keypair_a.to_bytes() == slot_one_keypair.to_bytes() { -// // block 1 reward -// cfg.block_reward -// .checked_mul_u64(1) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(0).unwrap()) -// // endorsements reward -// .saturating_add( -// third // parent in ed 1 -// .saturating_add(third) // creator of ed 2 -// .saturating_add(third) // parent in ed 2 -// .saturating_add(third), // parent in ed 3 -// ) -// } else { -// Default::default() -// }) -// .saturating_add(if keypair_a.to_bytes() == slot_two_keypair.to_bytes() { -// // block 2 creation reward -// cfg.block_reward -// .checked_mul_u64(1 + 3) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(2 * 3).unwrap()) -// // endorsement rewards -// .saturating_add( -// third // creator of ed 1 -// .saturating_add(third), // creator of ed 3 -// ) -// } else { -// Default::default() -// }); - -// let expected_b = Amount::from_str("10") -// .unwrap() // initial ledger -// .saturating_add(if keypair_b.to_bytes() == slot_one_keypair.to_bytes() { -// // block 1 reward -// cfg.block_reward -// .checked_mul_u64(1) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(0).unwrap()) -// // endorsements reward -// .saturating_add( -// third // parent in ed 1 -// .saturating_add(third) // creator of ed 2 -// .saturating_add(third) // parent in ed 2 -// .saturating_add(third), // parent in ed 3 -// ) -// } else { -// Default::default() -// }) -// .saturating_add(if keypair_b.to_bytes() == slot_two_keypair.to_bytes() { -// // block 2 creation reward -// cfg.block_reward -// .checked_mul_u64(1 + 3) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(2 * 3).unwrap()) -// // endorsement rewards -// .saturating_add( -// third // creator of ed 1 -// .saturating_add(third), // creator of ed 3 -// ) -// } else { -// Default::default() -// }); - -// let state_a = addresses_state.get(&address_a).unwrap(); -// assert_eq!( -// state_a.ledger_info.candidate_ledger_info.balance, -// expected_a -// ); - -// let state_b = addresses_state.get(&address_b).unwrap(); -// assert_eq!( -// state_b.ledger_info.candidate_ledger_info.balance, -// expected_b -// ); - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_send_block.rs b/massa-consensus-worker/src/tests/scenarios_send_block.rs deleted file mode 100644 index c2201b0eb1a..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_send_block.rs +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn 
test_consensus_sends_block_to_peer_who_asked_for_it() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let slot = Slot::new(1 + start_slot, 0); - let draw = selector_controller.get_selection(slot).unwrap().producer; - let creator = get_creator_for_draw(&draw, &staking_keys.clone()); - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &creator, - ); - - let t0s1_id = t0s1.id; - let t0s1_slot = t0s1.content.header.content.slot; - storage.store_block(t0s1); - - // Send the actual block. - protocol_controller - .receive_block(t0s1_id, t0s1_slot, storage.clone()) - .await; - - // block t0s1 is propagated - let hash_list = vec![t0s1_id]; - validate_propagate_block_in_list( - &mut protocol_controller, - &hash_list, - 3000 + start_slot * 1000, - ) - .await; - - // Ask for the block to consensus. - protocol_controller - .receive_get_active_blocks(vec![t0s1_id]) - .await; - - // Consensus should respond with results including the block. - validate_block_found(&mut protocol_controller, &t0s1_id, 100).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_block_not_found() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - // Ask for the block to consensus. - protocol_controller - .receive_get_active_blocks(vec![t0s1.id]) - .await; - - // Consensus should not have the block. 
- validate_block_not_found(&mut protocol_controller, &t0s1.id, 100).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_wishlist.rs b/massa-consensus-worker/src/tests/scenarios_wishlist.rs deleted file mode 100644 index b1b29dc19ea..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_wishlist.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; -use std::collections::HashSet; -use std::iter::FromIterator; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_wishlist_delta_with_empty_remove() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - // create test blocks - let slot = Slot::new(1, 0); - let draw = selector_controller - .get_selection(slot) - .expect("could not get selection draws.") - .producer; - let creator = get_creator_for_draw(&draw, &staking_keys.clone()); - let t0s1 = create_block(&cfg, Slot::new(1, 0), genesis_hashes.clone(), &creator); - - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - let expected_new = HashSet::from_iter(vec![t0s1.id].into_iter()); - let expected_remove = HashSet::from_iter(vec![].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - cfg.t0.saturating_add(1000.into()).to_millis(), // leave 1sec extra for init and margin - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_wishlist_delta_remove() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - let expected_new = HashSet::from_iter(vec![t0s1.id].into_iter()); - let expected_remove = HashSet::from_iter(vec![].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - cfg.t0.saturating_add(1000.into()).to_millis(), // leave 1sec extra for init and margin, - ) - .await; - 
- storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - let expected_new = HashSet::from_iter(vec![].into_iter()); - let expected_remove = HashSet::from_iter(vec![t0s1.id].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - 1000, - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/test_block_graph.rs b/massa-consensus-worker/src/tests/test_block_graph.rs deleted file mode 100644 index 13f9a086eb8..00000000000 --- a/massa-consensus-worker/src/tests/test_block_graph.rs +++ /dev/null @@ -1,174 +0,0 @@ -use crate::tests::tools::get_dummy_block_id; -use massa_graph::{ - export_active_block::ExportActiveBlock, BootstrapableGraph, BootstrapableGraphDeserializer, - BootstrapableGraphSerializer, -}; -use massa_hash::Hash; -use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer, WrappedBlock}, - endorsement::{Endorsement, EndorsementSerializerLW}, - slot::Slot, - wrapped::WrappedContent, -}; - -use massa_serialization::{DeserializeError, Deserializer, Serializer}; -use massa_signature::KeyPair; -use serial_test::serial; - -/// the data input to create the public keys was generated using the `secp256k1` curve -/// a test using this function is a regression test not an implementation test -fn get_export_active_test_block() -> (WrappedBlock, ExportActiveBlock) { - let keypair = KeyPair::generate(); - let block = Block::new_wrapped( - Block { - header: BlockHeader::new_wrapped( - BlockHeader { - operation_merkle_root: Hash::compute_from(&Vec::new()), - parents: vec![get_dummy_block_id("parent1"), get_dummy_block_id("parent2")], - slot: Slot::new(1, 0), - endorsements: vec![Endorsement::new_wrapped( - Endorsement { - endorsed_block: get_dummy_block_id("parent1"), - index: 0, - slot: Slot::new(1, 0), - }, - EndorsementSerializerLW::new(), - &keypair, - ) - .unwrap()], - }, - BlockHeaderSerializer::new(), - &keypair, - ) - .unwrap(), - operations: Default::default(), - }, - BlockSerializer::new(), - &keypair, - ) - .unwrap(); - - ( - block.clone(), - ExportActiveBlock { - parents: vec![ - (get_dummy_block_id("parent11"), 23), - (get_dummy_block_id("parent12"), 24), - ], - block, - operations: vec![], - is_final: true, - }, - ) -} - -#[test] -#[serial] -fn test_bootstrapable_graph_serialized() { - //let storage: Storage = Storage::create_root(); - - let (_, active_block) = get_export_active_test_block(); - - //storage.store_block(block.header.content.compute_id().expect("Fail to calculate block id."), block, block.to_bytes_compact().expect("Fail to serialize block")); - - let graph = BootstrapableGraph { - /// Map of active blocks, were blocks are in their exported version. 
- final_blocks: vec![active_block].into_iter().collect(), - }; - - let bootstrapable_graph_serializer = BootstrapableGraphSerializer::new(); - let bootstrapable_graph_deserializer = BootstrapableGraphDeserializer::new( - 2, 8, 10000, 10000, 10000, 10000, 10000, 10, 255, 10_000, - ); - let mut bytes = Vec::new(); - - bootstrapable_graph_serializer - .serialize(&graph, &mut bytes) - .unwrap(); - let (_, new_graph) = bootstrapable_graph_deserializer - .deserialize::(&bytes) - .unwrap(); - - assert_eq!( - graph.final_blocks[0].block.serialized_data, - new_graph.final_blocks[0].block.serialized_data - ); -} - -// #[tokio::test] -// #[serial] -// async fn test_clique_calculation() { -// let ledger_file = generate_ledger_file(&Map::default()); -// let cfg = ConsensusConfig::from(ledger_file.path()); -// let storage: Storage = Storage::create_root(); -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// thread_count: 2, -// periods_per_cycle: 100, -// genesis_address: Address::from_str("A12hgh5ULW9o8fJE9muLNXhQENaUUswQbxPyDSq8ridnDGu5gRiJ") -// .unwrap(), -// endorsement_count: 0, -// max_draw_cache: 10, -// initial_draw_seed: "".to_string(), -// }; -// let (mut selector_manager, selector_controller) = -// start_selector_worker(selector_config).unwrap(); -// let mut block_graph = -// BlockGraph::new(GraphConfig::from(&cfg), None, storage, selector_controller) -// .await -// .unwrap(); -// let hashes: Vec = vec![ -// "VzCRpnoZVYY1yQZTXtVQbbxwzdu6hYtdCUZB5BXWSabsiXyfP", -// "JnWwNHRR1tUD7UJfnEFgDB4S4gfDTX2ezLadr7pcwuZnxTvn1", -// "xtvLedxC7CigAPytS5qh9nbTuYyLbQKCfbX8finiHsKMWH6SG", -// "2Qs9sSbc5sGpVv5GnTeDkTKdDpKhp4AgCVT4XFcMaf55msdvJN", -// "2VNc8pR4tNnZpEPudJr97iNHxXbHiubNDmuaSzrxaBVwKXxV6w", -// "2bsrYpfLdvVWAJkwXoJz1kn4LWshdJ6QjwTrA7suKg8AY3ecH1", -// "kfUeGj3ZgBprqFRiAQpE47dW5tcKTAueVaWXZquJW6SaPBd4G", -// ] -// .into_iter() -// .map(|h| BlockId::from_bs58_check(h).unwrap()) -// .collect(); -// block_graph.gi_head = vec![ -// (0, vec![1, 2, 3, 4]), -// (1, vec![0]), -// (2, vec![0]), -// (3, vec![0]), -// (4, vec![0]), -// (5, vec![6]), -// (6, vec![5]), -// ] -// .into_iter() -// .map(|(idx, lst)| (hashes[idx], lst.into_iter().map(|i| hashes[i]).collect())) -// .collect(); -// let computed_sets = block_graph.compute_max_cliques(); - -// let expected_sets: Vec> = vec![ -// vec![1, 2, 3, 4, 5], -// vec![1, 2, 3, 4, 6], -// vec![0, 5], -// vec![0, 6], -// ] -// .into_iter() -// .map(|lst| lst.into_iter().map(|i| hashes[i]).collect()) -// .collect(); - -// assert_eq!(computed_sets.len(), expected_sets.len()); -// for expected in expected_sets.into_iter() { -// assert!(computed_sets.iter().any(|v| v == &expected)); -// } -// selector_manager.stop(); -// } - -// /// generate a named temporary JSON ledger file -// fn generate_ledger_file(ledger_vec: &Map) -> NamedTempFile { -// use std::io::prelude::*; -// let ledger_file_named = NamedTempFile::new().expect("cannot create temp file"); -// serde_json::to_writer_pretty(ledger_file_named.as_file(), &ledger_vec) -// .expect("unable to write ledger file"); -// ledger_file_named -// .as_file() -// .seek(std::io::SeekFrom::Start(0)) -// .expect("could not seek file"); -// ledger_file_named -// } diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs deleted file mode 100644 index c29c1d3093e..00000000000 --- a/massa-consensus-worker/src/tests/tools.rs +++ /dev/null @@ -1,1056 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -#![allow(clippy::ptr_arg)] // this allow 
&Vec<..> as function argument type - -use crate::start_consensus_controller; -use massa_cipher::decrypt; -use massa_consensus_exports::error::ConsensusResult; -use massa_consensus_exports::{ - settings::ConsensusChannels, ConsensusCommandSender, ConsensusConfig, ConsensusEventReceiver, -}; -use massa_execution_exports::test_exports::MockExecutionController; -use massa_graph::{export_active_block::ExportActiveBlock, BlockGraphExport, BootstrapableGraph}; -use massa_hash::Hash; -use massa_models::prehash::PreHashMap; -use massa_models::{ - address::Address, - amount::Amount, - block::{ - Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock, - WrappedHeader, - }, - operation::{Operation, OperationSerializer, OperationType, WrappedOperation}, - prehash::PreHashSet, - slot::Slot, - wrapped::{Id, WrappedContent}, -}; -use massa_pool_exports::test_exports::MockPoolController; -use massa_pool_exports::PoolController; -use massa_pos_exports::{SelectorConfig, SelectorController}; -use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::test_exports::MockProtocolController; -use massa_protocol_exports::ProtocolCommand; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use parking_lot::Mutex; -use std::{collections::BTreeMap, collections::HashSet, future::Future, path::Path}; -use std::{str::FromStr, sync::Arc, time::Duration}; - -use tracing::info; - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Handle the expected selector messages, always approving the address. -pub fn approve_producer_and_selector_for_staker( - staking_key: &KeyPair, - selector_controller: &Receiver, -) { - let addr = Address::from_public_key(&staking_key.get_public_key()); - // Drain all messages, assuming there can be a slight delay between sending some. 
- loop { - let timeout = Duration::from_millis(100); - match selector_controller.recv_timeout(timeout) { - Ok(MockSelectorControllerMessage::GetSelection { - slot: _, - response_tx, - }) => { - let selection = Selection { - producer: addr.clone(), - endorsements: vec![addr.clone(); ENDORSEMENT_COUNT as usize], - }; - response_tx.send(Ok(selection)).unwrap(); - } - Ok(MockSelectorControllerMessage::GetProducer { - slot: _, - response_tx, - }) => { - response_tx.send(Ok(addr.clone())).unwrap(); - } - Ok(msg) => panic!("Unexpected selector message {:?}", msg), - Err(RecvTimeoutError::Timeout) => break, - _ => panic!("Unexpected error from selector receiver"), - } - } -} -*/ - -pub fn get_dummy_block_id(s: &str) -> BlockId { - BlockId(Hash::compute_from(s.as_bytes())) -} - -pub struct AddressTest { - pub address: Address, - pub keypair: KeyPair, -} - -impl From for (Address, KeyPair) { - fn from(addr: AddressTest) -> Self { - (addr.address, addr.keypair) - } -} - -/// Same as `random_address()` but force a specific thread -pub fn random_address_on_thread(thread: u8, thread_count: u8) -> AddressTest { - loop { - let keypair = KeyPair::generate(); - let address = Address::from_public_key(&keypair.get_public_key()); - if thread == address.get_thread(thread_count) { - return AddressTest { address, keypair }; - } - } -} - -/// Generate a random address -pub fn _random_address() -> AddressTest { - let keypair = KeyPair::generate(); - AddressTest { - address: Address::from_public_key(&keypair.get_public_key()), - keypair, - } -} - -/// return true if another block has been seen -pub async fn validate_notpropagate_block( - protocol_controller: &mut MockProtocolController, - not_propagated: BlockId, - timeout_ms: u64, -) -> bool { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => not_propagated != block_id, - None => false, - } -} - -/// return true if another block has been seen -pub async fn validate_notpropagate_block_in_list( - protocol_controller: &mut MockProtocolController, - not_propagated: &Vec, - timeout_ms: u64, -) -> bool { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => !not_propagated.contains(&block_id), - None => false, - } -} - -pub async fn validate_propagate_block_in_list( - protocol_controller: &mut MockProtocolController, - valid: &Vec, - timeout_ms: u64, -) -> BlockId { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => { - assert!( - valid.contains(&block_id), - "not the valid hash propagated, it can be a genesis_timestamp problem" - ); - block_id - } - None => panic!("Hash not propagated."), - } -} - -pub async fn validate_ask_for_block( - protocol_controller: &mut MockProtocolController, - valid: BlockId, - timeout_ms: u64, -) -> BlockId { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, .. 
} => Some(new), - _ => None, - }) - .await; - match param { - Some(new) => { - assert!(new.contains_key(&valid), "not the valid hash asked for"); - assert_eq!(new.len(), 1); - valid - } - None => panic!("Block not asked for before timeout."), - } -} - -pub async fn validate_wishlist( - protocol_controller: &mut MockProtocolController, - new: PreHashSet, - remove: PreHashSet, - timeout_ms: u64, -) { - let new: PreHashMap> = - new.into_iter().map(|id| (id, None)).collect(); - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, remove } => Some((new, remove)), - _ => None, - }) - .await; - match param { - Some((got_new, got_remove)) => { - for key in got_new.keys() { - assert!(new.contains_key(key)); - } - assert_eq!(remove, got_remove); - } - None => panic!("Wishlist delta not sent for before timeout."), - } -} - -pub async fn validate_does_not_ask_for_block( - protocol_controller: &mut MockProtocolController, - hash: &BlockId, - timeout_ms: u64, -) { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, .. } => Some(new), - _ => None, - }) - .await; - if let Some(new) = param { - if new.contains_key(hash) { - panic!("unexpected ask for block {}", hash); - } - } -} - -pub async fn validate_propagate_block( - protocol_controller: &mut MockProtocolController, - valid_hash: BlockId, - timeout_ms: u64, -) { - protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => { - if block_id == valid_hash { - return Some(()); - } - None - } - _ => None, - }) - .await - .expect("Block not propagated before timeout.") -} - -pub async fn validate_notify_block_attack_attempt( - protocol_controller: &mut MockProtocolController, - valid_hash: BlockId, - timeout_ms: u64, -) { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::AttackBlockDetected(hash) => Some(hash), - _ => None, - }) - .await; - match param { - Some(hash) => assert_eq!(valid_hash, hash, "Attack attempt notified for wrong hash."), - None => panic!("Attack attempt not notified before timeout."), - } -} - -pub async fn validate_block_found( - _protocol_controller: &mut MockProtocolController, - _valid_hash: &BlockId, - _timeout_ms: u64, -) { -} - -pub async fn validate_block_not_found( - _protocol_controller: &mut MockProtocolController, - _valid_hash: &BlockId, - _timeout_ms: u64, -) { -} - -pub async fn create_and_test_block( - protocol_controller: &mut MockProtocolController, - cfg: &ConsensusConfig, - slot: Slot, - best_parents: Vec, - valid: bool, - trace: bool, - creator: &KeyPair, -) -> BlockId { - let block = create_block(cfg, slot, best_parents, creator); - let block_id = block.id; - let slot = block.content.header.content.slot; - let mut storage = Storage::create_root(); - if trace { - info!("create block:{}", block.id); - } - - storage.store_block(block); - protocol_controller - .receive_block(block_id, slot, storage.clone()) - .await; - if valid { - // Assert that the block is propagated. - validate_propagate_block(protocol_controller, block_id, 2000).await; - } else { - // Assert that the the block is not propagated. 
- validate_notpropagate_block(protocol_controller, block_id, 500).await; - } - block_id -} - -pub async fn propagate_block( - protocol_controller: &mut MockProtocolController, - block_id: BlockId, - slot: Slot, - storage: Storage, - valid: bool, - timeout_ms: u64, -) -> BlockId { - let block_hash = block_id; - protocol_controller - .receive_block(block_id, slot, storage) - .await; - if valid { - // see if the block is propagated. - validate_propagate_block(protocol_controller, block_hash, timeout_ms).await; - } else { - // see if the block is propagated. - validate_notpropagate_block(protocol_controller, block_hash, timeout_ms).await; - } - block_hash -} - -pub fn _create_roll_transaction( - keypair: &KeyPair, - roll_count: u64, - buy: bool, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = if buy { - OperationType::RollBuy { roll_count } - } else { - OperationType::RollSell { roll_count } - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -pub async fn _wait_pool_slot( - _pool_controller: &mut MockPoolController, - _t0: MassaTime, - period: u64, - thread: u8, -) -> Slot { - // TODO: Replace ?? - // pool_controller - // .wait_command(t0.checked_mul(2).unwrap(), |cmd| match cmd { - // PoolCommand::UpdateCurrentSlot(s) => { - // if s >= Slot::new(period, thread) { - // Some(s) - // } else { - // None - // } - // } - // _ => None, - // }) - // .await - // .expect("timeout while waiting for slot") - Slot::new(period, thread) -} - -pub fn _create_transaction( - keypair: &KeyPair, - recipient_address: Address, - amount: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::Transaction { - recipient_address, - amount: Amount::from_str(&amount.to_string()).unwrap(), - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -#[allow(clippy::too_many_arguments)] -pub fn _create_executesc( - keypair: &KeyPair, - expire_period: u64, - fee: u64, - data: Vec, - max_gas: u64, - gas_price: u64, -) -> WrappedOperation { - let op = OperationType::ExecuteSC { - data, - max_gas, - gas_price: Amount::from_str(&gas_price.to_string()).unwrap(), - datastore: BTreeMap::new(), - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -pub fn _create_roll_buy( - keypair: &KeyPair, - roll_count: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::RollBuy { roll_count }; - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -pub fn create_roll_sell( - keypair: &KeyPair, - roll_count: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::RollSell { roll_count }; - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} -*/ - -// returns hash and resulting discarded blocks -pub fn create_block( - cfg: &ConsensusConfig, - slot: Slot, - 
best_parents: Vec, - creator: &KeyPair, -) -> WrappedBlock { - create_block_with_merkle_root( - cfg, - Hash::compute_from("default_val".as_bytes()), - slot, - best_parents, - creator, - ) -} - -// returns hash and resulting discarded blocks -pub fn create_block_with_merkle_root( - _cfg: &ConsensusConfig, - operation_merkle_root: Hash, - slot: Slot, - best_parents: Vec, - creator: &KeyPair, -) -> WrappedBlock { - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents, - operation_merkle_root, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: Default::default(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Creates an endorsement for use in consensus tests. -pub fn create_endorsement( - sender_keypair: &KeyPair, - slot: Slot, - endorsed_block: BlockId, - index: u32, -) -> WrappedEndorsement { - let content = Endorsement { - slot, - index, - endorsed_block, - }; - Endorsement::new_wrapped(content, EndorsementSerializer::new(), sender_keypair).unwrap() -} -*/ - -pub fn _get_export_active_test_block( - parents: Vec<(BlockId, u64)>, - operations: Vec, - slot: Slot, - is_final: bool, -) -> ExportActiveBlock { - let keypair = KeyPair::generate(); - let block = Block::new_wrapped( - Block { - header: BlockHeader::new_wrapped( - BlockHeader { - operation_merkle_root: Hash::compute_from( - &operations - .iter() - .flat_map(|op| op.id.into_bytes()) - .collect::>()[..], - ), - parents: parents.iter().map(|(id, _)| *id).collect(), - slot, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - &keypair, - ) - .unwrap(), - operations: operations.iter().cloned().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - &keypair, - ) - .unwrap(); - - ExportActiveBlock { - parents, - block, - operations, - is_final, - } -} - -pub fn create_block_with_operations( - _cfg: &ConsensusConfig, - slot: Slot, - best_parents: &Vec, - creator: &KeyPair, - operations: Vec, -) -> WrappedBlock { - let operation_merkle_root = Hash::compute_from( - &operations.iter().fold(Vec::new(), |acc, v| { - [acc, v.id.get_hash().to_bytes().to_vec()].concat() - })[..], - ); - - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents.clone(), - operation_merkle_root, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: operations.into_iter().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -pub fn create_block_with_operations_and_endorsements( - _cfg: &ConsensusConfig, - slot: Slot, - best_parents: &Vec, - creator: &KeyPair, - operations: Vec, - endorsements: Vec, -) -> WrappedBlock { - let operation_merkle_root = Hash::compute_from( - &operations.iter().fold(Vec::new(), |acc, v| { - [acc, v.id.get_hash().to_bytes().to_vec()].concat() - })[..], - ); - - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents.clone(), - operation_merkle_root, - endorsements, - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: operations.into_iter().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} -*/ - -pub fn get_creator_for_draw(draw: &Address, nodes: &Vec) -> KeyPair { 
- for key in nodes.iter() { - let address = Address::from_public_key(&key.get_public_key()); - if address == *draw { - return key.clone(); - } - } - panic!("Matching key for draw not found."); -} - -/// Load staking keys from file and derive public keys and addresses -pub async fn _load_initial_staking_keys( - path: &Path, - password: &str, -) -> ConsensusResult> { - if !std::path::Path::is_file(path) { - return Ok(PreHashMap::default()); - } - let (_version, data) = decrypt(password, &tokio::fs::read(path).await?)?; - serde_json::from_slice::>(&data) - .unwrap() - .into_iter() - .map(|key| Ok((Address::from_public_key(&key.get_public_key()), key))) - .collect() -} - -/// Runs a consensus test, passing a mock pool controller to it. -pub async fn _consensus_pool_test( - cfg: ConsensusConfig, - boot_graph: Option, - test: F, -) where - F: FnOnce( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - ) -> V, - V: Future< - Output = ( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - ), - >, -{ - let mut storage: Storage = Storage::create_root(); - if let Some(ref graph) = boot_graph { - for export_block in &graph.final_blocks { - storage.store_block(export_block.block.clone()); - } - } - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - // launch consensus controller - let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller, - }, - boot_graph, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. 
- let ( - _pool_controller, - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - ) = test( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Runs a consensus test, passing a mock pool controller to it. -pub async fn consensus_pool_test_with_storage( - cfg: ConsensusConfig, - boot_graph: Option, - test: F, -) where - F: FnOnce( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Storage, - Receiver, - ) -> V, - V: Future< - Output = ( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Receiver, - ), - >, -{ - let mut storage: Storage = Storage::create_root(); - if let Some(ref graph) = boot_graph { - for export_block in &graph.final_blocks { - storage.store_block(export_block.block.clone()); - } - } - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller: selector_controller, - }, - boot_graph, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. - let ( - _pool_controller, - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - storage, - selector_receiver, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} -*/ - -/// Runs a consensus test, without passing a mock pool controller to it. 
-pub async fn consensus_without_pool_test(cfg: ConsensusConfig, test: F) -where - F: FnOnce( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ) -> V, - V: Future< - Output = ( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ), - >, -{ - let storage: Storage = Storage::create_root(); - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - let (mut selector_manager, selector_controller) = - start_selector_worker(selector_config).unwrap(); - // for now, execution_rx is ignored: clique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller: selector_controller.clone(), - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. - let ( - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - selector_manager.stop(); - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -/// Runs a consensus test, without passing a mock pool controller to it, -/// and passing a reference to storage. 
-pub async fn consensus_without_pool_with_storage_test(cfg: ConsensusConfig, test: F) -where - F: FnOnce( - Storage, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ) -> V, - V: Future< - Output = ( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ), - >, -{ - let storage: Storage = Storage::create_root(); - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: clique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - let (mut selector_manager, selector_controller) = - start_selector_worker(selector_config).unwrap(); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller: selector_controller.clone(), - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. 
- let ( - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - storage, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - selector_manager.stop(); - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -pub fn get_cliques(graph: &BlockGraphExport, hash: BlockId) -> HashSet { - let mut res = HashSet::new(); - for (i, clique) in graph.max_cliques.iter().enumerate() { - if clique.block_ids.contains(&hash) { - res.insert(i); - } - } - res -} diff --git a/massa-consensus-worker/src/tools.rs b/massa-consensus-worker/src/tools.rs deleted file mode 100644 index 2d771e2de71..00000000000 --- a/massa-consensus-worker/src/tools.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::consensus_worker::ConsensusWorker; -use massa_consensus_exports::settings::ConsensusConfig; -use massa_consensus_exports::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - error::{ConsensusError, ConsensusResult as Result}, - events::ConsensusEvent, - settings::{ConsensusChannels, ConsensusWorkerChannels}, - ConsensusCommandSender, ConsensusEventReceiver, ConsensusManager, -}; -use massa_graph::{settings::GraphConfig, BlockGraph, BootstrapableGraph}; -use massa_storage::Storage; -use tokio::sync::mpsc; -use tracing::{debug, error, info}; - -/// Creates a new consensus controller. -/// -/// # Arguments -/// * `cfg`: consensus configuration -/// * `protocol_command_sender`: a `ProtocolCommandSender` instance to send commands to Protocol. -/// * `protocol_event_receiver`: a `ProtocolEventReceiver` instance to receive events from Protocol. -#[allow(clippy::too_many_arguments)] -pub async fn start_consensus_controller( - cfg: ConsensusConfig, - channels: ConsensusChannels, - boot_graph: Option, - storage: Storage, - clock_compensation: i64, -) -> Result<( - ConsensusCommandSender, - ConsensusEventReceiver, - ConsensusManager, -)> { - debug!("starting consensus controller"); - massa_trace!( - "consensus.consensus_controller.start_consensus_controller", - {} - ); - - // todo that is checked when loading the config, should be removed - // ensure that the parameters are sane - if cfg.thread_count == 0 { - return Err(ConsensusError::ConfigError( - "thread_count should be strictly more than 0".to_string(), - )); - } - if cfg.t0 == 0.into() { - return Err(ConsensusError::ConfigError( - "t0 should be strictly more than 0".to_string(), - )); - } - if cfg.t0.checked_rem_u64(cfg.thread_count as u64)? 
!= 0.into() { - return Err(ConsensusError::ConfigError( - "thread_count should divide t0".to_string(), - )); - } - - // start worker - let block_db = BlockGraph::new( - GraphConfig::from(&cfg), - boot_graph, - storage.clone_without_refs(), - channels.selector_controller.clone(), - ) - .await?; - let (command_tx, command_rx) = mpsc::channel::(cfg.channel_size); - let (event_tx, event_rx) = mpsc::channel::(cfg.channel_size); - let (manager_tx, manager_rx) = mpsc::channel::(1); - let cfg_copy = cfg.clone(); - let join_handle = tokio::spawn(async move { - let res = ConsensusWorker::new( - cfg_copy, - ConsensusWorkerChannels { - protocol_command_sender: channels.protocol_command_sender, - protocol_event_receiver: channels.protocol_event_receiver, - execution_controller: channels.execution_controller, - pool_command_sender: channels.pool_command_sender, - selector_controller: channels.selector_controller, - controller_command_rx: command_rx, - controller_event_tx: event_tx, - controller_manager_rx: manager_rx, - }, - block_db, - clock_compensation, - ) - .await? - .run_loop() - .await; - match res { - Err(err) => { - error!("consensus worker crashed: {}", err); - Err(err) - } - Ok(v) => { - info!("consensus worker finished cleanly"); - Ok(v) - } - } - }); - Ok(( - ConsensusCommandSender(command_tx), - ConsensusEventReceiver(event_rx), - ConsensusManager { - manager_tx, - join_handle, - }, - )) -} diff --git a/massa-consensus-worker/src/worker/init.rs b/massa-consensus-worker/src/worker/init.rs new file mode 100644 index 00000000000..5af5969165c --- /dev/null +++ b/massa-consensus-worker/src/worker/init.rs @@ -0,0 +1,309 @@ +use massa_consensus_exports::{ + block_status::BlockStatus, bootstrapable_graph::BootstrapableGraph, error::ConsensusError, + ConsensusConfig, +}; +use massa_hash::Hash; +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, + prehash::PreHashMap, + slot::Slot, + timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, + wrapped::WrappedContent, +}; +use massa_storage::Storage; +use massa_time::MassaTime; +use parking_lot::RwLock; +use std::{ + collections::{HashMap, VecDeque}, + sync::{mpsc, Arc}, +}; +use tracing::log::info; + +use crate::{commands::ConsensusCommand, state::ConsensusState}; + +use super::ConsensusWorker; + +/// Creates genesis block in given thread. +/// +/// # Arguments +/// * `cfg`: consensus configuration +/// * `thread_number`: thread in which we want a genesis block +/// +/// # Returns +/// A genesis block +pub fn create_genesis_block( + cfg: &ConsensusConfig, + thread_number: u8, +) -> Result { + let keypair = &cfg.genesis_key; + let header = BlockHeader::new_wrapped( + BlockHeader { + slot: Slot::new(0, thread_number), + parents: Vec::new(), + operation_merkle_root: Hash::compute_from(&Vec::new()), + endorsements: Vec::new(), + }, + BlockHeaderSerializer::new(), + keypair, + )?; + + Ok(Block::new_wrapped( + Block { + header, + operations: Default::default(), + }, + BlockSerializer::new(), + keypair, + )?) +} + +impl ConsensusWorker { + /// Creates a new consensus worker. 
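+    /// Loads the genesis blocks of every thread, restores the bootstrapped graph when one is
+    /// provided, and notifies the execution module of the resulting final blocks and blockclique.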
+ /// + /// # Arguments + /// * `config`: consensus configuration + /// * `command_receiver`: channel to receive commands from controller + /// * `channels`: channels to communicate with other workers + /// * `shared_state`: shared state with the controller + /// * `init_graph`: Optional graph of blocks to initiate the worker + /// * `storage`: shared storage + /// + /// # Returns: + /// A `ConsensusWorker`, to interact with it use the `ConsensusController` + pub fn new( + config: ConsensusConfig, + command_receiver: mpsc::Receiver, + shared_state: Arc>, + init_graph: Option, + storage: Storage, + ) -> Result { + let now = MassaTime::now(config.clock_compensation_millis) + .expect("Couldn't init timer consensus"); + let previous_slot = get_latest_block_slot_at_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + now, + ) + .expect("Couldn't get the init slot consensus."); + + // load genesis blocks + let mut block_statuses = PreHashMap::default(); + let mut genesis_block_ids = Vec::with_capacity(config.thread_count as usize); + for thread in 0u8..config.thread_count { + let block = create_genesis_block(&config, thread).map_err(|err| { + ConsensusError::GenesisCreationError(format!("genesis error {}", err)) + })?; + let mut storage = storage.clone_without_refs(); + storage.store_block(block.clone()); + genesis_block_ids.push(block.id); + block_statuses.insert( + block.id, + BlockStatus::Active { + a_block: Box::new(ActiveBlock { + creator_address: block.creator_address, + parents: Vec::new(), + children: vec![PreHashMap::default(); config.thread_count as usize], + descendants: Default::default(), + is_final: true, + block_id: block.id, + slot: block.content.header.content.slot, + fitness: block.get_fitness(), + }), + storage, + }, + ); + } + + let next_slot = previous_slot.map_or(Ok(Slot::new(0u64, 0u8)), |s| { + s.get_next_slot(config.thread_count) + })?; + let next_instant = get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + next_slot, + )? 
+ .estimate_instant(config.clock_compensation_millis)?; + + info!( + "Started node at time {}, cycle {}, period {}, thread {}", + now.to_utc_string(), + next_slot.get_cycle(config.periods_per_cycle), + next_slot.period, + next_slot.thread, + ); + + if config.genesis_timestamp > now { + let (days, hours, mins, secs) = config + .genesis_timestamp + .saturating_sub(now) + .days_hours_mins_secs()?; + info!( + "{} days, {} hours, {} minutes, {} seconds remaining to genesis", + days, hours, mins, secs, + ) + } + + // add genesis blocks to stats + let genesis_addr = Address::from_public_key(&config.genesis_key.get_public_key()); + let mut final_block_stats = VecDeque::new(); + for thread in 0..config.thread_count { + final_block_stats.push_back(( + get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + Slot::new(0, thread), + )?, + genesis_addr, + false, + )) + } + + let mut res_consensus = ConsensusWorker { + config: config.clone(), + command_receiver, + shared_state, + previous_slot, + next_slot, + next_instant, + }; + + if let Some(BootstrapableGraph { final_blocks }) = init_graph { + // load final blocks + let final_blocks: Vec<(ActiveBlock, Storage)> = final_blocks + .into_iter() + .map(|export_b| export_b.to_active_block(&storage, config.thread_count)) + .collect::>()?; + + // compute latest_final_blocks_periods + let mut latest_final_blocks_periods: Vec<(BlockId, u64)> = + genesis_block_ids.iter().map(|id| (*id, 0u64)).collect(); + for (b, _) in &final_blocks { + if let Some(v) = latest_final_blocks_periods.get_mut(b.slot.thread as usize) { + if b.slot.period > v.1 { + *v = (b.block_id, b.slot.period); + } + } + } + + { + let mut write_shared_state = res_consensus.shared_state.write(); + write_shared_state.genesis_hashes = genesis_block_ids; + write_shared_state.active_index = + final_blocks.iter().map(|(b, _)| b.block_id).collect(); + write_shared_state.best_parents = latest_final_blocks_periods.clone(); + write_shared_state.latest_final_blocks_periods = latest_final_blocks_periods; + write_shared_state.block_statuses = final_blocks + .into_iter() + .map(|(b, s)| { + Ok(( + b.block_id, + BlockStatus::Active { + a_block: Box::new(b), + storage: s, + }, + )) + }) + .collect::>()?; + write_shared_state.final_block_stats = final_block_stats; + } + + res_consensus.claim_parent_refs()?; + } else { + { + let mut write_shared_state = res_consensus.shared_state.write(); + write_shared_state.active_index = genesis_block_ids.iter().copied().collect(); + write_shared_state.latest_final_blocks_periods = + genesis_block_ids.iter().map(|h| (*h, 0)).collect(); + write_shared_state.best_parents = + genesis_block_ids.iter().map(|v| (*v, 0)).collect(); + write_shared_state.genesis_hashes = genesis_block_ids; + write_shared_state.block_statuses = block_statuses; + write_shared_state.final_block_stats = final_block_stats; + } + } + + // Notify execution module of current blockclique and all final blocks. + // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync + // because the two modules run concurrently and out of sync. 
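+        // Below: build slot -> block id maps for the final blocks and for the current blockclique,
+        // keep each block's Storage in `block_storage`, and push everything to the execution
+        // controller in a single `update_blockclique_status` call.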
+ { + let mut write_shared_state = res_consensus.shared_state.write(); + let mut block_storage: PreHashMap = Default::default(); + let notify_finals: HashMap = write_shared_state + .get_all_final_blocks() + .into_iter() + .map(|(b_id, block_infos)| { + block_storage.insert(b_id, block_infos.1); + (block_infos.0, b_id) + }) + .collect(); + let notify_blockclique: HashMap = write_shared_state + .get_blockclique() + .iter() + .map(|b_id| { + let (a_block, storage) = write_shared_state + .get_full_active_block(b_id) + .expect("active block missing from block_db"); + let slot = a_block.slot; + block_storage.insert(*b_id, storage.clone()); + (slot, *b_id) + }) + .collect(); + write_shared_state.prev_blockclique = + notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect(); + write_shared_state + .channels + .execution_controller + .update_blockclique_status(notify_finals, Some(notify_blockclique), block_storage); + } + + Ok(res_consensus) + } + + /// Internal function used at initialization of the `ConsensusWorker` to link blocks with their parents + fn claim_parent_refs(&mut self) -> Result<(), ConsensusError> { + let mut write_shared_state = self.shared_state.write(); + for (_b_id, block_status) in write_shared_state.block_statuses.iter_mut() { + if let BlockStatus::Active { + a_block, + storage: block_storage, + } = block_status + { + // claim parent refs + let n_claimed_parents = block_storage + .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) + .len(); + + if !a_block.is_final { + // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals + if n_claimed_parents != self.config.thread_count as usize { + return Err(ConsensusError::MissingBlock( + "block storage could not claim refs to all parent blocks".into(), + )); + } + } + } + } + + // list active block parents + let active_blocks_map: PreHashMap)> = write_shared_state + .block_statuses + .iter() + .filter_map(|(h, s)| { + if let BlockStatus::Active { a_block: a, .. } = s { + return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); + } + None + }) + .collect(); + + for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { + write_shared_state.insert_parents_descendants(b_id, b_slot, b_parents); + } + Ok(()) + } +} diff --git a/massa-consensus-worker/src/worker/main_loop.rs b/massa-consensus-worker/src/worker/main_loop.rs new file mode 100644 index 00000000000..88489e1fafc --- /dev/null +++ b/massa-consensus-worker/src/worker/main_loop.rs @@ -0,0 +1,159 @@ +use std::{sync::mpsc, time::Instant}; + +use massa_consensus_exports::error::ConsensusError; +use massa_models::{ + slot::Slot, + timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, +}; +use massa_time::MassaTime; +use tracing::{info, log::warn}; + +use crate::commands::ConsensusCommand; + +use super::ConsensusWorker; + +enum WaitingStatus { + Ended, + Interrupted, + Disconnected, +} + +impl ConsensusWorker { + /// Execute a command received from the controller also run an update of the graph after processing the command. 
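+    /// `RegisterBlockHeader` and `RegisterBlock` update the shared graph state and then call
+    /// `block_db_changed`; `MarkInvalidBlock` only marks the block as invalid.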
+    ///
+    /// # Arguments:
+    /// * `command`: the command to execute
+    ///
+    /// # Returns:
+    /// An error if the command failed
+    fn manage_command(&mut self, command: ConsensusCommand) -> Result<(), ConsensusError> {
+        let mut write_shared_state = self.shared_state.write();
+        match command {
+            ConsensusCommand::RegisterBlockHeader(block_id, header) => {
+                write_shared_state.register_block_header(block_id, header, self.previous_slot)?;
+                write_shared_state.block_db_changed()
+            }
+            ConsensusCommand::RegisterBlock(block_id, slot, block_storage, created) => {
+                write_shared_state.register_block(
+                    block_id,
+                    slot,
+                    self.previous_slot,
+                    block_storage,
+                    created,
+                )?;
+                write_shared_state.block_db_changed()
+            }
+            ConsensusCommand::MarkInvalidBlock(block_id, header) => {
+                write_shared_state.mark_invalid_block(&block_id, header);
+                Ok(())
+            }
+        }
+    }
+
+    /// Waits for a command until the given deadline, or until the channel is closed.
+    ///
+    /// # Return value
+    /// * `WaitingStatus::Ended` if the deadline was reached without receiving a command
+    /// * `WaitingStatus::Interrupted` if a command was received and processed (errors are logged)
+    /// * `WaitingStatus::Disconnected` if the command channel was closed (sender dropped)
+    fn wait_slot_or_command(&mut self, deadline: Instant) -> WaitingStatus {
+        match self.command_receiver.recv_deadline(deadline) {
+            // message received => manage it
+            Ok(command) => {
+                if let Err(err) = self.manage_command(command) {
+                    warn!("Error in consensus: {}", err);
+                }
+                WaitingStatus::Interrupted
+            }
+            // timeout => continue main loop
+            Err(mpsc::RecvTimeoutError::Timeout) => WaitingStatus::Ended,
+            // channel disconnected (sender dropped) => quit main loop
+            Err(mpsc::RecvTimeoutError::Disconnected) => WaitingStatus::Disconnected,
+        }
+    }
+
+    /// Gets the next slot and the instant when it will happen.
+    /// Slots can be skipped if we waited too long in between.
+    /// Extra safety against double-production caused by clock adjustments (this is the role of the `previous_slot` parameter).
+    fn get_next_slot(&self, previous_slot: Option<Slot>) -> (Slot, Instant) {
+        // get current absolute time
+        let now = MassaTime::now(self.config.clock_compensation_millis)
+            .expect("could not get current time");
+
+        // get closest slot according to the current absolute time
+        let mut next_slot = get_closest_slot_to_timestamp(
+            self.config.thread_count,
+            self.config.t0,
+            self.config.genesis_timestamp,
+            now,
+        );
+
+        // protection against double-production on unexpected system clock adjustment
+        if let Some(prev_slot) = previous_slot {
+            if next_slot <= prev_slot {
+                next_slot = prev_slot
+                    .get_next_slot(self.config.thread_count)
+                    .expect("could not compute next slot");
+            }
+        }
+
+        // get the timestamp of the target slot
+        let next_instant = get_block_slot_timestamp(
+            self.config.thread_count,
+            self.config.t0,
+            self.config.genesis_timestamp,
+            next_slot,
+        )
+        .expect("could not get block slot timestamp")
+        .estimate_instant(self.config.clock_compensation_millis)
+        .expect("could not estimate block slot instant");
+
+        (next_slot, next_instant)
+    }
+
+    /// Runs in a loop forever. The loop must wake up once per slot to update stats and the graph,
+    /// but can be interrupted at any time by an incoming command.
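+    /// On each slot tick it logs cycle changes, calls `slot_tick` on the shared state and
+    /// periodically prunes the block database; it returns when the command channel is
+    /// disconnected, i.e. when the controller side has been dropped.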
+ pub fn run(&mut self) { + let mut last_prune = Instant::now(); + loop { + match self.wait_slot_or_command(self.next_instant) { + WaitingStatus::Ended => { + let previous_cycle = self + .previous_slot + .map(|s| s.get_cycle(self.config.periods_per_cycle)); + let observed_cycle = self.next_slot.get_cycle(self.config.periods_per_cycle); + if previous_cycle.is_none() { + // first cycle observed + info!("Massa network has started ! 🎉") + } + if previous_cycle < Some(observed_cycle) { + info!("Started cycle {}", observed_cycle); + } + { + let mut write_shared_state = self.shared_state.write(); + if let Err(err) = write_shared_state.slot_tick(self.next_slot) { + warn!("Error while processing block tick: {}", err); + } + }; + if last_prune.elapsed().as_millis() + > self.config.block_db_prune_interval.to_millis() as u128 + { + self.shared_state + .write() + .prune() + .expect("Error while pruning"); + last_prune = Instant::now(); + } + self.previous_slot = Some(self.next_slot); + (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); + } + WaitingStatus::Disconnected => { + break; + } + WaitingStatus::Interrupted => { + continue; + } + }; + } + } +} diff --git a/massa-consensus-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs new file mode 100644 index 00000000000..58214ea7206 --- /dev/null +++ b/massa-consensus-worker/src/worker/mod.rs @@ -0,0 +1,117 @@ +use massa_consensus_exports::{ + bootstrapable_graph::BootstrapableGraph, ConsensusChannels, ConsensusConfig, + ConsensusController, ConsensusManager, +}; +use massa_models::block::BlockId; +use massa_models::clique::Clique; +use massa_models::config::CHANNEL_SIZE; +use massa_models::prehash::PreHashSet; +use massa_models::slot::Slot; +use massa_storage::Storage; +use massa_time::MassaTime; +use parking_lot::RwLock; +use std::sync::{mpsc, Arc}; +use std::thread; +use std::time::Instant; + +use crate::commands::ConsensusCommand; +use crate::controller::ConsensusControllerImpl; +use crate::manager::ConsensusManagerImpl; +use crate::state::ConsensusState; + +/// The consensus worker structure that contains all information and tools for the consensus worker thread. +pub struct ConsensusWorker { + /// Channel to receive command from the controller + command_receiver: mpsc::Receiver, + /// Configuration of the consensus + config: ConsensusConfig, + /// State shared with the controller + shared_state: Arc>, + /// Previous slot. + previous_slot: Option, + /// Next slot + next_slot: Slot, + /// Next slot instant + next_instant: Instant, +} + +mod init; +mod main_loop; + +/// Create a new consensus worker thread. +/// +/// # Arguments: +/// * `config`: Configuration of the consensus +/// * `channels`: Channels to communicate with others modules +/// * `init_graph`: Optional initial graph to bootstrap the graph. if None, the graph will have only genesis blocks. 
+/// * `storage`: Storage to use for the consensus +/// +/// # Returns: +/// * The consensus controller to communicate with the consensus worker thread +/// * The consensus manager to manage the consensus worker thread +pub fn start_consensus_worker( + config: ConsensusConfig, + channels: ConsensusChannels, + init_graph: Option, + storage: Storage, +) -> (Box, Box) { + let (tx, rx) = mpsc::sync_channel(CHANNEL_SIZE); + // desync detection timespan + let stats_desync_detection_timespan = + config.t0.checked_mul(config.periods_per_cycle * 2).unwrap(); + let shared_state = Arc::new(RwLock::new(ConsensusState { + storage: storage.clone(), + config: config.clone(), + channels, + max_cliques: vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }], + sequence_counter: 0, + waiting_for_slot_index: Default::default(), + waiting_for_dependencies_index: Default::default(), + discarded_index: Default::default(), + to_propagate: Default::default(), + attack_attempts: Default::default(), + new_final_blocks: Default::default(), + new_stale_blocks: Default::default(), + incoming_index: Default::default(), + active_index: Default::default(), + save_final_periods: Default::default(), + latest_final_blocks_periods: Default::default(), + best_parents: Default::default(), + block_statuses: Default::default(), + genesis_hashes: Default::default(), + gi_head: Default::default(), + final_block_stats: Default::default(), + stale_block_stats: Default::default(), + protocol_blocks: Default::default(), + wishlist: Default::default(), + launch_time: MassaTime::now(config.clock_compensation_millis).unwrap(), + stats_desync_detection_timespan, + stats_history_timespan: std::cmp::max( + stats_desync_detection_timespan, + config.stats_timespan, + ), + prev_blockclique: Default::default(), + })); + + let shared_state_cloned = shared_state.clone(); + let consensus_thread = thread::Builder::new() + .name("consensus worker".into()) + .spawn(move || { + let mut consensus_worker = + ConsensusWorker::new(config, rx, shared_state_cloned, init_graph, storage).unwrap(); + consensus_worker.run() + }) + .expect("Can't spawn consensus thread."); + + let manager = ConsensusManagerImpl { + consensus_thread: Some((tx.clone(), consensus_thread)), + }; + + let controller = ConsensusControllerImpl::new(tx, shared_state); + + (Box::new(controller), Box::new(manager)) +} diff --git a/massa-factory-exports/src/types.rs b/massa-factory-exports/src/types.rs index 591b7ac4ea9..35a2675ce6f 100644 --- a/massa-factory-exports/src/types.rs +++ b/massa-factory-exports/src/types.rs @@ -1,4 +1,4 @@ -use massa_consensus_exports::ConsensusCommandSender; +use massa_consensus_exports::ConsensusController; use massa_models::block::Block; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; @@ -15,7 +15,7 @@ pub struct FactoryChannels { /// selector controller to get draws pub selector: Box, /// consensus controller - pub consensus: ConsensusCommandSender, + pub consensus: Box, /// pool controller pub pool: Box, /// protocol controller diff --git a/massa-factory-worker/Cargo.toml b/massa-factory-worker/Cargo.toml index 86afe9b3336..dc3b8b42138 100644 --- a/massa-factory-worker/Cargo.toml +++ b/massa-factory-worker/Cargo.toml @@ -22,18 +22,17 @@ massa_wallet = { path = "../massa-wallet" } massa_hash = { path = "../massa-hash" } massa_pos_exports = { path = "../massa-pos-exports" } massa_serialization = { path = "../massa-serialization" } -massa_consensus_exports = { path = 
"../massa-consensus-exports" } massa_pool_exports = { path = "../massa-pool-exports" } [dev-dependencies] serial_test = "0.9" massa_protocol_exports = { path = "../massa-protocol-exports", features=["testing"] } +massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } massa_wallet = { path = "../massa-wallet", features=["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features=["testing"] } -massa_consensus_exports = { path = "../massa-consensus-exports", features=["testing"] } massa_pool_exports = { path = "../massa-pool-exports", features=["testing"] } [features] sandbox = [] -testing = ["massa_factory_exports/testing", "massa_pos_exports/testing", "massa_pool_exports/testing", "massa_consensus_exports/testing", "massa_protocol_exports/testing", "massa_wallet/testing"] +testing = ["massa_factory_exports/testing", "massa_pos_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing", "massa_wallet/testing"] diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index a0a094d0f7f..cc690b78be2 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -143,12 +143,8 @@ impl BlockFactoryWorker { return; }; // get best parents and their periods - let parents: Vec<(BlockId, u64)> = self - .channels - .consensus - .get_best_parents() - .expect("Couldn't get best parents"); // Vec<(parent_id, parent_period)> - // generate the local storage object + let parents: Vec<(BlockId, u64)> = self.channels.consensus.get_best_parents(); // Vec<(parent_id, parent_period)> + // generate the local storage object let mut block_storage = self.channels.storage.clone_without_refs(); // claim block parents in local storage @@ -235,14 +231,9 @@ impl BlockFactoryWorker { ); // send full block to consensus - if self - .channels + self.channels .consensus - .send_block(block_id, slot, block_storage) - .is_err() - { - warn!("could not send produced block to consensus: channel error"); - } + .register_block(block_id, slot, block_storage, true); } /// main run loop of the block creator thread diff --git a/massa-factory-worker/src/endorsement_factory.rs b/massa-factory-worker/src/endorsement_factory.rs index 0c3205eb0a9..4c61cf0b062 100644 --- a/massa-factory-worker/src/endorsement_factory.rs +++ b/massa-factory-worker/src/endorsement_factory.rs @@ -162,23 +162,10 @@ impl EndorsementFactoryWorker { } // get consensus block ID for that slot - let endorsed_block: BlockId = match self + let endorsed_block: BlockId = self .channels .consensus - .get_latest_blockclique_block_at_slot(slot) - { - // error getting block ID at target slot - Err(_) => { - warn!( - "could not get latest blockclique block to create endorsement to be included at slot {}", - slot - ); - return; - } - - // latest block found - Ok(b_id) => b_id, - }; + .get_latest_blockclique_block_at_slot(slot); // produce endorsements let mut endorsements: Vec = Vec::with_capacity(producers_indices.len()); diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index 4e1c0cf82c3..ef2335f5874 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -1,3 +1,6 @@ +use massa_consensus_exports::test_exports::{ + ConsensusEventReceiver, MockConsensusController, MockConsensusControllerMessage, +}; use parking_lot::RwLock; use std::{ 
sync::{mpsc::Receiver, Arc}, @@ -5,7 +8,6 @@ use std::{ time::Duration, }; -use massa_consensus_exports::{commands::ConsensusCommand, test_exports::MockConsensusController}; use massa_factory_exports::{ test_exports::create_empty_block, FactoryChannels, FactoryConfig, FactoryManager, }; @@ -34,7 +36,7 @@ use massa_wallet::test_exports::create_test_wallet; /// You can use the method `new` to build all the mocks and make the connections /// Then you can use the method `get_next_created_block` that will manage the answers from the mock to the factory depending on the parameters you gave. pub struct TestFactory { - consensus_controller: MockConsensusController, + consensus_event_receiver: ConsensusEventReceiver, pool_receiver: PoolEventReceiver, selector_receiver: Receiver, factory_config: FactoryConfig, @@ -53,13 +55,12 @@ impl TestFactory { /// - `TestFactory`: the structure that will be used to manage the tests pub fn new(default_keypair: &KeyPair) -> TestFactory { let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - let (consensus_controller, consensus_command_sender, _consensus_event_receiver) = + let (consensus_controller, consensus_event_receiver) = MockConsensusController::new_with_receiver(); let (pool_controller, pool_receiver) = MockPoolController::new_with_receiver(); let mut storage = Storage::create_root(); let mut factory_config = FactoryConfig::default(); - let (_protocol_controller, protocol_command_sender, _protocol_event_receiver) = - MockProtocolController::new(); + let (_protocol_controller, protocol_command_sender) = MockProtocolController::new(); let producer_keypair = default_keypair; let producer_address = Address::from_public_key(&producer_keypair.get_public_key()); let mut accounts = PreHashMap::default(); @@ -82,7 +83,7 @@ impl TestFactory { Arc::new(RwLock::new(create_test_wallet(Some(accounts)))), FactoryChannels { selector: selector_controller.clone(), - consensus: consensus_command_sender, + consensus: consensus_controller, pool: pool_controller.clone(), protocol: protocol_command_sender, storage: storage.clone_without_refs(), @@ -90,7 +91,7 @@ impl TestFactory { ); TestFactory { - consensus_controller, + consensus_event_receiver, pool_receiver, selector_receiver, factory_config, @@ -150,17 +151,16 @@ impl TestFactory { _ => panic!("unexpected message"), } } - match self - .consensus_controller - .consensus_command_rx - .blocking_recv() - .unwrap() - { - ConsensusCommand::GetBestParents { response_tx } => { - response_tx.send(self.genesis_blocks.clone()).unwrap(); - } - _ => panic!("unexpected message"), - } + self.consensus_event_receiver + .wait_command(MassaTime::from_millis(100), |command| { + if let MockConsensusControllerMessage::GetBestParents { response_tx } = command { + response_tx.send(self.genesis_blocks.clone()).unwrap(); + Some(()) + } else { + None + } + }) + .unwrap(); self.pool_receiver .wait_command(MassaTime::from_millis(100), |command| match command { MockPoolControllerMessage::GetBlockEndorsements { @@ -203,23 +203,21 @@ impl TestFactory { _ => panic!("unexpected message"), }) .unwrap(); - match self - .consensus_controller - .consensus_command_rx - .blocking_recv() + self.consensus_event_receiver + .wait_command(MassaTime::from_millis(100), |command| { + if let MockConsensusControllerMessage::RegisterBlock { + block_id, + block_storage, + slot: _, + created: _, + } = command + { + Some((block_id, block_storage)) + } else { + None + } + }) .unwrap() - { - ConsensusCommand::SendBlock { - block_id, - 
block_storage, - slot: _, - response_tx, - } => { - response_tx.send(()).unwrap(); - (block_id, block_storage) - } - _ => panic!("unexpected message"), - } } } diff --git a/massa-graph/Cargo.toml b/massa-graph/Cargo.toml deleted file mode 100644 index dac2300d26f..00000000000 --- a/massa-graph/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "massa_graph" -version = "0.1.0" -authors = ["Massa Labs "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -displaydoc = "0.2" -num = { version = "0.4", features = ["serde"] } -nom = "7.1" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -thiserror = "1.0" -tracing = "0.1" -# custom modules -massa_pos_exports = { path = "../massa-pos-exports" } -massa_execution_exports = { path = "../massa-execution-exports" } -massa_hash = { path = "../massa-hash" } -massa_logging = { path = "../massa-logging" } -massa_models = { path = "../massa-models" } -massa_storage = { path = "../massa-storage" } -massa_signature = { path = "../massa-signature" } -massa_serialization = { path = "../massa-serialization"} - diff --git a/massa-graph/src/block_graph.rs b/massa-graph/src/block_graph.rs deleted file mode 100644 index 66549955784..00000000000 --- a/massa-graph/src/block_graph.rs +++ /dev/null @@ -1,2742 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! All information concerning blocks, the block graph and cliques is managed here. -use crate::{ - bootstrapable_graph::BootstrapableGraph, - error::{GraphError, GraphResult as Result}, - export_active_block::ExportActiveBlock, - settings::GraphConfig, -}; -use massa_hash::Hash; -use massa_logging::massa_trace; -use massa_models::prehash::{CapacityAllocator, PreHashMap, PreHashSet}; -use massa_models::{ - active_block::ActiveBlock, api::BlockGraphStatus, clique::Clique, wrapped::WrappedContent, -}; -use massa_models::{ - address::Address, - block::{ - Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock, - WrappedHeader, - }, - slot::Slot, -}; -use massa_pos_exports::SelectorController; -use massa_signature::PublicKey; -use massa_storage::Storage; -use serde::{Deserialize, Serialize}; -use std::collections::{hash_map, BTreeSet, HashMap, VecDeque}; -use std::mem; -use tracing::{debug, info}; - -#[derive(Debug, Clone)] -#[allow(clippy::large_enum_variant)] -enum HeaderOrBlock { - Header(WrappedHeader), - Block { - id: BlockId, - slot: Slot, - storage: Storage, - }, -} - -impl HeaderOrBlock { - /// Gets slot for that header or block - pub fn get_slot(&self) -> Slot { - match self { - HeaderOrBlock::Header(header) => header.content.slot, - HeaderOrBlock::Block { slot, .. } => *slot, - } - } -} - -/// Something can be discarded -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum DiscardReason { - /// Block is invalid, either structurally, or because of some incompatibility. The String contains the reason for info or debugging. - Invalid(String), - /// Block is incompatible with a final block. - Stale, - /// Block has enough fitness. - Final, -} - -/// Enum used in `BlockGraph`'s state machine -#[derive(Debug, Clone)] -enum BlockStatus { - /// The block/header has reached consensus but no consensus-level check has been performed. - /// It will be processed during the next iteration - Incoming(HeaderOrBlock), - /// The block's or header's slot is too much in the future. 
- /// It will be processed at the block/header slot - WaitingForSlot(HeaderOrBlock), - /// The block references an unknown Block id - WaitingForDependencies { - /// Given header/block - header_or_block: HeaderOrBlock, - /// includes self if it's only a header - unsatisfied_dependencies: PreHashSet, - /// Used to limit and sort the number of blocks/headers waiting for dependencies - sequence_number: u64, - }, - /// The block was checked and included in the blockgraph - Active { - a_block: Box, - storage: Storage, - }, - /// The block was discarded and is kept to avoid reprocessing it - Discarded { - /// Just the slot of that block - slot: Slot, - /// Address of the creator of the block - creator: Address, - /// Ids of parents blocks - parents: Vec, - /// why it was discarded - reason: DiscardReason, - /// Used to limit and sort the number of blocks/headers waiting for dependencies - sequence_number: u64, - }, -} - -/// Block status in the graph that can be exported. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum ExportBlockStatus { - /// received but not yet graph processed - Incoming, - /// waiting for its slot - WaitingForSlot, - /// waiting for a missing dependency - WaitingForDependencies, - /// valid and not yet final - Active(Block), - /// immutable - Final(Block), - /// not part of the graph - Discarded(DiscardReason), -} - -/// The block version that can be exported. -/// Note that the detailed list of operation is not exported -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExportCompiledBlock { - /// Header of the corresponding block. - pub header: WrappedHeader, - /// For (i, set) in children, - /// set contains the headers' hashes - /// of blocks referencing exported block as a parent, - /// in thread i. - pub children: Vec>, - /// Active or final - pub is_final: bool, -} - -/// Status -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub enum Status { - /// without enough fitness to be part of immutable history - Active, - /// with enough fitness to be part of immutable history - Final, -} - -impl<'a> BlockGraphExport { - /// Conversion from blockgraph. - pub fn extract_from( - block_graph: &'a BlockGraph, - slot_start: Option, - slot_end: Option, - ) -> Result { - let mut export = BlockGraphExport { - genesis_blocks: block_graph.genesis_hashes.clone(), - active_blocks: PreHashMap::with_capacity(block_graph.block_statuses.len()), - discarded_blocks: PreHashMap::with_capacity(block_graph.block_statuses.len()), - best_parents: block_graph.best_parents.clone(), - latest_final_blocks_periods: block_graph.latest_final_blocks_periods.clone(), - gi_head: block_graph.gi_head.clone(), - max_cliques: block_graph.max_cliques.clone(), - }; - - let filter = |&s| { - if let Some(s_start) = slot_start { - if s < s_start { - return false; - } - } - if let Some(s_end) = slot_end { - if s >= s_end { - return false; - } - } - true - }; - - for (hash, block) in block_graph.block_statuses.iter() { - match block { - BlockStatus::Discarded { - slot, - creator, - parents, - reason, - .. 
- } => { - if filter(slot) { - export - .discarded_blocks - .insert(*hash, (reason.clone(), (*slot, *creator, parents.clone()))); - } - } - BlockStatus::Active { a_block, storage } => { - if filter(&a_block.slot) { - let stored_block = - storage.read_blocks().get(hash).cloned().ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block in BlockGraphExport::extract_from: {}", - hash - )) - })?; - export.active_blocks.insert( - *hash, - ExportCompiledBlock { - header: stored_block.content.header, - children: a_block - .children - .iter() - .map(|thread| { - thread.keys().copied().collect::>() - }) - .collect(), - is_final: a_block.is_final, - }, - ); - } - } - _ => continue, - } - } - - Ok(export) - } -} - -/// Bootstrap compatible version of the block graph -#[derive(Debug, Clone)] -#[allow(clippy::type_complexity)] -pub struct BlockGraphExport { - /// Genesis blocks. - pub genesis_blocks: Vec, - /// Map of active blocks, were blocks are in their exported version. - pub active_blocks: PreHashMap, - /// Finite cache of discarded blocks, in exported version `(slot, creator_address, parents)`. - pub discarded_blocks: PreHashMap))>, - /// Best parents hashes in each thread. - pub best_parents: Vec<(BlockId, u64)>, - /// Latest final period and block hash in each thread. - pub latest_final_blocks_periods: Vec<(BlockId, u64)>, - /// Head of the incompatibility graph. - pub gi_head: PreHashMap>, - /// List of maximal cliques of compatible blocks. - pub max_cliques: Vec, -} - -/// Graph management -pub struct BlockGraph { - /// Consensus related configuration - cfg: GraphConfig, - /// Block ids of genesis blocks - genesis_hashes: Vec, - /// Used to limit the number of waiting and discarded blocks - sequence_counter: u64, - /// Every block we know about - block_statuses: PreHashMap, - /// Ids of incoming blocks/headers - incoming_index: PreHashSet, - /// ids of waiting for slot blocks/headers - waiting_for_slot_index: PreHashSet, - /// ids of waiting for dependencies blocks/headers - waiting_for_dependencies_index: PreHashSet, - /// ids of active blocks - active_index: PreHashSet, - /// ids of discarded blocks - discarded_index: PreHashSet, - /// One (block id, period) per thread - latest_final_blocks_periods: Vec<(BlockId, u64)>, - /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` - best_parents: Vec<(BlockId, u64)>, - /// Incompatibility graph: maps a block id to the block ids it is incompatible with - /// One entry per Active Block - gi_head: PreHashMap>, - /// All the cliques - max_cliques: Vec, - /// Blocks that need to be propagated - to_propagate: PreHashMap, - /// List of block ids we think are attack attempts - attack_attempts: Vec, - /// Newly final blocks - new_final_blocks: PreHashSet, - /// Newly stale block mapped to creator and slot - new_stale_blocks: PreHashMap, - /// Shared storage, - storage: Storage, - /// Selector controller - selector_controller: Box, -} - -/// Possible output of a header check -#[derive(Debug)] -enum HeaderCheckOutcome { - /// it's ok and here are some useful values - Proceed { - /// one (parent block id, parent's period) per thread - parents_hash_period: Vec<(BlockId, u64)>, - /// blocks that header is incompatible with - incompatibilities: PreHashSet, - /// number of incompatibilities that are inherited from the parents - inherited_incompatibilities_count: usize, - /// fitness - fitness: u64, - }, - /// there is something wrong with that header - Discard(DiscardReason), - /// it 
must wait for its slot to be fully processed - WaitForSlot, - /// it must wait for these block ids to be fully processed - WaitForDependencies(PreHashSet), -} - -/// Possible outcomes of endorsements check -#[derive(Debug)] -enum EndorsementsCheckOutcome { - /// Everything is ok - Proceed, - /// There is something wrong with that endorsement - Discard(DiscardReason), - /// It must wait for its slot to be fully processed - WaitForSlot, -} - -/// Creates genesis block in given thread. -/// -/// # Arguments -/// * `cfg`: consensus configuration -/// * `thread_number`: thread in which we want a genesis block -pub fn create_genesis_block( - cfg: &GraphConfig, - thread_number: u8, -) -> Result<(BlockId, WrappedBlock)> { - let keypair = &cfg.genesis_key; - let header = BlockHeader::new_wrapped( - BlockHeader { - slot: Slot::new(0, thread_number), - parents: Vec::new(), - operation_merkle_root: Hash::compute_from(&Vec::new()), - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - keypair, - )?; - - Ok(( - header.id, - Block::new_wrapped( - Block { - header, - operations: Default::default(), - }, - BlockSerializer::new(), - keypair, - )?, - )) -} - -impl BlockGraph { - /// Creates a new `BlockGraph`. - /// - /// # Argument - /// * `cfg`: consensus configuration. - /// * `init`: A bootstrap graph to start the graph with - /// * `storage`: A shared storage that share data across all modules. - /// * `selector_controller`: Access to the PoS selector to get draws - pub async fn new( - cfg: GraphConfig, - init: Option, - storage: Storage, - selector_controller: Box, - ) -> Result { - // load genesis blocks - - let mut block_statuses = PreHashMap::default(); - let mut genesis_block_ids = Vec::with_capacity(cfg.thread_count as usize); - for thread in 0u8..cfg.thread_count { - let (block_id, block) = create_genesis_block(&cfg, thread).map_err(|err| { - GraphError::GenesisCreationError(format!("genesis error {}", err)) - })?; - let mut storage = storage.clone_without_refs(); - storage.store_block(block.clone()); - genesis_block_ids.push(block_id); - block_statuses.insert( - block_id, - BlockStatus::Active { - a_block: Box::new(ActiveBlock { - creator_address: block.creator_address, - parents: Vec::new(), - children: vec![PreHashMap::default(); cfg.thread_count as usize], - descendants: Default::default(), - is_final: true, - block_id, - slot: block.content.header.content.slot, - fitness: block.get_fitness(), - }), - storage, - }, - ); - } - - massa_trace!("consensus.block_graph.new", {}); - if let Some(BootstrapableGraph { final_blocks }) = init { - // load final blocks - let final_blocks: Vec<(ActiveBlock, Storage)> = final_blocks - .into_iter() - .map(|export_b| export_b.to_active_block(&storage, cfg.thread_count)) - .collect::>()?; - - // compute latest_final_blocks_periods - let mut latest_final_blocks_periods: Vec<(BlockId, u64)> = - genesis_block_ids.iter().map(|id| (*id, 0u64)).collect(); - for (b, _) in &final_blocks { - if let Some(v) = latest_final_blocks_periods.get_mut(b.slot.thread as usize) { - if b.slot.period > v.1 { - *v = (b.block_id, b.slot.period); - } - } - } - - // generate graph - let mut res_graph = BlockGraph { - cfg: cfg.clone(), - sequence_counter: 0, - genesis_hashes: genesis_block_ids, - active_index: final_blocks.iter().map(|(b, _)| b.block_id).collect(), - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), - discarded_index: Default::default(), - best_parents: 
latest_final_blocks_periods.clone(), - latest_final_blocks_periods, - gi_head: Default::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - selector_controller, - block_statuses: final_blocks - .into_iter() - .map(|(b, s)| { - Ok(( - b.block_id, - BlockStatus::Active { - a_block: Box::new(b), - storage: s, - }, - )) - }) - .collect::>()?, - }; - - // claim parent refs - for (_b_id, block_status) in res_graph.block_statuses.iter_mut() { - if let BlockStatus::Active { - a_block, - storage: block_storage, - } = block_status - { - // claim parent refs - let n_claimed_parents = block_storage - .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) - .len(); - - if !a_block.is_final { - // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals - if n_claimed_parents != cfg.thread_count as usize { - return Err(GraphError::MissingBlock( - "block storage could not claim refs to all parent blocks".into(), - )); - } - } - } - } - - // list active block parents - let active_blocks_map: PreHashMap)> = res_graph - .block_statuses - .iter() - .filter_map(|(h, s)| { - if let BlockStatus::Active { a_block: a, .. } = s { - return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); - } - None - }) - .collect(); - // deduce children and descendants - for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { - // deduce children - for parent_id in &b_parents { - if let Some(BlockStatus::Active { - a_block: parent, .. - }) = res_graph.block_statuses.get_mut(parent_id) - { - parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); - } - } - - // deduce descendants - let mut ancestors: VecDeque = b_parents.into_iter().collect(); - let mut visited: PreHashSet = Default::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - res_graph.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(b_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } - } - Ok(res_graph) - } else { - Ok(BlockGraph { - cfg, - sequence_counter: 0, - block_statuses, - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), - active_index: genesis_block_ids.iter().copied().collect(), - discarded_index: Default::default(), - latest_final_blocks_periods: genesis_block_ids.iter().map(|h| (*h, 0)).collect(), - best_parents: genesis_block_ids.iter().map(|v| (*v, 0)).collect(), - genesis_hashes: genesis_block_ids, - gi_head: PreHashMap::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - selector_controller, - }) - } - } - - /// export full graph in a bootstrap compatible version - pub fn export_bootstrap_graph(&self) -> Result { - let mut required_final_blocks: PreHashSet<_> = self.list_required_active_blocks()?; - required_final_blocks.retain(|b_id| { - if let Some(BlockStatus::Active { a_block, .. 
}) = self.block_statuses.get(b_id) { - if a_block.is_final { - // filter only final actives - return true; - } - } - false - }); - let mut final_blocks: Vec = - Vec::with_capacity(required_final_blocks.len()); - for b_id in &required_final_blocks { - if let Some(BlockStatus::Active { a_block, storage }) = self.block_statuses.get(b_id) { - final_blocks.push(ExportActiveBlock::from_active_block(a_block, storage)); - } else { - return Err(GraphError::ContainerInconsistency(format!( - "block {} was expected to be active but wasn't on bootstrap graph export", - b_id - ))); - } - } - - Ok(BootstrapableGraph { final_blocks }) - } - - /// Gets latest final blocks (hash, period) for each thread. - pub fn get_latest_final_blocks_periods(&self) -> &Vec<(BlockId, u64)> { - &self.latest_final_blocks_periods - } - - /// Gets best parents. - pub fn get_best_parents(&self) -> &Vec<(BlockId, u64)> { - &self.best_parents - } - - /// Gets the list of cliques. - pub fn get_cliques(&self) -> Vec { - self.max_cliques.clone() - } - - /// Returns the list of block IDs created by a given address, and their finality statuses - pub fn get_block_ids_by_creator(&self, address: &Address) -> PreHashMap { - // iterate on active (final and non-final) blocks - self.active_index - .iter() - .filter_map(|block_id| match self.block_statuses.get(block_id) { - Some(BlockStatus::Active { a_block, .. }) => { - if a_block.creator_address == *address { - Some(( - *block_id, - if a_block.is_final { - Status::Final - } else { - Status::Active - }, - )) - } else { - None - } - } - _ => None, - }) - .collect() - } - - /// Gets whole compiled block corresponding to given hash, if it is active. - /// - /// # Argument - /// * `block_id`: block ID - pub fn get_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { - BlockGraph::get_full_active_block(&self.block_statuses, *block_id) - } - - /// get block graph status - pub fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus { - match self.block_statuses.get(block_id) { - None => BlockGraphStatus::NotFound, - Some(BlockStatus::Active { a_block, .. }) => { - if a_block.is_final { - BlockGraphStatus::Final - } else if self - .max_cliques - .iter() - .find(|clique| clique.is_blockclique) - .expect("blockclique absent") - .block_ids - .contains(block_id) - { - BlockGraphStatus::ActiveInBlockclique - } else { - BlockGraphStatus::ActiveInAlternativeCliques - } - } - Some(BlockStatus::Discarded { .. }) => BlockGraphStatus::Discarded, - Some(BlockStatus::Incoming(_)) => BlockGraphStatus::Incoming, - Some(BlockStatus::WaitingForDependencies { .. }) => { - BlockGraphStatus::WaitingForDependencies - } - Some(BlockStatus::WaitingForSlot(_)) => BlockGraphStatus::WaitingForSlot, - } - } - - /// signal new slot - pub fn slot_tick(&mut self, current_slot: Option) -> Result<()> { - // list all elements for which the time has come - let to_process: BTreeSet<(Slot, BlockId)> = self - .waiting_for_slot_index - .iter() - .filter_map(|b_id| match self.block_statuses.get(b_id) { - Some(BlockStatus::WaitingForSlot(header_or_block)) => { - let slot = header_or_block.get_slot(); - if Some(slot) <= current_slot { - Some((slot, *b_id)) - } else { - None - } - } - _ => None, - }) - .collect(); - - massa_trace!("consensus.block_graph.slot_tick", {}); - // process those elements - self.rec_process(to_process, current_slot)?; - - Ok(()) - } - - /// A new header has come ! - /// - /// Checks performed: - /// - Ignore genesis blocks. - /// - See `process`. 
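The deleted `slot_tick` above drains the waiting-for-slot queue: it collects every queued block or header whose slot has been reached and re-acknowledges them in `(slot, block_id)` order. A minimal standalone sketch of that draining step, using a plain `BTreeSet` and hypothetical simplified `Slot`/`BlockId` aliases rather than the crate's types:

use std::collections::BTreeSet;

// Hypothetical simplifications for illustration only.
type Slot = (u64, u8); // (period, thread), ordered like the real Slot
type BlockId = u64;

/// Remove and return, in (slot, id) order, every queued entry whose slot has been reached.
fn drain_ready(waiting: &mut BTreeSet<(Slot, BlockId)>, current_slot: Slot) -> Vec<BlockId> {
    // BTreeSet iterates in ascending (slot, id) order, so all ready entries come first.
    let ready: Vec<(Slot, BlockId)> = waiting
        .iter()
        .copied()
        .take_while(|(slot, _)| *slot <= current_slot)
        .collect();
    for entry in &ready {
        waiting.remove(entry);
    }
    // The real code then feeds each id back through `rec_process`/`process`.
    ready.into_iter().map(|(_, id)| id).collect()
}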
- pub fn incoming_header( - &mut self, - block_id: BlockId, - header: WrappedHeader, - current_slot: Option, - ) -> Result<()> { - // ignore genesis blocks - if self.genesis_hashes.contains(&block_id) { - return Ok(()); - } - - debug!( - "received header {} for slot {}", - block_id, header.content.slot - ); - massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); - let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); - match self.block_statuses.entry(block_id) { - // if absent => add as Incoming, call rec_ack on it - hash_map::Entry::Vacant(vac) => { - to_ack.insert((header.content.slot, block_id)); - vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); - self.incoming_index.insert(block_id); - } - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - BlockStatus::Discarded { - sequence_number, .. - } => { - // promote if discarded - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - BlockStatus::WaitingForDependencies { .. } => { - // promote in dependencies - self.promote_dep_tree(block_id)?; - } - _ => {} - }, - } - - // process - self.rec_process(to_ack, current_slot)?; - - Ok(()) - } - - /// A new block has come - /// - /// Checks performed: - /// - Ignore genesis blocks. - /// - See `process`. - pub fn incoming_block( - &mut self, - block_id: BlockId, - slot: Slot, - current_slot: Option, - storage: Storage, - ) -> Result<()> { - // ignore genesis blocks - if self.genesis_hashes.contains(&block_id) { - return Ok(()); - } - - debug!("received block {} for slot {}", block_id, slot); - - let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); - match self.block_statuses.entry(block_id) { - // if absent => add as Incoming, call rec_ack on it - hash_map::Entry::Vacant(vac) => { - to_ack.insert((slot, block_id)); - vac.insert(BlockStatus::Incoming(HeaderOrBlock::Block { - id: block_id, - slot, - storage, - })); - self.incoming_index.insert(block_id); - } - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - BlockStatus::Discarded { - sequence_number, .. - } => { - // promote if discarded - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - BlockStatus::WaitingForSlot(header_or_block) => { - // promote to full block - *header_or_block = HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }; - } - BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - .. - } => { - // promote to full block and satisfy self-dependency - if unsatisfied_dependencies.remove(&block_id) { - // a dependency was satisfied: process - to_ack.insert((slot, block_id)); - } - *header_or_block = HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }; - // promote in dependencies - self.promote_dep_tree(block_id)?; - } - _ => return Ok(()), - }, - } - - // process - self.rec_process(to_ack, current_slot)?; - - Ok(()) - } - - fn new_sequence_number(sequence_counter: &mut u64) -> u64 { - let res = *sequence_counter; - *sequence_counter += 1; - res - } - - /// acknowledge a set of items recursively - fn rec_process( - &mut self, - mut to_ack: BTreeSet<(Slot, BlockId)>, - current_slot: Option, - ) -> Result<()> { - // order processing by (slot, hash) - while let Some((_slot, hash)) = to_ack.pop_first() { - to_ack.extend(self.process(hash, current_slot)?) 
- } - Ok(()) - } - - /// Acknowledge a single item, return a set of items to re-ack - fn process( - &mut self, - block_id: BlockId, - current_slot: Option, - ) -> Result> { - // list items to reprocess - let mut reprocess = BTreeSet::new(); - - massa_trace!("consensus.block_graph.process", { "block_id": block_id }); - // control all the waiting states and try to get a valid block - let ( - valid_block_creator, - valid_block_slot, - valid_block_parents_hash_period, - valid_block_incomp, - valid_block_inherited_incomp_count, - valid_block_storage, - valid_block_fitness, - ) = match self.block_statuses.get(&block_id) { - None => return Ok(BTreeSet::new()), // disappeared before being processed: do nothing - - // discarded: do nothing - Some(BlockStatus::Discarded { .. }) => { - massa_trace!("consensus.block_graph.process.discarded", { - "block_id": block_id - }); - return Ok(BTreeSet::new()); - } - - // already active: do nothing - Some(BlockStatus::Active { .. }) => { - massa_trace!("consensus.block_graph.process.active", { - "block_id": block_id - }); - return Ok(BTreeSet::new()); - } - - // incoming header - Some(BlockStatus::Incoming(HeaderOrBlock::Header(_))) => { - massa_trace!("consensus.block_graph.process.incoming_header", { - "block_id": block_id - }); - // remove header - let header = if let Some(BlockStatus::Incoming(HeaderOrBlock::Header(header))) = - self.block_statuses.remove(&block_id) - { - self.incoming_index.remove(&block_id); - header - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses removing incoming header {}", - block_id - ))); - }; - match self.check_header(&block_id, &header, current_slot)? { - HeaderCheckOutcome::Proceed { .. } => { - // set as waiting dependencies - let mut dependencies = PreHashSet::::default(); - dependencies.insert(block_id); // add self as unsatisfied - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - - massa_trace!( - "consensus.block_graph.process.incoming_header.waiting_for_self", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForDependencies(mut dependencies) => { - // set as waiting dependencies - dependencies.insert(block_id); // add self as unsatisfied - massa_trace!("consensus.block_graph.process.incoming_header.waiting_for_dependencies", {"block_id": block_id, "dependencies": dependencies}); - - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForSlot => { - // make it wait for slot - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForSlot(HeaderOrBlock::Header(header)), - ); - self.waiting_for_slot_index.insert(block_id); - - massa_trace!( - "consensus.block_graph.process.incoming_header.waiting_for_slot", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::Discard(reason) => { - 
self.maybe_note_attack_attempt(&reason, &block_id); - massa_trace!("consensus.block_graph.process.incoming_header.discarded", {"block_id": block_id, "reason": reason}); - // count stales - if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); - } - // discard - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents, - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - - return Ok(BTreeSet::new()); - } - } - } - - // incoming block - Some(BlockStatus::Incoming(HeaderOrBlock::Block { id: block_id, .. })) => { - let block_id = *block_id; - massa_trace!("consensus.block_graph.process.incoming_block", { - "block_id": block_id - }); - let (slot, storage) = - if let Some(BlockStatus::Incoming(HeaderOrBlock::Block { - slot, storage, .. - })) = self.block_statuses.remove(&block_id) - { - self.incoming_index.remove(&block_id); - (slot, storage) - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses removing incoming block {}", - block_id - ))); - }; - let stored_block = storage - .read_blocks() - .get(&block_id) - .cloned() - .expect("incoming block not found in storage"); - - match self.check_header(&block_id, &stored_block.content.header, current_slot)? { - HeaderCheckOutcome::Proceed { - parents_hash_period, - incompatibilities, - inherited_incompatibilities_count, - fitness, - } => { - // block is valid: remove it from Incoming and return it - massa_trace!("consensus.block_graph.process.incoming_block.valid", { - "block_id": block_id - }); - ( - stored_block.content.header.creator_public_key, - slot, - parents_hash_period, - incompatibilities, - inherited_incompatibilities_count, - storage, - fitness, - ) - } - HeaderCheckOutcome::WaitForDependencies(dependencies) => { - // set as waiting dependencies - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }, - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - massa_trace!( - "consensus.block_graph.process.incoming_block.waiting_for_dependencies", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForSlot => { - // set as waiting for slot - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForSlot(HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }), - ); - self.waiting_for_slot_index.insert(block_id); - - massa_trace!( - "consensus.block_graph.process.incoming_block.waiting_for_slot", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::Discard(reason) => { - self.maybe_note_attack_attempt(&reason, &block_id); - massa_trace!("consensus.block_graph.process.incoming_block.discarded", {"block_id": block_id, "reason": reason}); - // count stales - if reason == DiscardReason::Stale { - self.new_stale_blocks.insert( - block_id, - ( - stored_block.content.header.creator_address, - stored_block.content.header.content.slot, - ), - ); - } - // add to discard - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: 
stored_block.content.header.content.slot, - creator: stored_block.creator_address, - parents: stored_block.content.header.content.parents.clone(), - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - - return Ok(BTreeSet::new()); - } - } - } - - Some(BlockStatus::WaitingForSlot(header_or_block)) => { - massa_trace!("consensus.block_graph.process.waiting_for_slot", { - "block_id": block_id - }); - let slot = header_or_block.get_slot(); - if Some(slot) > current_slot { - massa_trace!( - "consensus.block_graph.process.waiting_for_slot.in_the_future", - { "block_id": block_id } - ); - // in the future: ignore - return Ok(BTreeSet::new()); - } - // send back as incoming and ask for reprocess - if let Some(BlockStatus::WaitingForSlot(header_or_block)) = - self.block_statuses.remove(&block_id) - { - self.waiting_for_slot_index.remove(&block_id); - self.block_statuses - .insert(block_id, BlockStatus::Incoming(header_or_block)); - self.incoming_index.insert(block_id); - reprocess.insert((slot, block_id)); - massa_trace!( - "consensus.block_graph.process.waiting_for_slot.reprocess", - { "block_id": block_id } - ); - return Ok(reprocess); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); - }; - } - - Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) => { - massa_trace!("consensus.block_graph.process.waiting_for_dependencies", { - "block_id": block_id - }); - if !unsatisfied_dependencies.is_empty() { - // still has unsatisfied dependencies: ignore - return Ok(BTreeSet::new()); - } - // send back as incoming and ask for reprocess - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, .. - }) = self.block_statuses.remove(&block_id) - { - self.waiting_for_dependencies_index.remove(&block_id); - reprocess.insert((header_or_block.get_slot(), block_id)); - self.block_statuses - .insert(block_id, BlockStatus::Incoming(header_or_block)); - self.incoming_index.insert(block_id); - massa_trace!( - "consensus.block_graph.process.waiting_for_dependencies.reprocess", - { "block_id": block_id } - ); - return Ok(reprocess); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot header or block {}", block_id))); - } - } - }; - - // add block to graph - self.add_block_to_graph( - block_id, - valid_block_parents_hash_period, - valid_block_creator, - valid_block_slot, - valid_block_incomp, - valid_block_inherited_incomp_count, - valid_block_fitness, - valid_block_storage, - )?; - - // if the block was added, update linked dependencies and mark satisfied ones for recheck - if let Some(BlockStatus::Active { storage, .. }) = self.block_statuses.get(&block_id) { - massa_trace!("consensus.block_graph.process.is_active", { - "block_id": block_id - }); - self.to_propagate.insert(block_id, storage.clone()); - for itm_block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - .. 
- }) = self.block_statuses.get_mut(itm_block_id) - { - if unsatisfied_dependencies.remove(&block_id) { - // a dependency was satisfied: retry - reprocess.insert((header_or_block.get_slot(), *itm_block_id)); - } - } - } - } - - Ok(reprocess) - } - - /// Mark a block as invalid - pub fn invalid_block( - &mut self, - block_id: &BlockId, - header: WrappedHeader, - ) -> Result<(), GraphError> { - let reason = DiscardReason::Invalid("invalid".to_string()); - self.maybe_note_attack_attempt(&reason, block_id); - massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); - - // add to discard - self.block_statuses.insert( - *block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents, - reason, - sequence_number: BlockGraph::new_sequence_number(&mut self.sequence_counter), - }, - ); - self.discarded_index.insert(*block_id); - - Ok(()) - } - - /// Note an attack attempt if the discard reason indicates one. - fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { - massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason}); - // If invalid, note the attack attempt. - if let DiscardReason::Invalid(reason) = reason { - info!( - "consensus.block_graph.maybe_note_attack_attempt DiscardReason::Invalid:{}", - reason - ); - self.attack_attempts.push(*hash); - } - } - - /// Gets whole `ActiveBlock` corresponding to given `block_id` - /// - /// # Argument - /// * `block_id`: block ID - fn get_full_active_block( - block_statuses: &PreHashMap, - block_id: BlockId, - ) -> Option<(&ActiveBlock, &Storage)> { - match block_statuses.get(&block_id) { - Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), - _ => None, - } - } - - /// Gets a block and all its descendants - /// - /// # Argument - /// * hash : hash of the given block - fn get_active_block_and_descendants(&self, block_id: &BlockId) -> Result> { - let mut to_visit = vec![*block_id]; - let mut result = PreHashSet::::default(); - while let Some(visit_h) = to_visit.pop() { - if !result.insert(visit_h) { - continue; // already visited - } - BlockGraph::get_full_active_block(&self.block_statuses, visit_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h)))? - .0 - .children - .iter() - .for_each(|thread_children| to_visit.extend(thread_children.keys())); - } - Ok(result) - } - - /// Process an incoming header. - /// - /// Checks performed: - /// - Number of parents matches thread count. - /// - Slot above 0. - /// - Valid thread. - /// - Check that the block is older than the latest final one in thread. - /// - Check that the block slot is not too much into the future, - /// as determined by the configuration `future_block_processing_max_periods`. - /// - Check if it was the creator's turn to create this block. - /// - TODO: check for double staking. - /// - Check parents are present. - /// - Check the topological consistency of the parents. - /// - Check endorsements. - /// - Check thread incompatibility test. - /// - Check grandpa incompatibility test. - /// - Check if the block is incompatible with a parent. - /// - Check if the block is incompatible with a final block. 
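The checklist above is implemented by `check_header` below. As a standalone illustration of the two slot-based checks (staleness against the latest final period in the block's thread, and the `future_block_processing_max_periods` window), here is a simplified sketch with hypothetical free-standing types rather than the crate's `WrappedHeader`/`GraphConfig`:

// Hypothetical simplified types for illustration.
struct Slot { period: u64, thread: u8 }

enum PreCheck { Stale, WaitForSlot, Proceed }

fn pre_check_slot(
    header_slot: &Slot,
    latest_final_periods: &[u64], // latest final period, one per thread
    current_slot: Option<&Slot>,
    future_block_processing_max_periods: u64,
) -> PreCheck {
    // A header at or below the latest final period of its own thread is stale.
    if header_slot.period <= latest_final_periods[header_slot.thread as usize] {
        return PreCheck::Stale;
    }
    // A header too far beyond the current period waits for its slot instead of flooding the queue.
    match current_slot {
        Some(cur)
            if header_slot.period
                > cur.period.saturating_add(future_block_processing_max_periods) =>
        {
            PreCheck::WaitForSlot
        }
        _ => PreCheck::Proceed,
    }
}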
- fn check_header( - &self, - block_id: &BlockId, - header: &WrappedHeader, - current_slot: Option, - ) -> Result { - massa_trace!("consensus.block_graph.check_header", { - "block_id": block_id - }); - let mut parents: Vec<(BlockId, u64)> = Vec::with_capacity(self.cfg.thread_count as usize); - let mut incomp = PreHashSet::::default(); - let mut missing_deps = PreHashSet::::default(); - let creator_addr = header.creator_address; - - // check that is older than the latest final block in that thread - // Note: this excludes genesis blocks - if header.content.slot.period - <= self.latest_final_blocks_periods[header.content.slot.thread as usize].1 - { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - - // check if block slot is too much in the future - if let Some(cur_slot) = current_slot { - if header.content.slot.period - > cur_slot - .period - .saturating_add(self.cfg.future_block_processing_max_periods) - { - return Ok(HeaderCheckOutcome::WaitForSlot); - } - } - - // check if it was the creator's turn to create this block - let slot_draw_address = match self.selector_controller.get_producer(header.content.slot) { - Ok(draw) => draw, - Err(_) => return Ok(HeaderCheckOutcome::WaitForSlot), // TODO properly handle PoS errors - }; - if creator_addr != slot_draw_address { - // it was not the creator's turn to create a block for this slot - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - format!("Bad creator turn for the slot:{}", header.content.slot), - ))); - } - - // check if block is in the future: queue it - // note: do it after testing signature + draw to prevent queue flooding/DoS - // note: Some(x) > None - if Some(header.content.slot) > current_slot { - return Ok(HeaderCheckOutcome::WaitForSlot); - } - - // Note: here we will check if we already have a block for that slot - // and if someone double staked, they will be denounced - - // list parents and ensure they are present - let parent_set: PreHashSet = header.content.parents.iter().copied().collect(); - for parent_thread in 0u8..self.cfg.thread_count { - let parent_hash = header.content.parents[parent_thread as usize]; - match self.block_statuses.get(&parent_hash) { - Some(BlockStatus::Discarded { reason, .. }) => { - // parent is discarded - return Ok(HeaderCheckOutcome::Discard(match reason { - DiscardReason::Invalid(invalid_reason) => DiscardReason::Invalid(format!( - "discarded because a parent was discarded for the following reason: {}", - invalid_reason - )), - r => r.clone(), - })); - } - Some(BlockStatus::Active { - a_block: parent, .. 
- }) => { - // parent is active - - // check that the parent is from an earlier slot in the right thread - if parent.slot.thread != parent_thread || parent.slot >= header.content.slot { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - format!( - "Bad parent {} in thread:{} or slot:{} for {}.", - parent_hash, parent_thread, parent.slot, header.content.slot - ), - ))); - } - - // inherit parent incompatibilities - // and ensure parents are mutually compatible - if let Some(p_incomp) = self.gi_head.get(&parent_hash) { - if !p_incomp.is_disjoint(&parent_set) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "Parent not mutually compatible".to_string(), - ))); - } - incomp.extend(p_incomp); - } - - parents.push((parent_hash, parent.slot.period)); - } - _ => { - // parent is missing or queued - if self.genesis_hashes.contains(&parent_hash) { - // forbid depending on discarded genesis block - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - missing_deps.insert(parent_hash); - } - } - } - if !missing_deps.is_empty() { - return Ok(HeaderCheckOutcome::WaitForDependencies(missing_deps)); - } - let inherited_incomp_count = incomp.len(); - - // check the topological consistency of the parents - { - let mut gp_max_slots = vec![0u64; self.cfg.thread_count as usize]; - for parent_i in 0..self.cfg.thread_count { - let (parent_h, parent_period) = parents[parent_i as usize]; - let parent = self - .get_active_block(&parent_h) - .ok_or_else(|| { - GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses searching parent {} of block {}", - parent_h, block_id - )) - })? - .0; - if parent_period < gp_max_slots[parent_i as usize] { - // a parent is earlier than a block known by another parent in that thread - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "a parent is earlier than a block known by another parent in that thread" - .to_string(), - ))); - } - gp_max_slots[parent_i as usize] = parent_period; - if parent_period == 0 { - // genesis - continue; - } - for gp_i in 0..self.cfg.thread_count { - if gp_i == parent_i { - continue; - } - let gp_h = parent.parents[gp_i as usize].0; - match self.block_statuses.get(&gp_h) { - // this grandpa is discarded - Some(BlockStatus::Discarded { reason, .. }) => { - return Ok(HeaderCheckOutcome::Discard(reason.clone())); - } - // this grandpa is active - Some(BlockStatus::Active { a_block: gp, .. }) => { - if gp.slot.period > gp_max_slots[gp_i as usize] { - if gp_i < parent_i { - return Ok(HeaderCheckOutcome::Discard( - DiscardReason::Invalid( - "grandpa error: gp_i < parent_i".to_string(), - ), - )); - } - gp_max_slots[gp_i as usize] = gp.slot.period; - } - } - // this grandpa is missing, assume stale - _ => return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)), - } - } - } - } - - // get parent in own thread - let parent_in_own_thread = BlockGraph::get_full_active_block( - &self.block_statuses, - parents[header.content.slot.thread as usize].0, - ) - .ok_or_else(|| { - GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses searching parent {} in own thread of block {}", - parents[header.content.slot.thread as usize].0, block_id - )) - })? - .0; - - // check endorsements - match self.check_endorsements(header)? 
{ - EndorsementsCheckOutcome::Proceed => {} - EndorsementsCheckOutcome::Discard(reason) => { - return Ok(HeaderCheckOutcome::Discard(reason)) - } - EndorsementsCheckOutcome::WaitForSlot => return Ok(HeaderCheckOutcome::WaitForSlot), - } - - // thread incompatibility test - parent_in_own_thread.children[header.content.slot.thread as usize] - .keys() - .filter(|&sibling_h| sibling_h != block_id) - .try_for_each(|&sibling_h| { - incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); - Result::<()>::Ok(()) - })?; - - // grandpa incompatibility test - for tau in (0u8..self.cfg.thread_count).filter(|&t| t != header.content.slot.thread) { - // for each parent in a different thread tau - // traverse parent's descendants in tau - let mut to_explore = vec![(0usize, header.content.parents[tau as usize])]; - while let Some((cur_gen, cur_h)) = to_explore.pop() { - let cur_b = BlockGraph::get_full_active_block(&self.block_statuses, cur_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?.0; - - // traverse but do not check up to generation 1 - if cur_gen <= 1 { - to_explore.extend( - cur_b.children[tau as usize] - .keys() - .map(|&c_h| (cur_gen + 1, c_h)), - ); - continue; - } - - let parent_id = { - self.storage - .read_blocks() - .get(&cur_b.block_id) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block in grandpa incomp test: {}", - cur_b.block_id - )) - })? - .content - .header - .content - .parents[header.content.slot.thread as usize] - }; - - // check if the parent in tauB has a strictly lower period number than B's parent in tauB - // note: cur_b cannot be genesis at gen > 1 - if BlockGraph::get_full_active_block( - &self.block_statuses, - parent_id, - ) - .ok_or_else(|| - GraphError::ContainerInconsistency( - format!("inconsistency inside block statuses searching {} check if the parent in tauB has a strictly lower period number than B's parent in tauB while checking grandpa incompatibility of block {}", - parent_id, - block_id) - ))? - .0 - .slot - .period - < parent_in_own_thread.slot.period - { - // GPI detected - incomp.extend(self.get_active_block_and_descendants(&cur_h)?); - } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse - } - } - - // check if the block is incompatible with a parent - if !incomp.is_disjoint(&parents.iter().map(|(h, _p)| *h).collect()) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "Block incompatible with a parent".to_string(), - ))); - } - - // check if the block is incompatible with a final block - if !incomp.is_disjoint( - &self - .active_index - .iter() - .filter_map(|h| { - if let Some(BlockStatus::Active { a_block: a, .. 
}) = self.block_statuses.get(h) - { - if a.is_final { - return Some(*h); - } - } - None - }) - .collect(), - ) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - massa_trace!("consensus.block_graph.check_header.ok", { - "block_id": block_id - }); - - Ok(HeaderCheckOutcome::Proceed { - parents_hash_period: parents, - incompatibilities: incomp, - inherited_incompatibilities_count: inherited_incomp_count, - fitness: header.get_fitness(), - }) - } - - /// check endorsements: - /// * endorser was selected for that (slot, index) - /// * endorsed slot is `parent_in_own_thread` slot - fn check_endorsements(&self, header: &WrappedHeader) -> Result { - // check endorsements - let endorsement_draws = match self.selector_controller.get_selection(header.content.slot) { - Ok(sel) => sel.endorsements, - Err(_) => return Ok(EndorsementsCheckOutcome::WaitForSlot), - }; - for endorsement in header.content.endorsements.iter() { - // check that the draw is correct - if endorsement.creator_address != endorsement_draws[endorsement.content.index as usize] - { - return Ok(EndorsementsCheckOutcome::Discard(DiscardReason::Invalid( - format!( - "endorser draw mismatch for header in slot: {}", - header.content.slot - ), - ))); - } - - // note that the following aspects are checked in protocol - // * signature - // * index reuse - // * slot matching the block's - // * the endorsed block is the containing block's parent - } - - Ok(EndorsementsCheckOutcome::Proceed) - } - - /// get genesis block ids - pub fn get_genesis_block_ids(&self) -> &Vec { - &self.genesis_hashes - } - - /// Computes max cliques of compatible blocks - pub fn compute_max_cliques(&self) -> Vec> { - let mut max_cliques: Vec> = Vec::new(); - - // algorithm adapted from IK_GPX as summarized in: - // Cazals et al., "A note on the problem of reporting maximal cliques" - // Theoretical Computer Science, 2008 - // https://doi.org/10.1016/j.tcs.2008.05.010 - - // stack: r, p, x - let mut stack: Vec<( - PreHashSet, - PreHashSet, - PreHashSet, - )> = vec![( - PreHashSet::::default(), - self.gi_head.keys().cloned().collect(), - PreHashSet::::default(), - )]; - while let Some((r, mut p, mut x)) = stack.pop() { - if p.is_empty() && x.is_empty() { - max_cliques.push(r); - continue; - } - // choose the pivot vertex following the GPX scheme: - // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) - let &u_p = p - .union(&x) - .max_by_key(|&u| { - p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) - .count() - }) - .unwrap(); // p was checked to be non-empty before - - // iterate over u_set = (p /\ Neighbors(u_p, GI)) - let u_set: PreHashSet = - &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); - for u_i in u_set.into_iter() { - p.remove(&u_i); - let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); - let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; - stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); - x.insert(u_i); - } - } - if max_cliques.is_empty() { - // make sure at least one clique remains - max_cliques = vec![PreHashSet::::default()]; - } - max_cliques - } - - #[allow(clippy::too_many_arguments)] - fn add_block_to_graph( - &mut self, - add_block_id: BlockId, - parents_hash_period: Vec<(BlockId, u64)>, - add_block_creator: PublicKey, - add_block_slot: Slot, - incomp: PreHashSet, - inherited_incomp_count: usize, - fitness: u64, - mut storage: Storage, - ) -> Result<()> { - massa_trace!("consensus.block_graph.add_block_to_graph", { - "block_id": 
add_block_id - }); - - // Ensure block parents are claimed by the block's storage. - // Note that operations and endorsements should already be there (claimed in Protocol). - storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect()); - - // add block to status structure - self.block_statuses.insert( - add_block_id, - BlockStatus::Active { - a_block: Box::new(ActiveBlock { - creator_address: Address::from_public_key(&add_block_creator), - parents: parents_hash_period.clone(), - descendants: PreHashSet::::default(), - block_id: add_block_id, - children: vec![Default::default(); self.cfg.thread_count as usize], - is_final: false, - slot: add_block_slot, - fitness, - }), - storage, - }, - ); - self.active_index.insert(add_block_id); - - // add as child to parents - for (parent_h, _parent_period) in parents_hash_period.iter() { - if let Some(BlockStatus::Active { - a_block: a_parent, .. - }) = self.block_statuses.get_mut(parent_h) - { - a_parent.children[add_block_slot.thread as usize] - .insert(add_block_id, add_block_slot.period); - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses adding child {} of block {}", - add_block_id, parent_h - ))); - } - } - - // add as descendant to ancestors. Note: descendants are never removed. - { - let mut ancestors: VecDeque = - parents_hash_period.iter().map(|(h, _)| *h).collect(); - let mut visited = PreHashSet::::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - self.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(add_block_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } - } - - // add incompatibilities to gi_head - massa_trace!( - "consensus.block_graph.add_block_to_graph.add_incompatibilities", - {} - ); - for incomp_h in incomp.iter() { - self.gi_head - .get_mut(incomp_h) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when adding incomp to gi_head: {}", - incomp_h - )) - })? 
- .insert(add_block_id); - } - self.gi_head.insert(add_block_id, incomp.clone()); - - // max cliques update - massa_trace!( - "consensus.block_graph.add_block_to_graph.max_cliques_update", - {} - ); - if incomp.len() == inherited_incomp_count { - // clique optimization routine: - // the block only has incompatibilities inherited from its parents - // therefore it is not forking and can simply be added to the cliques it is compatible with - self.max_cliques - .iter_mut() - .filter(|c| incomp.is_disjoint(&c.block_ids)) - .for_each(|c| { - c.block_ids.insert(add_block_id); - }); - } else { - // fully recompute max cliques - massa_trace!( - "consensus.block_graph.add_block_to_graph.clique_full_computing", - { "hash": add_block_id } - ); - let before = self.max_cliques.len(); - self.max_cliques = self - .compute_max_cliques() - .into_iter() - .map(|c| Clique { - block_ids: c, - fitness: 0, - is_blockclique: false, - }) - .collect(); - let after = self.max_cliques.len(); - if before != after { - massa_trace!( - "consensus.block_graph.add_block_to_graph.clique_full_computing more than one clique", - { "cliques": self.max_cliques, "gi_head": self.gi_head } - ); - // gi_head - debug!( - "clique number went from {} to {} after adding {}", - before, after, add_block_id - ); - } - } - - // compute clique fitnesses and find blockclique - massa_trace!("consensus.block_graph.add_block_to_graph.compute_clique_fitnesses_and_find_blockclique", {}); - // note: clique_fitnesses is pair (fitness, -hash_sum) where the second parameter is negative for sorting - { - let mut blockclique_i = 0usize; - let mut max_clique_fitness = (0u64, num::BigInt::default()); - for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { - clique.fitness = 0; - clique.is_blockclique = false; - let mut sum_hash = num::BigInt::default(); - for block_h in clique.block_ids.iter() { - clique.fitness = clique.fitness - .checked_add( - BlockGraph::get_full_active_block(&self.block_statuses, *block_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h)))? - .0.fitness, - ) - .ok_or(GraphError::FitnessOverflow)?; - sum_hash -= - num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes()); - } - let cur_fit = (clique.fitness, sum_hash); - if cur_fit > max_clique_fitness { - blockclique_i = clique_i; - max_clique_fitness = cur_fit; - } - } - self.max_cliques[blockclique_i].is_blockclique = true; - } - - // update best parents - massa_trace!( - "consensus.block_graph.add_block_to_graph.update_best_parents", - {} - ); - { - // find blockclique - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let blockclique = &self.max_cliques[blockclique_i]; - - // init best parents as latest_final_blocks_periods - self.best_parents = self.latest_final_blocks_periods.clone(); - // for each blockclique block, set it as best_parent in its own thread - // if its period is higher than the current best_parent in that thread - for block_h in blockclique.block_ids.iter() { - let b_slot = BlockGraph::get_full_active_block(&self.block_statuses, *block_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h)))? 
- .0.slot; - if b_slot.period > self.best_parents[b_slot.thread as usize].1 { - self.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); - } - } - } - - // list stale blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_stale_blocks", - {} - ); - let stale_blocks = { - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let fitness_threshold = self.max_cliques[blockclique_i] - .fitness - .saturating_sub(self.cfg.delta_f0); - // iterate from largest to smallest to minimize reallocations - let mut indices: Vec = (0..self.max_cliques.len()).collect(); - indices - .sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); - let mut high_set = PreHashSet::::default(); - let mut low_set = PreHashSet::::default(); - for clique_i in indices.into_iter() { - if self.max_cliques[clique_i].fitness >= fitness_threshold { - high_set.extend(&self.max_cliques[clique_i].block_ids); - } else { - low_set.extend(&self.max_cliques[clique_i].block_ids); - } - } - self.max_cliques.retain(|c| c.fitness >= fitness_threshold); - &low_set - &high_set - }; - // mark stale blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.mark_stale_blocks", - {} - ); - for stale_block_hash in stale_blocks.into_iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - storage: _storage, - }) = self.block_statuses.remove(&stale_block_hash) - { - self.active_index.remove(&stale_block_hash); - if active_block.is_final { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, stale_block_hash))); - } - - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&stale_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&stale_block_hash); - } - } - } - - // remove from cliques - let stale_block_fitness = active_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&stale_block_hash) { - c.fitness -= stale_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }]; - } - - // remove from parent's children - for (parent_h, _parent_period) in active_block.parents.iter() { - if let Some(BlockStatus::Active { - a_block: parent_active_block, - .. 
- }) = self.block_statuses.get_mut(parent_h) - { - parent_active_block.children[active_block.slot.thread as usize] - .remove(&stale_block_hash); - } - } - - massa_trace!("consensus.block_graph.add_block_to_graph.stale", { - "hash": stale_block_hash - }); - - // mark as stale - self.new_stale_blocks.insert( - stale_block_hash, - (active_block.creator_address, active_block.slot), - ); - self.block_statuses.insert( - stale_block_hash, - BlockStatus::Discarded { - slot: active_block.slot, - creator: active_block.creator_address, - parents: active_block.parents.iter().map(|(h, _)| *h).collect(), - reason: DiscardReason::Stale, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(stale_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, stale_block_hash))); - } - } - - // list final blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks", - {} - ); - let final_blocks = { - // short-circuiting intersection of cliques from smallest to largest - let mut indices: Vec = (0..self.max_cliques.len()).collect(); - indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); - let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); - for i in 1..indices.len() { - final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); - if final_candidates.is_empty() { - break; - } - } - - // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest) - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", - {} - ); - indices.retain(|&i| self.max_cliques[i].fitness > self.cfg.delta_f0); - indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); - - let mut final_blocks = PreHashSet::::default(); - for clique_i in indices.into_iter() { - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks.loop", - { "clique_i": clique_i } - ); - // check in cliques from highest to lowest fitness - if final_candidates.is_empty() { - // no more final candidates - break; - } - let clique = &self.max_cliques[clique_i]; - - // compute the total fitness of all the descendants of the candidate within the clique - let loc_candidates = final_candidates.clone(); - for candidate_h in loc_candidates.into_iter() { - let desc_fit: u64 = - BlockGraph::get_full_active_block(&self.block_statuses, candidate_h) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when computing total fitness of descendants: {}", - candidate_h - )) - })? - .0 - .descendants - .intersection(&clique.block_ids) - .map(|h| { - if let Some(BlockStatus::Active { a_block: ab, .. 
}) = - self.block_statuses.get(h) - { - return ab.fitness; - } - 0 - }) - .sum(); - if desc_fit > self.cfg.delta_f0 { - // candidate is final - final_candidates.remove(&candidate_h); - final_blocks.insert(candidate_h); - } - } - } - final_blocks - }; - - // mark final blocks and update latest_final_blocks_periods - massa_trace!( - "consensus.block_graph.add_block_to_graph.mark_final_blocks", - {} - ); - for final_block_hash in final_blocks.into_iter() { - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&final_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&final_block_hash); - } - } - } - - // mark as final and update latest_final_blocks_periods - if let Some(BlockStatus::Active { - a_block: final_block, - .. - }) = self.block_statuses.get_mut(&final_block_hash) - { - massa_trace!("consensus.block_graph.add_block_to_graph.final", { - "hash": final_block_hash - }); - final_block.is_final = true; - // remove from cliques - let final_block_fitness = final_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&final_block_hash) { - c.fitness -= final_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }]; - } - // update latest final blocks - if final_block.slot.period - > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 - { - self.latest_final_blocks_periods[final_block.slot.thread as usize] = - (final_block_hash, final_block.slot.period); - } - // update new final blocks list - self.new_final_blocks.insert(final_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, final_block_hash))); - } - } - - massa_trace!("consensus.block_graph.add_block_to_graph.end", {}); - Ok(()) - } - - fn list_required_active_blocks(&self) -> Result> { - // list all active blocks - let mut retain_active: PreHashSet = - PreHashSet::::with_capacity(self.active_index.len()); - - let latest_final_blocks: Vec = self - .latest_final_blocks_periods - .iter() - .map(|(hash, _)| *hash) - .collect(); - - // retain all non-final active blocks, - // the current "best parents", - // and the dependencies for both. - for block_id in self.active_index.iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - .. 
- }) = self.block_statuses.get(block_id) - { - if !active_block.is_final - || self.best_parents.iter().any(|(b, _p)| b == block_id) - || latest_final_blocks.contains(block_id) - { - retain_active.extend(active_block.parents.iter().map(|(p, _)| *p)); - retain_active.insert(*block_id); - } - } - } - - // retain best parents - retain_active.extend(self.best_parents.iter().map(|(b, _p)| *b)); - - // retain last final blocks - retain_active.extend(self.latest_final_blocks_periods.iter().map(|(h, _)| *h)); - - for (thread, id) in latest_final_blocks.iter().enumerate() { - let mut current_block_id = *id; - while let Some((current_block, _)) = self.get_active_block(¤t_block_id) { - let parent_id = { - if !current_block.parents.is_empty() { - Some(current_block.parents[thread].0) - } else { - None - } - }; - - // retain block - retain_active.insert(current_block_id); - - // stop traversing when reaching a block with period number low enough - // so that any of its operations will have their validity period expired at the latest final block in thread - // note: one more is kept because of the way we iterate - if current_block.slot.period - < self.latest_final_blocks_periods[thread] - .1 - .saturating_sub(self.cfg.operation_validity_periods) - { - break; - } - - // if not genesis, traverse parent - match parent_id { - Some(p_id) => current_block_id = p_id, - None => break, - } - } - } - - // grow with parents & fill thread holes twice - for _ in 0..2 { - // retain the parents of the selected blocks - let retain_clone = retain_active.clone(); - - for retain_h in retain_clone.into_iter() { - retain_active.extend( - self.get_active_block(&retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? - .0.parents - .iter() - .map(|(b_id, _p)| *b_id), - ) - } - - // find earliest kept slots in each thread - let mut earliest_retained_periods: Vec = self - .latest_final_blocks_periods - .iter() - .map(|(_, p)| *p) - .collect(); - for retain_h in retain_active.iter() { - let retain_slot = &self - .get_active_block(retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? - .0.slot; - earliest_retained_periods[retain_slot.thread as usize] = std::cmp::min( - earliest_retained_periods[retain_slot.thread as usize], - retain_slot.period, - ); - } - - // fill up from the latest final block back to the earliest for each thread - for thread in 0..self.cfg.thread_count { - let mut cursor = self.latest_final_blocks_periods[thread as usize].0; // hash of tha latest final in that thread - while let Some((c_block, _)) = self.get_active_block(&cursor) { - if c_block.slot.period < earliest_retained_periods[thread as usize] { - break; - } - retain_active.insert(cursor); - if c_block.parents.is_empty() { - // genesis - break; - } - cursor = c_block.parents[thread as usize].0; - } - } - } - - Ok(retain_active) - } - - /// prune active blocks and return final blocks, return discarded final blocks - fn prune_active(&mut self) -> Result> { - // list required active blocks - let mut retain_active = self.list_required_active_blocks()?; - - // retain extra history according to the config - // this is useful to avoid desync on temporary connection loss - for a_block in self.active_index.iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - .. 
- }) = self.block_statuses.get(a_block) - { - let (_b_id, latest_final_period) = - self.latest_final_blocks_periods[active_block.slot.thread as usize]; - if active_block.slot.period - >= latest_final_period.saturating_sub(self.cfg.force_keep_final_periods) - { - retain_active.insert(*a_block); - } - } - } - - // remove unused final active blocks - let mut discarded_finals: PreHashMap = PreHashMap::default(); - let to_remove: Vec = self - .active_index - .difference(&retain_active) - .copied() - .collect(); - for discard_active_h in to_remove { - let block_slot; - let block_creator; - let block_parents; - { - let read_blocks = self.storage.read_blocks(); - let block = read_blocks.get(&discard_active_h).ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when removing unused final active blocks: {}", - discard_active_h - )) - })?; - block_slot = block.content.header.content.slot; - block_creator = block.creator_address; - block_parents = block.content.header.content.parents.clone(); - }; - - let discarded_active = if let Some(BlockStatus::Active { - a_block: discarded_active, - .. - }) = self.block_statuses.remove(&discard_active_h) - { - self.active_index.remove(&discard_active_h); - discarded_active - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and removing unused final active blocks - {} is missing", discard_active_h))); - }; - - // remove from parent's children - for (parent_h, _parent_period) in discarded_active.parents.iter() { - if let Some(BlockStatus::Active { - a_block: parent_active_block, - .. - }) = self.block_statuses.get_mut(parent_h) - { - parent_active_block.children[discarded_active.slot.thread as usize] - .remove(&discard_active_h); - } - } - - massa_trace!("consensus.block_graph.prune_active", {"hash": discard_active_h, "reason": DiscardReason::Final}); - - // mark as final - self.block_statuses.insert( - discard_active_h, - BlockStatus::Discarded { - slot: block_slot, - creator: block_creator, - parents: block_parents, - reason: DiscardReason::Final, - sequence_number: BlockGraph::new_sequence_number(&mut self.sequence_counter), - }, - ); - self.discarded_index.insert(discard_active_h); - - discarded_finals.insert(discard_active_h, *discarded_active); - } - - Ok(discarded_finals) - } - - fn promote_dep_tree(&mut self, hash: BlockId) -> Result<()> { - let mut to_explore = vec![hash]; - let mut to_promote: PreHashMap = PreHashMap::default(); - while let Some(h) = to_explore.pop() { - if to_promote.contains_key(&h) { - continue; - } - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - sequence_number, - .. - }) = self.block_statuses.get(&h) - { - // promote current block - to_promote.insert(h, (header_or_block.get_slot(), *sequence_number)); - // register dependencies for exploration - to_explore.extend(unsatisfied_dependencies); - } - } - - let mut to_promote: Vec<(Slot, u64, BlockId)> = to_promote - .into_iter() - .map(|(h, (slot, seq))| (slot, seq, h)) - .collect(); - to_promote.sort_unstable(); // last ones should have the highest seq number - for (_slot, _seq, h) in to_promote.into_iter() { - if let Some(BlockStatus::WaitingForDependencies { - sequence_number, .. 
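The extra-history retention test used in the pruning code above is simple enough to state on its own; a sketch with plain integers (the real code compares `Slot` periods per thread):

/// Sketch of the extra-history retention test used while pruning active
/// blocks: keep a block whose period is within `force_keep_final_periods`
/// of the latest final period in its thread.
fn keep_for_extra_history(
    block_period: u64,
    latest_final_period_in_thread: u64,
    force_keep_final_periods: u64,
) -> bool {
    block_period >= latest_final_period_in_thread.saturating_sub(force_keep_final_periods)
}

fn main() {
    // with force_keep_final_periods = 10 and latest final period 100,
    // blocks from period 90 onward are retained
    assert!(keep_for_extra_history(95, 100, 10));
    assert!(!keep_for_extra_history(80, 100, 10));
}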
- }) = self.block_statuses.get_mut(&h) - { - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - } - Ok(()) - } - - fn prune_waiting_for_dependencies(&mut self) -> Result<()> { - let mut to_discard: PreHashMap> = PreHashMap::default(); - let mut to_keep: PreHashMap = PreHashMap::default(); - - // list items that are older than the latest final blocks in their threads or have deps that are discarded - { - for block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - sequence_number, - }) = self.block_statuses.get(block_id) - { - // has already discarded dependencies => discard (choose worst reason) - let mut discard_reason = None; - let mut discarded_dep_found = false; - for dep in unsatisfied_dependencies.iter() { - if let Some(BlockStatus::Discarded { reason, .. }) = - self.block_statuses.get(dep) - { - discarded_dep_found = true; - match reason { - DiscardReason::Invalid(reason) => { - discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", block_id, reason))); - break; - } - DiscardReason::Stale => discard_reason = Some(DiscardReason::Stale), - DiscardReason::Final => discard_reason = Some(DiscardReason::Stale), - } - } - } - if discarded_dep_found { - to_discard.insert(*block_id, discard_reason); - continue; - } - - // is at least as old as the latest final block in its thread => discard as stale - let slot = header_or_block.get_slot(); - if slot.period <= self.latest_final_blocks_periods[slot.thread as usize].1 { - to_discard.insert(*block_id, Some(DiscardReason::Stale)); - continue; - } - - // otherwise, mark as to_keep - to_keep.insert(*block_id, (*sequence_number, header_or_block.get_slot())); - } - } - } - - // discard in chain and because of limited size - while !to_keep.is_empty() { - // mark entries as to_discard and remove them from to_keep - for (hash, _old_order) in to_keep.clone().into_iter() { - if let Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) = self.block_statuses.get(&hash) - { - // has dependencies that will be discarded => discard (choose worst reason) - let mut discard_reason = None; - let mut dep_to_discard_found = false; - for dep in unsatisfied_dependencies.iter() { - if let Some(reason) = to_discard.get(dep) { - dep_to_discard_found = true; - match reason { - Some(DiscardReason::Invalid(reason)) => { - discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", hash, reason))); - break; - } - Some(DiscardReason::Stale) => { - discard_reason = Some(DiscardReason::Stale) - } - Some(DiscardReason::Final) => { - discard_reason = Some(DiscardReason::Stale) - } - None => {} // leave as None - } - } - } - if dep_to_discard_found { - to_keep.remove(&hash); - to_discard.insert(hash, discard_reason); - continue; - } - } - } - - // remove worst excess element - if to_keep.len() > self.cfg.max_dependency_blocks { - let remove_elt = to_keep - .iter() - .filter_map(|(hash, _old_order)| { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - sequence_number, - .. 
- }) = self.block_statuses.get(hash) - { - return Some((sequence_number, header_or_block.get_slot(), *hash)); - } - None - }) - .min(); - if let Some((_seq_num, _slot, hash)) = remove_elt { - to_keep.remove(&hash); - to_discard.insert(hash, None); - continue; - } - } - - // nothing happened: stop loop - break; - } - - // transition states to Discarded if there is a reason, otherwise just drop - for (block_id, reason_opt) in to_discard.drain() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, .. - }) = self.block_statuses.remove(&block_id) - { - self.waiting_for_dependencies_index.remove(&block_id); - let header = match header_or_block { - HeaderOrBlock::Header(h) => h, - HeaderOrBlock::Block { id: block_id, .. } => self - .storage - .read_blocks() - .get(&block_id) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when pruning waiting for deps: {}", - block_id - )) - })? - .content - .header - .clone(), - }; - massa_trace!("consensus.block_graph.prune_waiting_for_dependencies", {"hash": block_id, "reason": reason_opt}); - - if let Some(reason) = reason_opt { - // add to stats if reason is Stale - if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); - } - // transition to Discarded only if there is a reason - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents.clone(), - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - } - } - } - - Ok(()) - } - - fn prune_slot_waiting(&mut self) { - if self.waiting_for_slot_index.len() <= self.cfg.max_future_processing_blocks { - return; - } - let mut slot_waiting: Vec<(Slot, BlockId)> = self - .waiting_for_slot_index - .iter() - .filter_map(|block_id| { - if let Some(BlockStatus::WaitingForSlot(header_or_block)) = - self.block_statuses.get(block_id) - { - return Some((header_or_block.get_slot(), *block_id)); - } - None - }) - .collect(); - slot_waiting.sort_unstable(); - let len_slot_waiting = slot_waiting.len(); - (self.cfg.max_future_processing_blocks..len_slot_waiting).for_each(|idx| { - let (_slot, block_id) = &slot_waiting[idx]; - self.block_statuses.remove(block_id); - self.waiting_for_slot_index.remove(block_id); - }); - } - - fn prune_discarded(&mut self) -> Result<()> { - if self.discarded_index.len() <= self.cfg.max_discarded_blocks { - return Ok(()); - } - let mut discard_hashes: Vec<(u64, BlockId)> = self - .discarded_index - .iter() - .filter_map(|block_id| { - if let Some(BlockStatus::Discarded { - sequence_number, .. 
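The slot-waiting pruning above boils down to keeping the entries with the earliest slots up to a configured cap; a sketch with simplified `(period, block id)` pairs standing in for the real `Slot`/`BlockId` index:

/// Sketch of the slot-waiting pruning strategy: when the waiting set grows
/// past the cap, keep only the entries with the earliest slots.
fn prune_slot_waiting(waiting: &mut Vec<(u64, u64)>, max_future_processing_blocks: usize) {
    if waiting.len() <= max_future_processing_blocks {
        return;
    }
    // (slot period, block id) pairs: sorting puts the earliest slots first
    waiting.sort_unstable();
    waiting.truncate(max_future_processing_blocks);
}

fn main() {
    let mut waiting = vec![(12, 3), (5, 1), (9, 2), (20, 4)];
    prune_slot_waiting(&mut waiting, 2);
    assert_eq!(waiting, vec![(5, 1), (9, 2)]);
}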
- }) = self.block_statuses.get(block_id) - { - return Some((*sequence_number, *block_id)); - } - None - }) - .collect(); - discard_hashes.sort_unstable(); - discard_hashes.truncate(self.discarded_index.len() - self.cfg.max_discarded_blocks); - for (_, block_id) in discard_hashes.iter() { - self.block_statuses.remove(block_id); - self.discarded_index.remove(block_id); - } - Ok(()) - } - - /// prune and return final blocks, return discarded final blocks - pub fn prune(&mut self) -> Result> { - let before = self.max_cliques.len(); - // Step 1: discard final blocks that are not useful to the graph anymore and return them - let discarded_finals = self.prune_active()?; - - // Step 2: prune slot waiting blocks - self.prune_slot_waiting(); - - // Step 3: prune dependency waiting blocks - self.prune_waiting_for_dependencies()?; - - // Step 4: prune discarded - self.prune_discarded()?; - - let after = self.max_cliques.len(); - if before != after { - debug!( - "clique number went from {} to {} after pruning", - before, after - ); - } - - Ok(discarded_finals) - } - - /// get the current block wish list, including the operations hash. - pub fn get_block_wishlist(&self) -> Result>> { - let mut wishlist = PreHashMap::>::default(); - for block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) = self.block_statuses.get(block_id) - { - for unsatisfied_h in unsatisfied_dependencies.iter() { - match self.block_statuses.get(unsatisfied_h) { - Some(BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - .. - }) => { - wishlist.insert(header.id, Some(header.clone())); - } - None => { - wishlist.insert(*unsatisfied_h, None); - } - _ => {} - } - } - } - } - - Ok(wishlist) - } - - /// get clique count - pub fn get_clique_count(&self) -> usize { - self.max_cliques.len() - } - - /// get the clique of higher fitness - pub fn get_blockclique(&self) -> &PreHashSet { - &self - .max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("blockclique missing") - .block_ids - } - - /// get the blockclique (or final) block ID at a given slot, if any - pub fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option { - // List all blocks at this slot. - // The list should be small: make a copy of it to avoid holding the storage lock. - let blocks_at_slot = { - let storage_read = self.storage.read_blocks(); - let returned = match storage_read.get_blocks_by_slot(slot) { - Some(v) => v.clone(), - None => return None, - }; - returned - }; - - // search for the block in the blockclique - let search_in_blockclique = blocks_at_slot - .intersection( - &self - .max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("expected one clique to be the blockclique") - .block_ids, - ) - .next(); - if let Some(found_id) = search_in_blockclique { - return Some(*found_id); - } - - // block not found in the blockclique: search in the final blocks - blocks_at_slot - .into_iter() - .find(|b_id| match self.block_statuses.get(b_id) { - Some(BlockStatus::Active { a_block, .. 
}) => a_block.is_final, - _ => false, - }) - } - - /// get the latest blockclique (or final) block ID that is the most recent, but still strictly older than `slot`, in the same thread as `slot` - pub fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId { - let (mut best_block_id, mut best_block_period) = self - .latest_final_blocks_periods - .get(slot.thread as usize) - .unwrap_or_else(|| panic!("unexpected not found latest final block period")); - - self.max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("expected one clique to be the blockclique") - .block_ids - .iter() - .for_each(|id| match self.block_statuses.get(id) { - Some(BlockStatus::Active { - a_block, - storage: _, - }) => { - if a_block.is_final { - panic!( - "unexpected final block on getting latest blockclique block at slot" - ); - } - if a_block.slot.thread == slot.thread - && a_block.slot.period < slot.period - && a_block.slot.period > best_block_period - { - best_block_period = a_block.slot.period; - best_block_id = *id; - } - } - _ => { - panic!("expected to find only active block but found another status") - } - }); - best_block_id - } - - /// Gets all stored final blocks, not only the still-useful ones - /// This is used when initializing Execution from Consensus. - /// Since the Execution bootstrap snapshot is older than the Consensus snapshot, - /// we might need to signal older final blocks for Execution to catch up. - pub fn get_all_final_blocks(&self) -> HashMap { - self.active_index - .iter() - .map(|b_id| { - let (a_block, _storage) = - self.get_active_block(b_id).expect("active block missing"); - (*b_id, a_block.slot) - }) - .collect() - } - - /// Get the block id's to be propagated. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_blocks_to_propagate(&mut self) -> PreHashMap { - mem::take(&mut self.to_propagate) - } - - /// Get the hashes of objects that were attack attempts. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_attack_attempts(&mut self) -> Vec { - mem::take(&mut self.attack_attempts) - } - - /// Get the ids of blocks that became final. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_new_final_blocks(&mut self) -> PreHashSet { - mem::take(&mut self.new_final_blocks) - } - - /// Get the ids of blocks that became stale. - /// Must be called by the consensus worker within `block_db_changed`. 
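The slot lookup above reads compactly as: prefer a block of the requested slot inside the blockclique, otherwise fall back to a final block at that slot. A sketch with plain `u64` ids standing in for `BlockId`:

use std::collections::HashSet;

type BlockId = u64;

/// Prefer a block of the requested slot inside the blockclique, otherwise
/// fall back to a final block at that slot.
fn blockclique_block_at_slot(
    blocks_at_slot: &HashSet<BlockId>,
    blockclique: &HashSet<BlockId>,
    final_blocks: &HashSet<BlockId>,
) -> Option<BlockId> {
    blocks_at_slot
        .intersection(blockclique)
        .next()
        .or_else(|| blocks_at_slot.intersection(final_blocks).next())
        .copied()
}

fn main() {
    let blocks_at_slot = HashSet::from([1, 2]);
    let blockclique = HashSet::from([3, 4]);
    let final_blocks = HashSet::from([2]);
    // nothing from that slot is in the blockclique, but block 2 is final there
    assert_eq!(
        blockclique_block_at_slot(&blocks_at_slot, &blockclique, &final_blocks),
        Some(2)
    );
}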
- pub fn get_new_stale_blocks(&mut self) -> PreHashMap { - mem::take(&mut self.new_stale_blocks) - } -} diff --git a/massa-graph/src/error.rs b/massa-graph/src/error.rs deleted file mode 100644 index 43822b0c09e..00000000000 --- a/massa-graph/src/error.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use displaydoc::Display; -use massa_execution_exports::ExecutionError; -use massa_models::error::ModelsError; -use std::array::TryFromSliceError; -use thiserror::Error; - -/// Result used in the graph -pub type GraphResult = core::result::Result; - -/// Result used in the ledger -pub type LedgerResult = core::result::Result; - -/// Graph error -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum GraphError { - /// execution error: {0} - ExecutionError(#[from] ExecutionError), - /// models error: {0} - ModelsError(#[from] ModelsError), - /// Could not create genesis block {0} - GenesisCreationError(String), - /// missing block {0} - MissingBlock(String), - /// missing operation {0} - MissingOperation(String), - /// there was an inconsistency between containers {0} - ContainerInconsistency(String), - /// fitness overflow - FitnessOverflow, - /// invalid ledger change: {0} - InvalidLedgerChange(String), - /// io error {0} - IOError(#[from] std::io::Error), - /// serde error - SerdeError(#[from] serde_json::Error), - /// Proof of stake cycle unavailable {0} - PosCycleUnavailable(String), - /// Ledger error {0} - LedgerError(#[from] LedgerError), - /// transaction error {0} - TransactionError(String), -} - -/// Internal error -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum InternalError { - /// transaction error {0} - TransactionError(String), -} - -/// Ledger error -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum LedgerError { - /// amount overflow - AmountOverflowError, - /// ledger inconsistency error {0} - LedgerInconsistency(String), - /// models error: {0} - ModelsError(#[from] ModelsError), - /// try from slice error {0} - TryFromSliceError(#[from] TryFromSliceError), - /// io error {0} - IOError(#[from] std::io::Error), - /// serde error - SerdeError(#[from] serde_json::Error), -} diff --git a/massa-graph/src/lib.rs b/massa-graph/src/lib.rs deleted file mode 100644 index 6f78cb49505..00000000000 --- a/massa-graph/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -//! graph management -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] - -extern crate massa_logging; - -/// useful structures -pub mod export_active_block; - -mod bootstrapable_graph; -pub use bootstrapable_graph::{ - BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, -}; - -mod block_graph; -pub use block_graph::*; - -/// graph errors -pub mod error; - -/// graph settings -pub mod settings; diff --git a/massa-graph/src/settings.rs b/massa-graph/src/settings.rs deleted file mode 100644 index 751df7f0bf6..00000000000 --- a/massa-graph/src/settings.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -#![allow(clippy::assertions_on_constants)] -use massa_signature::KeyPair; -use serde::{Deserialize, Serialize}; - -/// Graph configuration -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct GraphConfig { - /// Number of threads - pub thread_count: u8, - /// Keypair to sign genesis blocks. - pub genesis_key: KeyPair, - /// Maximum number of blocks allowed in discarded blocks. 
- pub max_discarded_blocks: usize, - /// If a block `is future_block_processing_max_periods` periods in the future, it is just discarded. - pub future_block_processing_max_periods: u64, - /// Maximum number of blocks allowed in `FutureIncomingBlocks`. - pub max_future_processing_blocks: usize, - /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. - pub max_dependency_blocks: usize, - /// Threshold for fitness. - pub delta_f0: u64, - /// Maximum operation validity period count - pub operation_validity_periods: u64, - /// cycle duration in periods - pub periods_per_cycle: u64, - /// force keep at least this number of final periods in RAM for each thread - pub force_keep_final_periods: u64, - /// target number of endorsement per block - pub endorsement_count: u32, - /// pub `block_db_prune_interval`: `MassaTime`, - pub max_item_return_count: usize, -} diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 875b407ea1e..06f063d07bf 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +crossbeam-channel = "0.5.6" anyhow = "1.0" enum-map = { version = "2.4", features = ["serde"] } lazy_static = "1.4" @@ -51,14 +52,12 @@ massa_wallet = { path = "../massa-wallet" } massa_factory_exports = { path = "../massa-factory-exports" } massa_factory_worker = { path = "../massa-factory-worker" } - # for more information on what are the following features used for, see the cargo.toml at workspace level [features] beta = [] deadlock_detection = [] sandbox = [ "massa_bootstrap/sandbox", - "massa_consensus_exports/sandbox", "massa_consensus_worker/sandbox", "massa_execution_worker/sandbox", "massa_final_state/sandbox", diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index b3bf865fd11..dc352cb63b6 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -6,15 +6,14 @@ extern crate massa_logging; use crate::settings::SETTINGS; +use crossbeam_channel::{Receiver, TryRecvError}; use dialoguer::Password; use massa_api::{APIConfig, Private, Public, RpcServer, StopHandle, API}; use massa_async_pool::AsyncPoolConfig; use massa_bootstrap::{get_state, start_bootstrap_server, BootstrapConfig, BootstrapManager}; -use massa_consensus_exports::ConsensusManager; -use massa_consensus_exports::{ - events::ConsensusEvent, settings::ConsensusChannels, ConsensusConfig, ConsensusEventReceiver, -}; -use massa_consensus_worker::start_consensus_controller; +use massa_consensus_exports::events::ConsensusEvent; +use massa_consensus_exports::{ConsensusChannels, ConsensusConfig, ConsensusManager}; +use massa_consensus_worker::start_consensus_worker; use massa_executed_ops::ExecutedOpsConfig; use massa_execution_exports::{ExecutionConfig, ExecutionManager, StorageCostsConstants}; use massa_execution_worker::start_execution_worker; @@ -51,7 +50,9 @@ use massa_pool_exports::{PoolConfig, PoolManager}; use massa_pool_worker::start_pool_controller; use massa_pos_exports::{PoSConfig, SelectorConfig, SelectorManager}; use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::{ProtocolConfig, ProtocolManager}; +use massa_protocol_exports::{ + ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolManager, +}; use massa_protocol_worker::start_protocol_controller; use massa_storage::Storage; use massa_time::MassaTime; @@ -59,21 +60,22 @@ use massa_wallet::Wallet; use parking_lot::RwLock; use std::path::PathBuf; use 
std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; use std::{path::Path, process, sync::Arc}; use structopt::StructOpt; use tokio::signal; use tokio::sync::mpsc; use tracing::{error, info, warn}; use tracing_subscriber::filter::{filter_fn, LevelFilter}; - mod settings; async fn launch( node_wallet: Arc>, ) -> ( - ConsensusEventReceiver, + Receiver, Option, - ConsensusManager, + Box, Box, Box, Box, @@ -347,6 +349,50 @@ async fn launch( let (pool_manager, pool_controller) = start_pool_controller(pool_config, &shared_storage, execution_controller.clone()); + let (protocol_command_sender, protocol_command_receiver) = + mpsc::channel::(PROTOCOL_CONTROLLER_CHANNEL_SIZE); + + let consensus_config = ConsensusConfig { + genesis_timestamp: *GENESIS_TIMESTAMP, + end_timestamp: *END_TIMESTAMP, + thread_count: THREAD_COUNT, + t0: T0, + genesis_key: GENESIS_KEY.clone(), + max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, + future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, + max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, + max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, + delta_f0: DELTA_F0, + operation_validity_periods: OPERATION_VALIDITY_PERIODS, + periods_per_cycle: PERIODS_PER_CYCLE, + stats_timespan: SETTINGS.consensus.stats_timespan, + max_send_wait: SETTINGS.consensus.max_send_wait, + force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, + endorsement_count: ENDORSEMENT_COUNT, + block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, + max_item_return_count: SETTINGS.consensus.max_item_return_count, + max_gas_per_block: MAX_GAS_PER_BLOCK, + channel_size: CHANNEL_SIZE, + clock_compensation_millis: bootstrap_state.compensation_millis, + }; + + let (consensus_event_sender, consensus_event_receiver) = + crossbeam_channel::bounded(CHANNEL_SIZE); + let consensus_channels = ConsensusChannels { + execution_controller: execution_controller.clone(), + selector_controller: selector_controller.clone(), + pool_command_sender: pool_controller.clone(), + controller_event_tx: consensus_event_sender, + protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), + }; + + let (consensus_controller, consensus_manager) = start_consensus_worker( + consensus_config, + consensus_channels, + bootstrap_state.graph, + shared_storage.clone(), + ); + // launch protocol controller let protocol_config = ProtocolConfig { thread_count: THREAD_COUNT, @@ -378,57 +424,18 @@ async fn launch( max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, }; - let (protocol_command_sender, protocol_event_receiver, protocol_manager) = - start_protocol_controller( - protocol_config, - network_command_sender.clone(), - network_event_receiver, - pool_controller.clone(), - shared_storage.clone(), - ) - .await - .expect("could not start protocol controller"); - // init consensus configuration - let consensus_config = ConsensusConfig { - genesis_timestamp: *GENESIS_TIMESTAMP, - end_timestamp: *END_TIMESTAMP, - thread_count: THREAD_COUNT, - t0: T0, - genesis_key: GENESIS_KEY.clone(), - max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, - future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, - max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, - 
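The `start_consensus_worker` call added above returns a controller plus a manager. As a generic sketch of that controller/manager shape (the `Command`, `Controller` and `Manager` names here are hypothetical, not the actual `massa_consensus_exports` API): commands travel over a crossbeam channel, the controller is the cloneable handle, and the manager owns shutdown and the worker's join handle.

use crossbeam_channel::{bounded, Sender};
use std::thread::JoinHandle;

enum Command {
    DoWork(u64),
    Stop,
}

#[derive(Clone)]
struct Controller {
    tx: Sender<Command>,
}

struct Manager {
    tx: Sender<Command>,
    handle: JoinHandle<()>,
}

impl Manager {
    fn stop(self) {
        // ask the worker to exit, then wait for it
        let _ = self.tx.send(Command::Stop);
        let _ = self.handle.join();
    }
}

fn start_worker(channel_size: usize) -> (Controller, Manager) {
    let (tx, rx) = bounded(channel_size);
    let handle = std::thread::spawn(move || {
        while let Ok(cmd) = rx.recv() {
            match cmd {
                Command::DoWork(n) => println!("working on {n}"),
                Command::Stop => break,
            }
        }
    });
    (Controller { tx: tx.clone() }, Manager { tx, handle })
}

fn main() {
    let (controller, manager) = start_worker(16);
    controller.tx.send(Command::DoWork(1)).unwrap();
    manager.stop();
}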
max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, - delta_f0: DELTA_F0, - operation_validity_periods: OPERATION_VALIDITY_PERIODS, - periods_per_cycle: PERIODS_PER_CYCLE, - stats_timespan: SETTINGS.consensus.stats_timespan, - max_send_wait: SETTINGS.consensus.max_send_wait, - force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, - endorsement_count: ENDORSEMENT_COUNT, - block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, - max_item_return_count: SETTINGS.consensus.max_item_return_count, - max_gas_per_block: MAX_GAS_PER_BLOCK, - channel_size: CHANNEL_SIZE, - }; - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - consensus_config.clone(), - ConsensusChannels { - execution_controller: execution_controller.clone(), - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller: selector_controller.clone(), - }, - bootstrap_state.graph, - shared_storage.clone(), - bootstrap_state.compensation_millis, - ) - .await - .expect("could not start consensus controller"); + let protocol_manager = start_protocol_controller( + protocol_config, + network_command_sender.clone(), + network_event_receiver, + protocol_command_receiver, + consensus_controller.clone(), + pool_controller.clone(), + shared_storage.clone(), + ) + .await + .expect("could not start protocol controller"); // launch factory let factory_config = FactoryConfig { @@ -442,16 +449,16 @@ async fn launch( }; let factory_channels = FactoryChannels { selector: selector_controller.clone(), - consensus: consensus_command_sender.clone(), + consensus: consensus_controller.clone(), pool: pool_controller.clone(), - protocol: protocol_command_sender.clone(), + protocol: ProtocolCommandSender(protocol_command_sender.clone()), storage: shared_storage.clone(), }; let factory_manager = start_factory(factory_config, node_wallet.clone(), factory_channels); // launch bootstrap server let bootstrap_manager = start_bootstrap_server( - consensus_command_sender.clone(), + consensus_controller.clone(), network_command_sender.clone(), final_state.clone(), bootstrap_config, @@ -475,27 +482,28 @@ async fn launch( max_op_datastore_value_length: MAX_OPERATION_DATASTORE_VALUE_LENGTH, max_function_name_length: MAX_FUNCTION_NAME_LENGTH, max_parameter_size: MAX_PARAMETERS_SIZE, + thread_count: THREAD_COUNT, + genesis_timestamp: *GENESIS_TIMESTAMP, + t0: T0, + periods_per_cycle: PERIODS_PER_CYCLE, }; // spawn private API let (api_private, api_private_stop_rx) = API::::new( - consensus_command_sender.clone(), network_command_sender.clone(), execution_controller.clone(), api_config.clone(), - consensus_config.clone(), node_wallet, ); let api_private_handle = api_private.serve(&SETTINGS.api.bind_private); // spawn public API let api_public = API::::new( - consensus_command_sender.clone(), + consensus_controller.clone(), execution_controller.clone(), api_config, selector_controller.clone(), - consensus_config, pool_controller.clone(), - protocol_command_sender.clone(), + ProtocolCommandSender(protocol_command_sender.clone()), network_config, *VERSION, network_command_sender.clone(), @@ -552,7 +560,7 @@ async fn launch( struct Managers { bootstrap_manager: Option, - consensus_manager: ConsensusManager, + consensus_manager: Box, execution_manager: Box, selector_manager: Box, pool_manager: Box, @@ -562,11 +570,11 @@ struct Managers { } async fn 
stop( - consensus_event_receiver: ConsensusEventReceiver, + _consensus_event_receiver: Receiver, Managers { bootstrap_manager, mut execution_manager, - consensus_manager, + mut consensus_manager, mut selector_manager, mut pool_manager, protocol_manager, @@ -593,10 +601,14 @@ async fn stop( // stop factory factory_manager.stop(); - let protocol_event_receiver = consensus_manager - .stop(consensus_event_receiver) + // stop protocol controller + let network_event_receiver = protocol_manager + .stop() .await - .expect("consensus shutdown failed"); + .expect("protocol shutdown failed"); + + // stop consensus + consensus_manager.stop(); // stop pool pool_manager.stop(); @@ -611,12 +623,6 @@ async fn stop( // TODO //let protocol_pool_event_receiver = pool_manager.stop().await.expect("pool shutdown failed"); - // stop protocol controller - let network_event_receiver = protocol_manager - .stop(protocol_event_receiver) - .await - .expect("protocol shutdown failed"); - // stop network controller network_manager .stop(network_event_receiver) @@ -707,7 +713,7 @@ async fn run(args: Args) -> anyhow::Result<()> { loop { let ( - mut consensus_event_receiver, + consensus_event_receiver, bootstrap_manager, consensus_manager, execution_manager, @@ -722,37 +728,52 @@ async fn run(args: Args) -> anyhow::Result<()> { ) = launch(node_wallet.clone()).await; // interrupt signal listener - let stop_signal = signal::ctrl_c(); - tokio::pin!(stop_signal); + let (tx, rx) = crossbeam_channel::bounded(1); + let interrupt_signal_listener = tokio::spawn(async move { + signal::ctrl_c().await.unwrap(); + tx.send(()).unwrap(); + }); + // loop over messages let restart = loop { massa_trace!("massa-node.main.run.select", {}); - tokio::select! { - evt = consensus_event_receiver.wait_event() => { - massa_trace!("massa-node.main.run.select.consensus_event", {}); - match evt { - Ok(ConsensusEvent::NeedSync) => { - warn!("in response to a desynchronization, the node is going to bootstrap again"); - break true; - }, - Err(err) => { - error!("consensus_event_receiver.wait_event error: {}", err); - break false; - } + match consensus_event_receiver.try_recv() { + Ok(evt) => match evt { + ConsensusEvent::NeedSync => { + warn!("in response to a desynchronization, the node is going to bootstrap again"); + break true; } }, - - _ = &mut stop_signal => { - massa_trace!("massa-node.main.run.select.stop", {}); - info!("interrupt signal received"); + Err(TryRecvError::Disconnected) => { + error!("consensus_event_receiver.wait_event disconnected"); break false; } + _ => {} + }; - _ = api_private_stop_rx.recv() => { + match api_private_stop_rx.try_recv() { + Ok(_) => { info!("stop command received from private API"); break false; } + Err(tokio::sync::mpsc::error::TryRecvError::Disconnected) => { + error!("api_private_stop_rx disconnected"); + break false; + } + _ => {} + } + match rx.try_recv() { + Ok(_) => { + info!("interrupt signal received"); + break false; + } + Err(crossbeam_channel::TryRecvError::Disconnected) => { + error!("interrupt_signal_listener disconnected"); + break false; + } + _ => {} } + sleep(Duration::from_millis(100)); }; stop( consensus_event_receiver, @@ -774,6 +795,7 @@ async fn run(args: Args) -> anyhow::Result<()> { if !restart { break; } + interrupt_signal_listener.abort(); } Ok(()) } diff --git a/massa-protocol-exports/src/error.rs b/massa-protocol-exports/src/error.rs index ead3ae5d7d6..778654bf8f3 100644 --- a/massa-protocol-exports/src/error.rs +++ b/massa-protocol-exports/src/error.rs @@ -1,6 +1,5 @@ // Copyright (c) 
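The rewritten main loop above no longer `select!`s over async receivers; it polls each channel with non-blocking `try_recv` calls and sleeps briefly between iterations. A minimal sketch of that polling shape with a single crossbeam channel (the event payload is illustrative):

use crossbeam_channel::{bounded, TryRecvError};
use std::thread::sleep;
use std::time::Duration;

fn main() {
    let (tx, rx) = bounded::<&'static str>(1);
    let producer = std::thread::spawn(move || {
        sleep(Duration::from_millis(300));
        tx.send("need-sync").unwrap();
    });

    loop {
        match rx.try_recv() {
            Ok(event) => {
                println!("got event: {event}");
                break;
            }
            Err(TryRecvError::Disconnected) => {
                eprintln!("event channel closed");
                break;
            }
            Err(TryRecvError::Empty) => {}
        }
        // avoid busy-waiting between polls, as the node's main loop does
        sleep(Duration::from_millis(100));
    }
    producer.join().unwrap();
}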
2022 MASSA LABS -use crate::ProtocolEvent; use displaydoc::Display; use massa_models::error::ModelsError; use massa_network_exports::ConnectionId; @@ -22,8 +21,6 @@ pub enum ProtocolError { TokioTaskJoinError(#[from] tokio::task::JoinError), /// error receiving one shot response : {0} TokioRecvError(#[from] tokio::sync::oneshot::error::RecvError), - /// error sending protocol event: {0} - TokioSendError(#[from] Box>), /// Error during network connection:`{0:?}` PeerConnectionError(NetworkConnectionErrorType), /// The ip:`{0}` address is not valid diff --git a/massa-protocol-exports/src/lib.rs b/massa-protocol-exports/src/lib.rs index dd2562b6b9a..ab550c45e2e 100644 --- a/massa-protocol-exports/src/lib.rs +++ b/massa-protocol-exports/src/lib.rs @@ -11,8 +11,8 @@ mod settings; pub use error::ProtocolError; pub use protocol_controller::{ - BlocksResults, ProtocolCommand, ProtocolCommandSender, ProtocolEvent, ProtocolEventReceiver, - ProtocolManagementCommand, ProtocolManager, + BlocksResults, ProtocolCommand, ProtocolCommandSender, ProtocolManagementCommand, + ProtocolManager, }; pub use settings::ProtocolConfig; diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index 502e7a290ff..4f615dc194c 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -1,53 +1,19 @@ // Copyright (c) 2022 MASSA LABS -use std::collections::VecDeque; - use crate::error::ProtocolError; use massa_logging::massa_trace; +use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::{ block::{BlockId, WrappedHeader}, endorsement::EndorsementId, operation::OperationId, }; -use massa_models::{ - prehash::{PreHashMap, PreHashSet}, - slot::Slot, -}; use massa_network_exports::NetworkEventReceiver; use massa_storage::Storage; use serde::Serialize; use tokio::{sync::mpsc, task::JoinHandle}; -use tracing::{debug, info}; - -/// Possible types of events that can happen. -#[allow(clippy::large_enum_variant)] -#[derive(Debug)] -pub enum ProtocolEvent { - /// A block with a valid signature has been received. - ReceivedBlock { - /// block ID - block_id: BlockId, - /// block slot - slot: Slot, - /// storage instance containing the block and its dependencies (except the parents) - storage: Storage, - }, - /// A message to tell the consensus that a block is invalid - InvalidBlock { - /// block ID - block_id: BlockId, - /// header - header: WrappedHeader, - }, - /// A block header with a valid signature has been received. - ReceivedBlockHeader { - /// its id - block_id: BlockId, - /// The header - header: WrappedHeader, - }, -} +use tracing::info; /// block result: map block id to /// ```md @@ -100,7 +66,7 @@ impl ProtocolCommandSender { /// # Arguments /// * `block_id`: ID of the block /// * `storage`: Storage instance containing references to the block and all its dependencies - pub async fn integrated_block( + pub fn integrated_block( &mut self, block_id: BlockId, storage: Storage, @@ -109,34 +75,31 @@ impl ProtocolCommandSender { "block_id": block_id }); self.0 - .send(ProtocolCommand::IntegratedBlock { block_id, storage }) - .await + .blocking_send(ProtocolCommand::IntegratedBlock { block_id, storage }) .map_err(|_| ProtocolError::ChannelError("block_integrated command send error".into())) } /// Notify to protocol an attack attempt. 
- pub async fn notify_block_attack(&mut self, block_id: BlockId) -> Result<(), ProtocolError> { + pub fn notify_block_attack(&mut self, block_id: BlockId) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.notify_block_attack", { "block_id": block_id }); self.0 - .send(ProtocolCommand::AttackBlockDetected(block_id)) - .await + .blocking_send(ProtocolCommand::AttackBlockDetected(block_id)) .map_err(|_| { ProtocolError::ChannelError("notify_block_attack command send error".into()) }) } /// update the block wish list - pub async fn send_wishlist_delta( + pub fn send_wishlist_delta( &mut self, new: PreHashMap>, remove: PreHashSet, ) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.send_wishlist_delta", { "new": new, "remove": remove }); self.0 - .send(ProtocolCommand::WishlistDelta { new, remove }) - .await + .blocking_send(ProtocolCommand::WishlistDelta { new, remove }) .map_err(|_| { ProtocolError::ChannelError("send_wishlist_delta command send error".into()) }) @@ -145,13 +108,12 @@ impl ProtocolCommandSender { /// Propagate a batch of operation ids (from pool). /// /// note: Full `OperationId` is replaced by a `OperationPrefixId` later by the worker. - pub async fn propagate_operations(&mut self, operations: Storage) -> Result<(), ProtocolError> { + pub fn propagate_operations(&mut self, operations: Storage) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.propagate_operations", { "operations": operations.get_op_refs() }); self.0 - .send(ProtocolCommand::PropagateOperations(operations)) - .await + .blocking_send(ProtocolCommand::PropagateOperations(operations)) .map_err(|_| { ProtocolError::ChannelError("propagate_operation command send error".into()) }) @@ -170,36 +132,6 @@ impl ProtocolCommandSender { } } -/// Protocol event receiver -pub struct ProtocolEventReceiver(pub mpsc::Receiver); - -impl ProtocolEventReceiver { - /// Receives the next `ProtocolEvent` from connected Node. 
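These command-sender methods are now synchronous: they use `tokio::sync::mpsc::Sender::blocking_send`, which panics if invoked from inside an async runtime thread; this is why async callers (see the updated tests further down) wrap them in `spawn_blocking`. A self-contained sketch of the same bridging, assuming a tokio dependency with the runtime features enabled (the `Command`/`CommandSender` names are placeholders, not the real protocol types):

use tokio::sync::mpsc;

#[derive(Debug)]
enum Command {
    IntegratedBlock(u64),
}

struct CommandSender(mpsc::Sender<Command>);

impl CommandSender {
    // synchronous API over an async channel, as in the diff above
    fn integrated_block(&self, id: u64) -> Result<(), String> {
        self.0
            .blocking_send(Command::IntegratedBlock(id))
            .map_err(|_| "command send error".to_string())
    }
}

fn main() {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let (tx, mut rx) = mpsc::channel::<Command>(8);
    let sender = CommandSender(tx);

    // call the blocking API from a plain thread, never from inside the runtime
    let worker = std::thread::spawn(move || sender.integrated_block(42).unwrap());

    let received = rt.block_on(async move { rx.recv().await });
    worker.join().unwrap();
    println!("{received:?}");
}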
- /// None is returned when all Sender halves have dropped, - /// indicating that no further values can be sent on the channel - pub async fn wait_event(&mut self) -> Result { - massa_trace!("protocol.event_receiver.wait_event", {}); - self.0.recv().await.ok_or_else(|| { - ProtocolError::ChannelError( - "DefaultProtocolController wait_event channel recv failed".into(), - ) - }) - } - - /// drains remaining events and returns them in a `VecDeque` - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque { - let mut remaining_events: VecDeque = VecDeque::new(); - while let Some(evt) = self.0.recv().await { - debug!( - "after receiving event from ProtocolEventReceiver.0 in protocol_controller drain" - ); - remaining_events.push_back(evt); - } - remaining_events - } -} - /// protocol manager used to stop the protocol pub struct ProtocolManager { join_handle: JoinHandle>, @@ -219,14 +151,9 @@ impl ProtocolManager { } /// Stop the protocol controller - pub async fn stop( - self, - protocol_event_receiver: ProtocolEventReceiver, - //protocol_pool_event_receiver: ProtocolPoolEventReceiver, - ) -> Result { + pub async fn stop(self) -> Result { info!("stopping protocol controller..."); drop(self.manager_tx); - let _remaining_events = protocol_event_receiver.drain().await; let network_event_receiver = self.join_handle.await??; info!("protocol controller stopped"); Ok(network_event_receiver) diff --git a/massa-protocol-exports/src/test_exports/mock.rs b/massa-protocol-exports/src/test_exports/mock.rs index 5639ab80d92..b38436b1606 100644 --- a/massa-protocol-exports/src/test_exports/mock.rs +++ b/massa-protocol-exports/src/test_exports/mock.rs @@ -1,14 +1,7 @@ // Copyright (c) 2022 MASSA LABS -use crate::{ - protocol_controller::ProtocolEventReceiver, ProtocolCommand, ProtocolCommandSender, - ProtocolEvent, -}; -use massa_models::{ - block::{BlockId, WrappedHeader}, - slot::Slot, -}; -use massa_storage::Storage; +use crate::{ProtocolCommand, ProtocolCommandSender}; +use massa_models::block::BlockId; use massa_time::MassaTime; use tokio::{sync::mpsc, time::sleep}; @@ -16,22 +9,17 @@ use tokio::{sync::mpsc, time::sleep}; /// TODO: Improve doc pub struct MockProtocolController { protocol_command_rx: mpsc::Receiver, - protocol_event_tx: mpsc::Sender, } impl MockProtocolController { /// Creates a new protocol mock - /// TODO: Improve doc - pub fn new() -> (Self, ProtocolCommandSender, ProtocolEventReceiver) { + pub fn new() -> (Self, ProtocolCommandSender) { let (protocol_command_tx, protocol_command_rx) = mpsc::channel::(256); - let (protocol_event_tx, protocol_event_rx) = mpsc::channel::(256); ( MockProtocolController { - protocol_event_tx, protocol_command_rx, }, ProtocolCommandSender(protocol_command_tx), - ProtocolEventReceiver(protocol_event_rx), ) } @@ -53,27 +41,6 @@ impl MockProtocolController { } } - /// Note: if you care about the operation set, use another method. 
- pub async fn receive_block(&mut self, block_id: BlockId, slot: Slot, storage: Storage) { - self.protocol_event_tx - .send(ProtocolEvent::ReceivedBlock { - block_id, - slot, - storage, - }) - .await - .expect("could not send protocol event"); - } - - /// Send a receive header to the protocol event channel - pub async fn receive_header(&mut self, header: WrappedHeader) { - let block_id = header.id; - self.protocol_event_tx - .send(ProtocolEvent::ReceivedBlockHeader { block_id, header }) - .await - .expect("could not send protocol event"); - } - /// Not implemented pub async fn receive_get_active_blocks(&mut self, _list: Vec) {} diff --git a/massa-protocol-exports/src/tests/tools.rs b/massa-protocol-exports/src/tests/tools.rs index 80d6d57f907..3544c84c7fa 100644 --- a/massa-protocol-exports/src/tests/tools.rs +++ b/massa-protocol-exports/src/tests/tools.rs @@ -1,12 +1,10 @@ // Copyright (c) 2022 MASSA LABS use super::mock_network_controller::MockNetworkController; -use crate::protocol_controller::{ProtocolCommandSender, ProtocolEventReceiver}; -use crate::{ProtocolConfig, ProtocolEvent}; +use crate::ProtocolConfig; use massa_hash::Hash; use massa_models::node::NodeId; use massa_models::operation::OperationSerializer; -use massa_models::prehash::PreHashSet; use massa_models::wrapped::WrappedContent; use massa_models::{ address::Address, @@ -16,7 +14,7 @@ use massa_models::{ operation::{Operation, OperationType, WrappedOperation}, slot::Slot, }; -use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand}; +use massa_network_exports::{AskForBlocksInfo, NetworkCommand}; use massa_signature::KeyPair; use massa_time::MassaTime; use std::collections::HashMap; @@ -162,65 +160,6 @@ pub fn create_block_with_endorsements( .unwrap() } -/// send a block and assert it has been propagate (or not) -pub async fn send_and_propagate_block( - network_controller: &mut MockNetworkController, - block: WrappedBlock, - valid: bool, - source_node_id: NodeId, - protocol_event_receiver: &mut ProtocolEventReceiver, - protocol_command_sender: &mut ProtocolCommandSender, - operations: Vec, -) { - let expected_hash = block.id; - - network_controller - .send_header(source_node_id, block.content.header.clone()) - .await; - - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); - - // Send block info to protocol. - let info = vec![( - block.id, - BlockInfoReply::Info(block.content.operations.clone()), - )]; - network_controller - .send_block_info(source_node_id, info) - .await; - - // Send full ops. - let info = vec![(block.id, BlockInfoReply::Operations(operations))]; - network_controller - .send_block_info(source_node_id, info) - .await; - - // Check protocol sends block to consensus. - let hash = match wait_protocol_event(protocol_event_receiver, 1000.into(), |evt| match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - _ => None, - }) - .await - { - Some(ProtocolEvent::ReceivedBlock { block_id, .. }) => Some(block_id), - None => None, - _ => panic!("Unexpected or no protocol event."), - }; - if valid { - assert_eq!(expected_hash, hash.unwrap()); - } else { - assert!(hash.is_none(), "unexpected protocol event") - } -} - /// Creates an endorsement for use in protocol tests, /// without paying attention to consensus related things. 
pub fn create_endorsement() -> WrappedEndorsement { @@ -288,28 +227,6 @@ pub fn create_protocol_config() -> ProtocolConfig { } } -/// wait protocol event -pub async fn wait_protocol_event( - protocol_event_receiver: &mut ProtocolEventReceiver, - timeout: MassaTime, - filter_map: F, -) -> Option -where - F: Fn(ProtocolEvent) -> Option, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - evt_opt = protocol_event_receiver.wait_event() => match evt_opt { - Ok(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => return None - }, - _ = &mut timer => return None - } - } -} - /// assert block id has been asked to node pub async fn assert_hash_asked_to_node( hash_1: BlockId, diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index 18b7f04dd9e..2817e32c9fb 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -15,6 +15,7 @@ rayon = "1.5" massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } +massa_consensus_exports = { path = "../massa-consensus-exports" } massa_network_exports = { path = "../massa-network-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } @@ -30,3 +31,7 @@ futures = "0.3" massa_signature = { path = "../massa-signature" } massa_pool_exports = { path = "../massa-pool-exports", features = ["testing"] } + +[features] + +testing = ["massa_consensus_exports/testing", "massa_network_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing"] \ No newline at end of file diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index d4e79f6d7a9..b794d439b7b 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -17,7 +17,7 @@ use massa_models::{ wrapped::{Id, Wrapped}, }; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkEvent}; -use massa_protocol_exports::{ProtocolError, ProtocolEvent}; +use massa_protocol_exports::ProtocolError; use massa_serialization::Serializer; use massa_storage::Storage; use std::pin::Pin; @@ -98,11 +98,8 @@ impl ProtocolWorker { self.note_header_from_node(&header, &source_node_id).await? { if is_new { - self.send_protocol_event(ProtocolEvent::ReceivedBlockHeader { - block_id, - header, - }) - .await; + self.consensus_controller + .register_block_header(block_id, header); } self.update_ask_block(block_ask_timer).await?; } else { @@ -284,7 +281,7 @@ impl ProtocolWorker { /// # Ban /// Start compute the operations serialized total size with the operation we know. /// Ban the node if the operations contained in the block overflow the max size. We don't - /// forward the block to the graph in that case. + /// forward the block to the consensus in that case. /// /// # Parameters: /// - `from_node_id`: Node which sent us the information. 
@@ -428,7 +425,7 @@ impl ProtocolWorker { return Ok(()); } - let protocol_event_full_block = match self.block_wishlist.entry(block_id) { + match self.block_wishlist.entry(block_id) { Entry::Occupied(mut entry) => { let info = entry.get_mut(); let header = if let Some(header) = &info.header { @@ -471,7 +468,8 @@ impl ProtocolWorker { warn!("Node id {} sent us full operations for block id {} but they exceed max size.", from_node_id, block_id); let _ = self.ban_node(&from_node_id).await; self.block_wishlist.remove(&block_id); - ProtocolEvent::InvalidBlock { block_id, header } + self.consensus_controller + .mark_invalid_block(block_id, header); } else { if known_operations != block_ids_set { warn!( @@ -516,11 +514,11 @@ impl ProtocolWorker { let slot = wrapped_block.content.header.content.slot; // add block to local storage and claim ref block_storage.store_block(wrapped_block); - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage: block_storage, - } + + // Send to consensus + info!("Send to consensus block for slot: {}", slot); + self.consensus_controller + .register_block(block_id, slot, block_storage, false); } } Entry::Vacant(_) => { @@ -532,8 +530,6 @@ impl ProtocolWorker { return Ok(()); } }; - // Send to graph - self.send_protocol_event(protocol_event_full_block).await; // Update ask block let remove_hashes = vec![block_id].into_iter().collect(); diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index b26234100ad..07aba466e1b 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -5,6 +5,7 @@ use crate::checked_operations::CheckedOperations; use crate::sig_verifier::verify_sigs_batch; use crate::{node_info::NodeInfo, worker_operations_impl::OperationBatchBuffer}; +use massa_consensus_exports::ConsensusController; use massa_logging::massa_trace; use massa_models::slot::Slot; @@ -20,8 +21,7 @@ use massa_models::{ use massa_network_exports::{AskForBlocksInfo, NetworkCommandSender, NetworkEventReceiver}; use massa_pool_exports::PoolController; use massa_protocol_exports::{ - ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolError, ProtocolEvent, - ProtocolEventReceiver, ProtocolManagementCommand, ProtocolManager, + ProtocolCommand, ProtocolConfig, ProtocolError, ProtocolManagementCommand, ProtocolManager, }; use massa_models::wrapped::Id; @@ -32,7 +32,6 @@ use std::mem; use std::pin::Pin; use tokio::{ sync::mpsc, - sync::mpsc::error::SendTimeoutError, time::{sleep, sleep_until, Instant, Sleep}, }; use tracing::{debug, error, info, warn}; @@ -51,22 +50,14 @@ pub async fn start_protocol_controller( config: ProtocolConfig, network_command_sender: NetworkCommandSender, network_event_receiver: NetworkEventReceiver, + protocol_command_receiver: mpsc::Receiver, + consensus_controller: Box, pool_controller: Box, storage: Storage, -) -> Result< - ( - ProtocolCommandSender, - ProtocolEventReceiver, - ProtocolManager, - ), - ProtocolError, -> { +) -> Result { debug!("starting protocol controller"); // launch worker - let (controller_event_tx, event_rx) = mpsc::channel::(config.event_channel_size); - let (command_tx, controller_command_rx) = - mpsc::channel::(config.controller_channel_size); let (manager_tx, controller_manager_rx) = mpsc::channel::(1); let pool_controller = pool_controller.clone(); let join_handle = tokio::spawn(async move { @@ -75,10 +66,10 @@ pub async fn start_protocol_controller( ProtocolWorkerChannels { network_command_sender, 
network_event_receiver, - controller_event_tx, - controller_command_rx, + controller_command_rx: protocol_command_receiver, controller_manager_rx, }, + consensus_controller, pool_controller, storage, ) @@ -96,11 +87,7 @@ pub async fn start_protocol_controller( } }); debug!("protocol controller ready"); - Ok(( - ProtocolCommandSender(command_tx), - ProtocolEventReceiver(event_rx), - ProtocolManager::new(join_handle, manager_tx), - )) + Ok(ProtocolManager::new(join_handle, manager_tx)) } /// Info about a block we've seen @@ -132,12 +119,12 @@ impl BlockInfo { pub struct ProtocolWorker { /// Protocol configuration. pub(crate) config: ProtocolConfig, + /// Consensus controller + pub(crate) consensus_controller: Box, /// Associated network command sender. pub(crate) network_command_sender: NetworkCommandSender, /// Associated network event receiver. network_event_receiver: NetworkEventReceiver, - /// Channel to send protocol events to the controller. - controller_event_tx: mpsc::Sender, /// Channel to send protocol pool events to the controller. pool_controller: Box, /// Channel receiving commands from the controller. @@ -171,8 +158,6 @@ pub struct ProtocolWorkerChannels { pub network_command_sender: NetworkCommandSender, /// network event receiver pub network_event_receiver: NetworkEventReceiver, - /// protocol event sender - pub controller_event_tx: mpsc::Sender, /// protocol command receiver pub controller_command_rx: mpsc::Receiver, /// protocol management command receiver @@ -193,10 +178,10 @@ impl ProtocolWorker { ProtocolWorkerChannels { network_command_sender, network_event_receiver, - controller_event_tx, controller_command_rx, controller_manager_rx, }: ProtocolWorkerChannels, + consensus_controller: Box, pool_controller: Box, storage: Storage, ) -> ProtocolWorker { @@ -204,7 +189,7 @@ impl ProtocolWorker { config, network_command_sender, network_event_receiver, - controller_event_tx, + consensus_controller, pool_controller, controller_command_rx, controller_manager_rx, @@ -224,25 +209,6 @@ impl ProtocolWorker { } } - pub(crate) async fn send_protocol_event(&self, event: ProtocolEvent) { - let result = self - .controller_event_tx - .send_timeout(event, self.config.max_send_wait.to_duration()) - .await; - match result { - Ok(()) => {} - Err(SendTimeoutError::Closed(event)) => { - warn!( - "Failed to send ProtocolEvent due to channel closure: {:?}.", - event - ); - } - Err(SendTimeoutError::Timeout(event)) => { - warn!("Failed to send ProtocolEvent due to timeout: {:?}.", event); - } - } - } - /// Main protocol worker loop. Consumes self. 
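With the event channel gone, the worker holds a `Box<dyn ConsensusController>` and calls consensus directly. A simplified sketch of that trait-object pattern (placeholder types only; the real `register_block` also takes a `Storage` argument):

type BlockId = u64;
type Slot = (u64, u8); // (period, thread)

trait ConsensusController {
    fn register_block_header(&self, block_id: BlockId, slot: Slot);
    fn register_block(&self, block_id: BlockId, slot: Slot, created: bool);
    fn mark_invalid_block(&self, block_id: BlockId);
}

struct LoggingController;

impl ConsensusController for LoggingController {
    fn register_block_header(&self, block_id: BlockId, slot: Slot) {
        println!("header {block_id} at {slot:?}");
    }
    fn register_block(&self, block_id: BlockId, slot: Slot, created: bool) {
        println!("block {block_id} at {slot:?} (created: {created})");
    }
    fn mark_invalid_block(&self, block_id: BlockId) {
        println!("invalid block {block_id}");
    }
}

struct ProtocolWorker {
    consensus_controller: Box<dyn ConsensusController>,
}

impl ProtocolWorker {
    fn on_full_block(&self, block_id: BlockId, slot: Slot) {
        // no event channel any more: hand the block to consensus directly
        self.consensus_controller.register_block(block_id, slot, false);
    }
}

fn main() {
    let worker = ProtocolWorker {
        consensus_controller: Box::new(LoggingController),
    };
    worker.on_full_block(7, (3, 0));
}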
/// It is mostly a `tokio::select!` inside a loop /// waiting on : diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index bbe8c4f6a8f..cad0469fe5d 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -1,12 +1,13 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{block::BlockId, slot::Slot}; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand}; use massa_protocol_exports::tests::tools; use massa_protocol_exports::tests::tools::{asked_list, assert_hash_asked_to_node}; -use massa_protocol_exports::ProtocolEvent; +use massa_time::MassaTime; use serial_test::serial; #[tokio::test] @@ -18,9 +19,9 @@ async fn test_full_ask_block_workflow() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -54,15 +55,18 @@ async fn test_full_ask_block_workflow() { .await; // Send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let header = block.content.header.clone(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(header))].into_iter().collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A, then B assert_hash_asked_to_node(block.id, node_a.id, &mut network_controller).await; @@ -105,28 +109,49 @@ async fn test_full_ask_block_workflow() { ) .await; - // Protocol sends expected block to consensus. - loop { - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); - break; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + // Protocol sends expected block to consensus. 
+ loop { + match protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + created: _, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!( + received_block.content.operations, + block.content.operations + ); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { + break; + } + None => { + continue; + } } - _evt => continue, - }; - } + } + return protocol_consensus_event_receiver; + }) + .await + .unwrap(); + ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -143,9 +168,9 @@ async fn test_empty_block() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -171,15 +196,18 @@ async fn test_empty_block() { .await; // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let header = block.content.header.clone(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(header))].into_iter().collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A, then B assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; @@ -209,27 +237,47 @@ async fn test_empty_block() { ); // Protocol sends expected block to consensus. 
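// Editorial sketch, not part of the patch: the rewritten tests above wait for
// mock consensus messages with `wait_command(timeout, filter)` inside
// `spawn_blocking`. One plausible implementation of such a receiver, assuming
// it wraps a `crossbeam_channel::Receiver` (the workspace adds
// `crossbeam-channel` elsewhere in this change); the real
// `ConsensusEventReceiver` in `massa_consensus_exports::test_exports` may differ.
use crossbeam_channel::Receiver;
use massa_time::MassaTime;

pub struct SketchConsensusEventReceiver<M> {
    rx: Receiver<M>,
}

impl<M> SketchConsensusEventReceiver<M> {
    /// Block for at most `timeout`, returning the first message the filter accepts.
    pub fn wait_command<F, T>(&mut self, timeout: MassaTime, filter: F) -> Option<T>
    where
        F: Fn(M) -> Option<T>,
    {
        let deadline = std::time::Instant::now() + timeout.to_duration();
        loop {
            // `?` bails out with `None` once the deadline has passed.
            let remaining = deadline.checked_duration_since(std::time::Instant::now())?;
            match self.rx.recv_timeout(remaining) {
                Ok(msg) => {
                    if let Some(out) = filter(msg) {
                        return Some(out);
                    }
                    // Message rejected by the filter: keep waiting until the deadline.
                }
                Err(_) => return None,
            }
        }
    }
}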
- loop { - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); - break; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + loop { + match protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + created: _, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!( + received_block.content.operations, + block.content.operations + ); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { + break; + } + None => { + continue; + } } - _evt => continue, - }; - } + } + protocol_consensus_event_receiver + }) + .await + .unwrap(); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -245,9 +293,9 @@ async fn test_someone_knows_it() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -278,21 +326,33 @@ async fn test_someone_knows_it() { .send_header(node_c.id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver + }) + .await + .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); assert_hash_asked_to_node(hash_1, node_c.id, &mut network_controller).await; @@ -329,9 +389,9 @@ async fn test_someone_knows_it() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -347,9 +407,9 @@ async fn test_dont_want_it_anymore() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -370,24 +430,32 @@ async fn test_dont_want_it_anymore() { // end set up // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; // we don't want it anymore - protocol_command_sender - .send_wishlist_delta(Default::default(), vec![hash_1].into_iter().collect()) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta(Default::default(), vec![hash_1].into_iter().collect()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 7. Make sure protocol did not send additional ask for block commands. 
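// Editorial sketch, not part of the patch: the test rewrites above repeatedly
// move the now-blocking `protocol_command_sender` into
// `tokio::task::spawn_blocking` and return it from the closure so it can be
// reused afterwards. A self-contained illustration of that move-in/return-out
// pattern; `BlockingSender` and `send_sync` are hypothetical stand-ins for the
// real sender and its blocking methods.
struct BlockingSender;

impl BlockingSender {
    fn send_sync(&mut self, _msg: &str) {
        // Stand-in for a call that may block the current thread.
    }
}

#[tokio::main]
async fn main() {
    let mut sender = BlockingSender;

    // Move the sender onto the blocking thread pool so runtime worker threads
    // are never blocked, then take ownership back from the task's return value.
    sender = tokio::task::spawn_blocking(move || {
        sender.send_sync("wishlist delta");
        sender
    })
    .await
    .expect("blocking task panicked");

    // `sender` is available again here for the next blocking call,
    // typically wrapped in another `spawn_blocking`.
    let _ = sender;
}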
let ask_for_block_cmd_filter = |cmd| match cmd { @@ -405,9 +473,9 @@ async fn test_dont_want_it_anymore() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -424,9 +492,9 @@ async fn test_no_one_has_it() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -447,15 +515,19 @@ async fn test_no_one_has_it() { // end set up // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; @@ -487,9 +559,9 @@ async fn test_no_one_has_it() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -505,9 +577,9 @@ async fn test_multiple_blocks_without_a_priori() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -535,18 +607,22 @@ async fn test_multiple_blocks_without_a_priori() { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![ - (hash_1, Some(block_1.content.header.clone())), - (hash_2, Some(block_2.content.header.clone())), - ] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![ + (hash_1, Some(block_1.content.header.clone())), + (hash_2, Some(block_2.content.header.clone())), + ] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); let list = asked_list(&mut network_controller).await; for (node_id, set) in list.into_iter() { @@ -561,9 +637,9 @@ async fn test_multiple_blocks_without_a_priori() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index fec4bb7dfac..55f91790adb 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; @@ -9,8 +10,8 @@ use 
massa_models::{block::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools; -use massa_protocol_exports::ProtocolEvent; use massa_signature::KeyPair; +use massa_time::MassaTime; use serial_test::serial; use std::collections::HashSet; use std::time::Duration; @@ -22,9 +23,9 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -46,23 +47,31 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // Check protocol does not send block to consensus. - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - evt @ ProtocolEvent::InvalidBlock { .. } => Some(evt), - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockConsensusControllerMessage::RegisterBlockHeader { .. } => { + panic!("Protocol unexpectedly sent header.") + } + MockConsensusControllerMessage::MarkInvalidBlock { .. } => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), + }, + ); + protocol_consensus_event_receiver }) .await - { - None => {} - _ => panic!("Protocol unexpectedly sent block or header."), - } + .unwrap(); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -125,9 +134,9 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -150,21 +159,33 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .send_header(to_ban_node.id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + let mut protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver + }) + .await + .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); tools::assert_hash_asked_to_node(block.id, to_ban_node.id, &mut network_controller) .await; @@ -198,23 +219,31 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .await; // Check protocol does not send block to consensus. - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - evt @ ProtocolEvent::InvalidBlock { .. } => Some(evt), - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockConsensusControllerMessage::RegisterBlockHeader { .. } => { + panic!("Protocol unexpectedly sent header.") + } + MockConsensusControllerMessage::MarkInvalidBlock { .. } => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), + }, + ); + protocol_consensus_event_receiver }) .await - { - None => {} - _ => panic!("Protocol unexpectedly sent header coming from banned node."), - } + .unwrap(); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -229,9 +258,9 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let ask_for_block_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd), @@ -251,18 +280,21 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h .await; // Check protocol sends header to consensus. - let received_hash = - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let (protocol_consensus_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_consensus_event_receiver, id) }) .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + .unwrap(); // 3. Check that protocol sent the right header to consensus. 
let expected_hash = block.id; @@ -280,15 +312,19 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // 5. Ask for block. - protocol_command_sender - .send_wishlist_delta( - vec![(expected_hash, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .expect("Failed to ask for block."); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(expected_hash, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // 6. Make sure protocol did not ask for the block from the banned node. let got_more_commands = network_controller @@ -301,9 +337,9 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -318,9 +354,9 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let send_block_or_header_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::SendBlockInfo { .. } => Some(cmd), @@ -389,9 +425,9 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -406,9 +442,9 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 4 nodes. let nodes = tools::create_and_connect_nodes(4, &mut network_controller).await; @@ -425,38 +461,30 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { .send_header(creator_node.id, block.content.header.clone()) .await; + let (old_protocol_consensus_event_receiver, optional_block_id) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }, + ); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = old_protocol_consensus_event_receiver; // Check protocol sends header to consensus (only the 1st time: later, there is caching). if idx == 0 { - let received_hash = match tools::wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. 
}) => block_id, - Some(evt) => panic!("Unexpected protocol event {:?}", evt), - None => panic!("no protocol event"), - }; + let received_hash = optional_block_id.unwrap(); // Check that protocol sent the right header to consensus. assert_eq!(expected_hash, received_hash); } else { - assert!( - tools::wait_protocol_event( - &mut protocol_event_receiver, - 150.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - .is_none(), - "caching was ignored" - ); + assert!(optional_block_id.is_none(), "caching was ignored"); } } @@ -471,10 +499,14 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { tokio::time::sleep(Duration::from_millis(250)).await; // Simulate consensus notifying an attack attempt. - protocol_command_sender - .notify_block_attack(expected_hash) - .await - .expect("Failed to ask for block."); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .notify_block_attack(expected_hash) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // Make sure all initial nodes are banned. let node_ids = nodes.into_iter().map(|node_info| node_info.id).collect(); @@ -496,9 +528,9 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -513,9 +545,9 @@ async fn test_protocol_removes_banned_node_on_disconnection() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -542,27 +574,30 @@ async fn test_protocol_removes_banned_node_on_disconnection() { .await; // Check protocol sends header to consensus. - let received_hash = - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let (protocol_consensus_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_consensus_event_receiver, id) }) .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + .unwrap(); // Check that protocol sent the right header to consensus. 
let expected_hash = block.id; assert_eq!(expected_hash, received_hash); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/endorsements_scenarios.rs b/massa-protocol-worker/src/tests/endorsements_scenarios.rs index 41273569bf4..588b329efd0 100644 --- a/massa-protocol-worker/src/tests/endorsements_scenarios.rs +++ b/massa-protocol-worker/src/tests/endorsements_scenarios.rs @@ -3,12 +3,13 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::protocol_test; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::{address::Address, slot::Slot}; use massa_network_exports::NetworkCommand; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools; -use massa_protocol_exports::ProtocolEvent; use massa_storage::Storage; +use massa_time::MassaTime; use serial_test::serial; use std::thread; use std::time::Duration; @@ -20,9 +21,9 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -56,9 +57,9 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -73,9 +74,9 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -103,9 +104,9 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -120,9 +121,9 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -170,9 +171,9 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -187,9 +188,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. 
let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -246,9 +247,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -264,9 +265,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -318,9 +319,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -337,9 +338,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -392,9 +393,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -410,9 +411,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -440,13 +441,18 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Wait for the event to be sure that the node is connected, // and noted as knowing the block and its endorsements. - let _ = tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("Node isn't connected or didn't mark block as known."), + }, + ); + protocol_consensus_event_receiver }) - .await; + .await + .unwrap(); // Send the endorsement to protocol // it should not propagate to the node that already knows about it @@ -478,9 +484,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -495,9 +501,9 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -543,9 +549,9 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index 1daca60df23..c891c9bbaf1 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS -use super::tools::protocol_test; +use super::tools::{protocol_test, send_and_propagate_block}; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::wrapped::{Id, WrappedContent}; @@ -13,9 +14,9 @@ use massa_network_exports::NetworkCommand; use massa_protocol_exports::tests::tools; use massa_protocol_exports::tests::tools::{ create_and_connect_nodes, create_block_with_operations, create_operation_with_expire_period, - send_and_propagate_block, }; use massa_signature::KeyPair; +use massa_time::MassaTime; use serial_test::serial; #[tokio::test] @@ -25,9 +26,9 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 node. let mut nodes = create_and_connect_nodes(2, &mut network_controller).await; @@ -53,17 +54,46 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { Slot::new(1, op_thread), vec![op.clone()], ); + let block_id = block.id; send_and_propagate_block( &mut network_controller, block, - true, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + // Check protocol sends block to consensus. 
+ let (protocol_consensus_event_receiver, expected_hash) = + tokio::task::spawn_blocking(move || { + let header_id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlock { + block_id, + slot: _, + block_storage: _, + created: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + assert_eq!(header_id, id); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + assert_eq!(expected_hash, block_id); + // Propagates the operation found in the block. if let Some(NetworkCommand::SendOperationAnnouncements { to_node, batch }) = network_controller @@ -81,9 +111,9 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { }; ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -104,9 +134,9 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -133,16 +163,46 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { Slot::new(1, op_thread), vec![op.clone()], ); + let block_id = block.id; send_and_propagate_block( &mut network_controller, block, - true, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol sends block to consensus. + let (new_protocol_consensus_event_receiver, expected_hash) = + tokio::task::spawn_blocking(move || { + let header_id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlock { + block_id, + slot: _, + block_storage: _, + created: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + assert_eq!(header_id, id); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; + assert_eq!(expected_hash, block_id); } // block with wrong merkle root @@ -178,13 +238,34 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { send_and_propagate_block( &mut network_controller, block, - false, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol didn't send block to consensus. 
+ let (new_protocol_consensus_event_receiver, optional_expected_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => None, + }, + ); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; + assert!( + optional_expected_hash.is_none(), + "Block sent to consensus but shouldn't." + ); } // block with operation with wrong signature @@ -201,20 +282,41 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { send_and_propagate_block( &mut network_controller, block, - false, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol didn't send block to consensus. + let (new_protocol_consensus_event_receiver, optional_expected_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => None, + }, + ); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; + assert!( + optional_expected_hash.is_none(), + "Block sent to consensus but shouldn't." + ); } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 55e70e2c0a7..b0ee9cc8fa1 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -3,12 +3,13 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::{protocol_test, protocol_test_with_storage}; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{self, address::Address, amount::Amount, block::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools::{self, assert_hash_asked_to_node}; -use massa_protocol_exports::ProtocolEvent; +use massa_time::MassaTime; use serial_test::serial; use std::str::FromStr; use std::time::Duration; @@ -20,9 +21,9 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut protocol_pool_event_receiver| { // Create 1 node. 
let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -73,9 +74,9 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -90,9 +91,9 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -119,9 +120,9 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -136,9 +137,9 @@ async fn test_protocol_propagates_operations_to_active_nodes() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. @@ -163,10 +164,14 @@ async fn test_protocol_propagates_operations_to_active_nodes() { let expected_operation_id = operation.id; storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); loop { match network_controller @@ -187,9 +192,9 @@ async fn test_protocol_propagates_operations_to_active_nodes() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -204,9 +209,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 1 nodes. 
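// Editorial sketch, not part of the patch: the command-sender methods used
// above (`propagate_operations`, `send_wishlist_delta`, `integrated_block`, ...)
// lost their `.await`, i.e. they are now synchronous. One plausible shape for
// such a wrapper over a tokio mpsc sender, assuming it relies on
// `blocking_send`; since `blocking_send` panics when invoked on an async
// runtime thread, async tests route these calls through
// `tokio::task::spawn_blocking`, as done in the hunks above. The names
// `SketchCommandSender` and `Command` are illustrative only.
use tokio::sync::mpsc;

#[derive(Debug)]
enum Command {
    PropagateOperations(Vec<u8>), // stand-in payload
}

#[derive(Clone)]
struct SketchCommandSender(mpsc::Sender<Command>);

impl SketchCommandSender {
    /// Synchronously push a command, blocking the calling thread if the
    /// channel is full. Must not be called from an async runtime thread.
    fn propagate_operations(&mut self, payload: Vec<u8>) -> Result<(), String> {
        self.0
            .blocking_send(Command::PropagateOperations(payload))
            .map_err(|e| format!("channel closed: {e}"))
    }
}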
@@ -237,10 +242,14 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ // send endorsement to protocol // it should be propagated only to the node that doesn't know about it storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); loop { match network_controller @@ -261,9 +270,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -279,9 +288,9 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, _storage| { // Create 2 nodes. @@ -323,9 +332,9 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -341,9 +350,9 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. @@ -370,10 +379,14 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo // Send it via the API. storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); let expected_operation_id_2 = operation.id; @@ -401,9 +414,9 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -419,9 +432,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. @@ -442,21 +455,33 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .send_header(nodes[0].id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver + }) + .await + .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); assert_hash_asked_to_node(block.id, nodes[0].id, &mut network_controller).await; @@ -478,10 +503,14 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ // it should not propagate to the node that already knows about it // because of the previously received header. storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); match network_controller .wait_command(1000.into(), |cmd| match cmd { @@ -504,9 +533,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -523,9 +552,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 3 nodes. @@ -569,7 +598,6 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .collect(), PreHashSet::::default(), ) - .await .unwrap(); // assert it was asked to node A, then B, then C. @@ -595,20 +623,24 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .await; // Wait for the event to be sure that the node is connected. - let _ = tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver }) - .await; + .await + .unwrap(); // Send the operation to protocol // it should propagate to the node because it isn't in the block. 
storage.store_operations(vec![op_2.clone()]); protocol_command_sender .propagate_operations(storage) - .await .unwrap(); match network_controller @@ -629,9 +661,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -647,9 +679,9 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 2 nodes. let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -693,9 +725,9 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -710,9 +742,9 @@ async fn test_protocol_ask_operations_on_batch_received() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -745,9 +777,9 @@ async fn test_protocol_ask_operations_on_batch_received() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -762,9 +794,9 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 3 node. let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -818,9 +850,9 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after }; ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -835,9 +867,9 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 3 node. let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -893,9 +925,9 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -910,9 +942,9 @@ async fn test_protocol_on_ask_operations() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. 
@@ -957,9 +989,9 @@ async fn test_protocol_on_ask_operations() {
 
             (
                 network_controller,
-                protocol_event_receiver,
                 protocol_command_sender,
                 protocol_manager,
+                protocol_consensus_event_receiver,
                 protocol_pool_event_receiver,
             )
         },
diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs
index 829c59f0c55..143ca55aa42 100644
--- a/massa-protocol-worker/src/tests/scenarios.rs
+++ b/massa-protocol-worker/src/tests/scenarios.rs
@@ -3,14 +3,16 @@
 // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1
 
 use super::tools::{protocol_test, protocol_test_with_storage};
+use massa_consensus_exports::test_exports::MockConsensusControllerMessage;
 use massa_models::block::BlockId;
 use massa_models::prehash::{PreHashMap, PreHashSet};
 use massa_network_exports::{AskForBlocksInfo, NetworkCommand};
 use massa_protocol_exports::tests::tools;
 use massa_protocol_exports::{
-    tests::tools::{create_and_connect_nodes, create_block, wait_protocol_event},
-    BlocksResults, ProtocolEvent,
+    tests::tools::{create_and_connect_nodes, create_block},
+    BlocksResults,
 };
+use massa_time::MassaTime;
 use serial_test::serial;
 use std::collections::HashSet;
 
@@ -22,9 +24,9 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() {
     protocol_test(
         protocol_config,
         async move |mut network_controller,
-                    mut protocol_event_receiver,
                     mut protocol_command_sender,
                     protocol_manager,
+                    mut protocol_consensus_event_receiver,
                     protocol_pool_event_receiver| {
             let ask_for_block_cmd_filter = |cmd| match cmd {
                 cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd),
@@ -46,34 +48,40 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() {
                 .await;
 
             // Check protocol sends header to consensus.
-            let received_hash = match wait_protocol_event(
-                &mut protocol_event_receiver,
-                1000.into(),
-                |evt| match evt {
-                    evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt),
-                    _ => None,
-                },
-            )
-            .await
-            {
-                Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id,
-                _ => panic!("Unexpected or no protocol event."),
-            };
+            let (protocol_consensus_event_receiver, received_hash) =
+                tokio::task::spawn_blocking(move || {
+                    let id = protocol_consensus_event_receiver
+                        .wait_command(MassaTime::from_millis(1000), |command| match command {
+                            MockConsensusControllerMessage::RegisterBlockHeader {
+                                block_id,
+                                header: _,
+                            } => Some(block_id),
+                            _ => panic!("unexpected protocol event"),
+                        })
+                        .unwrap();
+                    (protocol_consensus_event_receiver, id)
+                })
+                .await
+                .unwrap();
 
             // 4. Check that protocol sent the right header to consensus.
             let expected_hash = block.id;
             assert_eq!(expected_hash, received_hash);
 
             // 5. Ask for block.
-            protocol_command_sender
-                .send_wishlist_delta(
-                    vec![(expected_hash, Some(block.content.header.clone()))]
-                        .into_iter()
-                        .collect(),
-                    PreHashSet::<BlockId>::default(),
-                )
-                .await
-                .expect("Failed to ask for block.");
+            protocol_command_sender = tokio::task::spawn_blocking(move || {
+                protocol_command_sender
+                    .send_wishlist_delta(
+                        vec![(expected_hash, Some(block.content.header.clone()))]
+                            .into_iter()
+                            .collect(),
+                        PreHashSet::<BlockId>::default(),
+                    )
+                    .expect("Failed to ask for block.");
+                protocol_command_sender
+            })
+            .await
+            .unwrap();
 
             // 6. Check that protocol asks the node for the full block.
             match network_controller
@@ -101,9 +109,9 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() {
             );
             (
                 network_controller,
-                protocol_event_receiver,
                 protocol_command_sender,
                 protocol_manager,
+                protocol_consensus_event_receiver,
                 protocol_pool_event_receiver,
             )
         },
@@ -118,9 +126,9 @@ async fn test_protocol_sends_blocks_when_asked_for() {
     protocol_test_with_storage(
         protocol_config,
         async move |mut network_controller,
-                    protocol_event_receiver,
                     mut protocol_command_sender,
                     protocol_manager,
+                    protocol_consensus_event_receiver,
                     protocol_pool_event_receiver,
                     mut storage| {
             let send_block_info_cmd_filter = |cmd| match cmd {
@@ -141,10 +149,14 @@ async fn test_protocol_sends_blocks_when_asked_for() {
 
             // Add to storage, integrate.
             storage.store_block(block.clone());
-            protocol_command_sender
-                .integrated_block(expected_hash, storage.clone())
-                .await
-                .unwrap();
+            protocol_command_sender = tokio::task::spawn_blocking(move || {
+                protocol_command_sender
+                    .integrated_block(expected_hash, storage.clone())
+                    .unwrap();
+                protocol_command_sender
+            })
+            .await
+            .unwrap();
 
             // 3. Simulate two nodes asking for a block.
             for node in nodes.iter().take(2) {
@@ -188,9 +200,9 @@ async fn test_protocol_sends_blocks_when_asked_for() {
 
             (
                 network_controller,
-                protocol_event_receiver,
                 protocol_command_sender,
                 protocol_manager,
+                protocol_consensus_event_receiver,
                 protocol_pool_event_receiver,
             )
         },
@@ -205,9 +217,9 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f
     protocol_test_with_storage(
         protocol_config,
         async move |mut network_controller,
-                    mut protocol_event_receiver,
                     mut protocol_command_sender,
                     protocol_manager,
+                    mut protocol_consensus_event_receiver,
                     protocol_pool_event_receiver,
                     mut storage| {
             // Create 4 nodes.
@@ -235,19 +247,21 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f
 
             // node[1] asks for that block
             // Check protocol sends header to consensus.
-            let (ref_hash, _) = match wait_protocol_event(
-                &mut protocol_event_receiver,
-                1000.into(),
-                |evt| match evt {
-                    evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt),
-                    _ => None,
-                },
-            )
-            .await
-            {
-                Some(ProtocolEvent::ReceivedBlockHeader { block_id, header }) => (block_id, header),
-                _ => panic!("Unexpected or no protocol event."),
-            };
+            let (protocol_consensus_event_receiver, ref_hash) =
+                tokio::task::spawn_blocking(move || {
+                    let id = protocol_consensus_event_receiver
+                        .wait_command(MassaTime::from_millis(1000), |command| match command {
+                            MockConsensusControllerMessage::RegisterBlockHeader {
+                                block_id,
+                                header: _,
+                            } => Some(block_id),
+                            _ => panic!("unexpected protocol event"),
+                        })
+                        .unwrap();
+                    (protocol_consensus_event_receiver, id)
+                })
+                .await
+                .unwrap();
 
             storage.store_block(ref_block.clone());
 
@@ -258,10 +272,14 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f
 
             // 5. Propagate header.
             let _op_ids = ref_block.content.operations.clone();
-            protocol_command_sender
-                .integrated_block(ref_hash, storage)
-                .await
-                .expect("Failed to ask for block.");
+            protocol_command_sender = tokio::task::spawn_blocking(move || {
+                protocol_command_sender
+                    .integrated_block(ref_hash, storage.clone())
+                    .unwrap();
+                protocol_command_sender
+            })
+            .await
+            .unwrap();
 
             // 6. Check that protocol propagates the header to the right nodes.
             // node_a created the block and should receive nothing
@@ -302,9 +320,9 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f
             }
             (
                 network_controller,
-                protocol_event_receiver,
                 protocol_command_sender,
                 protocol_manager,
+                protocol_consensus_event_receiver,
                 protocol_pool_event_receiver,
             )
         },
@@ -320,9 +338,9 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl
     protocol_test_with_storage(
         protocol_config,
         async move |mut network_controller,
-                    mut protocol_event_receiver,
                     mut protocol_command_sender,
                     protocol_manager,
+                    mut protocol_consensus_event_receiver,
                     protocol_pool_event_receiver,
                     mut storage| {
             // Create 4 nodes.
@@ -350,27 +368,33 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl
 
             // node[1] asks for that block
             // Check protocol sends header to consensus.
-            let (ref_hash, _) = match wait_protocol_event(
-                &mut protocol_event_receiver,
-                1000.into(),
-                |evt| match evt {
-                    evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt),
-                    _ => None,
-                },
-            )
-            .await
-            {
-                Some(ProtocolEvent::ReceivedBlockHeader { block_id, header }) => (block_id, header),
-                _ => panic!("Unexpected or no protocol event."),
-            };
+            let (protocol_consensus_event_receiver, ref_hash) =
+                tokio::task::spawn_blocking(move || {
+                    let id = protocol_consensus_event_receiver
+                        .wait_command(MassaTime::from_millis(1000), |command| match command {
+                            MockConsensusControllerMessage::RegisterBlockHeader {
+                                block_id,
+                                header: _,
+                            } => Some(block_id),
+                            _ => panic!("unexpected protocol event"),
+                        })
+                        .unwrap();
+                    (protocol_consensus_event_receiver, id)
+                })
+                .await
+                .unwrap();
 
             storage.store_block(ref_block.clone());
 
             // 5. Propagate header.
             let _op_ids = ref_block.content.operations.clone();
-            protocol_command_sender
-                .integrated_block(ref_hash, storage)
-                .await
-                .expect("Failed to ask for block.");
+            protocol_command_sender = tokio::task::spawn_blocking(move || {
+                protocol_command_sender
+                    .integrated_block(ref_hash, storage.clone())
+                    .unwrap();
+                protocol_command_sender
+            })
+            .await
+            .unwrap();
 
             // 6. Check that protocol propagates the header to the right nodes.
             // node_a created the block and should receive nothing
@@ -442,9 +466,9 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl
             }
             (
                 network_controller,
-                protocol_event_receiver,
                 protocol_command_sender,
                 protocol_manager,
+                protocol_consensus_event_receiver,
                 protocol_pool_event_receiver,
             )
         },
@@ -460,9 +484,9 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() {
     protocol_test(
         protocol_config,
         async move |mut network_controller,
-                    protocol_event_receiver,
                     protocol_command_sender,
                     protocol_manager,
+                    protocol_consensus_event_receiver,
                     protocol_pool_event_receiver| {
             // Create 1 node.
             let mut nodes = create_and_connect_nodes(1, &mut network_controller).await;
@@ -478,9 +502,9 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() {
 
             (
                 network_controller,
-                protocol_event_receiver,
                 protocol_command_sender,
                 protocol_manager,
+                protocol_consensus_event_receiver,
                 protocol_pool_event_receiver,
             )
         },
@@ -495,9 +519,9 @@ async fn test_protocol_block_not_found() {
     protocol_test(
         protocol_config,
         async move |mut network_controller,
-                    protocol_event_receiver,
                     protocol_command_sender,
                     protocol_manager,
+                    protocol_consensus_event_receiver,
                     protocol_pool_event_receiver| {
             // Create 1 node.
             let mut nodes = create_and_connect_nodes(1, &mut network_controller).await;
@@ -533,9 +557,9 @@ async fn test_protocol_block_not_found() {
 
             (
                 network_controller,
-                protocol_event_receiver,
                 protocol_command_sender,
                 protocol_manager,
+                protocol_consensus_event_receiver,
                 protocol_pool_event_receiver,
             )
         },
diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs
index 3ef0904ecba..7917116c4a1 100644
--- a/massa-protocol-worker/src/tests/tools.rs
+++ b/massa-protocol-worker/src/tests/tools.rs
@@ -1,27 +1,36 @@
 use crate::start_protocol_controller;
 use futures::Future;
+use massa_consensus_exports::test_exports::{ConsensusEventReceiver, MockConsensusController};
+use massa_models::{
+    block::{BlockId, WrappedBlock},
+    node::NodeId,
+    operation::WrappedOperation,
+    prehash::PreHashSet,
+};
+use massa_network_exports::BlockInfoReply;
 use massa_pool_exports::test_exports::{MockPoolController, PoolEventReceiver};
 use massa_protocol_exports::{
     tests::mock_network_controller::MockNetworkController, ProtocolCommandSender, ProtocolConfig,
-    ProtocolEventReceiver, ProtocolManager,
+    ProtocolManager,
 };
 use massa_storage::Storage;
+use tokio::sync::mpsc;
 
 pub async fn protocol_test<F, V>(protocol_config: &ProtocolConfig, test: F)
 where
     F: FnOnce(
         MockNetworkController,
-        ProtocolEventReceiver,
         ProtocolCommandSender,
         ProtocolManager,
+        ConsensusEventReceiver,
         PoolEventReceiver,
     ) -> V,
     V: Future<
         Output = (
            MockNetworkController,
-            ProtocolEventReceiver,
             ProtocolCommandSender,
             ProtocolManager,
+            ConsensusEventReceiver,
             PoolEventReceiver,
         ),
     >,
@@ -30,39 +39,42 @@ where
         MockNetworkController::new();
 
     let (pool_controller, pool_event_receiver) = MockPoolController::new_with_receiver();
-
+    let (consensus_controller, consensus_event_receiver) =
+        MockConsensusController::new_with_receiver();
     // start protocol controller
-    let (protocol_command_sender, protocol_event_receiver, protocol_manager): (
-        ProtocolCommandSender,
-        ProtocolEventReceiver,
-        ProtocolManager,
-    ) = start_protocol_controller(
+    let (protocol_command_sender, protocol_command_receiver) =
+        mpsc::channel(protocol_config.controller_channel_size);
+    // start protocol controller
+    let protocol_manager: ProtocolManager = start_protocol_controller(
         *protocol_config,
         network_command_sender,
         network_event_receiver,
+        protocol_command_receiver,
+        consensus_controller,
         pool_controller,
         Storage::create_root(),
     )
     .await
     .expect("could not start protocol controller");
+    let protocol_command_sender = ProtocolCommandSender(protocol_command_sender);
 
     let (
         _network_controller,
-        protocol_event_receiver,
         _protocol_command_sender,
         protocol_manager,
+        _consensus_event_receiver,
         _pool_event_receiver,
     ) = test(
         network_controller,
-        protocol_event_receiver,
         protocol_command_sender,
         protocol_manager,
+        consensus_event_receiver,
         pool_event_receiver,
     )
     .await;
 
     protocol_manager
-        .stop(protocol_event_receiver)
+        .stop()
         .await
         .expect("Failed to shutdown protocol.");
 }
@@ -71,18 +83,18 @@ pub async fn protocol_test_with_storage(protocol_config: &ProtocolConfig,
 where
     F: FnOnce(
         MockNetworkController,
-        ProtocolEventReceiver,
         ProtocolCommandSender,
         ProtocolManager,
+        ConsensusEventReceiver,
         PoolEventReceiver,
         Storage,
     ) -> V,
     V: Future<
         Output = (
             MockNetworkController,
-            ProtocolEventReceiver,
             ProtocolCommandSender,
             ProtocolManager,
+            ConsensusEventReceiver,
             PoolEventReceiver,
         ),
     >,
@@ -90,37 +102,85 @@ where
     let (network_controller, network_command_sender, network_event_receiver) =
         MockNetworkController::new();
     let (pool_controller, mock_pool_receiver) = MockPoolController::new_with_receiver();
+    let (consensus_controller, mock_consensus_receiver) =
+        MockConsensusController::new_with_receiver();
     let storage = Storage::create_root();
     // start protocol controller
-    let (protocol_command_sender, protocol_event_receiver, protocol_manager) =
-        start_protocol_controller(
-            *protocol_config,
-            network_command_sender,
-            network_event_receiver,
-            pool_controller,
-            storage.clone(),
-        )
-        .await
-        .expect("could not start protocol controller");
+    let (protocol_command_sender, protocol_command_receiver) =
+        mpsc::channel(protocol_config.controller_channel_size);
+    let protocol_manager = start_protocol_controller(
+        *protocol_config,
+        network_command_sender,
+        network_event_receiver,
+        protocol_command_receiver,
+        consensus_controller,
+        pool_controller,
+        storage.clone(),
+    )
+    .await
+    .expect("could not start protocol controller");
+    let protocol_command_sender = ProtocolCommandSender(protocol_command_sender);
 
     let (
         _network_controller,
-        protocol_event_receiver,
         _protocol_command_sender,
         protocol_manager,
+        _consensus_event_receiver,
         _protocol_pool_event_receiver,
    ) = test(
         network_controller,
-        protocol_event_receiver,
         protocol_command_sender,
         protocol_manager,
+        mock_consensus_receiver,
         mock_pool_receiver,
         storage,
     )
     .await;
 
     protocol_manager
-        .stop(protocol_event_receiver)
+        .stop()
         .await
         .expect("Failed to shutdown protocol.");
 }
+
+/// send a block and assert it has been propagated (or not)
+pub async fn send_and_propagate_block(
+    network_controller: &mut MockNetworkController,
+    block: WrappedBlock,
+    source_node_id: NodeId,
+    protocol_command_sender: &mut ProtocolCommandSender,
+    operations: Vec<WrappedOperation>,
+) {
+    network_controller
+        .send_header(source_node_id, block.content.header.clone())
+        .await;
+
+    let mut protocol_sender = protocol_command_sender.clone();
+    tokio::task::spawn_blocking(move || {
+        protocol_sender
+            .send_wishlist_delta(
+                vec![(block.id, Some(block.content.header.clone()))]
+                    .into_iter()
+                    .collect(),
+                PreHashSet::<BlockId>::default(),
+            )
+            .unwrap();
+    })
+    .await
+    .unwrap();
+
+    // Send block info to protocol.
+    let info = vec![(
+        block.id,
+        BlockInfoReply::Info(block.content.operations.clone()),
+    )];
+    network_controller
+        .send_block_info(source_node_id, info)
+        .await;
+
+    // Send full ops.
+    let info = vec![(block.id, BlockInfoReply::Operations(operations))];
+    network_controller
+        .send_block_info(source_node_id, info)
+        .await;
+}
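Editor's note on the pattern above: because the new ProtocolCommandSender and the MockConsensusController receiver are blocking, every migrated test wraps them in tokio::task::spawn_blocking and threads the receiver back out of the closure. The same "wait for RegisterBlockHeader" dance appears in three tests; a small sketch of how it could be factored is below. This helper is hypothetical (not part of this patch); it only relies on the ConsensusEventReceiver, MockConsensusControllerMessage, and wait_command(MassaTime, filter) names as they are used in the diff, and it assumes wait_command returns None on timeout.

use massa_consensus_exports::test_exports::{
    ConsensusEventReceiver, MockConsensusControllerMessage,
};
use massa_models::block::BlockId;
use massa_time::MassaTime;

/// Hypothetical test helper: run the blocking `wait_command` call off the
/// async runtime and hand back the receiver together with the id of the
/// first block header that protocol registered with consensus.
async fn wait_registered_header(
    mut receiver: ConsensusEventReceiver,
    timeout_ms: u64,
) -> (ConsensusEventReceiver, Option<BlockId>) {
    tokio::task::spawn_blocking(move || {
        // `wait_command` blocks the current thread, so it must not run on the
        // tokio worker thread that is also driving the test future.
        let id = receiver.wait_command(MassaTime::from_millis(timeout_ms), |cmd| match cmd {
            MockConsensusControllerMessage::RegisterBlockHeader { block_id, .. } => Some(block_id),
            _ => panic!("unexpected protocol event"),
        });
        (receiver, id)
    })
    .await
    .expect("blocking task panicked")
}

With such a helper, each of the spawn_blocking blocks that only extract a block id above would reduce to roughly: let (rx, id) = wait_registered_header(rx, 1000).await;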