From 3cb56522f5c34ccf7898c717bbd51da9d6f9eba8 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Thu, 25 Jan 2024 15:37:25 -0500 Subject: [PATCH 01/28] lint up to missing docs in task-impls --- .github/workflows/test-sequencer.yml | 1 - Cargo.lock | 43 ++- Cargo.toml | 2 + crates/hotshot/Cargo.toml | 2 +- crates/hotshot/examples/infra/mod.rs | 1 - crates/hotshot/src/lib.rs | 19 +- crates/hotshot/src/tasks/mod.rs | 22 +- .../src/traits/networking/combined_network.rs | 1 - .../src/traits/networking/libp2p_network.rs | 1 - .../src/traits/networking/memory_network.rs | 1 - .../traits/networking/web_server_network.rs | 1 - crates/hotshot/src/types/handle.rs | 29 +- crates/task-impls/Cargo.toml | 3 +- crates/task-impls/src/consensus.rs | 216 ++++++------- crates/task-impls/src/da.rs | 84 ++--- crates/task-impls/src/events.rs | 3 + crates/task-impls/src/harness.rs | 106 +++---- crates/task-impls/src/network.rs | 152 +++------ crates/task-impls/src/transactions.rs | 61 ++-- crates/task-impls/src/vid.rs | 73 +++-- crates/task-impls/src/view_sync.rs | 295 ++++++++++-------- crates/task-impls/src/vote.rs | 148 ++++----- crates/testing/Cargo.toml | 1 - crates/testing/src/completion_task.rs | 11 +- crates/testing/src/lib.rs | 2 - crates/testing/src/overall_safety_task.rs | 12 +- crates/testing/src/per_node_safety_task.rs | 11 +- crates/testing/src/spinning_task.rs | 11 +- crates/testing/src/task_helpers.rs | 1 - crates/testing/src/test_launcher.rs | 6 - crates/testing/src/test_runner.rs | 6 +- crates/testing/src/txn_task.rs | 11 +- crates/testing/src/view_sync_task.rs | 10 +- crates/testing/tests/consensus_task.rs | 7 +- crates/testing/tests/da_task.rs | 2 +- crates/testing/tests/network_task.rs | 2 +- crates/testing/tests/vid_task.rs | 2 +- crates/testing/tests/view_sync_task.rs | 2 +- crates/types/Cargo.toml | 1 - crates/types/src/lib.rs | 5 +- crates/types/src/traits/network.rs | 4 +- crates/types/src/vote.rs | 12 +- 42 files changed, 647 insertions(+), 736 deletions(-) diff 
--git a/.github/workflows/test-sequencer.yml b/.github/workflows/test-sequencer.yml index 78ff9d8237..3d59116827 100644 --- a/.github/workflows/test-sequencer.yml +++ b/.github/workflows/test-sequencer.yml @@ -55,7 +55,6 @@ jobs: hotshot-state-prover = { path = "${GITHUB_WORKSPACE}/hotshot/crates/hotshot-state-prover" } hotshot-orchestrator = { path = "${GITHUB_WORKSPACE}/hotshot/crates/orchestrator" } hotshot-web-server = { path = "${GITHUB_WORKSPACE}/hotshot/crates/web_server" } - hotshot-task = { path = "${GITHUB_WORKSPACE}/hotshot/crates/task" } hotshot-task-impls = { path = "${GITHUB_WORKSPACE}/hotshot/crates/task-impls" } hotshot-testing = { path = "${GITHUB_WORKSPACE}/hotshot/crates/testing" } hotshot-types = { path = "${GITHUB_WORKSPACE}/hotshot/crates/types" } diff --git a/Cargo.lock b/Cargo.lock index cb704fc783..caf0f563c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -533,6 +533,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "async-broadcast" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "334d75cf09b33bede6cbc20e52515853ae7bee3d4eadd9540e13ce92af983d34" +dependencies = [ + "event-listener 3.1.0", + "event-listener-strategy 0.1.0", + "futures-core", +] + [[package]] name = "async-channel" version = "1.9.0" @@ -552,7 +563,7 @@ checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" dependencies = [ "concurrent-queue", "event-listener 4.0.3", - "event-listener-strategy", + "event-listener-strategy 0.4.0", "futures-core", "pin-project-lite 0.2.13", ] @@ -700,7 +711,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ "event-listener 4.0.3", - "event-listener-strategy", + "event-listener-strategy 0.4.0", "pin-project-lite 0.2.13", ] @@ -2153,6 +2164,16 @@ dependencies = [ "pin-project-lite 0.2.13", ] +[[package]] +name = "event-listener-strategy" +version = "0.1.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15c97b4e30ea7e4b7e7b429d6e2d8510433ba8cee4e70dfb3243794e539d29fd" +dependencies = [ + "event-listener 3.1.0", + "pin-project-lite 0.2.13", +] + [[package]] name = "event-listener-strategy" version = "0.4.0" @@ -2684,7 +2705,6 @@ dependencies = [ "futures", "hotshot-constants", "hotshot-orchestrator", - "hotshot-task", "hotshot-task-impls", "hotshot-testing", "hotshot-types", @@ -2699,6 +2719,7 @@ dependencies = [ "serde", "snafu", "surf-disco", + "task", "time 0.3.31", "tokio", "toml 0.8.8", @@ -2794,6 +2815,7 @@ dependencies = [ name = "hotshot-task-impls" version = "0.1.0" dependencies = [ + "async-broadcast", "async-compatibility-layer", "async-lock 2.8.0", "async-std", @@ -2804,11 +2826,11 @@ dependencies = [ "either", "futures", "hotshot-constants", - "hotshot-task", "hotshot-types", "hotshot-utils", "sha2 0.10.8", "snafu", + "task", "time 0.3.31", "tokio", "tracing", @@ -2830,7 +2852,6 @@ dependencies = [ "hotshot", "hotshot-constants", "hotshot-orchestrator", - "hotshot-task", "hotshot-task-impls", "hotshot-types", "rand 0.8.5", @@ -2897,7 +2918,6 @@ dependencies = [ "ethereum-types", "generic-array", "hotshot-constants", - "hotshot-task", "hotshot-utils", "jf-plonk", "jf-primitives", @@ -6576,6 +6596,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "task" +version = "0.1.0" +source = "git+https://github.com/EspressoSystems/BroadcastChannel.git#8a623b5fcefae1a9c1485090b281dfc4a9662770" +dependencies = [ + "async-broadcast", + "async-std", + "futures", + "tokio", +] + [[package]] name = "tempfile" version = "3.9.0" diff --git a/Cargo.toml b/Cargo.toml index 6483758693..51ce739d4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,9 +34,11 @@ ark-ed-on-bn254 = "0.4" ark-ff = "0.4" ark-serialize = "0.4" ark-std = { version = "0.4", default-features = 
false } +async-broadcast = "0.6.0" async-compatibility-layer = { git = "https://github.com/EspressoSystems/async-compatibility-layer.git", tag = "1.4.1", default-features = false, features = [ "logging-utils", ] } +task = { git = "https://github.com/EspressoSystems/BroadcastChannel.git" } async-lock = "2.8" async-trait = "0.1.77" bincode = "1.3.3" diff --git a/crates/hotshot/Cargo.toml b/crates/hotshot/Cargo.toml index 8ed04362ed..edb35dd1c1 100644 --- a/crates/hotshot/Cargo.toml +++ b/crates/hotshot/Cargo.toml @@ -96,7 +96,6 @@ hotshot-web-server = { version = "0.1.1", path = "../web_server", default-featur hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } hotshot-types = { path = "../types", version = "0.1.0", default-features = false } hotshot-utils = { path = "../utils" } -hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } libp2p-identity = { workspace = true } libp2p-networking = { workspace = true } @@ -108,6 +107,7 @@ time = { workspace = true } derive_more = "0.99.17" portpicker = "0.1.1" lru = "0.12.1" +task = { workspace = true } tracing = { workspace = true } diff --git a/crates/hotshot/examples/infra/mod.rs b/crates/hotshot/examples/infra/mod.rs index c709eaff8b..66035631a1 100644 --- a/crates/hotshot/examples/infra/mod.rs +++ b/crates/hotshot/examples/infra/mod.rs @@ -23,7 +23,6 @@ use hotshot_orchestrator::{ client::{OrchestratorClient, ValidatorArgs}, config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; -use hotshot_task::task::FilterEvent; use hotshot_testing::block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}; use hotshot_types::message::Message; use hotshot_types::traits::network::ConnectedNetwork; diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 70015ab626..4ed9f18c25 100644 --- a/crates/hotshot/src/lib.rs +++ 
b/crates/hotshot/src/lib.rs @@ -32,10 +32,6 @@ use commit::Committable; use custom_debug::Debug; use futures::join; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task_launcher::TaskRunner, -}; use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; #[cfg(feature = "hotshot-testing")] @@ -69,6 +65,7 @@ use std::{ sync::Arc, time::Duration, }; +use task::task::TaskRegistry; use tasks::add_vid_task; use tracing::{debug, error, info, instrument, trace, warn}; @@ -570,7 +567,7 @@ impl> SystemContext { pub async fn run_tasks(self) -> SystemContextHandle { // ED Need to set first first number to 1, or properly trigger the change upon start let task_runner = TaskRunner::new(); - let registry = task_runner.registry.clone(); + let registry = Arc::new(TaskRegistry::default()); let output_event_stream = self.inner.output_event_stream.clone(); let internal_event_stream = self.inner.internal_event_stream.clone(); @@ -639,17 +636,15 @@ impl> SystemContext { task_runner, internal_event_stream.clone(), output_event_stream.clone(), - handle.clone(), + &handle, ) .await; + let task_runner = add_da_task(task_runner, internal_event_stream.clone(), &handle).await; + let task_runner = add_vid_task(task_runner, internal_event_stream.clone(), &handle).await; let task_runner = - add_da_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - let task_runner = - add_vid_task(task_runner, internal_event_stream.clone(), handle.clone()).await; - let task_runner = - add_transaction_task(task_runner, internal_event_stream.clone(), handle.clone()).await; + add_transaction_task(task_runner, internal_event_stream.clone(), &handle).await; let task_runner = - add_view_sync_task(task_runner, internal_event_stream.clone(), handle.clone()).await; + add_view_sync_task(task_runner, internal_event_stream.clone(), &handle).await; async_spawn(async move { let _ = task_runner.launch().await; info!("Task 
runner exited!"); diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index 563afb53cd..144cb4a804 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -3,14 +3,6 @@ use crate::{types::SystemContextHandle, HotShotConsensusApi}; use async_compatibility_layer::art::async_sleep; use futures::FutureExt; -use hotshot_task::{ - boxed_sync, - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes}, - task_impls::TaskBuilder, - task_launcher::TaskRunner, - GeneratedStream, Merge, -}; use hotshot_task_impls::{ consensus::{ consensus_event_filter, CommitmentAndMetadata, ConsensusTaskState, ConsensusTaskTypes, @@ -200,7 +192,7 @@ pub async fn add_consensus_task>( task_runner: TaskRunner, event_stream: ChannelStream>, output_stream: ChannelStream>, - handle: SystemContextHandle, + handle: &SystemContextHandle, ) -> TaskRunner { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -258,7 +250,7 @@ pub async fn add_consensus_task>( move |event, mut state: ConsensusTaskState>| { async move { if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted::ShutDown), state) + (Some(HotShotTaskCompleted), state) } else { state.handle_event(event).await; (None, state) @@ -294,7 +286,7 @@ pub async fn add_consensus_task>( pub async fn add_vid_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - handle: SystemContextHandle, + handle: &SystemContextHandle, ) -> TaskRunner { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -351,7 +343,7 @@ pub async fn add_vid_task>( pub async fn add_da_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - handle: SystemContextHandle, + handle: &SystemContextHandle, ) -> TaskRunner { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -408,7 +400,7 @@ pub async fn add_da_task>( pub async fn 
add_transaction_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - handle: SystemContextHandle, + handle: &SystemContextHandle, ) -> TaskRunner { // build the transactions task let c_api: HotShotConsensusApi = HotShotConsensusApi { @@ -464,7 +456,7 @@ pub async fn add_transaction_task> pub async fn add_view_sync_task>( task_runner: TaskRunner, event_stream: ChannelStream>, - handle: SystemContextHandle, + handle: &SystemContextHandle, ) -> TaskRunner { let api = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), @@ -494,7 +486,7 @@ pub async fn add_view_sync_task>( move |event, mut state: ViewSyncTaskState>| { async move { if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted::ShutDown), state) + (Some(HotShotTaskCompleted), state) } else { state.handle_event(event).await; (None, state) diff --git a/crates/hotshot/src/traits/networking/combined_network.rs b/crates/hotshot/src/traits/networking/combined_network.rs index 81471dfe1f..67e937e27c 100644 --- a/crates/hotshot/src/traits/networking/combined_network.rs +++ b/crates/hotshot/src/traits/networking/combined_network.rs @@ -19,7 +19,6 @@ use async_trait::async_trait; use futures::join; use async_compatibility_layer::channel::UnboundedSendError; -use hotshot_task::{boxed_sync, BoxSyncFuture}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ diff --git a/crates/hotshot/src/traits/networking/libp2p_network.rs b/crates/hotshot/src/traits/networking/libp2p_network.rs index 8b633139af..426d24fd15 100644 --- a/crates/hotshot/src/traits/networking/libp2p_network.rs +++ b/crates/hotshot/src/traits/networking/libp2p_network.rs @@ -13,7 +13,6 @@ use async_trait::async_trait; use bimap::BiHashMap; use bincode::Options; use hotshot_constants::LOOK_AHEAD; -use hotshot_task::{boxed_sync, BoxSyncFuture}; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, 
TestableNetworkingImplementation}; use hotshot_types::{ diff --git a/crates/hotshot/src/traits/networking/memory_network.rs b/crates/hotshot/src/traits/networking/memory_network.rs index 3e28a5871e..cb67641066 100644 --- a/crates/hotshot/src/traits/networking/memory_network.rs +++ b/crates/hotshot/src/traits/networking/memory_network.rs @@ -13,7 +13,6 @@ use async_trait::async_trait; use bincode::Options; use dashmap::DashMap; use futures::StreamExt; -use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ message::{Message, MessageKind}, traits::{ diff --git a/crates/hotshot/src/traits/networking/web_server_network.rs b/crates/hotshot/src/traits/networking/web_server_network.rs index 0ea946cf98..9ca0c8134d 100644 --- a/crates/hotshot/src/traits/networking/web_server_network.rs +++ b/crates/hotshot/src/traits/networking/web_server_network.rs @@ -12,7 +12,6 @@ use async_compatibility_layer::{ use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; -use hotshot_task::{boxed_sync, BoxSyncFuture}; use hotshot_types::{ message::{Message, MessagePurpose}, traits::{ diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 6f7d919eeb..9106c4fed0 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -5,14 +5,7 @@ use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; use commit::Committable; use futures::Stream; -use hotshot_task::{ - boxed_sync, - event_stream::{ChannelStream, EventStream, StreamId}, - global_registry::GlobalRegistry, - task::FilterEvent, - BoxSyncFuture, -}; -use hotshot_task_impls::events::HotShotEvent; + #[cfg(feature = "hotshot-testing")] use hotshot_types::{ message::{MessageKind, SequencingMessage}, @@ -29,6 +22,7 @@ use hotshot_types::{ traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, }; use std::sync::Arc; +use task::task::TaskRegistry; use tracing::error; /// Event streaming 
handle for a [`SystemContext`] instance running in the background @@ -36,13 +30,14 @@ use tracing::error; /// This type provides the means to message and interact with a background [`SystemContext`] instance, /// allowing the ability to receive [`Event`]s from it, send transactions to it, and interact with /// the underlying storage. +#[derive(Clone)] pub struct SystemContextHandle> { /// The [sender](ChannelStream) for the output stream from the background process pub(crate) output_event_stream: ChannelStream>, /// access to the internal ev ent stream, in case we need to, say, shut something down pub(crate) internal_event_stream: ChannelStream>, /// registry for controlling tasks - pub(crate) registry: GlobalRegistry, + pub(crate) registry: Arc, /// Internal reference to the underlying [`SystemContext`] pub hotshot: SystemContext, @@ -51,20 +46,6 @@ pub struct SystemContextHandle> { pub(crate) storage: I::Storage, } -impl + 'static> Clone - for SystemContextHandle -{ - fn clone(&self) -> Self { - Self { - registry: self.registry.clone(), - output_event_stream: self.output_event_stream.clone(), - internal_event_stream: self.internal_event_stream.clone(), - hotshot: self.hotshot.clone(), - storage: self.storage.clone(), - } - } -} - impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user pub async fn get_event_stream( @@ -189,7 +170,7 @@ impl + 'static> SystemContextHandl { boxed_sync(async move { self.hotshot.inner.networks.shut_down_networks().await; - self.registry.shutdown_all().await; + self.registry.shutdown().await; }) } diff --git a/crates/task-impls/Cargo.toml b/crates/task-impls/Cargo.toml index e1a40992b3..ce343fbdc5 100644 --- a/crates/task-impls/Cargo.toml +++ b/crates/task-impls/Cargo.toml @@ -15,13 +15,14 @@ async-lock = { workspace = true } tracing = { workspace = true } hotshot-constants = { path = "../constants", default-features = false } hotshot-types = { path = "../types", default-features = false } -hotshot-task = { 
path = "../task", default-features = false } hotshot-utils = { path = "../utils" } time = { workspace = true } commit = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } sha2 = { workspace = true } +task = { workspace = true } +async-broadcast = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index 6e7f540816..b383f0a7ef 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -1,5 +1,5 @@ use crate::{ - events::HotShotEvent, + events::{HotShotEvent, HotShotTaskCompleted}, helpers::cancel_task, vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; @@ -10,12 +10,10 @@ use async_std::task::JoinHandle; use commit::Committable; use core::time::Duration; use hotshot_constants::LOOK_AHEAD; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; +use task::task::{Task, TaskState}; + +use async_broadcast::Sender; + use hotshot_types::{ consensus::{Consensus, View}, data::{Leaf, QuorumProposal, VidCommitment, VidDisperse}, @@ -77,8 +75,6 @@ pub struct ConsensusTaskState< pub public_key: TYPES::SignatureKey, /// Our Private Key pub private_key: ::PrivateKey, - /// The global task registry - pub registry: GlobalRegistry, /// Reference to consensus. The replica will require a write lock on this. pub consensus: Arc>>, /// View timeout from config. 
@@ -124,11 +120,7 @@ pub struct ConsensusTaskState< /// last Timeout Certificate this node formed pub timeout_cert: Option>, - /// Global events stream to publish events - pub event_stream: ChannelStream>, - - /// Event stream to publish events to the application layer - pub output_event_stream: ChannelStream>, + pub output_event_stream: async_broadcast::Sender>, /// All the VID shares we've received for current and future views. /// In the future we will need a different struct similar to VidDisperse except @@ -175,7 +167,7 @@ impl, A: ConsensusApi + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus vote if able", level = "error")] // Check if we are able to vote, like whether the proposal is valid, // whether we have DAC and VID share, and if so, vote. - async fn vote_if_able(&mut self) -> bool { + async fn vote_if_able(&mut self, event_stream: &Sender>) -> bool { if !self.quorum_membership.has_stake(&self.public_key) { debug!( "We were not chosen for consensus committee on {:?}", @@ -242,8 +234,8 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); - self.event_stream - .publish(HotShotEvent::QuorumVoteSend(vote)) + event_stream + .broadcast(HotShotEvent::QuorumVoteSend(vote)) .await; if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { if commit_and_metadata.is_genesis { @@ -346,8 +338,8 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); - self.event_stream - .publish(HotShotEvent::QuorumVoteSend(vote)) + event_stream + .broadcast(HotShotEvent::QuorumVoteSend(vote)) .await; return true; } @@ -368,7 +360,11 @@ impl, A: ConsensusApi + /// Must only update the view and GC if the view actually changes #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus update view", level = "error")] - async fn update_view(&mut self, new_view: TYPES::Time) -> bool { + async fn update_view( + &mut 
self, + new_view: TYPES::Time, + event_stream: &Sender>, + ) -> bool { if *self.cur_view < *new_view { debug!( "Updating view from {} to {} in consensus task", @@ -414,21 +410,21 @@ impl, A: ConsensusApi + .await; } - self.event_stream - .publish(HotShotEvent::ViewChange(new_view)) + event_stream + .broadcast(HotShotEvent::ViewChange(new_view)) .await; // Spawn a timeout task if we did actually update view let timeout = self.timeout; self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); // Nuance: We timeout on the view + 1 here because that means that we have // not seen evidence to transition to this new view let view_number = self.cur_view + 1; async move { async_sleep(Duration::from_millis(timeout)).await; stream - .publish(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) + .broadcast(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) .await; } })); @@ -449,7 +445,11 @@ impl, A: ConsensusApi + /// Handles a consensus event received on the event stream #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Consensus replica task", level = "error")] - pub async fn handle_event(&mut self, event: HotShotEvent) { + pub async fn handle( + &mut self, + event: HotShotEvent, + event_stream: Sender>, + ) { match event { HotShotEvent::QuorumProposalRecv(proposal, sender) => { debug!( @@ -506,7 +506,7 @@ impl, A: ConsensusApi + } // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here - self.update_view(view).await; + self.update_view(view, &event_stream).await; let consensus = self.consensus.upgradable_read().await; @@ -577,10 +577,10 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, None) + self.publish_proposal_if_able(qc.view_number + 1, None, &event_stream) .await; } - if self.vote_if_able().await { + if 
self.vote_if_able(&event_stream).await { self.current_proposal = None; } } @@ -725,7 +725,7 @@ impl, A: ConsensusApi + ) { error!("publishing view error"); self.output_event_stream - .publish(Event { + .broadcast(Event { view_number: view, event: EventType::Error { error: e.into() }, }) @@ -753,10 +753,10 @@ impl, A: ConsensusApi + } #[allow(clippy::cast_precision_loss)] if new_decide_reached { - self.event_stream - .publish(HotShotEvent::LeafDecided(leaf_views.clone())) + event_stream + .broadcast(HotShotEvent::LeafDecided(leaf_views.clone())) .await; - let decide_sent = self.output_event_stream.publish(Event { + let decide_sent = self.output_event_stream.broadcast(Event { view_number: consensus.last_decided_view, event: EventType::Decide { leaf_chain: Arc::new(leaf_views), @@ -806,11 +806,11 @@ impl, A: ConsensusApi + "Attempting to publish proposal after voting; now in view: {}", *new_view ); - self.publish_proposal_if_able(qc.view_number + 1, None) + self.publish_proposal_if_able(qc.view_number + 1, None, &event_stream) .await; } - if !self.vote_if_able().await { + if !self.vote_if_able(&event_stream).await { return; } self.current_proposal = None; @@ -833,34 +833,33 @@ impl, A: ConsensusApi + } let mut collector = self.vote_collector.write().await; - let maybe_task = collector.take(); - - if maybe_task.is_none() - || vote.get_view_number() > maybe_task.as_ref().unwrap().view + if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.quorum_membership.clone(), view: vote.get_view_number(), - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; *collector = create_vote_accumulator::< TYPES, QuorumVote, QuorumCertificate, - >(&info, vote.clone(), event) + >(&info, vote.clone(), event, &event_stream) .await; } else { - let result = 
maybe_task.unwrap().handle_event(event.clone()).await; + let result = collector + .as_mut() + .unwrap() + .handle_event(event.clone(), &event_stream) + .await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { + *collector = None; // The protocol has finished return; } - *collector = Some(result.1); } } HotShotEvent::TimeoutVoteRecv(ref vote) => { @@ -879,34 +878,34 @@ impl, A: ConsensusApi + return; } let mut collector = self.timeout_vote_collector.write().await; - let maybe_task = collector.take(); - if maybe_task.is_none() - || vote.get_view_number() > maybe_task.as_ref().unwrap().view + if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.quorum_membership.clone(), view: vote.get_view_number(), - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; *collector = create_vote_accumulator::< TYPES, TimeoutVote, TimeoutCertificate, - >(&info, vote.clone(), event) + >(&info, vote.clone(), event, &event_stream) .await; } else { - let result = maybe_task.unwrap().handle_event(event.clone()).await; + let result = collector + .as_mut() + .unwrap() + .handle_event(event.clone(), &event_stream) + .await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { + *collector = None; // The protocol has finished return; } - *collector = Some(result.1); } } HotShotEvent::QCFormed(cert) => { @@ -928,7 +927,10 @@ impl, A: ConsensusApi + let view = qc.view_number + 1; - if self.publish_proposal_if_able(view, Some(qc.clone())).await { + if self + .publish_proposal_if_able(view, Some(qc.clone()), &event_stream) + .await + { } else { warn!("Wasn't able to publish proposal"); } @@ -951,7 +953,7 @@ impl, A: ConsensusApi + ); if !self - 
.publish_proposal_if_able(qc.view_number + 1, None) + .publish_proposal_if_able(qc.view_number + 1, None, &event_stream) .await { debug!( @@ -976,9 +978,9 @@ impl, A: ConsensusApi + .write() .await .saved_da_certs - .insert(view, cert); + .insert(view, cert.clone()); - if self.vote_if_able().await { + if self.vote_if_able(&event_stream).await { self.current_proposal = None; } } @@ -1043,13 +1045,13 @@ impl, A: ConsensusApi + // update the view in state to the one in the message // Publish a view change event to the application - if !self.update_view(new_view).await { + if !self.update_view(new_view, &event_stream).await { debug!("view not updated"); return; } self.output_event_stream - .publish(Event { + .broadcast(Event { view_number: old_view_number, event: EventType::ViewFinished { view_number: old_view_number, @@ -1090,15 +1092,15 @@ impl, A: ConsensusApi + return; }; - self.event_stream - .publish(HotShotEvent::TimeoutVoteSend(vote)) + event_stream + .broadcast(HotShotEvent::TimeoutVoteSend(vote)) .await; debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view ); self.output_event_stream - .publish(Event { + .broadcast(Event { view_number: view, event: EventType::ReplicaViewTimeout { view_number: view }, }) @@ -1116,14 +1118,19 @@ impl, A: ConsensusApi + if self.quorum_membership.get_leader(view) == self.public_key && self.consensus.read().await.high_qc.get_view_number() + 1 == view { - self.publish_proposal_if_able(view, None).await; + self.publish_proposal_if_able(view, None, &event_stream) + .await; } if let Some(tc) = &self.timeout_cert { if self.quorum_membership.get_leader(tc.get_view_number() + 1) == self.public_key { - self.publish_proposal_if_able(view, self.timeout_cert.clone()) - .await; + self.publish_proposal_if_able( + view, + self.timeout_cert.clone(), + &event_stream, + ) + .await; } } } @@ -1137,6 +1144,7 @@ impl, A: ConsensusApi + &mut self, view: TYPES::Time, timeout_certificate: Option>, + 
event_stream: &Sender>, ) -> bool { if self.quorum_membership.get_leader(view) != self.public_key { // This is expected for view 1, so skipping the logging. @@ -1244,8 +1252,8 @@ impl, A: ConsensusApi + leaf.view_number, "" ); - self.event_stream - .publish(HotShotEvent::QuorumProposalSend( + event_stream + .broadcast(HotShotEvent::QuorumProposalSend( message.clone(), self.public_key.clone(), )) @@ -1259,52 +1267,36 @@ impl, A: ConsensusApi + } } -impl, A: ConsensusApi> TS +impl, A: ConsensusApi + 'static> TaskState for ConsensusTaskState { -} - -/// Type alias for Consensus task -pub type ConsensusTaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - ConsensusTaskState, ->; - -/// Event handle for consensus -pub async fn sequencing_consensus_handle< - TYPES: NodeType, - I: NodeImplementation, - A: ConsensusApi + 'static, ->( - event: HotShotEvent, - mut state: ConsensusTaskState, -) -> ( - std::option::Option, - ConsensusTaskState, -) { - if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - state.handle_event(event).await; - (None, state) + type Event = HotShotEvent; + type Result = (); + fn filter(event: &HotShotEvent) -> bool { + matches!( + event, + HotShotEvent::QuorumProposalRecv(_, _) + | HotShotEvent::QuorumVoteRecv(_) + | HotShotEvent::QCFormed(_) + | HotShotEvent::DACRecv(_) + | HotShotEvent::ViewChange(_) + | HotShotEvent::SendPayloadCommitmentAndMetadata(..) + | HotShotEvent::Timeout(_) + | HotShotEvent::TimeoutVoteRecv(_) + | HotShotEvent::VidDisperseRecv(..) 
+ | HotShotEvent::Shutdown, + ) + } + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + // TODO: Don't clone the sender + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) } -} - -/// Filter for consensus, returns true for event types the consensus task subscribes to. -pub fn consensus_event_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::QuorumProposalRecv(_, _) - | HotShotEvent::QuorumVoteRecv(_) - | HotShotEvent::QCFormed(_) - | HotShotEvent::DACRecv(_) - | HotShotEvent::ViewChange(_) - | HotShotEvent::SendPayloadCommitmentAndMetadata(..) - | HotShotEvent::Timeout(_) - | HotShotEvent::TimeoutVoteRecv(_) - | HotShotEvent::VidDisperseRecv(..) - | HotShotEvent::Shutdown, - ) } diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index bab588d1dd..cb8f8b99ec 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -1,15 +1,10 @@ use crate::{ - events::HotShotEvent, + events::{HotShotEvent, HotShotTaskCompleted}, vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; +use async_broadcast::Sender; use async_lock::RwLock; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, @@ -30,6 +25,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use sha2::{Digest, Sha256}; +use task::task::{Task, TaskState}; use crate::vote::HandleVoteEvent; use snafu::Snafu; @@ -51,8 +47,6 @@ pub struct DATaskState< > { /// The state's api pub api: A, - /// Global registry task for the state - pub registry: GlobalRegistry, /// View number this view is executing in. 
pub cur_view: TYPES::Time, @@ -74,9 +68,6 @@ pub struct DATaskState< /// The current vote collection task, if there is one. pub vote_collector: RwLock, DACertificate>>, - /// Global events stream to publish events - pub event_stream: ChannelStream>, - /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -92,9 +83,10 @@ impl, A: ConsensusApi + { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "DA Main Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: HotShotEvent, + event_stream: Sender>, ) -> Option { match event { HotShotEvent::DAProposalRecv(proposal, sender) => { @@ -177,9 +169,7 @@ impl, A: ConsensusApi + // self.cur_view = view; debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); - self.event_stream - .publish(HotShotEvent::DAVoteSend(vote)) - .await; + event_stream.broadcast(HotShotEvent::DAVoteSend(vote)).await; let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -204,34 +194,33 @@ impl, A: ConsensusApi + } let mut collector = self.vote_collector.write().await; - let maybe_task = collector.take(); - - if maybe_task.is_none() - || vote.get_view_number() > maybe_task.as_ref().unwrap().view + if collector.is_none() || vote.get_view_number() > collector.as_ref().unwrap().view { debug!("Starting vote handle for view {:?}", vote.get_view_number()); let info = AccumulatorInfo { public_key: self.public_key.clone(), membership: self.da_membership.clone(), view: vote.get_view_number(), - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; *collector = create_vote_accumulator::< TYPES, DAVote, DACertificate, - >(&info, vote.clone(), event) + >(&info, vote.clone(), event, &event_stream) .await; } else { - let result = maybe_task.unwrap().handle_event(event.clone()).await; + let result = collector + .as_mut() + 
.unwrap() + .handle_event(event.clone(), &event_stream) + .await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { + *collector = None; // The protocol has finished return None; } - *collector = Some(result.1); } } HotShotEvent::ViewChange(view) => { @@ -311,8 +300,8 @@ impl, A: ConsensusApi + _pd: PhantomData, }; - self.event_stream - .publish(HotShotEvent::DAProposalSend( + event_stream + .broadcast(HotShotEvent::DAProposalSend( message.clone(), self.public_key.clone(), )) @@ -327,7 +316,7 @@ impl, A: ConsensusApi + HotShotEvent::Shutdown => { error!("Shutting down because of shutdown signal!"); - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } _ => { error!("unexpected event {:?}", event); @@ -351,15 +340,34 @@ impl, A: ConsensusApi + } /// task state implementation for DA Task -impl, A: ConsensusApi + 'static> TS +impl, A: ConsensusApi + 'static> TaskState for DATaskState { -} + type Event = HotShotEvent; -/// Type alias for DA Task Types -pub type DATaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - DATaskState, ->; + type Result = HotShotTaskCompleted; + + fn filter(event: &HotShotEvent) -> bool { + matches!( + event, + HotShotEvent::DAProposalRecv(_, _) + | HotShotEvent::DAVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::TransactionsSequenced(_, _, _) + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewChange(_) + ) + } + + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} diff --git a/crates/task-impls/src/events.rs b/crates/task-impls/src/events.rs index 92e49d02ea..902f886fd5 100644 --- a/crates/task-impls/src/events.rs +++ b/crates/task-impls/src/events.rs @@ -15,6 +15,9 @@ use hotshot_types::{ 
traits::{node_implementation::NodeType, BlockPayload}, }; +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct HotShotTaskCompleted; + /// All of the possible events that can be passed between Sequecning `HotShot` tasks #[derive(Eq, Hash, PartialEq, Debug, Clone)] pub enum HotShotEvent { diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 2ee224b7cd..851ae968f5 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -1,36 +1,34 @@ -use crate::events::HotShotEvent; -use async_compatibility_layer::art::async_spawn; +use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use async_broadcast::broadcast; -use futures::FutureExt; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task::{FilterEvent, HandleEvent, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEvent, TaskBuilder}, - task_launcher::TaskRunner, -}; use hotshot_types::traits::node_implementation::NodeType; -use snafu::Snafu; use std::{collections::HashMap, future::Future, sync::Arc}; +use task::task::{Task, TaskRegistry, TaskState}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { /// The expected events we get from the test. 
Maps an event to the number of times we expect to see it expected_output: HashMap, usize>, + /// + allow_extra_output: bool, } -impl TS for TestHarnessState {} +impl TaskState for TestHarnessState { + type Event = HotShotEvent; + type Result = HotShotTaskCompleted; -/// Error emitted if the test harness task fails -#[derive(Snafu, Debug)] -pub struct TestHarnessTaskError {} + async fn handle_event( + event: Self::Event, + task: &mut task::task::Task, + ) -> Option { + let extra = task.state_mut().allow_extra_output; + handle_event(event, task, extra) + } -/// Type alias for the Test Harness Task -pub type TestHarnessTaskTypes = HSTWithEvent< - TestHarnessTaskError, - HotShotEvent, - ChannelStream>, - TestHarnessState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} /// Runs a test by building the task using `build_fn` and then passing it the `input` events /// and testing the make sure all of the `expected_output` events are seen @@ -43,46 +41,40 @@ pub type TestHarnessTaskTypes = HSTWithEvent< /// # Panics /// Panics if any state the test expects is not set. 
Panicing causes a test failure #[allow(clippy::implicit_hasher)] -pub async fn run_harness( +pub async fn run_harness>>( input: Vec>, expected_output: HashMap, usize>, - event_stream: Option>>, - build_fn: impl FnOnce(TaskRunner, ChannelStream>) -> Fut, + state: S, allow_extra_output: bool, ) where TYPES: NodeType, - Fut: Future, + S: Send + 'static, + Fut: Future>, { - let task_runner = TaskRunner::new(); - let registry = task_runner.registry.clone(); - let event_stream = event_stream.unwrap_or_default(); - let state = TestHarnessState { expected_output }; - let handler = HandleEvent(Arc::new(move |event, state| { - async move { handle_event(event, state, allow_extra_output) }.boxed() - })); - let filter = FilterEvent::default(); - let builder = TaskBuilder::>::new("test_harness".to_string()) - .register_event_stream(event_stream.clone(), filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(state) - .register_event_handler(handler); - - let id = builder.get_task_id().unwrap(); - - let task = TestHarnessTaskTypes::build(builder).launch(); - - let task_runner = task_runner.add_task(id, "test_harness".to_string(), task); - let task_runner = build_fn(task_runner, event_stream.clone()).await; - - let runner = async_spawn(async move { task_runner.launch().await }); + let registry = Arc::new(TaskRegistry::default()); + // set up two broadcast channels so the test sends to the task and the task back to the test + let (to_task, from_test) = broadcast(1024); + let (to_test, from_task) = broadcast(1024); + let test_state = TestHarnessState { + expected_output, + allow_extra_output, + }; + + let test_task = Task::new( + to_test.clone(), + from_task.clone(), + registry.clone(), + test_state, + ); + let task = Task::new(to_task.clone(), from_test.clone(), registry.clone(), state); + registry.run_task(test_task).await; + registry.run_task(task).await; for event in input { - let () = event_stream.publish(event).await; + let _ = 
to_task.broadcast(event).await.unwrap(); } - let _ = runner.await; + let _ = Arc::into_inner(registry).unwrap().join_all().await; } /// Handles an event for the Test Harness Task. If the event is expected, remove it from @@ -97,12 +89,10 @@ pub async fn run_harness( #[allow(clippy::needless_pass_by_value)] pub fn handle_event( event: HotShotEvent, - mut state: TestHarnessState, + task: &mut Task>, allow_extra_output: bool, -) -> ( - std::option::Option, - TestHarnessState, -) { +) -> Option { + let state = task.state_mut(); // Check the output in either case: // * We allow outputs only in our expected output set. // * We haven't received all expected outputs yet. @@ -121,8 +111,8 @@ pub fn handle_event( } if state.expected_output.is_empty() { - return (Some(HotShotTaskCompleted::ShutDown), state); + return Some(HotShotTaskCompleted); } - (None, state) + None } diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 05587207a9..af84544589 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -1,16 +1,11 @@ -use crate::events::HotShotEvent; +use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use async_broadcast::Sender; use either::Either::{self, Left, Right}; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task::{FilterEvent, HotShotTaskCompleted, TS}, - task_impls::{HSTWithEvent, HSTWithMessage}, - GeneratedStream, Merge, -}; + use hotshot_types::{ message::{ - CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, Messages, - SequencingMessage, + CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, SequencingMessage, }, traits::{ election::Membership, @@ -19,8 +14,7 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use snafu::Snafu; -use std::sync::Arc; +use task::task::{Task, TaskState}; use tracing::error; use tracing::instrument; @@ -39,11 +33,25 @@ pub enum 
NetworkTaskKind { /// the network message task state pub struct NetworkMessageTaskState { - /// event stream (used for publishing) - pub event_stream: ChannelStream>, + event_stream: Sender>, } -impl TS for NetworkMessageTaskState {} +impl TaskState for NetworkMessageTaskState { + type Event = Vec>; + type Result = (); + + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> + where + Self: Sized, + { + task.state_mut().handle_messages(event).await; + None + } + + fn should_shutdown(_event: &Self::Event) -> bool { + false + } +} impl NetworkMessageTaskState { /// Handle the message. @@ -118,7 +126,7 @@ impl NetworkMessageTaskState { // TODO (Keyao benchmarking) Update these event variants (similar to the // `TransactionsRecv` event) so we can send one event for a vector of messages. // - self.event_stream.publish(event).await; + self.event_stream.broadcast(event).await; } MessageKind::Data(message) => match message { hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { @@ -129,7 +137,7 @@ impl NetworkMessageTaskState { } if !transactions.is_empty() { self.event_stream - .publish(HotShotEvent::TransactionsRecv(transactions)) + .broadcast(HotShotEvent::TransactionsRecv(transactions)) .await; } } @@ -139,16 +147,40 @@ impl NetworkMessageTaskState { pub struct NetworkEventTaskState> { /// comm channel pub channel: COMMCHANNEL, - /// event stream - pub event_stream: ChannelStream>, /// view number pub view: TYPES::Time, + /// membership for the channel + pub membership: TYPES::Membership, // TODO ED Need to add exchange so we can get the recipient key and our own key? 
} -impl> TS +impl> TaskState for NetworkEventTaskState { + type Event = HotShotEvent; + + type Result = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let membership = task.state_mut().membership.clone(); + task.state_mut().handle_event(event, &membership).await + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } + + fn filter(_event: &Self::Event) -> bool { + // default doesn't filter + false + } + + fn shutdown(&mut self) -> impl std::future::Future + Send { + async {} + } } impl> @@ -282,7 +314,7 @@ impl> } HotShotEvent::Shutdown => { error!("Networking task shutting down"); - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } event => { error!("Receieved unexpected message in network task {:?}", event); @@ -310,84 +342,4 @@ impl> None } - - /// network filter - pub fn filter(task_kind: NetworkTaskKind) -> FilterEvent> { - match task_kind { - NetworkTaskKind::Quorum => FilterEvent(Arc::new(Self::quorum_filter)), - NetworkTaskKind::Committee => FilterEvent(Arc::new(Self::committee_filter)), - NetworkTaskKind::ViewSync => FilterEvent(Arc::new(Self::view_sync_filter)), - NetworkTaskKind::VID => FilterEvent(Arc::new(Self::vid_filter)), - } - } - - /// quorum filter - fn quorum_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::QuorumProposalSend(_, _) - | HotShotEvent::QuorumVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::DACSend(_, _) - | HotShotEvent::ViewChange(_) - | HotShotEvent::TimeoutVoteSend(_) - ) - } - - /// committee filter - fn committee_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::DAProposalSend(_, _) - | HotShotEvent::DAVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) - ) - } - - /// vid filter - fn vid_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::Shutdown - | HotShotEvent::VidDisperseSend(_, 
_) - | HotShotEvent::ViewChange(_) - ) - } - - /// view sync filter - fn view_sync_filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) - | HotShotEvent::ViewSyncPreCommitVoteSend(_) - | HotShotEvent::ViewSyncCommitVoteSend(_) - | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::Shutdown - | HotShotEvent::ViewChange(_) - ) - } } - -/// network error (no errors right now, only stub) -#[derive(Snafu, Debug)] -pub struct NetworkTaskError {} - -/// networking message task types -pub type NetworkMessageTaskTypes = HSTWithMessage< - NetworkTaskError, - Either, Messages>, - // A combination of broadcast and direct streams. - Merge>, GeneratedStream>>, - NetworkMessageTaskState, ->; - -/// network event task types -pub type NetworkEventTaskTypes = HSTWithEvent< - NetworkTaskError, - HotShotEvent, - ChannelStream>, - NetworkEventTaskState, ->; diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 3fe87ab312..207b026af8 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -1,4 +1,5 @@ -use crate::events::HotShotEvent; +use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use async_broadcast::Sender; use async_compatibility_layer::{ art::async_timeout, async_primitives::subscribable_rwlock::{ReadView, SubscribableRwLock}, @@ -6,12 +7,7 @@ use async_compatibility_layer::{ use async_lock::RwLock; use bincode::config::Options; use commit::{Commitment, Committable}; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; + use hotshot_types::{ consensus::Consensus, data::Leaf, @@ -32,6 +28,7 @@ use std::{ sync::Arc, time::Instant, }; +use task::task::{Task, TaskState}; use tracing::{debug, 
error, instrument, warn}; /// A type alias for `HashMap, T>` @@ -49,8 +46,6 @@ pub struct TransactionTaskState< > { /// The state's api pub api: A, - /// Global registry task for the state - pub registry: GlobalRegistry, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -70,9 +65,6 @@ pub struct TransactionTaskState< /// Membership for teh quorum pub membership: Arc, - /// Global events stream to publish events - pub event_stream: ChannelStream>, - /// This Nodes Public Key pub public_key: TYPES::SignatureKey, /// Our Private Key @@ -87,9 +79,10 @@ impl, A: ConsensusApi + /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: HotShotEvent, + event_stream: Sender>, ) -> Option { match event { HotShotEvent::TransactionsRecv(transactions) => { @@ -250,8 +243,8 @@ impl, A: ConsensusApi + // send the sequenced transactions to VID and DA tasks let block_view = if make_block { view } else { view + 1 }; - self.event_stream - .publish(HotShotEvent::TransactionsSequenced( + event_stream + .broadcast(HotShotEvent::TransactionsSequenced( encoded_transactions, metadata, block_view, @@ -261,7 +254,7 @@ impl, A: ConsensusApi + return None; } HotShotEvent::Shutdown => { - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } _ => {} } @@ -335,9 +328,17 @@ impl, A: ConsensusApi + // .collect(); Some(txns) } +} + +/// task state implementation for Transactions Task +impl, A: ConsensusApi + 'static> TaskState + for TransactionTaskState +{ + type Event = HotShotEvent; + + type Result = HotShotTaskCompleted; - /// Event filter for the transaction task - pub fn filter(event: &HotShotEvent) -> bool { + fn filter(event: &HotShotEvent) -> bool { matches!( event, HotShotEvent::TransactionsRecv(_) @@ -346,18 +347,16 @@ impl, A: ConsensusApi + | HotShotEvent::ViewChange(_) ) } 
-} -/// task state implementation for Transactions Task -impl, A: ConsensusApi + 'static> TS - for TransactionTaskState -{ -} + async fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> Option { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await + } -/// Type alias for DA Task Types -pub type TransactionsTaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - TransactionTaskState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index a4b9338f87..d3056772da 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -1,13 +1,9 @@ -use crate::events::HotShotEvent; +use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use async_broadcast::Sender; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; -use hotshot_task::{ - event_stream::ChannelStream, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; + use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::Consensus, @@ -24,10 +20,10 @@ use hotshot_types::{ data::{test_srs, VidScheme, VidSchemeTrait}, traits::network::ConsensusIntentEvent, }; +use task::task::TaskState; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; -use hotshot_task::event_stream::EventStream; use snafu::Snafu; use std::marker::PhantomData; use std::sync::Arc; @@ -45,8 +41,6 @@ pub struct VIDTaskState< > { /// The state's api pub api: A, - /// Global registry task for the state - pub registry: GlobalRegistry, /// View number this view is executing in. pub cur_view: TYPES::Time, @@ -63,10 +57,6 @@ pub struct VIDTaskState< pub private_key: ::PrivateKey, /// The view and ID of the current vote collection task, if there is one. 
pub vote_collector: Option<(TYPES::Time, usize, usize)>, - - /// Global events stream to publish events - pub event_stream: ChannelStream>, - /// This state's ID pub id: u64, } @@ -76,9 +66,10 @@ impl, A: ConsensusApi + { /// main task event handler #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "VID Main Task", level = "error")] - pub async fn handle_event( + pub async fn handle( &mut self, event: HotShotEvent, + event_stream: Sender>, ) -> Option { match event { HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, view_number) => { @@ -104,8 +95,8 @@ impl, A: ConsensusApi + // Unwrap here will just propogate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building - self.event_stream - .publish(HotShotEvent::SendPayloadCommitmentAndMetadata( + event_stream + .broadcast(HotShotEvent::SendPayloadCommitmentAndMetadata( vid_disperse.commit, metadata, view_number, @@ -113,8 +104,8 @@ impl, A: ConsensusApi + .await; // send the block to the VID dispersal function - self.event_stream - .publish(HotShotEvent::BlockReady( + event_stream + .broadcast(HotShotEvent::BlockReady( VidDisperse::from_membership(view_number, vid_disperse, &self.membership), view_number, )) @@ -130,8 +121,8 @@ impl, A: ConsensusApi + return None; }; debug!("publishing VID disperse for view {}", *view_number); - self.event_stream - .publish(HotShotEvent::VidDisperseSend( + event_stream + .broadcast(HotShotEvent::VidDisperseSend( Proposal { signature, data: vid_disperse, @@ -169,7 +160,7 @@ impl, A: ConsensusApi + } HotShotEvent::Shutdown => { - return Some(HotShotTaskCompleted::ShutDown); + return Some(HotShotTaskCompleted); } _ => { error!("unexpected event {:?}", event); @@ -177,9 +168,26 @@ impl, A: ConsensusApi + } None } +} + +/// task state implementation for VID Task +impl, A: ConsensusApi + 'static> TaskState + for VIDTaskState +{ 
+ type Event = HotShotEvent; + + type Result = HotShotTaskCompleted; - /// Filter the VID event. - pub fn filter(event: &HotShotEvent) -> bool { + async fn handle_event( + event: Self::Event, + task: &mut task::task::Task, + ) -> Option { + // TODO: Don't clone the sender + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + fn filter(event: &Self::Event) -> bool { matches!( event, HotShotEvent::Shutdown @@ -188,18 +196,7 @@ impl, A: ConsensusApi + | HotShotEvent::ViewChange(_) ) } + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } } - -/// task state implementation for VID Task -impl, A: ConsensusApi + 'static> TS - for VIDTaskState -{ -} - -/// Type alias for VID Task Types -pub type VIDTaskTypes = HSTWithEvent< - ConsensusTaskError, - HotShotEvent, - ChannelStream>, - VIDTaskState, ->; diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 1c9e607fb0..6d8cb1e3a4 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -1,16 +1,12 @@ #![allow(clippy::module_name_repetitions)] use crate::{ - events::HotShotEvent, + events::{HotShotEvent, HotShotTaskCompleted}, helpers::cancel_task, vote::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState}, }; +use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; use hotshot_types::{ simple_certificate::{ ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, @@ -29,7 +25,6 @@ use hotshot_types::{ #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; -use hotshot_task::global_registry::GlobalRegistry; use hotshot_types::{ message::GeneralConsensusMessage, traits::{ @@ -42,6 +37,7 @@ use 
hotshot_types::{ }; use snafu::Snafu; use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; +use task::task::TaskState; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; @@ -72,10 +68,6 @@ pub struct ViewSyncTaskState< I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, > { - /// Registry to register sub tasks - pub registry: GlobalRegistry, - /// Event stream to publish events to - pub event_stream: ChannelStream>, /// View HotShot is currently in pub current_view: TYPES::Time, /// View HotShot wishes to be in @@ -120,17 +112,22 @@ impl< TYPES: NodeType, I: NodeImplementation, A: ConsensusApi + 'static + std::clone::Clone, - > TS for ViewSyncTaskState + > TaskState for ViewSyncTaskState { -} + type Event = HotShotEvent; + + type Result = (); + + async fn handle_event(event: Self::Event, task: &mut task::task::Task) -> Option<()> { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } -/// Types for the main view sync task -pub type ViewSyncTaskStateTypes = HSTWithEvent< - ViewSyncTaskError, - HotShotEvent, - ChannelStream>, - ViewSyncTaskState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} /// State of a view sync replica task pub struct ViewSyncReplicaTaskState< @@ -165,22 +162,40 @@ pub struct ViewSyncReplicaTaskState< pub private_key: ::PrivateKey, /// HotShot consensus API pub api: A, - /// Event stream to publish events to - pub event_stream: ChannelStream>, } -impl, A: ConsensusApi + 'static> TS +impl, A: ConsensusApi + 'static> TaskState for ViewSyncReplicaTaskState { -} + type Event = HotShotEvent; + + type Result = (); + + async fn handle_event(event: Self::Event, task: &mut task::task::Task) -> Option<()> { + let sender = task.clone_sender(); + task.state_mut().handle(event, sender).await; + None + } + fn filter(event: &Self::Event) -> bool { + 
matches!( + event, + HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewChange(_) + ) + } -/// Types for view sync replica state -pub type ViewSyncReplicaTaskStateTypes = HSTWithEvent< - ViewSyncTaskError, - HotShotEvent, - ChannelStream>, - ViewSyncReplicaTaskState, ->; + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} impl< TYPES: NodeType, @@ -195,6 +210,7 @@ impl< &mut self, event: HotShotEvent, view: TYPES::Time, + sender: &Sender>, ) { // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it @@ -204,17 +220,17 @@ impl< return; } - if let Some(replica_task) = task_map.remove(&view) { + if let Some(replica_task) = task_map.get_mut(&view) { // Forward event then return debug!("Forwarding message"); - let result = replica_task.handle_event(event.clone()).await; + let result = replica_task.handle(event.clone(), sender.clone()).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished + task_map.remove(&view); return; } - task_map.insert(view, result.1); return; } @@ -231,62 +247,65 @@ impl< public_key: self.public_key.clone(), private_key: self.private_key.clone(), api: self.api.clone(), - event_stream: self.event_stream.clone(), view_sync_timeout: self.view_sync_timeout, id: self.id, }; - let result = replica_state.handle_event(event.clone()).await; + let result = replica_state.handle(event.clone(), sender.clone()).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == 
Some(HotShotTaskCompleted) { // The protocol has finished return; } - replica_state = result.1; - task_map.insert(view, replica_state); } #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Main Task", level = "error")] #[allow(clippy::type_complexity)] /// Handles incoming events for the main view sync task - pub async fn handle_event(&mut self, event: HotShotEvent) { + pub async fn handle( + &mut self, + event: HotShotEvent, + event_stream: Sender>, + ) { match &event { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.get_view_number(); - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncTimeout(view, _, _) => { debug!("view sync timeout in main task {:?}", view); let view = *view; - self.send_to_or_create_replica(event, view).await; + self.send_to_or_create_replica(event, view, &event_stream) + .await; } HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { let mut map = self.pre_commit_relay_map.write().await; let vote_view = vote.get_view_number(); - if let Some(relay_task) = map.remove(&vote_view) { + if let Some(relay_task) = map.get_mut(&vote_view) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone()).await; + let 
result = relay_task.handle_event(event.clone(), &event_stream).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished - return; + map.remove(&vote_view); } - - map.insert(vote_view, result.1); return; } @@ -305,11 +324,10 @@ impl< public_key: self.public_key.clone(), membership: self.membership.clone(), view: vote_view, - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; - let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + let vote_collector = + create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; if let Some(vote_task) = vote_collector { map.insert(vote_view, vote_task); } @@ -318,16 +336,14 @@ impl< HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { let mut map = self.commit_relay_map.write().await; let vote_view = vote.get_view_number(); - if let Some(relay_task) = map.remove(&vote_view) { + if let Some(relay_task) = map.get_mut(&vote_view) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone()).await; + let result = relay_task.handle_event(event.clone(), &event_stream).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished - return; + map.remove(&vote_view); } - - map.insert(vote_view, result.1); return; } @@ -346,11 +362,10 @@ impl< public_key: self.public_key.clone(), membership: self.membership.clone(), view: vote_view, - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; - let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + let vote_collector = + create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; if let Some(vote_task) = vote_collector { map.insert(vote_view, vote_task); } @@ -359,16 +374,14 @@ impl< HotShotEvent::ViewSyncFinalizeVoteRecv(ref vote) => { let mut map = 
self.finalize_relay_map.write().await; let vote_view = vote.get_view_number(); - if let Some(relay_task) = map.remove(&vote_view) { + if let Some(relay_task) = map.get_mut(&vote_view) { debug!("Forwarding message"); - let result = relay_task.handle_event(event.clone()).await; + let result = relay_task.handle_event(event.clone(), &event_stream).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished - return; + map.remove(&vote_view); } - - map.insert(vote_view, result.1); return; } @@ -387,11 +400,10 @@ impl< public_key: self.public_key.clone(), membership: self.membership.clone(), view: vote_view, - event_stream: self.event_stream.clone(), id: self.id, - registry: self.registry.clone(), }; - let vote_collector = create_vote_accumulator(&info, vote.clone(), event).await; + let vote_collector = + create_vote_accumulator(&info, vote.clone(), event, &event_stream).await; if let Some(vote_task) = vote_collector { map.insert(vote_view, vote_task); } @@ -510,13 +522,14 @@ impl< self.send_to_or_create_replica( HotShotEvent::ViewSyncTrigger(view_number + 1), view_number + 1, + &event_stream, ) .await; } else { // If this is the first timeout we've seen advance to the next view self.current_view = view_number; - self.event_stream - .publish(HotShotEvent::ViewChange(TYPES::Time::new( + event_stream + .broadcast(HotShotEvent::ViewChange(TYPES::Time::new( *self.current_view, ))) .await; @@ -550,13 +563,11 @@ impl, A: ConsensusApi + { #[instrument(skip_all, fields(id = self.id, view = *self.current_view), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task - pub async fn handle_event( - mut self, + pub async fn handle( + &mut self, event: HotShotEvent, - ) -> ( - std::option::Option, - ViewSyncReplicaTaskState, - ) { + event_stream: Sender>, + ) -> Option { match event { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { let 
last_seen_certificate = ViewSyncPhase::PreCommit; @@ -565,20 +576,20 @@ impl, A: ConsensusApi + if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); - return (None, self); + return None; } // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); - return (None, self); + return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round if certificate.get_view_number() > self.next_view { - return (Some(HotShotTaskCompleted::ShutDown), self); + return Some(HotShotTaskCompleted); } if certificate.get_data().relay > self.relay { @@ -595,13 +606,13 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign ViewSyncCommitData!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncCommitVoteSend(vote)) + event_stream + .broadcast(HotShotEvent::ViewSyncCommitVoteSend(vote)) .await; } @@ -610,15 +621,18 @@ impl, A: ConsensusApi + } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); let phase = last_seen_certificate; + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; - info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", self.relay); + async_sleep(timeout).await; + info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + .broadcast(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, phase, )) .await; 
@@ -633,20 +647,20 @@ impl, A: ConsensusApi + if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); - return (None, self); + return None; } // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); - return (None, self); + return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round if certificate.get_view_number() > self.next_view { - return (Some(HotShotTaskCompleted::ShutDown), self); + return Some(HotShotTaskCompleted); } if certificate.get_data().relay > self.relay { @@ -663,13 +677,13 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign view sync finalized vote!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) + event_stream + .broadcast(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) .await; } @@ -678,30 +692,33 @@ impl, A: ConsensusApi + *self.next_view ); - self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view - 1)) + event_stream + .broadcast(HotShotEvent::ViewChange(self.next_view - 1)) .await; - self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view)) + event_stream + .broadcast(HotShotEvent::ViewChange(self.next_view)) .await; if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); let phase = last_seen_certificate; + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; + async_sleep(timeout).await; info!( 
"Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", - self.relay + relay ); stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + .broadcast(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, phase, )) .await; @@ -714,20 +731,20 @@ impl, A: ConsensusApi + if certificate.get_view_number() < self.next_view { warn!("We're already in a higher round"); - return (None, self); + return None; } // If certificate is not valid, return current state if !certificate.is_valid_cert(self.membership.as_ref()) { error!("Not valid view sync cert! {:?}", certificate.get_data()); - return (None, self); + return None; } // If certificate is for a higher round shutdown this task // since another task should have been started for the higher round if certificate.get_view_number() > self.next_view { - return (Some(HotShotTaskCompleted::ShutDown), self); + return Some(HotShotTaskCompleted); } // cancel poll for votes @@ -759,16 +776,16 @@ impl, A: ConsensusApi + cancel_task(timeout_task).await; } - self.event_stream - .publish(HotShotEvent::ViewChange(self.next_view)) + event_stream + .broadcast(HotShotEvent::ViewChange(self.next_view)) .await; - return (Some(HotShotTaskCompleted::ShutDown), self); + return Some(HotShotTaskCompleted); } HotShotEvent::ViewSyncTrigger(view_number) => { if self.next_view != TYPES::Time::new(*view_number) { error!("Unexpected view number to triger view sync"); - return (None, self); + return None; } let Ok(vote) = ViewSyncPreCommitVote::::create_signed_vote( @@ -781,32 +798,35 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign pre commit vote!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + event_stream + 
.broadcast(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) .await; } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; + async_sleep(timeout).await; info!("Vote sending timed out in ViewSyncTrigger"); stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + .broadcast(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, ViewSyncPhase::None, )) .await; } })); - return (None, self); + return None; } HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { @@ -832,14 +852,14 @@ impl, A: ConsensusApi + &self.private_key, ) else { error!("Failed to sign ViewSyncPreCommitData!"); - return (None, self); + return None; }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - self.event_stream - .publish(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + event_stream + .broadcast(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) .await; } } @@ -850,28 +870,31 @@ impl, A: ConsensusApi + } self.timeout_task = Some(async_spawn({ - let stream = self.event_stream.clone(); + let stream = event_stream.clone(); + let relay = self.relay; + let next_view = self.next_view; + let timeout = self.view_sync_timeout; async move { - async_sleep(self.view_sync_timeout).await; + async_sleep(timeout).await; info!( "Vote sending timed out in ViewSyncTimeout relay = {}", - self.relay + relay ); stream - .publish(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*self.next_view), - self.relay, + .broadcast(HotShotEvent::ViewSyncTimeout( + TYPES::Time::new(*next_view), + relay, last_seen_certificate, )) .await; } })); - return (None, self); + return None; } } - _ => return (None, self), + _ => return None, } - 
(None, self) + None } } diff --git a/crates/task-impls/src/vote.rs b/crates/task-impls/src/vote.rs index bf20a7bad9..1682c3dd17 100644 --- a/crates/task-impls/src/vote.rs +++ b/crates/task-impls/src/vote.rs @@ -1,15 +1,11 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; -use crate::events::HotShotEvent; +use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use async_broadcast::Sender; use async_trait::async_trait; use bitvec::prelude::*; use either::Either::{self, Left, Right}; -use hotshot_task::{ - event_stream::{ChannelStream, EventStream}, - global_registry::GlobalRegistry, - task::{HotShotTaskCompleted, TS}, - task_impls::HSTWithEvent, -}; + use hotshot_types::{ simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, @@ -23,6 +19,7 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; use snafu::Snafu; +use task::task::TaskState; use tracing::{debug, error}; #[derive(Snafu, Debug)] @@ -47,9 +44,6 @@ pub struct VoteCollectionTaskState< /// The view which we are collecting votes for pub view: TYPES::Time, - /// global event stream - pub event_stream: ChannelStream>, - /// Node id pub id: u64, } @@ -76,9 +70,13 @@ impl< { /// Take one vote and accumultate it. 
Returns either the cert or the updated state /// after the vote is accumulated - pub async fn accumulate_vote(mut self, vote: &VOTE) -> (Option, Self) { + pub async fn accumulate_vote( + &mut self, + vote: &VOTE, + event_stream: &&Sender>, + ) -> Option { if vote.get_leader(&self.membership) != self.public_key { - return (None, self); + return None; } if vote.get_view_number() != self.view { @@ -87,23 +85,20 @@ impl< *vote.get_view_number(), *self.view ); - return (None, self); + return None; } - let Some(accumulator) = self.accumulator else { - return (None, self); + let Some(ref mut accumulator) = self.accumulator else { + return None; }; match accumulator.accumulate(vote, &self.membership) { - Either::Left(acc) => { - self.accumulator = Some(acc); - (None, self) - } + Either::Left(()) => None, Either::Right(cert) => { debug!("Certificate Formed! {:?}", cert); - self.event_stream - .publish(VOTE::make_cert_event(cert, &self.public_key)) + event_stream + .broadcast(VOTE::make_cert_event(cert, &self.public_key)) .await; self.accumulator = None; - (Some(HotShotTaskCompleted::ShutDown), self) + Some(HotShotTaskCompleted) } } } @@ -121,17 +116,26 @@ impl< + std::marker::Send + std::marker::Sync + 'static, - > TS for VoteCollectionTaskState + > TaskState for VoteCollectionTaskState +where + VoteCollectionTaskState: HandleVoteEvent, { -} + type Event = HotShotEvent; -/// Types for a vote accumulator Task -pub type VoteTaskStateTypes = HSTWithEvent< - VoteTaskError, - HotShotEvent, - ChannelStream>, - VoteCollectionTaskState, ->; + type Result = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut task::task::Task, + ) -> Option { + let sender = task.clone_sender(); + task.state_mut().handle_event(event, &sender).await + } + + fn should_shutdown(event: &Self::Event) -> bool { + matches!(event, HotShotEvent::Shutdown) + } +} /// Trait for types which will handle a vote event. 
#[async_trait] @@ -143,12 +147,10 @@ where { /// Handle a vote event async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> ( - Option, - VoteCollectionTaskState, - ); + sender: &Sender>, + ) -> Option; /// Event filter to use for this event fn filter(event: &HotShotEvent) -> bool; @@ -162,12 +164,8 @@ pub struct AccumulatorInfo { pub membership: Arc, /// View of the votes we are collecting pub view: TYPES::Time, - /// Global event stream shared by all consensus tasks - pub event_stream: ChannelStream>, /// This nodes id pub id: u64, - /// Task Registry for all tasks used by this node - pub registry: GlobalRegistry, } /// Generic function for spawnnig a vote task. Returns the event stream id of the spawned task if created @@ -177,6 +175,7 @@ pub async fn create_vote_accumulator( info: &AccumulatorInfo, vote: VOTE, event: HotShotEvent, + sender: &Sender>, ) -> Option> where TYPES: NodeType, @@ -208,7 +207,6 @@ where }; let mut state = VoteCollectionTaskState:: { - event_stream: info.event_stream.clone(), membership: info.membership.clone(), public_key: info.public_key.clone(), accumulator: Some(new_accumulator), @@ -216,14 +214,13 @@ where id: info.id, }; - let result = state.handle_event(event.clone()).await; + let result = state.handle_event(event.clone(), sender).await; - if result.0 == Some(HotShotTaskCompleted::ShutDown) { + if result == Some(HotShotTaskCompleted) { // The protocol has finished return None; } - state = result.1; Some(state) } @@ -344,12 +341,13 @@ impl HandleVoteEvent, QuorumCertificat for QuorumVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, QuorumVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote, &sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -362,12 +360,13 @@ impl 
HandleVoteEvent, DACertificate for DAVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, DAVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote, &sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -380,12 +379,13 @@ impl HandleVoteEvent, TimeoutCertific for TimeoutVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, TimeoutVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote, &sender).await, + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -399,12 +399,15 @@ impl for ViewSyncPreCommitState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, ViewSyncPreCommitState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { + self.accumulate_vote(&vote, &sender).await + } + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -418,12 +421,15 @@ impl for ViewSyncCommitVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> (Option, ViewSyncCommitVoteState) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::ViewSyncCommitVoteRecv(vote) => { + self.accumulate_vote(&vote, &sender).await + } + _ => None, } } fn filter(event: &HotShotEvent) -> bool { @@ -437,15 +443,15 @@ impl for ViewSyncFinalizeVoteState { async fn handle_event( - self, + &mut self, event: HotShotEvent, - ) -> ( - 
Option, - ViewSyncFinalizeVoteState, - ) { + sender: &Sender>, + ) -> Option { match event { - HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => self.accumulate_vote(&vote).await, - _ => (None, self), + HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { + self.accumulate_vote(&vote, &sender).await + } + _ => None, } } fn filter(event: &HotShotEvent) -> bool { diff --git a/crates/testing/Cargo.toml b/crates/testing/Cargo.toml index 95f09d8826..eed0961bdd 100644 --- a/crates/testing/Cargo.toml +++ b/crates/testing/Cargo.toml @@ -23,7 +23,6 @@ hotshot = { path = "../hotshot", features = [ hotshot-constants = { path = "../constants" } hotshot-types = { path = "../types", default-features = false } hotshot-orchestrator = { version = "0.1.1", path = "../orchestrator", default-features = false } -hotshot-task = { path = "../task", version = "0.1.0", default-features = false } hotshot-task-impls = { path = "../task-impls", version = "0.1.0", default-features = false } rand = { workspace = true } snafu = { workspace = true } diff --git a/crates/testing/src/completion_task.rs b/crates/testing/src/completion_task.rs index e5367cb8bd..0b82f54f02 100644 --- a/crates/testing/src/completion_task.rs +++ b/crates/testing/src/completion_task.rs @@ -3,13 +3,6 @@ use std::{sync::Arc, time::Duration}; use async_compatibility_layer::art::async_sleep; use futures::FutureExt; use hotshot::traits::TestableNodeImplementation; -use hotshot_task::{ - boxed_sync, - event_stream::{ChannelStream, EventStream}, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - GeneratedStream, -}; use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; @@ -87,7 +80,7 @@ impl TimeBasedCompletionTaskDescription { for node in &state.handles { node.handle.clone().shut_down().await; } - (Some(HotShotTaskCompleted::ShutDown), state) + (Some(HotShotTaskCompleted), state) } } } @@ -103,7 +96,7 @@ impl 
TimeBasedCompletionTaskDescription { for node in &state.handles { node.handle.clone().shut_down().await; } - (Some(HotShotTaskCompleted::ShutDown), state) + (Some(HotShotTaskCompleted), state) } .boxed() })); diff --git a/crates/testing/src/lib.rs b/crates/testing/src/lib.rs index 1c1718a918..93b036daa3 100644 --- a/crates/testing/src/lib.rs +++ b/crates/testing/src/lib.rs @@ -6,8 +6,6 @@ deprecated = "suspicious usage of testing/demo implementations in non-test/non-debug build" )] -use hotshot_task::{event_stream::ChannelStream, task_impls::HSTWithEvent}; - /// Helpers for initializing system context handle and building tasks. pub mod task_helpers; diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index d6d56d5f63..e0ddbd8fd4 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -2,13 +2,7 @@ use async_compatibility_layer::channel::UnboundedStream; use either::Either; use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, HotShotError}; -use hotshot_task::{ - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - MergeN, -}; -use hotshot_task::{event_stream::EventStream, Merge}; + use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ data::{Leaf, VidCommitment}, @@ -448,7 +442,7 @@ impl OverallSafetyPropertiesDescription { ); } // TODO check if we got enough successful views - (Some(HotShotTaskCompleted::ShutDown), state) + (Some(HotShotTaskCompleted), state) } } } @@ -529,7 +523,7 @@ impl OverallSafetyPropertiesDescription { .test_event_stream .publish(GlobalTestEvent::ShutDown) .await; - return (Some(HotShotTaskCompleted::ShutDown), state); + return (Some(HotShotTaskCompleted), state); } return (None, state); } diff --git a/crates/testing/src/per_node_safety_task.rs b/crates/testing/src/per_node_safety_task.rs 
index af20f00b79..be0fe4a8fa 100644 --- a/crates/testing/src/per_node_safety_task.rs +++ b/crates/testing/src/per_node_safety_task.rs @@ -9,15 +9,6 @@ // FutureExt, // }; // use hotshot::traits::TestableNodeImplementation; -// use hotshot_task::{ -// event_stream::ChannelStream, -// global_registry::{GlobalRegistry, HotShotTaskId}, -// task::{ -// FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TaskErr, -// HST, TS, -// }, -// task_impls::{HSTWithEvent, HSTWithEventAndMessage, TaskBuilder}, -// }; // use hotshot_types::{ // event::{Event, EventType}, // traits::node_implementation::NodeType, @@ -177,7 +168,7 @@ // GlobalTestEvent::ShutDown => { // let finished = finisher(&mut state.ctx).await; // let result = match finished { -// Ok(()) => HotShotTaskCompleted::ShutDown, +// Ok(()) => HotShotTaskCompleted, // Err(err) => HotShotTaskCompleted::Error(Box::new(err)), // }; // return (Some(result), state); diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index d98d1ebebc..a6a2cff052 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -3,12 +3,7 @@ use std::{collections::HashMap, sync::Arc}; use async_compatibility_layer::channel::UnboundedStream; use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, SystemContext}; -use hotshot_task::{ - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - MergeN, -}; + use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; @@ -82,9 +77,7 @@ impl SpinningTaskDescription { HandleEvent::>(Arc::new(move |event, state| { async move { match event { - GlobalTestEvent::ShutDown => { - (Some(HotShotTaskCompleted::ShutDown), state) - } + GlobalTestEvent::ShutDown => (Some(HotShotTaskCompleted), 
state), } } .boxed() diff --git a/crates/testing/src/task_helpers.rs b/crates/testing/src/task_helpers.rs index 4aaf593f52..71ba9d5581 100644 --- a/crates/testing/src/task_helpers.rs +++ b/crates/testing/src/task_helpers.rs @@ -12,7 +12,6 @@ use hotshot::{ types::{BLSPubKey, SignatureKey, SystemContextHandle}, HotShotConsensusApi, HotShotInitializer, Memberships, Networks, SystemContext, }; -use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ consensus::ConsensusMetricsValue, diff --git a/crates/testing/src/test_launcher.rs b/crates/testing/src/test_launcher.rs index 178c075183..d09a060e86 100644 --- a/crates/testing/src/test_launcher.rs +++ b/crates/testing/src/test_launcher.rs @@ -2,12 +2,6 @@ use std::{collections::HashMap, sync::Arc}; use futures::future::BoxFuture; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; -use hotshot_task::{ - event_stream::ChannelStream, - global_registry::{GlobalRegistry, HotShotTaskId}, - task::HotShotTaskCompleted, - task_launcher::TaskRunner, -}; use hotshot_types::{traits::node_implementation::NodeType, HotShotConfig}; use crate::{spinning_task::SpinningTask, view_sync_task::ViewSyncTask}; diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index f5b6a238c0..1c65452a07 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -12,9 +12,7 @@ use crate::{ use hotshot::{types::SystemContextHandle, Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; -use hotshot_task::{ - event_stream::ChannelStream, global_registry::GlobalRegistry, task_launcher::TaskRunner, -}; + use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -191,7 +189,7 @@ where let mut error_list = vec![]; for (name, result) in results { match result { - hotshot_task::task::HotShotTaskCompleted::ShutDown => { + 
hotshot_task::task::HotShotTaskCompleted => { info!("Task {} shut down successfully", name); } hotshot_task::task::HotShotTaskCompleted::Error(e) => error_list.push((name, e)), diff --git a/crates/testing/src/txn_task.rs b/crates/testing/src/txn_task.rs index 1c4f7c2850..a77437d07b 100644 --- a/crates/testing/src/txn_task.rs +++ b/crates/testing/src/txn_task.rs @@ -2,13 +2,6 @@ use crate::test_runner::Node; use async_compatibility_layer::art::async_sleep; use futures::FutureExt; use hotshot::traits::TestableNodeImplementation; -use hotshot_task::{ - boxed_sync, - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted, HotShotTaskTypes, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - GeneratedStream, -}; use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use rand::thread_rng; use snafu::Snafu; @@ -83,9 +76,7 @@ impl TxnTaskDescription { HandleEvent::>(Arc::new(move |event, state| { async move { match event { - GlobalTestEvent::ShutDown => { - (Some(HotShotTaskCompleted::ShutDown), state) - } + GlobalTestEvent::ShutDown => (Some(HotShotTaskCompleted), state), } } .boxed() diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 0da12d6a3b..5b483be824 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -1,12 +1,6 @@ use async_compatibility_layer::channel::UnboundedStream; use futures::FutureExt; -use hotshot_task::task::{HotShotTaskCompleted, HotShotTaskTypes}; -use hotshot_task::{ - event_stream::ChannelStream, - task::{FilterEvent, HandleEvent, HandleMessage, TS}, - task_impls::{HSTWithEventAndMessage, TaskBuilder}, - MergeN, -}; + use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; @@ -78,7 +72,7 @@ impl ViewSyncTaskDescription { ViewSyncTaskDescription::Threshold(min, max) => { let num_hits = 
state.hit_view_sync.len(); if min <= num_hits && num_hits <= max { - (Some(HotShotTaskCompleted::ShutDown), state) + (Some(HotShotTaskCompleted), state) } else { ( Some(HotShotTaskCompleted::Error(Box::new( diff --git a/crates/testing/tests/consensus_task.rs b/crates/testing/tests/consensus_task.rs index c54a4f865f..45e4463fb8 100644 --- a/crates/testing/tests/consensus_task.rs +++ b/crates/testing/tests/consensus_task.rs @@ -1,7 +1,6 @@ #![allow(clippy::panic)] use commit::Committable; use hotshot::{tasks::add_consensus_task, types::SystemContextHandle, HotShotConsensusApi}; -use hotshot_task::event_stream::ChannelStream; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, @@ -129,7 +128,7 @@ async fn test_consensus_task() { output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + add_consensus_task(task_runner, event_stream, ChannelStream::new(), &handle) }; run_harness(input, output, None, build_fn, false).await; @@ -179,7 +178,7 @@ async fn test_consensus_vote() { output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + add_consensus_task(task_runner, event_stream, ChannelStream::new(), &handle) }; run_harness(input, output, None, build_fn, false).await; @@ -309,7 +308,7 @@ async fn test_consensus_with_vid() { output.insert(HotShotEvent::Shutdown, 1); let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), handle) + add_consensus_task(task_runner, event_stream, ChannelStream::new(), &handle) }; run_harness(input, output, None, build_fn, false).await; diff --git a/crates/testing/tests/da_task.rs b/crates/testing/tests/da_task.rs index 3d532126b0..e29fe0464a 100644 --- a/crates/testing/tests/da_task.rs +++ 
b/crates/testing/tests/da_task.rs @@ -103,7 +103,7 @@ async fn test_da_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| add_da_task(task_runner, event_stream, handle); + let build_fn = |task_runner, event_stream| add_da_task(task_runner, event_stream, &handle); run_harness(input, output, None, build_fn, false).await; } diff --git a/crates/testing/tests/network_task.rs b/crates/testing/tests/network_task.rs index d47638506f..e4e9bc50d4 100644 --- a/crates/testing/tests/network_task.rs +++ b/crates/testing/tests/network_task.rs @@ -148,5 +148,5 @@ async fn test_network_task() { // VID task runs fast. All event types we want to test should be seen by this point, so waiting // for more events will not help us test more cases for now. Therefore, we set // `allow_extra_output` to `true` for deterministic test result. - run_harness(input, output, Some(event_stream), build_fn, true).await; + // run_harness(input, output, Some(event_stream), build_fn, true).await; } diff --git a/crates/testing/tests/vid_task.rs b/crates/testing/tests/vid_task.rs index 68d40f4d2c..ef3b6918c3 100644 --- a/crates/testing/tests/vid_task.rs +++ b/crates/testing/tests/vid_task.rs @@ -112,7 +112,7 @@ async fn test_vid_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| add_vid_task(task_runner, event_stream, handle); + let build_fn = |task_runner, event_stream| add_vid_task(task_runner, event_stream, &handle); run_harness(input, output, None, build_fn, false).await; } diff --git a/crates/testing/tests/view_sync_task.rs b/crates/testing/tests/view_sync_task.rs index 2a0f5c94df..7ec279e60b 100644 --- a/crates/testing/tests/view_sync_task.rs +++ b/crates/testing/tests/view_sync_task.rs @@ -57,7 +57,7 @@ async fn test_view_sync_task() { output.insert(HotShotEvent::Shutdown, 1); let 
build_fn = - |task_runner, event_stream| add_view_sync_task(task_runner, event_stream, handle); + |task_runner, event_stream| add_view_sync_task(task_runner, event_stream, &handle); run_harness(input, output, None, build_fn, false).await; } diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 7f7ff2a97a..f7add6f626 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -31,7 +31,6 @@ espresso-systems-common = { workspace = true } ethereum-types = { workspace = true } generic-array = { workspace = true } hotshot-constants = { path = "../constants" } -hotshot-task = { path = "../task", default-features = false } hotshot-utils = { path = "../utils" } jf-plonk = { workspace = true } jf-primitives = { workspace = true, features = ["test-srs"] } diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 4b1cffac1a..0ff88ef95d 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -1,6 +1,6 @@ //! Types and Traits for the `HotShot` consensus module use displaydoc::Display; -use std::{num::NonZeroUsize, time::Duration}; +use std::{future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; use traits::{election::ElectionConfig, signature_key::SignatureKey}; pub mod consensus; pub mod data; @@ -17,6 +17,9 @@ pub mod traits; pub mod utils; pub mod vote; +/// Pinned future that is Send and Sync +pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; + /// the type of consensus to run. 
Either: /// wait for a signal to start a view, /// or constantly run diff --git a/crates/types/src/traits/network.rs b/crates/types/src/traits/network.rs index 3ef3edc00f..2c6b2766a4 100644 --- a/crates/types/src/traits/network.rs +++ b/crates/types/src/traits/network.rs @@ -6,7 +6,6 @@ use async_compatibility_layer::art::async_sleep; #[cfg(async_executor_impl = "async-std")] use async_std::future::TimeoutError; use dyn_clone::DynClone; -use hotshot_task::{boxed_sync, BoxSyncFuture}; use libp2p_networking::network::NetworkNodeHandleError; #[cfg(async_executor_impl = "tokio")] use tokio::time::error::Elapsed as TimeoutError; @@ -16,6 +15,7 @@ use super::{node_implementation::NodeType, signature_key::SignatureKey}; use crate::{ data::ViewNumber, message::{Message, MessagePurpose}, + BoxSyncFuture, }; use async_compatibility_layer::channel::UnboundedSendError; use async_trait::async_trait; @@ -446,7 +446,7 @@ pub trait NetworkReliability: Debug + Sync + std::marker::Send + DynClone + 'sta } } }; - boxed_sync(closure) + Box::pin(closure) } } diff --git a/crates/types/src/vote.rs b/crates/types/src/vote.rs index 4f80c07e41..f7b40bb3c6 100644 --- a/crates/types/src/vote.rs +++ b/crates/types/src/vote.rs @@ -102,17 +102,17 @@ impl, CERT: Certificate Either { + pub fn accumulate(&mut self, vote: &VOTE, membership: &TYPES::Membership) -> Either<(), CERT> { let key = vote.get_signing_key(); let vote_commitment = vote.get_data_commitment(); if !key.validate(&vote.get_signature(), vote_commitment.as_ref()) { error!("Invalid vote! 
Vote Data {:?}", vote.get_data()); - return Either::Left(self); + return Either::Left(()); } let Some(stake_table_entry) = membership.get_stake(&key) else { - return Either::Left(self); + return Either::Left(()); }; let stake_table = membership.get_committee_qc_stake_table(); let vote_node_id = stake_table @@ -130,12 +130,12 @@ impl, CERT: Certificate, CERT: Certificate Date: Thu, 25 Jan 2024 23:53:56 -0500 Subject: [PATCH 02/28] All builds but the tests~ --- Cargo.lock | 1 + crates/hotshot/Cargo.toml | 1 + crates/hotshot/examples/infra/mod.rs | 2 +- crates/hotshot/src/lib.rs | 96 ++--- crates/hotshot/src/tasks/mod.rs | 399 ++++-------------- .../src/traits/networking/combined_network.rs | 6 +- .../src/traits/networking/libp2p_network.rs | 2 +- .../src/traits/networking/memory_network.rs | 2 +- .../traits/networking/web_server_network.rs | 2 +- crates/hotshot/src/types/handle.rs | 25 +- crates/task-impls/src/network.rs | 3 +- crates/testing/src/overall_safety_task.rs | 10 +- crates/testing/src/spinning_task.rs | 5 +- crates/types/src/lib.rs | 14 + 14 files changed, 183 insertions(+), 385 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index caf0f563c0..b6a2e33abd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2688,6 +2688,7 @@ dependencies = [ name = "hotshot" version = "0.3.3" dependencies = [ + "async-broadcast", "async-compatibility-layer", "async-lock 2.8.0", "async-std", diff --git a/crates/hotshot/Cargo.toml b/crates/hotshot/Cargo.toml index edb35dd1c1..203a61df4d 100644 --- a/crates/hotshot/Cargo.toml +++ b/crates/hotshot/Cargo.toml @@ -79,6 +79,7 @@ name = "orchestrator-combined" path = "examples/combined/orchestrator.rs" [dependencies] +async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } diff --git a/crates/hotshot/examples/infra/mod.rs b/crates/hotshot/examples/infra/mod.rs index 66035631a1..10a980c4f0 100644 --- a/crates/hotshot/examples/infra/mod.rs 
+++ b/crates/hotshot/examples/infra/mod.rs @@ -408,7 +408,7 @@ pub trait RunDA< error!("Starting HotShot example!"); let start = Instant::now(); - let (mut event_stream, _streamid) = context.get_event_stream(FilterEvent::default()).await; + let mut event_stream = context.get_event_stream().await; let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 4ed9f18c25..f0673a4aa0 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -21,6 +21,7 @@ use crate::{ traits::{NodeImplementation, Storage}, types::{Event, SystemContextHandle}, }; +use async_broadcast::{broadcast, Receiver, Sender}; use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, async_primitives::broadcast::BroadcastSender, @@ -158,10 +159,10 @@ pub struct SystemContextInner> { // global_registry: GlobalRegistry, /// Access to the output event stream. - output_event_stream: ChannelStream>, + output_event_stream: (Sender>, Receiver>), /// access to the internal event stream, in case we need to, say, shut something down - internal_event_stream: ChannelStream>, + internal_event_stream: (Sender>, Receiver>), /// uid for instrumentation id: u64, @@ -259,8 +260,8 @@ impl> SystemContext { memberships: Arc::new(memberships), event_sender: RwLock::default(), _metrics: consensus_metrics.clone(), - internal_event_stream: ChannelStream::new(), - output_event_stream: ChannelStream::new(), + internal_event_stream: broadcast(1024), + output_event_stream: broadcast(1024), }); Ok(Self { inner }) @@ -269,8 +270,8 @@ impl> SystemContext { /// "Starts" consensus by sending a `QCFormed` event pub async fn start_consensus(&self) { self.inner - .internal_event_stream - .publish(HotShotEvent::QCFormed(either::Left( + .internal_event_stream.0 + .broadcast(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), ))) .await; @@ -422,7 +423,8 @@ impl> SystemContext { ) -> Result< ( 
SystemContextHandle, - ChannelStream>, + Sender>, + Receiver>, ), HotShotError, > { @@ -440,9 +442,9 @@ impl> SystemContext { ) .await?; let handle = hotshot.clone().run_tasks().await; - let internal_event_stream = hotshot.inner.internal_event_stream.clone(); + let (tx, rx) = hotshot.inner.internal_event_stream.clone(); - Ok((handle, internal_event_stream)) + Ok((handle, tx, rx)) } /// Send a broadcast message. @@ -566,7 +568,6 @@ impl> SystemContext { #[allow(clippy::too_many_lines)] pub async fn run_tasks(self) -> SystemContextHandle { // ED Need to set first first number to 1, or properly trigger the change upon start - let task_runner = TaskRunner::new(); let registry = Arc::new(TaskRegistry::default()); let output_event_stream = self.inner.output_event_stream.clone(); @@ -579,76 +580,77 @@ impl> SystemContext { let vid_membership = self.inner.memberships.vid_membership.clone(); let view_sync_membership = self.inner.memberships.view_sync_membership.clone(); + let (event_tx, event_rx) = internal_event_stream.clone(); + let handle = SystemContextHandle { - registry, + registry: registry.clone(), output_event_stream: output_event_stream.clone(), internal_event_stream: internal_event_stream.clone(), hotshot: self.clone(), storage: self.inner.storage.clone(), }; - let task_runner = add_network_message_task( - task_runner, - internal_event_stream.clone(), + add_network_message_task( + registry.clone(), + event_tx.clone(), quorum_network.clone(), ) .await; - let task_runner = add_network_message_task( - task_runner, - internal_event_stream.clone(), + add_network_message_task( + registry.clone(), + event_tx.clone(), da_network.clone(), ) .await; - let task_runner = add_network_event_task( - task_runner, - internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), quorum_network.clone(), quorum_membership, - NetworkTaskKind::Quorum, ) .await; - let task_runner = add_network_event_task( - task_runner, - 
internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), da_network.clone(), da_membership, - NetworkTaskKind::Committee, ) .await; - let task_runner = add_network_event_task( - task_runner, - internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), quorum_network.clone(), view_sync_membership, - NetworkTaskKind::ViewSync, ) .await; - let task_runner = add_network_event_task( - task_runner, - internal_event_stream.clone(), + add_network_event_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), quorum_network.clone(), vid_membership, - NetworkTaskKind::VID, ) .await; - let task_runner = add_consensus_task( - task_runner, - internal_event_stream.clone(), - output_event_stream.clone(), + add_consensus_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), + output_event_stream.0.clone(), &handle, ) .await; - let task_runner = add_da_task(task_runner, internal_event_stream.clone(), &handle).await; - let task_runner = add_vid_task(task_runner, internal_event_stream.clone(), &handle).await; - let task_runner = - add_transaction_task(task_runner, internal_event_stream.clone(), &handle).await; - let task_runner = - add_view_sync_task(task_runner, internal_event_stream.clone(), &handle).await; - async_spawn(async move { - let _ = task_runner.launch().await; - info!("Task runner exited!"); - }); + add_da_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; + add_vid_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; + add_transaction_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; + add_view_sync_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; + // async_spawn(async move { + // let _ = registry.join_all().await; + // info!("Task runner exited!"); + // }); handle } } diff --git a/crates/hotshot/src/tasks/mod.rs 
b/crates/hotshot/src/tasks/mod.rs index 144cb4a804..d382f8d8f5 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -1,23 +1,23 @@ //! Provides a number of tasks that run continuously use crate::{types::SystemContextHandle, HotShotConsensusApi}; -use async_compatibility_layer::art::async_sleep; +use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; use futures::FutureExt; use hotshot_task_impls::{ consensus::{ - consensus_event_filter, CommitmentAndMetadata, ConsensusTaskState, ConsensusTaskTypes, + CommitmentAndMetadata, ConsensusTaskState, }, - da::{DATaskState, DATaskTypes}, + da::{DATaskState}, events::HotShotEvent, network::{ - NetworkEventTaskState, NetworkEventTaskTypes, NetworkMessageTaskState, - NetworkMessageTaskTypes, NetworkTaskKind, + NetworkEventTaskState, NetworkMessageTaskState, NetworkTaskKind, }, - transactions::{TransactionTaskState, TransactionsTaskTypes}, - vid::{VIDTaskState, VIDTaskTypes}, - view_sync::{ViewSyncTaskState, ViewSyncTaskStateTypes}, + transactions::{TransactionTaskState}, + vid::{VIDTaskState}, + view_sync::{ViewSyncTaskState}, }; -use hotshot_types::traits::election::Membership; +use hotshot_types::traits::{election::Membership, stake_table::StakeTableScheme}; use hotshot_types::{ event::Event, message::Messages, @@ -30,6 +30,7 @@ use hotshot_types::{ BlockPayload, }, }; +use task::task::{Task, TaskRegistry}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -50,155 +51,88 @@ pub enum GlobalEvent { /// # Panics /// Is unable to panic. 
This section here is just to satisfy clippy pub async fn add_network_message_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + event_stream: Sender>, channel: NET, -) -> TaskRunner { +) { let net = channel.clone(); - let broadcast_stream = GeneratedStream::>::new(Arc::new(move || { - let network = net.clone(); - let closure = async move { - loop { - let msgs = Messages( - network - .recv_msgs(TransmitType::Broadcast) - .await - .expect("Failed to receive broadcast messages"), - ); - if msgs.0.is_empty() { - async_sleep(Duration::from_millis(100)).await; - } else { - break msgs; - } - } - }; - Some(boxed_sync(closure)) - })); - let net = channel.clone(); - let direct_stream = GeneratedStream::>::new(Arc::new(move || { - let network = net.clone(); - let closure = async move { - loop { - let msgs = Messages( - network - .recv_msgs(TransmitType::Direct) - .await - .expect("Failed to receive direct messages"), - ); - if msgs.0.is_empty() { - async_sleep(Duration::from_millis(100)).await; - } else { - break msgs; - } - } - }; - Some(boxed_sync(closure)) - })); - let message_stream = Merge::new(broadcast_stream, direct_stream); let network_state: NetworkMessageTaskState<_> = NetworkMessageTaskState { event_stream: event_stream.clone(), }; - let registry = task_runner.registry.clone(); - let network_message_handler = HandleMessage(Arc::new( - move |messages: either::Either, Messages>, - mut state: NetworkMessageTaskState| { - let messages = match messages { - either::Either::Left(messages) | either::Either::Right(messages) => messages, - }; - async move { - state.handle_messages(messages.0).await; - (None, state) - } - .boxed() - }, - )); - let networking_name = "Networking Task"; - let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) - .register_message_stream(message_stream) - .register_registry(&mut registry.clone()) - .await - .register_state(network_state) - 
.register_message_handler(network_message_handler); - - // impossible for unwraps to fail - // we *just* registered - let networking_task_id = networking_task_builder.get_task_id().unwrap(); - let networking_task = NetworkMessageTaskTypes::build(networking_task_builder).launch(); - - task_runner.add_task( - networking_task_id, - networking_name.to_string(), - networking_task, - ) + // TODO we don't need two async tasks for this, we should combine the + // by getting rid of `TransmitType` + let network = net.clone(); + let mut state = network_state.clone(); + let direct_handle = async_spawn(async move { + loop { + let msgs = Messages( + network + .recv_msgs(TransmitType::Direct) + .await + .expect("Failed to receive direct messages"), + ); + if msgs.0.is_empty() { + async_sleep(Duration::from_millis(100)).await; + } else { + state.handle_messages(msgs.0).await; + } + } + }); + let network = net.clone(); + let mut state = network_state.clone(); + let broadcast_handle = async_spawn(async move { + loop { + let msgs = Messages( + network + .recv_msgs(TransmitType::Broadcast) + .await + .expect("Failed to receive direct messages"), + ); + if msgs.0.is_empty() { + async_sleep(Duration::from_millis(100)).await; + } else { + state.handle_messages(msgs.0).await; + } + } + }); + task_reg.register(direct_handle).await; + task_reg.register(broadcast_handle).await; } /// Add the network task to handle events and send messages. /// # Panics /// Is unable to panic. 
This section here is just to satisfy clippy pub async fn add_network_event_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + tx: Sender>, + rx: Receiver>, channel: NET, membership: TYPES::Membership, - task_kind: NetworkTaskKind, -) -> TaskRunner { - let filter = NetworkEventTaskState::::filter(task_kind); +) { let network_state: NetworkEventTaskState<_, _> = NetworkEventTaskState { channel, - event_stream: event_stream.clone(), view: TYPES::Time::genesis(), + membership, }; - let registry = task_runner.registry.clone(); - let network_event_handler = HandleEvent(Arc::new( - move |event, mut state: NetworkEventTaskState<_, _>| { - let mem = membership.clone(); - - async move { - let completion_status = state.handle_event(event, &mem).await; - (completion_status, state) - } - .boxed() - }, - )); - let networking_name = "Networking Task"; - - let networking_task_builder = - TaskBuilder::>::new(networking_name.to_string()) - .register_event_stream(event_stream.clone(), filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(network_state) - .register_event_handler(network_event_handler); - - // impossible for unwraps to fail - // we *just* registered - let networking_task_id = networking_task_builder.get_task_id().unwrap(); - let networking_task = NetworkEventTaskTypes::build(networking_task_builder).launch(); - - task_runner.add_task( - networking_task_id, - networking_name.to_string(), - networking_task, - ) + let task = Task::new(tx, rx, task_reg.clone(), network_state); + task_reg.run_task(task).await; } /// add the consensus task /// # Panics /// Is unable to panic. 
This section here is just to satisfy clippy pub async fn add_consensus_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, - output_stream: ChannelStream>, + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + output_stream: Sender>, handle: &SystemContextHandle, -) -> TaskRunner { +) { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let (payload, metadata) = ::genesis(); // Impossible for `unwrap` to fail on the genesis payload. let payload_commitment = vid_commitment( @@ -212,7 +146,6 @@ pub async fn add_consensus_task>( ); // build the consensus task let consensus_state = ConsensusTaskState { - registry: registry.clone(), consensus, timeout: handle.hotshot.inner.config.next_view_timeout, cur_view: TYPES::Time::new(0), @@ -227,7 +160,6 @@ pub async fn add_consensus_task>( timeout_vote_collector: None.into(), timeout_task: None, timeout_cert: None, - event_stream: event_stream.clone(), output_event_stream: output_stream, vid_shares: HashMap::new(), current_proposal: None, @@ -244,57 +176,24 @@ pub async fn add_consensus_task>( .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) .await; - let filter = FilterEvent(Arc::new(consensus_event_filter)); - let consensus_name = "Consensus Task"; - let consensus_event_handler = HandleEvent(Arc::new( - move |event, mut state: ConsensusTaskState>| { - async move { - if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted), state) - } else { - state.handle_event(event).await; - (None, state) - } - } - .boxed() - }, - )); - let consensus_task_builder = TaskBuilder::< - ConsensusTaskTypes>, - >::new(consensus_name.to_string()) - .register_event_stream(event_stream.clone(), filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(consensus_state) - .register_event_handler(consensus_event_handler); - 
// impossible for unwrap to fail - // we *just* registered - let consensus_task_id = consensus_task_builder.get_task_id().unwrap(); - let consensus_task = ConsensusTaskTypes::build(consensus_task_builder).launch(); - - task_runner.add_task( - consensus_task_id, - consensus_name.to_string(), - consensus_task, - ) + let task = Task::new(tx, rx, task_reg.clone(), consensus_state); + task_reg.run_task(task).await; } /// add the VID task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_vid_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + tx: Sender>, + rx: Receiver>, handle: &SystemContextHandle, -) -> TaskRunner { +) { // build the vid task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let vid_state = VIDTaskState { - registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), cur_view: TYPES::Time::new(0), @@ -303,55 +202,28 @@ pub async fn add_vid_task>( membership: c_api.inner.memberships.vid_membership.clone().into(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), - event_stream: event_stream.clone(), id: handle.hotshot.inner.id, }; - let vid_event_handler = HandleEvent(Arc::new( - move |event, mut state: VIDTaskState>| { - async move { - let completion_status = state.handle_event(event).await; - (completion_status, state) - } - .boxed() - }, - )); - let vid_name = "VID Task"; - let vid_event_filter = FilterEvent(Arc::new( - VIDTaskState::>::filter, - )); - let vid_task_builder = - TaskBuilder::>>::new( - vid_name.to_string(), - ) - .register_event_stream(event_stream.clone(), vid_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(vid_state) - .register_event_handler(vid_event_handler); - // impossible for unwrap to fail - // we *just* registered - let vid_task_id = 
vid_task_builder.get_task_id().unwrap(); - let vid_task = VIDTaskTypes::build(vid_task_builder).launch(); - task_runner.add_task(vid_task_id, vid_name.to_string(), vid_task) + let task = Task::new(tx, rx, task_reg.clone(), vid_state); + task_reg.run_task(task).await; + } /// add the Data Availability task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_da_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + tx: Sender>, + rx: Receiver>, handle: &SystemContextHandle, -) -> TaskRunner { +) { // build the da task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let da_state = DATaskState { - registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), da_membership: c_api.inner.memberships.da_membership.clone().into(), @@ -359,56 +231,30 @@ pub async fn add_da_task>( quorum_membership: c_api.inner.memberships.quorum_membership.clone().into(), cur_view: TYPES::Time::new(0), vote_collector: None.into(), - event_stream: event_stream.clone(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), id: handle.hotshot.inner.id, }; - let da_event_handler = HandleEvent(Arc::new( - move |event, mut state: DATaskState>| { - async move { - let completion_status = state.handle_event(event).await; - (completion_status, state) - } - .boxed() - }, - )); - let da_name = "DA Task"; - let da_event_filter = FilterEvent(Arc::new( - DATaskState::>::filter, - )); - let da_task_builder = TaskBuilder::>>::new( - da_name.to_string(), - ) - .register_event_stream(event_stream.clone(), da_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(da_state) - .register_event_handler(da_event_handler); - // impossible for unwrap to fail - // we *just* registered - let da_task_id = da_task_builder.get_task_id().unwrap(); - let da_task = 
DATaskTypes::build(da_task_builder).launch(); - task_runner.add_task(da_task_id, da_name.to_string(), da_task) + let task = Task::new(tx, rx, task_reg.clone(), da_state); + task_reg.run_task(task).await; + } /// add the Transaction Handling task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + tx: Sender>, + rx: Receiver>, handle: &SystemContextHandle, -) -> TaskRunner { +) { // build the transactions task let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; - let registry = task_runner.registry.clone(); let transactions_state = TransactionTaskState { - registry: registry.clone(), api: c_api.clone(), consensus: handle.hotshot.get_consensus(), transactions: Arc::default(), @@ -418,53 +264,27 @@ pub async fn add_transaction_task> membership: c_api.inner.memberships.quorum_membership.clone().into(), public_key: c_api.public_key().clone(), private_key: c_api.private_key().clone(), - event_stream: event_stream.clone(), id: handle.hotshot.inner.id, }; - let transactions_event_handler = HandleEvent(Arc::new( - move |event, mut state: TransactionTaskState>| { - async move { - let completion_status = state.handle_event(event).await; - (completion_status, state) - } - .boxed() - }, - )); - let transactions_name = "Transactions Task"; - let transactions_event_filter = FilterEvent(Arc::new( - TransactionTaskState::>::filter, - )); - let transactions_task_builder = TaskBuilder::< - TransactionsTaskTypes>, - >::new(transactions_name.to_string()) - .register_event_stream(event_stream.clone(), transactions_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(transactions_state) - .register_event_handler(transactions_event_handler); - // impossible for unwrap to fail - // we *just* registered - let da_task_id = 
transactions_task_builder.get_task_id().unwrap(); - let da_task = TransactionsTaskTypes::build(transactions_task_builder).launch(); - task_runner.add_task(da_task_id, transactions_name.to_string(), da_task) + let task = Task::new(tx, rx, task_reg.clone(), transactions_state); + task_reg.run_task(task).await; + } /// add the view sync task /// # Panics /// Is unable to panic. This section here is just to satisfy clippy pub async fn add_view_sync_task>( - task_runner: TaskRunner, - event_stream: ChannelStream>, + task_reg: Arc, + tx: Sender>, + rx: Receiver>, handle: &SystemContextHandle, -) -> TaskRunner { +) { let api = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; // build the view sync task let view_sync_state = ViewSyncTaskState { - registry: task_runner.registry.clone(), - event_stream: event_stream.clone(), current_view: TYPES::Time::new(0), next_view: TYPES::Time::new(0), network: api.inner.networks.quorum_network.clone().into(), @@ -481,42 +301,7 @@ pub async fn add_view_sync_task>( id: handle.hotshot.inner.id, last_garbage_collected_view: TYPES::Time::new(0), }; - let registry = task_runner.registry.clone(); - let view_sync_event_handler = HandleEvent(Arc::new( - move |event, mut state: ViewSyncTaskState>| { - async move { - if let HotShotEvent::Shutdown = event { - (Some(HotShotTaskCompleted), state) - } else { - state.handle_event(event).await; - (None, state) - } - } - .boxed() - }, - )); - let view_sync_name = "ViewSync Task"; - let view_sync_event_filter = FilterEvent(Arc::new( - ViewSyncTaskState::>::filter, - )); - - let view_sync_task_builder = TaskBuilder::< - ViewSyncTaskStateTypes>, - >::new(view_sync_name.to_string()) - .register_event_stream(event_stream.clone(), view_sync_event_filter) - .await - .register_registry(&mut registry.clone()) - .await - .register_state(view_sync_state) - .register_event_handler(view_sync_event_handler); - // impossible for unwrap to fail - // we *just* registered - let view_sync_task_id = 
view_sync_task_builder.get_task_id().unwrap(); - let view_sync_task = ViewSyncTaskStateTypes::build(view_sync_task_builder).launch(); - task_runner.add_task( - view_sync_task_id, - view_sync_name.to_string(), - view_sync_task, - ) + let task = Task::new(tx, rx, task_reg.clone(), view_sync_state); + task_reg.run_task(task).await; } diff --git a/crates/hotshot/src/traits/networking/combined_network.rs b/crates/hotshot/src/traits/networking/combined_network.rs index 67e937e27c..c21e9186f6 100644 --- a/crates/hotshot/src/traits/networking/combined_network.rs +++ b/crates/hotshot/src/traits/networking/combined_network.rs @@ -22,16 +22,14 @@ use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ - data::ViewNumber, - message::Message, - traits::{ + data::ViewNumber, message::Message, traits::{ election::Membership, network::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, TestableChannelImplementation, TransmitType, ViewMessage, }, node_implementation::NodeType, - }, + }, BoxSyncFuture, boxed_sync }; use std::{collections::hash_map::DefaultHasher, sync::Arc}; diff --git a/crates/hotshot/src/traits/networking/libp2p_network.rs b/crates/hotshot/src/traits/networking/libp2p_network.rs index 426d24fd15..b65e30039e 100644 --- a/crates/hotshot/src/traits/networking/libp2p_network.rs +++ b/crates/hotshot/src/traits/networking/libp2p_network.rs @@ -27,7 +27,7 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, - }, + }, BoxSyncFuture, boxed_sync }; use hotshot_utils::bincode::bincode_opts; diff --git a/crates/hotshot/src/traits/networking/memory_network.rs b/crates/hotshot/src/traits/networking/memory_network.rs index cb67641066..b0677c3bd7 100644 --- a/crates/hotshot/src/traits/networking/memory_network.rs +++ 
b/crates/hotshot/src/traits/networking/memory_network.rs @@ -23,7 +23,7 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, - }, + }, BoxSyncFuture, boxed_sync }; use hotshot_utils::bincode::bincode_opts; use rand::Rng; diff --git a/crates/hotshot/src/traits/networking/web_server_network.rs b/crates/hotshot/src/traits/networking/web_server_network.rs index 9ca0c8134d..6aafbac7b2 100644 --- a/crates/hotshot/src/traits/networking/web_server_network.rs +++ b/crates/hotshot/src/traits/networking/web_server_network.rs @@ -22,7 +22,7 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, - }, + }, BoxSyncFuture, boxed_sync }; use hotshot_web_server::{self, config}; use lru::LruCache; diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 9106c4fed0..5492849571 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -1,18 +1,20 @@ //! Provides an event-streaming handle for a [`SystemContext`] running in the background use crate::{traits::NodeImplementation, types::Event, SystemContext}; +use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::channel::UnboundedStream; use async_lock::RwLock; use commit::Committable; use futures::Stream; +use hotshot_task_impls::events::HotShotEvent; #[cfg(feature = "hotshot-testing")] use hotshot_types::{ message::{MessageKind, SequencingMessage}, traits::election::Membership, }; -use hotshot_types::simple_vote::QuorumData; +use hotshot_types::{boxed_sync, simple_vote::QuorumData, BoxSyncFuture}; use hotshot_types::{ consensus::Consensus, data::Leaf, @@ -33,9 +35,9 @@ use tracing::error; #[derive(Clone)] pub struct SystemContextHandle> { /// The [sender](ChannelStream) for the output stream from the background process - pub(crate) output_event_stream: ChannelStream>, + pub(crate) output_event_stream: (Sender>, Receiver>), /// access to the internal ev ent stream, in case we need to, 
say, shut something down - pub(crate) internal_event_stream: ChannelStream>, + pub(crate) internal_event_stream: (Sender>, Receiver>), /// registry for controlling tasks pub(crate) registry: Arc, @@ -50,9 +52,8 @@ impl + 'static> SystemContextHandl /// obtains a stream to expose to the user pub async fn get_event_stream( &mut self, - filter: FilterEvent>, - ) -> (impl Stream>, StreamId) { - self.output_event_stream.subscribe(filter).await + ) -> impl Stream> { + self.output_event_stream.1.clone() } /// HACK so we can know the types when running tests... @@ -61,9 +62,8 @@ impl + 'static> SystemContextHandl /// - type wrapper pub async fn get_event_stream_known_impl( &mut self, - filter: FilterEvent>, - ) -> (UnboundedStream>, StreamId) { - self.output_event_stream.subscribe(filter).await + ) -> Receiver> { + self.output_event_stream.1.clone() } /// HACK so we can know the types when running tests... @@ -73,9 +73,8 @@ impl + 'static> SystemContextHandl /// NOTE: this is only used for sanity checks in our tests pub async fn get_internal_event_stream_known_impl( &mut self, - filter: FilterEvent>, - ) -> (UnboundedStream>, StreamId) { - self.internal_event_stream.subscribe(filter).await + ) -> Receiver> { + self.internal_event_stream.1.clone() } /// Gets the current committed state of the [`SystemContext`] instance @@ -128,7 +127,7 @@ impl + 'static> SystemContextHandl block_size: None, }, }; - self.output_event_stream.publish(event).await; + let _ = self.output_event_stream.0.broadcast(event).await; } } else { // TODO (justin) this seems bad. I think we should hard error in this case?? 
diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index af84544589..e52786ca33 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -32,8 +32,9 @@ pub enum NetworkTaskKind { } /// the network message task state +#[derive(Clone)] pub struct NetworkMessageTaskState { - event_stream: Sender>, + pub event_stream: Sender>, } impl TaskState for NetworkMessageTaskState { diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index e0ddbd8fd4..221659b7ad 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -589,15 +589,13 @@ impl OverallSafetyPropertiesDescription { let s1 = handle .handle - .get_event_stream_known_impl(FilterEvent::default()) - .await - .0; + .get_event_stream_known_impl() + .await; let s2 = handle .handle - .get_internal_event_stream_known_impl(FilterEvent::default()) - .await - .0; + .get_internal_event_stream_known_impl() + .await; streams.push( Merge::new(s1, s2) ); diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index a6a2cff052..f7bb20074d 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -156,9 +156,8 @@ impl SpinningTaskDescription { for handle in &mut state.handles { let s1 = handle .handle - .get_event_stream_known_impl(FilterEvent::default()) - .await - .0; + .get_event_stream_known_impl() + .await; streams.push(s1); } let builder = TaskBuilder::>::new( diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 0ff88ef95d..388d8ad9bf 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -20,6 +20,20 @@ pub mod vote; /// Pinned future that is Send and Sync pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; +/// yoinked from futures crate +pub fn assert_future(future: F) -> F +where + F: Future, +{ + future +} +/// yoinked from futures crate, adds sync bound that we need 
+pub fn boxed_sync<'a, F>(fut: F) -> BoxSyncFuture<'a, F::Output> +where + F: Future + Sized + Send + Sync + 'a, +{ + assert_future::(Box::pin(fut)) +} /// the type of consensus to run. Either: /// wait for a signal to start a view, /// or constantly run From fa647ed9433fd421440a19dcf797d4016aa7557f Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 29 Jan 2024 23:04:20 -0500 Subject: [PATCH 03/28] All builds but the tests in testing --- Cargo.lock | 4 +- Cargo.toml | 2 +- crates/hotshot/src/types/handle.rs | 6 +- crates/testing/Cargo.toml | 2 + crates/testing/src/completion_task.rs | 135 ++--- crates/testing/src/lib.rs | 12 - crates/testing/src/overall_safety_task.rs | 666 +++++++++++++-------- crates/testing/src/per_node_safety_task.rs | 249 -------- crates/testing/src/soundness_task.rs | 1 - crates/testing/src/spinning_task.rs | 340 +++++++---- crates/testing/src/task_helpers.rs | 4 +- crates/testing/src/test_builder.rs | 11 - crates/testing/src/test_launcher.rs | 131 +--- crates/testing/src/test_runner.rs | 161 +++-- crates/testing/src/txn_task.rs | 179 ++---- crates/testing/src/view_sync_task.rs | 280 +++++---- crates/testing/tests/network_task.rs | 2 +- 17 files changed, 1024 insertions(+), 1161 deletions(-) delete mode 100644 crates/testing/src/per_node_safety_task.rs delete mode 100644 crates/testing/src/soundness_task.rs diff --git a/Cargo.lock b/Cargo.lock index b6a2e33abd..264233ae0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2841,6 +2841,7 @@ dependencies = [ name = "hotshot-testing" version = "0.1.0" dependencies = [ + "async-broadcast", "async-compatibility-layer", "async-lock 2.8.0", "async-std", @@ -2860,6 +2861,7 @@ dependencies = [ "sha2 0.10.8", "sha3", "snafu", + "task", "tokio", "tracing", ] @@ -6600,7 +6602,7 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "task" version = "0.1.0" -source = 
"git+https://github.com/EspressoSystems/BroadcastChannel.git#8a623b5fcefae1a9c1485090b281dfc4a9662770" +source = "git+https://github.com/EspressoSystems/HotShotTasks.git#f63096479ca0d7fab32372ce6cf2e1acbd05e47d" dependencies = [ "async-broadcast", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 51ce739d4c..3ab5841d62 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ async-broadcast = "0.6.0" async-compatibility-layer = { git = "https://github.com/EspressoSystems/async-compatibility-layer.git", tag = "1.4.1", default-features = false, features = [ "logging-utils", ] } -task = { git = "https://github.com/EspressoSystems/BroadcastChannel.git" } +task = { git = "https://github.com/EspressoSystems/HotShotTasks.git" } async-lock = "2.8" async-trait = "0.1.77" bincode = "1.3.3" diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 5492849571..9433baf220 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -51,7 +51,7 @@ pub struct SystemContextHandle> { impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user pub async fn get_event_stream( - &mut self, + &self, ) -> impl Stream> { self.output_event_stream.1.clone() } @@ -61,7 +61,7 @@ impl + 'static> SystemContextHandl /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper pub async fn get_event_stream_known_impl( - &mut self, + &self, ) -> Receiver> { self.output_event_stream.1.clone() } @@ -72,7 +72,7 @@ impl + 'static> SystemContextHandl /// - type wrapper /// NOTE: this is only used for sanity checks in our tests pub async fn get_internal_event_stream_known_impl( - &mut self, + &self, ) -> Receiver> { self.internal_event_stream.1.clone() } diff --git a/crates/testing/Cargo.toml b/crates/testing/Cargo.toml index eed0961bdd..0500e3b4e9 100644 --- a/crates/testing/Cargo.toml +++ b/crates/testing/Cargo.toml @@ -11,6 +11,7 @@ default = [] slow-tests = [] [dependencies] 
+async-broadcast = { workspace = true } async-compatibility-layer = { workspace = true } sha3 = "^0.10" bincode = { workspace = true } @@ -32,6 +33,7 @@ sha2 = { workspace = true } async-lock = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } +task = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/crates/testing/src/completion_task.rs b/crates/testing/src/completion_task.rs index 0b82f54f02..71c6fb8a31 100644 --- a/crates/testing/src/completion_task.rs +++ b/crates/testing/src/completion_task.rs @@ -1,14 +1,18 @@ -use std::{sync::Arc, time::Duration}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; +use std::time::Duration; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; -use async_compatibility_layer::art::async_sleep; -use futures::FutureExt; +use async_broadcast::{Receiver, Sender}; +use async_compatibility_layer::art::{async_spawn, async_timeout}; use hotshot::traits::TestableNodeImplementation; use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; -use crate::test_runner::Node; +use crate::test_runner::{HotShotTaskCompleted, Node}; -use super::{test_launcher::TaskGenerator, GlobalTestEvent}; +use super::GlobalTestEvent; /// the idea here is to run as long as we want @@ -18,24 +22,39 @@ pub struct CompletionTaskErr {} /// Completion task state pub struct CompletionTask> { - /// the test level event stream - pub(crate) test_event_stream: ChannelStream, + pub tx: Sender, + + pub rx: Receiver, /// handles to the nodes in the test pub(crate) handles: Vec>, + /// Duration of the task. 
+ pub duration: Duration, } -impl> TS for CompletionTask {} - -/// Completion task types -pub type CompletionTaskTypes = HSTWithEventAndMessage< - CompletionTaskErr, - GlobalTestEvent, - ChannelStream, - (), - GeneratedStream<()>, - CompletionTask, ->; - +impl> CompletionTask { + pub fn run(mut self) -> JoinHandle { + async_spawn(async move { + if async_timeout(self.duration, self.wait_for_shutdown()) + .await + .is_err() + { + // We hit the time limit, notify other test tasks to shutdown + self.tx.broadcast(GlobalTestEvent::ShutDown).await; + } + for node in &self.handles { + node.handle.clone().shut_down().await; + } + return HotShotTaskCompleted::ShutDown; + }) + } + async fn wait_for_shutdown(&mut self) { + while let Ok(event) = self.rx.recv().await { + if matches!(event, GlobalTestEvent::ShutDown) { + return; + } + } + } +} /// Description for a time-based completion task. #[derive(Clone, Debug)] pub struct TimeBasedCompletionTaskDescription { @@ -49,81 +68,3 @@ pub enum CompletionTaskDescription { /// Time-based completion task. TimeBasedCompletionTaskBuilder(TimeBasedCompletionTaskDescription), } - -impl CompletionTaskDescription { - /// Build and launch a completion task. 
- #[must_use] - pub fn build_and_launch>( - self, - ) -> TaskGenerator> { - match self { - CompletionTaskDescription::TimeBasedCompletionTaskBuilder(td) => td.build_and_launch(), - } - } -} - -impl TimeBasedCompletionTaskDescription { - /// create the task and launch it - /// # Panics - /// if cannot obtain task id after launching - #[must_use] - pub fn build_and_launch>( - self, - ) -> TaskGenerator> { - Box::new(move |state, mut registry, test_event_stream| { - async move { - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => { - for node in &state.handles { - node.handle.clone().shut_down().await; - } - (Some(HotShotTaskCompleted), state) - } - } - } - .boxed() - })); - let message_handler = - HandleMessage::>(Arc::new(move |(), state| { - async move { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - for node in &state.handles { - node.handle.clone().shut_down().await; - } - (Some(HotShotTaskCompleted), state) - } - .boxed() - })); - // normally I'd say "let's use Interval from async-std!" 
- // but doing this is easier than unifying async-std with tokio's slightly different - // interval abstraction - let stream_generator = GeneratedStream::new(Arc::new(move || { - let fut = async move { - async_sleep(self.duration).await; - }; - Some(boxed_sync(fut)) - })); - let builder = TaskBuilder::>::new( - "Test Completion Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler) - .register_message_handler(message_handler) - .register_message_stream(stream_generator); - let task_id = builder.get_task_id().unwrap(); - (task_id, CompletionTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} diff --git a/crates/testing/src/lib.rs b/crates/testing/src/lib.rs index 93b036daa3..c1a84af20a 100644 --- a/crates/testing/src/lib.rs +++ b/crates/testing/src/lib.rs @@ -48,15 +48,3 @@ pub enum GlobalTestEvent { /// the test is shutting down ShutDown, } - -/// the reason for shutting down the test -pub enum ShutDownReason { - /// the test is shutting down because of a safety violation - SafetyViolation, - /// the test is shutting down because the test has completed successfully - SuccessfullyCompleted, -} - -/// type alias for the type of tasks created in testing -pub type TestTask = - HSTWithEvent, STATE>; diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 221659b7ad..8560b10dd0 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -1,9 +1,6 @@ -use async_compatibility_layer::channel::UnboundedStream; -use either::Either; use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, HotShotError}; -use hotshot_task_impls::events::HotShotEvent; use hotshot_types::{ data::{Leaf, VidCommitment}, error::RoundTimedoutState, @@ -16,9 +13,10 @@ use std::{ collections::{hash_map::Entry, HashMap, HashSet}, 
sync::Arc, }; +use task::task::{TaskState, TestTaskState}; use tracing::error; -use crate::{test_launcher::TaskGenerator, test_runner::Node}; +use crate::test_runner::{HotShotTaskCompleted, Node}; /// convenience type alias for state and block pub type StateAndBlock = (Vec, Vec); @@ -71,11 +69,193 @@ pub struct OverallSafetyTask>, /// ctx pub ctx: RoundCtx, - /// event stream for publishing safety violations - pub test_event_stream: ChannelStream, + /// configure properties + pub properties: OverallSafetyPropertiesDescription, } -impl> TS for OverallSafetyTask {} +impl> TaskState + for OverallSafetyTask +{ + type Event = GlobalTestEvent; + + type Result = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut task::task::Task, + ) -> Option { + match event { + GlobalTestEvent::ShutDown => { + let state = task.state_mut(); + let OverallSafetyPropertiesDescription { + check_leaf, + check_state, + check_block, + num_failed_views: num_failed_rounds_total, + num_successful_views, + threshold_calculator, + transaction_threshold, + }: OverallSafetyPropertiesDescription = state.properties.clone(); + + let num_incomplete_views = state.ctx.round_results.len() + - state.ctx.successful_views.len() + - state.ctx.failed_views.len(); + + if state.ctx.successful_views.len() < num_successful_views { + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::NotEnoughDecides { + got: state.ctx.successful_views.len(), + expected: num_successful_views, + }, + ))); + } + + if state.ctx.failed_views.len() + num_incomplete_views >= num_failed_rounds_total { + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::TooManyFailures { + failed_views: state.ctx.failed_views.clone(), + }, + ))); + } + // TODO check if we got enough successful views + Some(HotShotTaskCompleted::ShutDown) + } + } + } + + fn should_shutdown(event: &Self::Event) -> bool { + false + } +} + +impl> TestTaskState + for OverallSafetyTask +{ + type 
Message = Event; + + type Result = HotShotTaskCompleted; + + type State = Self; + + async fn handle_message( + message: Self::Message, + idx: usize, + task: &mut task::task::TestTask, + ) -> Option { + let OverallSafetyPropertiesDescription { + check_leaf, + check_state, + check_block, + num_failed_views, + num_successful_views, + threshold_calculator, + transaction_threshold, + }: OverallSafetyPropertiesDescription = task.state().properties.clone(); + let Event { view_number, event } = message; + let key = match event { + EventType::Error { error } => { + task.state_mut() + .ctx + .insert_error_to_context(view_number, idx, error); + None + } + EventType::Decide { + leaf_chain, + qc, + block_size: maybe_block_size, + } => { + let paired_up = (leaf_chain.to_vec(), (*qc).clone()); + match task.state_mut().ctx.round_results.entry(view_number) { + Entry::Occupied(mut o) => { + o.get_mut() + .insert_into_result(idx, paired_up, maybe_block_size) + } + Entry::Vacant(v) => { + let mut round_result = RoundResult::default(); + let key = round_result.insert_into_result(idx, paired_up, maybe_block_size); + v.insert(round_result); + key + } + } + } + EventType::ReplicaViewTimeout { view_number } => { + let error = Arc::new(HotShotError::::ViewTimeoutError { + view_number, + state: RoundTimedoutState::TestCollectRoundEventsTimedOut, + }); + task.state_mut() + .ctx + .insert_error_to_context(view_number, idx, error); + None + } + _ => return None, + }; + + // update view count + let threshold = + (threshold_calculator)(task.state().handles.len(), task.state().handles.len()); + + let len = task.state().handles.len(); + let view = task + .state_mut() + .ctx + .round_results + .get_mut(&view_number) + .unwrap(); + if let Some(key) = key { + view.update_status( + threshold, + len, + &key, + check_leaf, + check_state, + check_block, + transaction_threshold, + ); + match view.status.clone() { + ViewStatus::Ok => { + task.state_mut().ctx.successful_views.insert(view_number); + if 
task.state_mut().ctx.successful_views.len() >= num_successful_views { + task.send_event(GlobalTestEvent::ShutDown).await; + return Some(HotShotTaskCompleted::ShutDown); + } + return None; + } + ViewStatus::Failed => { + task.state_mut().ctx.failed_views.insert(view_number); + if task.state_mut().ctx.failed_views.len() > num_failed_views { + task.send_event(GlobalTestEvent::ShutDown).await; + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::TooManyFailures { + failed_views: task.state_mut().ctx.failed_views.clone(), + }, + ))); + } + return None; + } + ViewStatus::Err(e) => { + return Some(HotShotTaskCompleted::Error(Box::new(e))); + } + ViewStatus::InProgress => { + return None; + } + } + } else if view.check_if_failed(threshold, len) { + view.status = ViewStatus::Failed; + task.state_mut().ctx.failed_views.insert(view_number); + if task.state_mut().ctx.failed_views.len() > num_failed_views { + task.send_event(GlobalTestEvent::ShutDown).await; + return Some(HotShotTaskCompleted::Error(Box::new( + OverallSafetyTaskErr::::TooManyFailures { + failed_views: task.state_mut().ctx.failed_views.clone(), + }, + ))); + } + return None; + } + None + } +} /// Result of running a round of consensus #[derive(Debug)] @@ -387,244 +567,234 @@ impl Default for OverallSafetyPropertiesDescription { } } -impl OverallSafetyPropertiesDescription { - /// build a task - /// # Panics - /// if an internal variant that the prior views are filled is violated - #[must_use] - #[allow(clippy::too_many_lines)] - pub fn build>( - self, - ) -> TaskGenerator> { - let Self { - check_leaf, - check_state, - check_block, - num_failed_views: num_failed_rounds_total, - num_successful_views, - threshold_calculator, - transaction_threshold, - }: Self = self; - - Box::new(move |mut state, mut registry, test_event_stream| { - async move { - let event_handler = HandleEvent::>(Arc::new( - move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => { - let 
num_incomplete_views = state.ctx.round_results.len() - - state.ctx.successful_views.len() - - state.ctx.failed_views.len(); - - if state.ctx.successful_views.len() < num_successful_views { - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::NotEnoughDecides { - got: state.ctx.successful_views.len(), - expected: num_successful_views, - }, - ))), - state, - ); - } - - if state.ctx.failed_views.len() + num_incomplete_views - >= num_failed_rounds_total - { - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: state.ctx.failed_views.clone(), - }, - ))), - state, - ); - } - // TODO check if we got enough successful views - (Some(HotShotTaskCompleted), state) - } - } - } - .boxed() - }, - )); - - let message_handler = HandleMessage::>(Arc::new( - move |msg, mut state| { - let threshold_calculator = threshold_calculator.clone(); - async move { - - let (idx, maybe_event ) : (usize, Either<_, _>)= msg; - if let Either::Left(Event { view_number, event }) = maybe_event { - let key = match event { - EventType::Error { error } => { - state.ctx.insert_error_to_context(view_number, idx, error); - None - } - EventType::Decide { - leaf_chain, - qc, - block_size: maybe_block_size, - } => { - let paired_up = (leaf_chain.to_vec(), (*qc).clone()); - match state.ctx.round_results.entry(view_number) { - Entry::Occupied(mut o) => o.get_mut().insert_into_result( - idx, - paired_up, - maybe_block_size, - ), - Entry::Vacant(v) => { - let mut round_result = RoundResult::default(); - let key = round_result.insert_into_result( - idx, - paired_up, - maybe_block_size, - ); - v.insert(round_result); - key - } - } - } - EventType::ReplicaViewTimeout { view_number } => { - let error = Arc::new(HotShotError::::ViewTimeoutError { - view_number, - state: RoundTimedoutState::TestCollectRoundEventsTimedOut, - }); - state.ctx.insert_error_to_context(view_number, idx, error); - None - } - _ => return (None, state), - 
}; - - // update view count - let threshold = - (threshold_calculator)(state.handles.len(), state.handles.len()); - - let view = state.ctx.round_results.get_mut(&view_number).unwrap(); - - if let Some(key) = key { - view.update_status( - threshold, - state.handles.len(), - &key, - check_leaf, - check_state, - check_block, - transaction_threshold, - ); - match view.status.clone() { - ViewStatus::Ok => { - state.ctx.successful_views.insert(view_number); - if state.ctx.successful_views.len() - >= self.num_successful_views - { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - return (Some(HotShotTaskCompleted), state); - } - return (None, state); - } - ViewStatus::Failed => { - state.ctx.failed_views.insert(view_number); - if state.ctx.failed_views.len() > self.num_failed_views { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: state.ctx.failed_views.clone(), - }, - ))), - state, - ); - } - return (None, state); - } - ViewStatus::Err(e) => { - return ( - Some(HotShotTaskCompleted::Error(Box::new(e))), - state, - ); - } - ViewStatus::InProgress => { - return (None, state); - } - } - } - else if view.check_if_failed(threshold, state.handles.len()) { - view.status = ViewStatus::Failed; - state.ctx.failed_views.insert(view_number); - if state.ctx.failed_views.len() > self.num_failed_views { - state - .test_event_stream - .publish(GlobalTestEvent::ShutDown) - .await; - return ( - Some(HotShotTaskCompleted::Error(Box::new( - OverallSafetyTaskErr::::TooManyFailures { - failed_views: state.ctx.failed_views.clone(), - }, - ))), - state, - ); - } - return (None, state); - } - - } - - (None, state) - } - .boxed() - }, - )); - - let mut streams = vec![]; - for handle in &mut state.handles { - let s1 = - handle - .handle - .get_event_stream_known_impl() - .await; - let s2 = - handle - .handle - 
.get_internal_event_stream_known_impl() - .await; - streams.push( - Merge::new(s1, s2) - ); - } - let builder = TaskBuilder::>::new( - "Test Overall Safety Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_message_handler(message_handler) - .register_message_stream(MergeN::new(streams)) - .register_event_handler(event_handler) - .register_state(state); - let task_id = builder.get_task_id().unwrap(); - (task_id, OverallSafetyTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} - -/// overall types for safety task -pub type OverallSafetyTaskTypes = HSTWithEventAndMessage< - OverallSafetyTaskErr, - GlobalTestEvent, - ChannelStream, - (usize, Either, HotShotEvent>), - MergeN>, UnboundedStream>>>, - OverallSafetyTask, ->; +// impl OverallSafetyPropertiesDescription { +// /// build a task +// /// # Panics +// /// if an internal variant that the prior views are filled is violated +// #[must_use] +// #[allow(clippy::too_many_lines)] +// pub fn build>( +// self, +// ) -> TaskGenerator> { +// let Self { +// check_leaf, +// check_state, +// check_block, +// num_failed_views: num_failed_rounds_total, +// num_successful_views, +// threshold_calculator, +// transaction_threshold, +// }: Self = self; + +// Box::new(move |mut state, mut registry, test_event_stream| { +// async move { +// let event_handler = HandleEvent::>(Arc::new( +// move |event, state| { +// async move { +// match event { +// GlobalTestEvent::ShutDown => { +// let num_incomplete_views = state.ctx.round_results.len() +// - state.ctx.successful_views.len() +// - state.ctx.failed_views.len(); + +// if state.ctx.successful_views.len() < num_successful_views { +// return ( +// Some(HotShotTaskCompleted::Error(Box::new( +// OverallSafetyTaskErr::::NotEnoughDecides { +// got: state.ctx.successful_views.len(), +// expected: num_successful_views, +// }, +// ))), +// state, +// ); +// } + +// if 
state.ctx.failed_views.len() + num_incomplete_views +// >= num_failed_rounds_total +// { +// return ( +// Some(HotShotTaskCompleted::Error(Box::new( +// OverallSafetyTaskErr::::TooManyFailures { +// failed_views: state.ctx.failed_views.clone(), +// }, +// ))), +// state, +// ); +// } +// // TODO check if we got enough successful views +// (Some(HotShotTaskCompleted), state) +// } +// } +// } +// .boxed() +// }, +// )); + +// let message_handler = HandleMessage::>(Arc::new( +// move |msg, mut state| { +// let threshold_calculator = threshold_calculator.clone(); +// async move { + +// let (idx, maybe_event ) : (usize, Either<_, _>)= msg; +// if let Either::Left(Event { view_number, event }) = maybe_event { +// let key = match event { +// EventType::Error { error } => { +// state.ctx.insert_error_to_context(view_number, idx, error); +// None +// } +// EventType::Decide { +// leaf_chain, +// qc, +// block_size: maybe_block_size, +// } => { +// let paired_up = (leaf_chain.to_vec(), (*qc).clone()); +// match state.ctx.round_results.entry(view_number) { +// Entry::Occupied(mut o) => o.get_mut().insert_into_result( +// idx, +// paired_up, +// maybe_block_size, +// ), +// Entry::Vacant(v) => { +// let mut round_result = RoundResult::default(); +// let key = round_result.insert_into_result( +// idx, +// paired_up, +// maybe_block_size, +// ); +// v.insert(round_result); +// key +// } +// } +// } +// EventType::ReplicaViewTimeout { view_number } => { +// let error = Arc::new(HotShotError::::ViewTimeoutError { +// view_number, +// state: RoundTimedoutState::TestCollectRoundEventsTimedOut, +// }); +// state.ctx.insert_error_to_context(view_number, idx, error); +// None +// } +// _ => return (None, state), +// }; + +// // update view count +// let threshold = +// (threshold_calculator)(state.handles.len(), state.handles.len()); + +// let view = state.ctx.round_results.get_mut(&view_number).unwrap(); + +// if let Some(key) = key { +// view.update_status( +// threshold, +// 
state.handles.len(), +// &key, +// check_leaf, +// check_state, +// check_block, +// transaction_threshold, +// ); +// match view.status.clone() { +// ViewStatus::Ok => { +// state.ctx.successful_views.insert(view_number); +// if state.ctx.successful_views.len() +// >= self.num_successful_views +// { +// state +// .test_event_stream +// .publish(GlobalTestEvent::ShutDown) +// .await; +// return (Some(HotShotTaskCompleted), state); +// } +// return (None, state); +// } +// ViewStatus::Failed => { +// state.ctx.failed_views.insert(view_number); +// if state.ctx.failed_views.len() > self.num_failed_views { +// state +// .test_event_stream +// .publish(GlobalTestEvent::ShutDown) +// .await; +// return ( +// Some(HotShotTaskCompleted::Error(Box::new( +// OverallSafetyTaskErr::::TooManyFailures { +// failed_views: state.ctx.failed_views.clone(), +// }, +// ))), +// state, +// ); +// } +// return (None, state); +// } +// ViewStatus::Err(e) => { +// return ( +// Some(HotShotTaskCompleted::Error(Box::new(e))), +// state, +// ); +// } +// ViewStatus::InProgress => { +// return (None, state); +// } +// } +// } +// else if view.check_if_failed(threshold, state.handles.len()) { +// view.status = ViewStatus::Failed; +// state.ctx.failed_views.insert(view_number); +// if state.ctx.failed_views.len() > self.num_failed_views { +// state +// .test_event_stream +// .publish(GlobalTestEvent::ShutDown) +// .await; +// return ( +// Some(HotShotTaskCompleted::Error(Box::new( +// OverallSafetyTaskErr::::TooManyFailures { +// failed_views: state.ctx.failed_views.clone(), +// }, +// ))), +// state, +// ); +// } +// return (None, state); +// } + +// } + +// (None, state) +// } +// .boxed() +// }, +// )); + +// let mut streams = vec![]; +// for handle in &mut state.handles { +// let s1 = +// handle +// .handle +// .get_event_stream_known_impl() +// .await; +// let s2 = +// handle +// .handle +// .get_internal_event_stream_known_impl() +// .await; +// streams.push( +// Merge::new(s1, s2) +// 
); +// } +// let builder = TaskBuilder::>::new( +// "Test Overall Safety Task".to_string(), +// ) +// .register_event_stream(test_event_stream, FilterEvent::default()) +// .await +// .register_registry(&mut registry) +// .await +// .register_message_handler(message_handler) +// .register_message_stream(MergeN::new(streams)) +// .register_event_handler(event_handler) +// .register_state(state); +// let task_id = builder.get_task_id().unwrap(); +// (task_id, OverallSafetyTaskTypes::build(builder).launch()) +// } +// .boxed() +// }) +// } +// } diff --git a/crates/testing/src/per_node_safety_task.rs b/crates/testing/src/per_node_safety_task.rs deleted file mode 100644 index be0fe4a8fa..0000000000 --- a/crates/testing/src/per_node_safety_task.rs +++ /dev/null @@ -1,249 +0,0 @@ -// // TODO rename this file to per-node -// -// use std::{ops::Deref, sync::Arc}; -// -// use async_compatibility_layer::channel::UnboundedStream; -// use either::Either; -// use futures::{ -// future::{BoxFuture, LocalBoxFuture}, -// FutureExt, -// }; -// use hotshot::traits::TestableNodeImplementation; -// use hotshot_types::{ -// event::{Event, EventType}, -// traits::node_implementation::NodeType, -// }; -// use nll::nll_todo::nll_todo; -// use snafu::Snafu; -// use tracing::log::warn; -// -// use crate::test_errors::ConsensusTestError; -// -// use super::{ -// completion_task::CompletionTask, -// node_ctx::{NodeCtx, ViewFailed, ViewStatus, ViewSuccess}, -// GlobalTestEvent, -// }; -// -// #[derive(Snafu, Debug)] -// pub enum PerNodeSafetyTaskErr { -// // TODO make this more detailed -// TooManyFailures, -// NotEnoughDecides, -// } -// impl TaskErr for PerNodeSafetyTaskErr {} -// -// /// Data availability task state -// /// -// #[derive(Debug)] -// pub struct PerNodeSafetyTask> { -// pub(crate) ctx: NodeCtx, -// } -// -// impl> Default -// for PerNodeSafetyTask -// { -// fn default() -> Self { -// Self { -// ctx: Default::default(), -// } -// } -// } -// -// impl> TS -// for 
PerNodeSafetyTask -// { -// } -// -// /// builder describing custom safety properties -// #[derive(Clone)] -// pub enum PerNodeSafetyTaskDescription< -// TYPES: NodeType, -// I: TestableNodeImplementation, -// > { -// GenProperties(PerNodeSafetyPropertiesDescription), -// CustomProperties(PerNodeSafetyFinisher), -// } -// -// /// properties used for gen -// #[derive(Clone, Debug)] -// pub struct PerNodeSafetyPropertiesDescription { -// /// number failed views -// pub num_failed_views: Option, -// /// number decide events -// pub num_decide_events: Option, -// } -// -// // basic consistency check for single node -// /// Exists for easier overriding -// /// runs at end of all tasks -// #[derive(Clone)] -// #[allow(clippy::type_complexity)] -// pub struct PerNodeSafetyFinisher< -// TYPES: NodeType, -// I: TestableNodeImplementation, -// >( -// pub Arc< -// dyn for<'a> Fn(&'a mut NodeCtx) -> BoxFuture<'a, Result<(), PerNodeSafetyTaskErr>> -// + Send -// + 'static -// + Sync, -// >, -// ); -// -// impl> Deref -// for PerNodeSafetyFinisher -// { -// type Target = dyn for<'a> Fn(&'a mut NodeCtx) -> BoxFuture<'a, Result<(), PerNodeSafetyTaskErr>> -// + Send -// + 'static -// + Sync; -// -// fn deref(&self) -> &Self::Target { -// &*self.0 -// } -// } -// -// impl> -// PerNodeSafetyTaskDescription -// { -// fn gen_finisher(self) -> PerNodeSafetyFinisher { -// match self { -// PerNodeSafetyTaskDescription::CustomProperties(finisher) => finisher, -// PerNodeSafetyTaskDescription::GenProperties(PerNodeSafetyPropertiesDescription { -// num_failed_views, -// num_decide_events, -// }) => PerNodeSafetyFinisher(Arc::new(move |ctx: &mut NodeCtx| { -// async move { -// let mut num_failed = 0; -// let mut num_decided = 0; -// for (_view_num, view_status) in &ctx.round_results { -// match view_status { -// ViewStatus::InProgress(_) => {} -// ViewStatus::ViewFailed(_) => { -// num_failed += 1; -// } -// ViewStatus::ViewSuccess(_) => { -// num_decided += 1; -// } -// } -// } -// if let 
Some(num_failed_views) = num_failed_views { -// if num_failed >= num_failed_views { -// return Err(PerNodeSafetyTaskErr::TooManyFailures); -// } -// } -// -// if let Some(num_decide_events) = num_decide_events { -// if num_decided < num_decide_events { -// return Err(PerNodeSafetyTaskErr::NotEnoughDecides); -// } -// } -// Ok(()) -// } -// .boxed() -// })), -// } -// } -// -// /// build -// pub fn build( -// self, -// // registry: &mut GlobalRegistry, -// // test_event_stream: ChannelStream, -// // hotshot_event_stream: UnboundedStream>, -// ) -> TaskGenerator< -// PerNodeSafetyTask -// > { -// Box::new( -// move |state, mut registry, test_event_stream, hotshot_event_stream| { -// // TODO this is cursed, there's definitely a better way to do this -// let desc = self.clone(); -// async move { -// let test_event_handler = HandleEvent::>(Arc::new( -// move |event, mut state| { -// let finisher = desc.clone().gen_finisher(); -// async move { -// match event { -// GlobalTestEvent::ShutDown => { -// let finished = finisher(&mut state.ctx).await; -// let result = match finished { -// Ok(()) => HotShotTaskCompleted, -// Err(err) => HotShotTaskCompleted::Error(Box::new(err)), -// }; -// return (Some(result), state); -// } -// _ => { -// unimplemented!() -// } -// } -// } -// .boxed() -// }, -// )); -// let message_handler = HandleMessage::>(Arc::new( -// move |msg, mut state| { -// async move { -// let Event { view_number, event } = msg; -// match event { -// EventType::Error { error } => { -// // TODO better warn with node idx -// warn!("View {:?} failed for a replica", view_number); -// state.ctx.round_results.insert( -// view_number, -// ViewStatus::ViewFailed(ViewFailed(error)), -// ); -// } -// EventType::Decide { leaf_chain, qc } => { -// state.ctx.round_results.insert( -// view_number, -// ViewStatus::ViewSuccess(ViewSuccess { -// agreed_state: -// -// }), -// ); -// } -// // these aren't failures -// EventType::ReplicaViewTimeout { view_number } -// | 
EventType::NextLeaderViewTimeout { view_number } -// | EventType::ViewFinished { view_number } => todo!(), -// _ => todo!(), -// } -// (None, state) -// } -// .boxed() -// }, -// )); -// -// let builder = TaskBuilder::>::new( -// "Safety Check Task".to_string(), -// ) -// .register_event_stream(test_event_stream, FilterEvent::default()) -// .await -// .register_registry(&mut registry) -// .await -// .register_state(state) -// .register_event_handler(test_event_handler) -// .register_message_handler(message_handler) -// .register_message_stream(hotshot_event_stream); -// let task_id = builder.get_task_id().unwrap(); -// (task_id, PerNodeSafetyTaskTypes::build(builder).launch()) -// } -// .boxed() -// }, -// ) -// } -// } -// -// // /// Data Availability task types -// pub type PerNodeSafetyTaskTypes< -// TYPES: NodeType, -// I: TestableNodeImplementation, -// > = HSTWithEventAndMessage< -// PerNodeSafetyTaskErr, -// GlobalTestEvent, -// ChannelStream, -// Event, -// UnboundedStream>, -// PerNodeSafetyTask, -// >; diff --git a/crates/testing/src/soundness_task.rs b/crates/testing/src/soundness_task.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/crates/testing/src/soundness_task.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index f7bb20074d..8797db8d46 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -1,14 +1,15 @@ -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; -use async_compatibility_layer::channel::UnboundedStream; -use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, SystemContext}; +use hotshot_types::event::EventType; use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; +use task::task::{TaskState, TestTaskState}; -use crate::{test_launcher::TaskGenerator, test_runner::Node}; +use 
crate::test_runner::HotShotTaskCompleted; +use crate::test_runner::Node; /// convience type for state and block pub type StateAndBlock = (Vec, Vec); @@ -30,7 +31,91 @@ pub struct SpinningTask> { pub(crate) latest_view: Option, } -impl> TS for SpinningTask {} +impl> TaskState for SpinningTask { + type Event = GlobalTestEvent; + + type Result = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + _task: &mut task::task::Task, + ) -> Option { + if matches!(event, GlobalTestEvent::ShutDown) { + return Some(HotShotTaskCompleted::ShutDown); + } + None + } + + fn should_shutdown(event: &Self::Event) -> bool { + false + } +} + +impl> TestTaskState + for SpinningTask +{ + type Message = Event; + + type Result = HotShotTaskCompleted; + + type State = Self; + + async fn handle_message( + message: Self::Message, + id: usize, + task: &mut task::task::TestTask, + ) -> Option { + let Event { + view_number, + event: _, + } = message; + + let state = &mut task.state_mut(); + + // if we have not seen this view before + if state.latest_view.is_none() || view_number > state.latest_view.unwrap() { + // perform operations on the nodes + if let Some(operations) = state.changes.remove(&view_number) { + for ChangeNode { idx, updown } in operations { + match updown { + UpDown::Up => { + if let Some(node) = state.late_start.remove(&idx.try_into().unwrap()) { + tracing::error!("Node {} spinning up late", idx); + let handle = node.run_tasks().await; + handle.hotshot.start_consensus().await; + } + } + UpDown::Down => { + if let Some(node) = state.handles.get_mut(idx) { + tracing::error!("Node {} shutting down", idx); + node.handle.shut_down().await; + } + } + UpDown::NetworkUp => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!("Node {} networks resuming", idx); + handle.networks.0.resume(); + handle.networks.1.resume(); + } + } + UpDown::NetworkDown => { + if let Some(handle) = state.handles.get(idx) { + tracing::error!("Node {} networks pausing", idx); 
+ handle.networks.0.pause(); + handle.networks.1.pause(); + } + } + } + } + } + + // update our latest view + state.latest_view = Some(view_number); + } + + None + } +} /// Spin the node up or down #[derive(Clone, Debug)] @@ -62,129 +147,126 @@ pub struct SpinningTaskDescription { pub node_changes: Vec<(u64, Vec)>, } -impl SpinningTaskDescription { - /// build a task - /// # Panics - /// If there is no latest view - /// or if the node id is over `u32::MAX` - #[must_use] - pub fn build>( - self, - ) -> TaskGenerator> { - Box::new(move |mut state, mut registry, test_event_stream| { - async move { - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => (Some(HotShotTaskCompleted), state), - } - } - .boxed() - })); - - let message_handler = HandleMessage::>(Arc::new( - move |msg, mut state| { - async move { - let Event { - view_number, - event: _, - } = msg.1; - - // if we have not seen this view before - if state.latest_view.is_none() - || view_number > state.latest_view.unwrap() - { - // perform operations on the nodes - if let Some(operations) = state.changes.remove(&view_number) { - for ChangeNode { idx, updown } in operations { - match updown { - UpDown::Up => { - if let Some(node) = state - .late_start - .remove(&idx.try_into().unwrap()) - { - tracing::error!( - "Node {} spinning up late", - idx - ); - let handle = node.run_tasks().await; - handle.hotshot.start_consensus().await; - } - } - UpDown::Down => { - if let Some(node) = state.handles.get_mut(idx) { - tracing::error!("Node {} shutting down", idx); - node.handle.shut_down().await; - } - } - UpDown::NetworkUp => { - if let Some(handle) = state.handles.get(idx) { - tracing::error!( - "Node {} networks resuming", - idx - ); - handle.networks.0.resume(); - handle.networks.1.resume(); - } - } - UpDown::NetworkDown => { - if let Some(handle) = state.handles.get(idx) { - tracing::error!( - "Node {} networks pausing", - idx - ); - 
handle.networks.0.pause(); - handle.networks.1.pause(); - } - } - } - } - } - - // update our latest view - state.latest_view = Some(view_number); - } +// impl SpinningTaskDescription { +// /// build a task +// /// # Panics +// /// If there is no latest view +// /// or if the node id is over `u32::MAX` +// #[must_use] +// pub fn build>( +// self, +// ) -> TaskGenerator> { +// Box::new(move |mut state, mut registry, test_event_stream| { +// async move { +// let event_handler = +// HandleEvent::>(Arc::new(move |event, state| { +// async move { +// match event { +// GlobalTestEvent::ShutDown => (Some(HotShotTaskCompleted), state), +// } +// } +// .boxed() +// })); - (None, state) - } - .boxed() - }, - )); - - let mut streams = vec![]; - for handle in &mut state.handles { - let s1 = handle - .handle - .get_event_stream_known_impl() - .await; - streams.push(s1); - } - let builder = TaskBuilder::>::new( - "Test Spinning Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_message_handler(message_handler) - .register_message_stream(MergeN::new(streams)) - .register_event_handler(event_handler) - .register_state(state); - let task_id = builder.get_task_id().unwrap(); - (task_id, SpinningTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} +// let message_handler = HandleMessage::>(Arc::new( +// move |msg, mut state| { +// async move { +// let Event { +// view_number, +// event: _, +// } = msg.1; + +// // if we have not seen this view before +// if state.latest_view.is_none() +// || view_number > state.latest_view.unwrap() +// { +// // perform operations on the nodes +// if let Some(operations) = state.changes.remove(&view_number) { +// for ChangeNode { idx, updown } in operations { +// match updown { +// UpDown::Up => { +// if let Some(node) = state +// .late_start +// .remove(&idx.try_into().unwrap()) +// { +// tracing::error!( +// "Node {} spinning up late", 
+// idx +// ); +// let handle = node.run_tasks().await; +// handle.hotshot.start_consensus().await; +// } +// } +// UpDown::Down => { +// if let Some(node) = state.handles.get_mut(idx) { +// tracing::error!("Node {} shutting down", idx); +// node.handle.shut_down().await; +// } +// } +// UpDown::NetworkUp => { +// if let Some(handle) = state.handles.get(idx) { +// tracing::error!( +// "Node {} networks resuming", +// idx +// ); +// handle.networks.0.resume(); +// handle.networks.1.resume(); +// } +// } +// UpDown::NetworkDown => { +// if let Some(handle) = state.handles.get(idx) { +// tracing::error!( +// "Node {} networks pausing", +// idx +// ); +// handle.networks.0.pause(); +// handle.networks.1.pause(); +// } +// } +// } +// } +// } + +// // update our latest view +// state.latest_view = Some(view_number); +// } + +// (None, state) +// } +// .boxed() +// }, +// )); + +// let mut streams = vec![]; +// for handle in &mut state.handles { +// let s1 = handle.handle.get_event_stream_known_impl().await; +// streams.push(s1); +// } +// let builder = TaskBuilder::>::new( +// "Test Spinning Task".to_string(), +// ) +// .register_event_stream(test_event_stream, FilterEvent::default()) +// .await +// .register_registry(&mut registry) +// .await +// .register_message_handler(message_handler) +// .register_message_stream(MergeN::new(streams)) +// .register_event_handler(event_handler) +// .register_state(state); +// let task_id = builder.get_task_id().unwrap(); +// (task_id, SpinningTaskTypes::build(builder).launch()) +// } +// .boxed() +// }) +// } +// } -/// types for safety task -pub type SpinningTaskTypes = HSTWithEventAndMessage< - SpinningTaskErr, - GlobalTestEvent, - ChannelStream, - (usize, Event), - MergeN>>, - SpinningTask, ->; +// /// types for safety task +// pub type SpinningTaskTypes = HSTWithEventAndMessage< +// SpinningTaskErr, +// GlobalTestEvent, +// ChannelStream, +// (usize, Event), +// MergeN>>, +// SpinningTask, +// >; diff --git 
a/crates/testing/src/task_helpers.rs b/crates/testing/src/task_helpers.rs index 71ba9d5581..fcb2c37497 100644 --- a/crates/testing/src/task_helpers.rs +++ b/crates/testing/src/task_helpers.rs @@ -31,6 +31,7 @@ use hotshot_types::{ vote::HasViewNumber, }; +use async_broadcast::{Receiver, Sender}; use async_lock::RwLockUpgradableReadGuard; use bitvec::bitvec; use hotshot_types::simple_vote::QuorumData; @@ -50,7 +51,8 @@ pub async fn build_system_handle( node_id: u64, ) -> ( SystemContextHandle, - ChannelStream>, + Sender>, + Receiver>, ) { let builder = TestMetadata::default_multiple_rounds(); diff --git a/crates/testing/src/test_builder.rs b/crates/testing/src/test_builder.rs index 6046253e26..4285189ed5 100644 --- a/crates/testing/src/test_builder.rs +++ b/crates/testing/src/test_builder.rs @@ -285,11 +285,6 @@ impl TestMetadata { a.propose_max_round_time = propose_max_round_time; }; - let txn_task_generator = txn_description.build(); - let completion_task_generator = completion_task_description.build_and_launch(); - let overall_safety_task_generator = overall_safety_properties.build(); - let spinning_task_generator = spinning_properties.build(); - let view_sync_task_generator = view_sync_properties.build(); TestLauncher { resource_generator: ResourceGenerators { channel_generator: >::gen_comm_channels( @@ -302,12 +297,6 @@ impl TestMetadata { config, }, metadata: self, - txn_task_generator, - overall_safety_task_generator, - completion_task_generator, - spinning_task_generator, - view_sync_task_generator, - hooks: vec![], } .modify_default_config(mod_config) } diff --git a/crates/testing/src/test_launcher.rs b/crates/testing/src/test_launcher.rs index d09a060e86..5f1ad08122 100644 --- a/crates/testing/src/test_launcher.rs +++ b/crates/testing/src/test_launcher.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, sync::Arc}; use futures::future::BoxFuture; use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; use 
hotshot_types::{traits::node_implementation::NodeType, HotShotConfig}; +use task::task::TaskRegistry; use crate::{spinning_task::SpinningTask, view_sync_task::ViewSyncTask}; @@ -27,23 +28,23 @@ pub type CommitteeNetworkGenerator = Box) -> T + 'static>; pub type ViewSyncNetworkGenerator = Box) -> T + 'static>; /// Wrapper type for a task generator. -pub type TaskGenerator = Box< - dyn FnOnce( - TASK, - GlobalRegistry, - ChannelStream, - ) - -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, ->; - -/// Wrapper type for a hook. -pub type Hook = Box< - dyn FnOnce( - GlobalRegistry, - ChannelStream, - ) - -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, ->; +// pub type TaskGenerator = Box< +// dyn FnOnce( +// TASK, +// GlobalRegistry, +// ChannelStream, +// ) +// -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, +// >; + +// /// Wrapper type for a hook. +// pub type Hook = Box< +// dyn FnOnce( +// GlobalRegistry, +// ChannelStream, +// ) +// -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, +// >; /// generators for resources used by each node pub struct ResourceGenerators> { @@ -61,18 +62,6 @@ pub struct TestLauncher> { pub resource_generator: ResourceGenerators, /// metadasta used for tasks pub metadata: TestMetadata, - /// overrideable txn task generator function - pub txn_task_generator: TaskGenerator>, - /// overrideable timeout task generator function - pub completion_task_generator: TaskGenerator>, - /// overall safety task generator - pub overall_safety_task_generator: TaskGenerator>, - /// task for spinning nodes up/down - pub spinning_task_generator: TaskGenerator>, - /// task for view sync - pub view_sync_task_generator: TaskGenerator>, - /// extra hooks in case we want to check additional things - pub hooks: Vec, } impl> TestLauncher { @@ -84,92 +73,8 @@ impl> TestLauncher>, - ) -> Self { - Self { - overall_safety_task_generator, - 
..self - } - } - - /// override the safety task generator - #[must_use] - pub fn with_spinning_task_generator( - self, - spinning_task_generator: TaskGenerator>, - ) -> Self { - Self { - spinning_task_generator, - ..self - } - } - - /// overridde the completion task generator - #[must_use] - pub fn with_completion_task_generator( - self, - completion_task_generator: TaskGenerator>, - ) -> Self { - Self { - completion_task_generator, - ..self - } - } - - /// override the txn task generator - #[must_use] - pub fn with_txn_task_generator( - self, - txn_task_generator: TaskGenerator>, - ) -> Self { - Self { - txn_task_generator, - ..self - } - } - - /// override the view sync task generator - #[must_use] - pub fn with_view_sync_task_generator( - self, - view_sync_task_generator: TaskGenerator>, - ) -> Self { - Self { - view_sync_task_generator, - ..self } } - - /// override resource generators - #[must_use] - pub fn with_resource_generator(self, resource_generator: ResourceGenerators) -> Self { - Self { - resource_generator, - ..self - } - } - - /// add a hook - #[must_use] - pub fn add_hook(mut self, hook: Hook) -> Self { - self.hooks.push(hook); - self - } - - /// overwrite hooks with more hooks - #[must_use] - pub fn with_hooks(self, hooks: Vec) -> Self { - Self { hooks, ..self } - } - /// Modifies the config used when generating nodes with `f` #[must_use] pub fn modify_default_config( diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 1c65452a07..596b2c71a3 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -5,10 +5,14 @@ use super::{ txn_task::TxnTask, }; use crate::{ - spinning_task::{ChangeNode, UpDown}, + completion_task::CompletionTaskDescription, + spinning_task::{ChangeNode, SpinningTask, UpDown}, test_launcher::{Networks, TestLauncher}, + txn_task::TxnTaskDescription, view_sync_task::ViewSyncTask, }; +use async_broadcast::broadcast; +use futures::{future::join_all, 
stream::FuturesUnordered}; use hotshot::{types::SystemContextHandle, Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; @@ -22,7 +26,9 @@ use hotshot_types::{ use std::{ collections::{HashMap, HashSet}, marker::PhantomData, + sync::Arc, }; +use task::task::{Task, TaskRegistry, TestTask}; #[allow(deprecated)] use tracing::info; @@ -49,10 +55,28 @@ pub struct TestRunner> { pub(crate) late_start: HashMap>, /// the next node unique identifier pub(crate) next_node_id: u64, - /// overarching test task - pub(crate) task_runner: TaskRunner, } +/// enum describing how the tasks completed +pub enum HotShotTaskCompleted { + /// the task shut down successfully + ShutDown, + /// the task encountered an error + Error(Box), + /// the streams the task was listening for died + StreamsDied, + /// we somehow lost the state + /// this is definitely a bug. + LostState, + /// lost the return value somehow + LostReturnValue, + /// Stream exists but missing handler + MissingHandler, +} + +pub trait TaskErr: std::error::Error + Sync + Send + 'static {} +impl TaskErr for T {} + impl> TestRunner where I: TestableNodeImplementation, @@ -62,6 +86,20 @@ where /// if the test fails #[allow(clippy::too_many_lines)] pub async fn run_test(mut self) { + let (tx, rx) = broadcast(1024); + + let mut event_rxs = vec![]; + let mut internal_event_rxs = vec![]; + + for node in &self.nodes { + let r = node.handle.get_event_stream_known_impl().await; + event_rxs.push(r); + } + for node in &self.nodes { + let r = node.handle.get_internal_event_stream_known_impl().await; + internal_event_rxs.push(r); + } + let spinning_changes = self .launcher .metadata @@ -78,46 +116,43 @@ where } } + let reg = Arc::new(TaskRegistry::default()); + self.add_nodes(self.launcher.metadata.total_nodes, &late_start_nodes) .await; let TestRunner { - launcher, + ref launcher, nodes, late_start, next_node_id: _, - mut task_runner, } = self; - let registry = GlobalRegistry::default(); - 
let test_event_stream = ChannelStream::new(); - // add transaction task - let txn_task_state = TxnTask { - handles: nodes.clone(), - next_node_idx: Some(0), - }; - let (id, task) = (launcher.txn_task_generator)( - txn_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = - task_runner.add_task(id, "Test Transaction Submission Task".to_string(), task); + let mut task_futs = vec![]; + let meta = launcher.metadata.clone(); - // add completion task - let completion_task_state = CompletionTask { - handles: nodes.clone(), - test_event_stream: test_event_stream.clone(), - }; - let (id, task) = (launcher.completion_task_generator)( - completion_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; + if let TxnTaskDescription::RoundRobinTimeBased(duration) = meta.txn_description { + let txn_task = TxnTask { + handles: nodes.clone(), + next_node_idx: Some(0), + duration, + shutdown_chan: rx.clone(), + }; + task_futs.push(txn_task.run()); + } - task_runner = task_runner.add_task(id, "Test Completion Task".to_string(), task); + // add completion task + if let CompletionTaskDescription::TimeBasedCompletionTaskBuilder(time_based) = + meta.completion_task_description + { + let completion_task = CompletionTask { + tx: tx.clone(), + rx: rx.clone(), + handles: nodes.clone(), + duration: time_based.duration, + }; + task_futs.push(completion_task.run()); + } // add spinning task // map spinning to view @@ -129,48 +164,47 @@ where .append(&mut change); } - let spinning_task_state = crate::spinning_task::SpinningTask { + let spinning_task_state = SpinningTask { handles: nodes.clone(), late_start, latest_view: None, changes, }; - - let (id, task) = (launcher.spinning_task_generator)( - spinning_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = task_runner.add_task(id, "Test Spinning Task".to_string(), task); - + let spinning_task = TestTask::, SpinningTask>::new( + Task::new(tx.clone(), 
rx.clone(), reg.clone(), spinning_task_state), + event_rxs.clone(), + ); + task_futs.push(spinning_task.run()); // add safety task let overall_safety_task_state = OverallSafetyTask { handles: nodes.clone(), ctx: RoundCtx::default(), - test_event_stream: test_event_stream.clone(), + properties: self.launcher.metadata.overall_safety_properties, }; - let (id, task) = (launcher.overall_safety_task_generator)( - overall_safety_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = task_runner.add_task(id, "Test Overall Safety Task".to_string(), task); + + let safety_task = TestTask::, OverallSafetyTask>::new( + Task::new( + tx.clone(), + rx.clone(), + reg.clone(), + overall_safety_task_state, + ), + event_rxs.clone(), + ); + task_futs.push(safety_task.run()); // add view sync task let view_sync_task_state = ViewSyncTask { handles: nodes.clone(), hit_view_sync: HashSet::new(), + description: self.launcher.metadata.view_sync_properties, }; - let (id, task) = (launcher.view_sync_task_generator)( - view_sync_task_state, - registry.clone(), - test_event_stream.clone(), - ) - .await; - task_runner = task_runner.add_task(id, "View Sync Task".to_string(), task); + let view_sync_task = TestTask::, ViewSyncTask>::new( + Task::new(tx.clone(), rx.clone(), reg.clone(), view_sync_task_state), + internal_event_rxs, + ); + task_futs.push(view_sync_task.run()); // wait for networks to be ready for node in &nodes { @@ -184,15 +218,14 @@ where } } - let results = task_runner.launch().await; - + let results = join_all(task_futs).await; let mut error_list = vec![]; - for (name, result) in results { - match result { - hotshot_task::task::HotShotTaskCompleted => { - info!("Task {} shut down successfully", name); + for (result) in results { + match result.unwrap() { + HotShotTaskCompleted::ShutDown => { + info!("Task shut down successfully"); } - hotshot_task::task::HotShotTaskCompleted::Error(e) => error_list.push((name, e)), + 
HotShotTaskCompleted::Error(e) => error_list.push(e), _ => { panic!("Future impl for task abstraction failed! This should never happen"); } diff --git a/crates/testing/src/txn_task.rs b/crates/testing/src/txn_task.rs index a77437d07b..f8ca729859 100644 --- a/crates/testing/src/txn_task.rs +++ b/crates/testing/src/txn_task.rs @@ -1,13 +1,18 @@ -use crate::test_runner::Node; -use async_compatibility_layer::art::async_sleep; -use futures::FutureExt; +use crate::test_runner::{HotShotTaskCompleted, Node}; +use async_broadcast::{Receiver, TryRecvError}; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +#[cfg(async_executor_impl = "async-std")] +use async_std::task::JoinHandle; use hotshot::traits::TestableNodeImplementation; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeType; use rand::thread_rng; use snafu::Snafu; -use std::{sync::Arc, time::Duration}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::JoinHandle; -use super::{test_launcher::TaskGenerator, GlobalTestEvent}; +use std::time::Duration; + +use super::GlobalTestEvent; // the obvious idea here is to pass in a "stream" that completes every `n` seconds // the stream construction can definitely be fancier but that's the baseline idea @@ -23,19 +28,56 @@ pub struct TxnTask> { pub handles: Vec>, /// Optional index of the next node. 
pub next_node_idx: Option, + /// time to wait between txns + pub duration: Duration, + /// + pub shutdown_chan: Receiver, } -impl> TS for TxnTask {} - -/// types for task that deices when things are completed -pub type TxnTaskTypes = HSTWithEventAndMessage< - TxnTaskErr, - GlobalTestEvent, - ChannelStream, - (), - GeneratedStream<()>, - TxnTask, ->; +impl> TxnTask { + pub fn run(mut self) -> JoinHandle { + async_spawn(async move { + loop { + async_sleep(self.duration).await; + match self.shutdown_chan.try_recv() { + Ok(event) => { + return HotShotTaskCompleted::ShutDown; + } + Err(TryRecvError::Empty) => {} + Err(_) => { + return HotShotTaskCompleted::StreamsDied; + } + } + self.submit_tx().await; + } + }) + } + async fn submit_tx(&mut self) { + if let Some(idx) = self.next_node_idx { + // submit to idx handle + // increment state + self.next_node_idx = Some((idx + 1) % self.handles.len()); + match self.handles.get(idx) { + None => { + // should do error + unimplemented!() + } + Some(node) => { + // use rand::seq::IteratorRandom; + // we're assuming all nodes have the same leaf. + // If they don't match, this is probably fine since + // it should be caught by an assertion (and the txn will be rejected anyway) + let leaf = node.handle.get_decided_leaf().await; + let txn = I::leaf_create_random_transaction(&leaf, &mut thread_rng(), 0); + node.handle + .submit_transaction(txn.clone()) + .await + .expect("Could not send transaction"); + } + } + } + } +} /// build the transaction task #[derive(Clone, Debug)] @@ -46,106 +88,3 @@ pub enum TxnTaskDescription { /// TODO DistributionBased, // others? 
} - -impl TxnTaskDescription { - /// build a task - /// # Panics - /// if unable to get task id - #[must_use] - pub fn build>( - self, - ) -> TaskGenerator> - where - TYPES: NodeType, - I: NodeImplementation, - { - Box::new(move |state, mut registry, test_event_stream| { - async move { - // consistency check - match self { - TxnTaskDescription::RoundRobinTimeBased(_) => { - assert!(state.next_node_idx.is_some()); - } - TxnTaskDescription::DistributionBased => assert!(state.next_node_idx.is_none()), - } - // TODO we'll possibly want multiple criterion including: - // - certain number of txns committed - // - anchor of certain depth - // - some other stuff? probably? - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - async move { - match event { - GlobalTestEvent::ShutDown => (Some(HotShotTaskCompleted), state), - } - } - .boxed() - })); - let message_handler = - HandleMessage::>(Arc::new(move |(), mut state| { - async move { - if let Some(idx) = state.next_node_idx { - // submit to idx handle - // increment state - state.next_node_idx = Some((idx + 1) % state.handles.len()); - match state.handles.get(idx) { - None => { - // should do error - unimplemented!() - } - Some(node) => { - // use rand::seq::IteratorRandom; - // we're assuming all nodes have the same leaf. 
- // If they don't match, this is probably fine since - // it should be caught by an assertion (and the txn will be rejected anyway) - let leaf = node.handle.get_decided_leaf().await; - let txn = I::leaf_create_random_transaction( - &leaf, - &mut thread_rng(), - 0, - ); - node.handle - .submit_transaction(txn.clone()) - .await - .expect("Could not send transaction"); - (None, state) - } - } - } else { - // TODO make an issue - // in the case that this is random - // which I haven't implemented yet - unimplemented!() - } - } - .boxed() - })); - let stream_generator = match self { - TxnTaskDescription::RoundRobinTimeBased(duration) => { - GeneratedStream::new(Arc::new(move || { - let fut = async move { - async_sleep(duration).await; - }; - Some(boxed_sync(fut)) - })) - } - TxnTaskDescription::DistributionBased => unimplemented!(), - }; - let builder = TaskBuilder::>::new( - "Test Transaction Submission Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler) - .register_message_handler(message_handler) - .register_message_stream(stream_generator); - let task_id = builder.get_task_id().unwrap(); - (task_id, TxnTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 5b483be824..578e9b58c0 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -1,12 +1,13 @@ -use async_compatibility_layer::channel::UnboundedStream; -use futures::FutureExt; - use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; +use task::task::{TaskState, TestTaskState}; -use crate::{test_launcher::TaskGenerator, test_runner::Node, 
GlobalTestEvent}; +use crate::{ + test_runner::{HotShotTaskCompleted, Node}, + GlobalTestEvent, +}; /// `ViewSync` Task error #[derive(Snafu, Debug, Clone)] @@ -21,19 +22,78 @@ pub struct ViewSyncTask> { pub(crate) handles: Vec>, /// nodes that hit view sync pub(crate) hit_view_sync: HashSet, + /// properties of task + pub(crate) description: ViewSyncTaskDescription, } -impl> TS for ViewSyncTask {} +impl> TaskState for ViewSyncTask { + type Event = GlobalTestEvent; -/// `ViewSync` task types -pub type ViewSyncTaskTypes = HSTWithEventAndMessage< - ViewSyncTaskErr, - GlobalTestEvent, - ChannelStream, - (usize, HotShotEvent), - MergeN>>, - ViewSyncTask, ->; + type Result = HotShotTaskCompleted; + + async fn handle_event( + event: Self::Event, + task: &mut task::task::Task, + ) -> Option { + let state = task.state_mut(); + match event { + GlobalTestEvent::ShutDown => match state.description.clone() { + ViewSyncTaskDescription::Threshold(min, max) => { + let num_hits = state.hit_view_sync.len(); + if min <= num_hits && num_hits <= max { + Some(HotShotTaskCompleted::ShutDown) + } else { + Some(HotShotTaskCompleted::Error(Box::new(ViewSyncTaskErr { + hit_view_sync: state.hit_view_sync.clone(), + }))) + } + } + }, + } + } + + fn should_shutdown(event: &Self::Event) -> bool { + false + } +} + +impl> TestTaskState + for ViewSyncTask +{ + type Message = HotShotEvent; + + type Result = HotShotTaskCompleted; + + type State = Self; + + async fn handle_message( + message: Self::Message, + id: usize, + task: &mut task::task::TestTask, + ) -> Option { + match message { + // all the view sync events + HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) + | HotShotEvent::ViewSyncPreCommitVoteSend(_) + | HotShotEvent::ViewSyncCommitVoteSend(_) + | HotShotEvent::ViewSyncFinalizeVoteSend(_) + | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | 
HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncTrigger(_) => { + task.state_mut().hit_view_sync.insert(id); + } + _ => (), + } + None + } +} /// enum desecribing whether a node should hit view sync #[derive(Clone, Debug, Copy)] @@ -53,99 +113,99 @@ pub enum ViewSyncTaskDescription { Threshold(usize, usize), } -impl ViewSyncTaskDescription { - /// build a view sync task from its description - /// # Panics - /// if there is an violation of the view sync description - #[must_use] - pub fn build>( - self, - ) -> TaskGenerator> { - Box::new(move |mut state, mut registry, test_event_stream| { - async move { - let event_handler = - HandleEvent::>(Arc::new(move |event, state| { - let self_dup = self.clone(); - async move { - match event { - GlobalTestEvent::ShutDown => match self_dup.clone() { - ViewSyncTaskDescription::Threshold(min, max) => { - let num_hits = state.hit_view_sync.len(); - if min <= num_hits && num_hits <= max { - (Some(HotShotTaskCompleted), state) - } else { - ( - Some(HotShotTaskCompleted::Error(Box::new( - ViewSyncTaskErr { - hit_view_sync: state.hit_view_sync.clone(), - }, - ))), - state, - ) - } - } - }, - } - } - .boxed() - })); - - let message_handler = HandleMessage::>(Arc::new( - // NOTE: could short circuit on entering view sync if we're not supposed to - // enter view sync. 
I opted not to do this just to gather more information - // (since we'll fail the test later anyway) - move |(id, msg), mut state| { - async move { - match msg { - // all the view sync events - HotShotEvent::ViewSyncTimeout(_, _, _) - | HotShotEvent::ViewSyncPreCommitVoteRecv(_) - | HotShotEvent::ViewSyncCommitVoteRecv(_) - | HotShotEvent::ViewSyncFinalizeVoteRecv(_) - | HotShotEvent::ViewSyncPreCommitVoteSend(_) - | HotShotEvent::ViewSyncCommitVoteSend(_) - | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) - | HotShotEvent::ViewSyncTrigger(_) => { - state.hit_view_sync.insert(id); - } - _ => (), - } - (None, state) - } - .boxed() - }, - )); - let mut streams = vec![]; - for handle in &mut state.handles { - let stream = handle - .handle - .get_internal_event_stream_known_impl(FilterEvent::default()) - .await - .0; - streams.push(stream); - } +// impl ViewSyncTaskDescription { +// /// build a view sync task from its description +// /// # Panics +// /// if there is an violation of the view sync description +// #[must_use] +// pub fn build>( +// self, +// ) -> TaskGenerator> { +// Box::new(move |mut state, mut registry, test_event_stream| { +// async move { +// let event_handler = +// HandleEvent::>(Arc::new(move |event, state| { +// let self_dup = self.clone(); +// async move { +// match event { +// GlobalTestEvent::ShutDown => match self_dup.clone() { +// ViewSyncTaskDescription::Threshold(min, max) => { +// let num_hits = state.hit_view_sync.len(); +// if min <= num_hits && num_hits <= max { +// (Some(HotShotTaskCompleted), state) +// } else { +// ( +// Some(HotShotTaskCompleted::Error(Box::new( +// ViewSyncTaskErr { +// hit_view_sync: 
state.hit_view_sync.clone(), +// }, +// ))), +// state, +// ) +// } +// } +// }, +// } +// } +// .boxed() +// })); - let builder = TaskBuilder::>::new( - "Test Completion Task".to_string(), - ) - .register_event_stream(test_event_stream, FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler) - .register_message_handler(message_handler) - .register_message_stream(MergeN::new(streams)); - let task_id = builder.get_task_id().unwrap(); - (task_id, ViewSyncTaskTypes::build(builder).launch()) - } - .boxed() - }) - } -} +// let message_handler = HandleMessage::>(Arc::new( +// // NOTE: could short circuit on entering view sync if we're not supposed to +// // enter view sync. I opted not to do this just to gather more information +// // (since we'll fail the test later anyway) +// move |(id, msg), mut state| { +// async move { +// match msg { +// // all the view sync events +// HotShotEvent::ViewSyncTimeout(_, _, _) +// | HotShotEvent::ViewSyncPreCommitVoteRecv(_) +// | HotShotEvent::ViewSyncCommitVoteRecv(_) +// | HotShotEvent::ViewSyncFinalizeVoteRecv(_) +// | HotShotEvent::ViewSyncPreCommitVoteSend(_) +// | HotShotEvent::ViewSyncCommitVoteSend(_) +// | HotShotEvent::ViewSyncFinalizeVoteSend(_) +// | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) +// | HotShotEvent::ViewSyncCommitCertificate2Recv(_) +// | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) +// | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) +// | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) +// | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) +// | HotShotEvent::ViewSyncTrigger(_) => { +// state.hit_view_sync.insert(id); +// } +// _ => (), +// } +// (None, state) +// } +// .boxed() +// }, +// )); +// let mut streams = vec![]; +// for handle in &mut state.handles { +// let stream = handle +// .handle +// .get_internal_event_stream_known_impl(FilterEvent::default()) +// .await +// .0; +// 
streams.push(stream); +// } + +// let builder = TaskBuilder::>::new( +// "Test Completion Task".to_string(), +// ) +// .register_event_stream(test_event_stream, FilterEvent::default()) +// .await +// .register_registry(&mut registry) +// .await +// .register_state(state) +// .register_event_handler(event_handler) +// .register_message_handler(message_handler) +// .register_message_stream(MergeN::new(streams)); +// let task_id = builder.get_task_id().unwrap(); +// (task_id, ViewSyncTaskTypes::build(builder).launch()) +// } +// .boxed() +// }) +// } +// } diff --git a/crates/testing/tests/network_task.rs b/crates/testing/tests/network_task.rs index e4e9bc50d4..df2797a80d 100644 --- a/crates/testing/tests/network_task.rs +++ b/crates/testing/tests/network_task.rs @@ -28,7 +28,7 @@ async fn test_network_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 2. - let (handle, event_stream) = build_system_handle(2).await; + let (handle, tx, rx) = build_system_handle(2).await; let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; From ad49d465bec2664392133cf5bebe0877998833c4 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Mon, 29 Jan 2024 23:15:43 -0500 Subject: [PATCH 04/28] linting --- crates/hotshot/src/lib.rs | 49 ++++++++++++------- crates/hotshot/src/tasks/mod.rs | 25 ++++------ .../src/traits/networking/combined_network.rs | 8 ++- .../src/traits/networking/libp2p_network.rs | 4 +- .../src/traits/networking/memory_network.rs | 4 +- .../traits/networking/web_server_network.rs | 4 +- crates/hotshot/src/types/handle.rs | 12 ++--- crates/testing/Cargo.toml | 2 - 8 files changed, 59 insertions(+), 49 deletions(-) diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index f0673a4aa0..ce0151128f 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -270,7 +270,8 @@ impl> SystemContext { /// "Starts" consensus by sending a `QCFormed` event pub async fn 
start_consensus(&self) { self.inner - .internal_event_stream.0 + .internal_event_stream + .0 .broadcast(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), ))) @@ -590,18 +591,8 @@ impl> SystemContext { storage: self.inner.storage.clone(), }; - add_network_message_task( - registry.clone(), - event_tx.clone(), - quorum_network.clone(), - ) - .await; - add_network_message_task( - registry.clone(), - event_tx.clone(), - da_network.clone(), - ) - .await; + add_network_message_task(registry.clone(), event_tx.clone(), quorum_network.clone()).await; + add_network_message_task(registry.clone(), event_tx.clone(), da_network.clone()).await; add_network_event_task( registry.clone(), @@ -643,10 +634,34 @@ impl> SystemContext { &handle, ) .await; - add_da_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; - add_vid_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; - add_transaction_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; - add_view_sync_task(registry.clone(), event_tx.clone(), event_rx.clone(), &handle).await; + add_da_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), + &handle, + ) + .await; + add_vid_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), + &handle, + ) + .await; + add_transaction_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), + &handle, + ) + .await; + add_view_sync_task( + registry.clone(), + event_tx.clone(), + event_rx.clone(), + &handle, + ) + .await; // async_spawn(async move { // let _ = registry.join_all().await; // info!("Task runner exited!"); diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index d382f8d8f5..21dd6093cf 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -5,17 +5,13 @@ use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use futures::FutureExt; use hotshot_task_impls::{ - 
consensus::{ - CommitmentAndMetadata, ConsensusTaskState, - }, - da::{DATaskState}, + consensus::{CommitmentAndMetadata, ConsensusTaskState}, + da::DATaskState, events::HotShotEvent, - network::{ - NetworkEventTaskState, NetworkMessageTaskState, NetworkTaskKind, - }, - transactions::{TransactionTaskState}, - vid::{VIDTaskState}, - view_sync::{ViewSyncTaskState}, + network::{NetworkEventTaskState, NetworkMessageTaskState, NetworkTaskKind}, + transactions::TransactionTaskState, + vid::VIDTaskState, + view_sync::ViewSyncTaskState, }; use hotshot_types::traits::{election::Membership, stake_table::StakeTableScheme}; use hotshot_types::{ @@ -30,13 +26,13 @@ use hotshot_types::{ BlockPayload, }, }; -use task::task::{Task, TaskRegistry}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, sync::Arc, time::Duration, }; +use task::task::{Task, TaskRegistry}; /// event for global event stream #[derive(Clone, Debug)] @@ -60,7 +56,7 @@ pub async fn add_network_message_task>( let task = Task::new(tx, rx, task_reg.clone(), vid_state); task_reg.run_task(task).await; - } /// add the Data Availability task @@ -238,7 +233,6 @@ pub async fn add_da_task>( let task = Task::new(tx, rx, task_reg.clone(), da_state); task_reg.run_task(task).await; - } /// add the Transaction Handling task @@ -269,7 +263,6 @@ pub async fn add_transaction_task> let task = Task::new(tx, rx, task_reg.clone(), transactions_state); task_reg.run_task(task).await; - } /// add the view sync task /// # Panics diff --git a/crates/hotshot/src/traits/networking/combined_network.rs b/crates/hotshot/src/traits/networking/combined_network.rs index c21e9186f6..9f061e8bd4 100644 --- a/crates/hotshot/src/traits/networking/combined_network.rs +++ b/crates/hotshot/src/traits/networking/combined_network.rs @@ -22,14 +22,18 @@ use async_compatibility_layer::channel::UnboundedSendError; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use 
hotshot_types::{ - data::ViewNumber, message::Message, traits::{ + boxed_sync, + data::ViewNumber, + message::Message, + traits::{ election::Membership, network::{ CommunicationChannel, ConnectedNetwork, ConsensusIntentEvent, TestableChannelImplementation, TransmitType, ViewMessage, }, node_implementation::NodeType, - }, BoxSyncFuture, boxed_sync + }, + BoxSyncFuture, }; use std::{collections::hash_map::DefaultHasher, sync::Arc}; diff --git a/crates/hotshot/src/traits/networking/libp2p_network.rs b/crates/hotshot/src/traits/networking/libp2p_network.rs index b65e30039e..023383e9a7 100644 --- a/crates/hotshot/src/traits/networking/libp2p_network.rs +++ b/crates/hotshot/src/traits/networking/libp2p_network.rs @@ -16,6 +16,7 @@ use hotshot_constants::LOOK_AHEAD; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::network::{NetworkReliability, TestableNetworkingImplementation}; use hotshot_types::{ + boxed_sync, data::ViewNumber, message::{Message, MessageKind}, traits::{ @@ -27,7 +28,8 @@ use hotshot_types::{ node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, - }, BoxSyncFuture, boxed_sync + }, + BoxSyncFuture, }; use hotshot_utils::bincode::bincode_opts; diff --git a/crates/hotshot/src/traits/networking/memory_network.rs b/crates/hotshot/src/traits/networking/memory_network.rs index b0677c3bd7..24ec99c623 100644 --- a/crates/hotshot/src/traits/networking/memory_network.rs +++ b/crates/hotshot/src/traits/networking/memory_network.rs @@ -14,6 +14,7 @@ use bincode::Options; use dashmap::DashMap; use futures::StreamExt; use hotshot_types::{ + boxed_sync, message::{Message, MessageKind}, traits::{ election::Membership, @@ -23,7 +24,8 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, - }, BoxSyncFuture, boxed_sync + }, + BoxSyncFuture, }; use hotshot_utils::bincode::bincode_opts; use rand::Rng; diff --git a/crates/hotshot/src/traits/networking/web_server_network.rs 
b/crates/hotshot/src/traits/networking/web_server_network.rs index 6aafbac7b2..2b42e1b44c 100644 --- a/crates/hotshot/src/traits/networking/web_server_network.rs +++ b/crates/hotshot/src/traits/networking/web_server_network.rs @@ -13,6 +13,7 @@ use async_lock::RwLock; use async_trait::async_trait; use derive_more::{Deref, DerefMut}; use hotshot_types::{ + boxed_sync, message::{Message, MessagePurpose}, traits::{ network::{ @@ -22,7 +23,8 @@ use hotshot_types::{ }, node_implementation::NodeType, signature_key::SignatureKey, - }, BoxSyncFuture, boxed_sync + }, + BoxSyncFuture, }; use hotshot_web_server::{self, config}; use lru::LruCache; diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 9433baf220..88df8cfe71 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -50,9 +50,7 @@ pub struct SystemContextHandle> { impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user - pub async fn get_event_stream( - &self, - ) -> impl Stream> { + pub async fn get_event_stream(&self) -> impl Stream> { self.output_event_stream.1.clone() } @@ -60,9 +58,7 @@ impl + 'static> SystemContextHandl /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper - pub async fn get_event_stream_known_impl( - &self, - ) -> Receiver> { + pub async fn get_event_stream_known_impl(&self) -> Receiver> { self.output_event_stream.1.clone() } @@ -71,9 +67,7 @@ impl + 'static> SystemContextHandl /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper /// NOTE: this is only used for sanity checks in our tests - pub async fn get_internal_event_stream_known_impl( - &self, - ) -> Receiver> { + pub async fn get_internal_event_stream_known_impl(&self) -> Receiver> { self.internal_event_stream.1.clone() } diff --git a/crates/testing/Cargo.toml b/crates/testing/Cargo.toml index 0500e3b4e9..0ae4418cf9 100644 --- 
a/crates/testing/Cargo.toml +++ b/crates/testing/Cargo.toml @@ -40,5 +40,3 @@ tokio = { workspace = true } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { workspace = true } -[lints] -workspace = true From 4def2085b31e672e680d962e205c837f3155c89e Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 30 Jan 2024 09:55:50 -0500 Subject: [PATCH 05/28] Test can run, but they fail --- Cargo.lock | 2 +- crates/hotshot/src/lib.rs | 20 +++++++++---------- crates/hotshot/src/tasks/mod.rs | 10 ++++------ crates/task-impls/src/harness.rs | 3 +-- crates/testing/tests/consensus_task.rs | 20 +++++++------------ crates/testing/tests/da_task.rs | 18 +++++++++++++---- crates/testing/tests/network_task.rs | 2 +- crates/testing/tests/vid_task.rs | 17 ++++++++++++---- crates/testing/tests/view_sync_task.rs | 27 +++++++++++++++++++++----- 9 files changed, 73 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 264233ae0e..3d49cc0c18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6602,7 +6602,7 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "task" version = "0.1.0" -source = "git+https://github.com/EspressoSystems/HotShotTasks.git#f63096479ca0d7fab32372ce6cf2e1acbd05e47d" +source = "git+https://github.com/EspressoSystems/HotShotTasks.git#377aab77694bf01c92488f8bff1af70c6a857955" dependencies = [ "async-broadcast", "async-std", diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index ce0151128f..140b4c88ea 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -66,7 +66,7 @@ use std::{ sync::Arc, time::Duration, }; -use task::task::TaskRegistry; +use task::task::{Task, TaskRegistry}; use tasks::add_vid_task; use tracing::{debug, error, info, instrument, trace, warn}; @@ -130,7 +130,7 @@ pub struct SystemContextInner> { private_key: ::PrivateKey, /// Configuration items for this hotshot instance - config: HotShotConfig, + pub config: HotShotConfig, 
/// This `HotShot` instance's storage backend storage: I::Storage, @@ -159,13 +159,13 @@ pub struct SystemContextInner> { // global_registry: GlobalRegistry, /// Access to the output event stream. - output_event_stream: (Sender>, Receiver>), + pub output_event_stream: (Sender>, Receiver>), /// access to the internal event stream, in case we need to, say, shut something down internal_event_stream: (Sender>, Receiver>), /// uid for instrumentation - id: u64, + pub id: u64, } /// Thread safe, shared view of a `HotShot` @@ -626,14 +626,14 @@ impl> SystemContext { vid_membership, ) .await; - add_consensus_task( - registry.clone(), + let consensus_state = add_consensus_task(output_event_stream.0.clone(), &handle).await; + let task = Task::new( event_tx.clone(), event_rx.clone(), - output_event_stream.0.clone(), - &handle, - ) - .await; + registry.clone(), + consensus_state, + ); + registry.run_task(task).await; add_da_task( registry.clone(), event_tx.clone(), diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index 21dd6093cf..b7f8c440d3 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -119,12 +119,9 @@ pub async fn add_network_event_task>( - task_reg: Arc, - tx: Sender>, - rx: Receiver>, output_stream: Sender>, handle: &SystemContextHandle, -) { +) -> ConsensusTaskState> { let consensus = handle.hotshot.get_consensus(); let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), @@ -172,8 +169,9 @@ pub async fn add_consensus_task>( .quorum_network .inject_consensus_info(ConsensusIntentEvent::PollForLatestQuorumProposal) .await; - let task = Task::new(tx, rx, task_reg.clone(), consensus_state); - task_reg.run_task(task).await; + consensus_state + // let task = Task::new(tx, rx, task_reg.clone(), consensus_state); + // task_reg.run_task(task).await; } /// add the VID task diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 
851ae968f5..58be8bbd37 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -41,7 +41,7 @@ impl TaskState for TestHarnessState { /// # Panics /// Panics if any state the test expects is not set. Panicing causes a test failure #[allow(clippy::implicit_hasher)] -pub async fn run_harness>>( +pub async fn run_harness>>( input: Vec>, expected_output: HashMap, usize>, state: S, @@ -49,7 +49,6 @@ pub async fn run_harness>>( ) where TYPES: NodeType, S: Send + 'static, - Fut: Future>, { let registry = Arc::new(TaskRegistry::default()); // set up two broadcast channels so the test sends to the task and the task back to the test diff --git a/crates/testing/tests/consensus_task.rs b/crates/testing/tests/consensus_task.rs index 45e4463fb8..b844584a43 100644 --- a/crates/testing/tests/consensus_task.rs +++ b/crates/testing/tests/consensus_task.rs @@ -127,11 +127,9 @@ async fn test_consensus_task() { output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), &handle) - }; + let consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; - run_harness(input, output, None, build_fn, false).await; + run_harness(input, output, consensus_state, false).await; } #[cfg(test)] @@ -177,11 +175,9 @@ async fn test_consensus_vote() { input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), &handle) - }; + let consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; - run_harness(input, output, None, build_fn, false).await; + run_harness(input, output, consensus_state, false).await; } #[cfg(test)] @@ -216,7 +212,7 @@ async fn test_consensus_with_vid() { async_compatibility_layer::logging::setup_logging(); 
async_compatibility_layer::logging::setup_backtrace(); - let (handle, _event_stream) = build_system_handle(2).await; + let (handle, _tx, _rx) = build_system_handle(2).await; // We assign node's key pair rather than read from config file since it's a test // In view 2, node 2 is the leader. let (private_key_view2, public_key_view2) = key_pair_for_id(2); @@ -307,9 +303,7 @@ async fn test_consensus_with_vid() { input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| { - add_consensus_task(task_runner, event_stream, ChannelStream::new(), &handle) - }; + let consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; - run_harness(input, output, None, build_fn, false).await; + run_harness(input, output, consensus_state, false).await; } diff --git a/crates/testing/tests/da_task.rs b/crates/testing/tests/da_task.rs index e29fe0464a..71dcde0c38 100644 --- a/crates/testing/tests/da_task.rs +++ b/crates/testing/tests/da_task.rs @@ -1,5 +1,5 @@ use hotshot::{types::SignatureKey, HotShotConsensusApi}; -use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::{da::DATaskState, events::HotShotEvent}; use hotshot_testing::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, @@ -103,7 +103,17 @@ async fn test_da_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| add_da_task(task_runner, event_stream, &handle); - - run_harness(input, output, None, build_fn, false).await; + let da_state = DATaskState { + api: api.clone(), + consensus: handle.hotshot.get_consensus(), + da_membership: api.inner.memberships.da_membership.clone().into(), + da_network: api.inner.networks.da_network.clone().into(), + quorum_membership: api.inner.memberships.quorum_membership.clone().into(), + cur_view: ViewNumber::new(0), + vote_collector: None.into(), + 
public_key: api.public_key().clone(), + private_key: api.private_key().clone(), + id: handle.hotshot.inner.id, + }; + run_harness(input, output, da_state, false).await; } diff --git a/crates/testing/tests/network_task.rs b/crates/testing/tests/network_task.rs index df2797a80d..2552512490 100644 --- a/crates/testing/tests/network_task.rs +++ b/crates/testing/tests/network_task.rs @@ -143,7 +143,7 @@ async fn test_network_task() { output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); output.insert(HotShotEvent::DAProposalRecv(da_proposal, pub_key), 1); - let build_fn = |task_runner, _| async { task_runner }; + // let build_fn = |task_runner, _| async { task_runner }; // There may be extra outputs not in the expected set, e.g., a second `VidDisperseRecv` if the // VID task runs fast. All event types we want to test should be seen by this point, so waiting // for more events will not help us test more cases for now. Therefore, we set diff --git a/crates/testing/tests/vid_task.rs b/crates/testing/tests/vid_task.rs index ef3b6918c3..a9d5204ead 100644 --- a/crates/testing/tests/vid_task.rs +++ b/crates/testing/tests/vid_task.rs @@ -1,5 +1,5 @@ use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; -use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::{ block_types::TestTransaction, node_types::{MemoryImpl, TestTypes}, @@ -112,7 +112,16 @@ async fn test_vid_task() { output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::Shutdown, 1); - let build_fn = |task_runner, event_stream| add_vid_task(task_runner, event_stream, &handle); - - run_harness(input, output, None, build_fn, false).await; + let vid_state = VIDTaskState { + api: api.clone(), + consensus: handle.hotshot.get_consensus(), + cur_view: ViewNumber::new(0), + vote_collector: None, + network: api.inner.networks.quorum_network.clone().into(), + membership: 
api.inner.memberships.vid_membership.clone().into(), + public_key: api.public_key().clone(), + private_key: api.private_key().clone(), + id: handle.hotshot.inner.id, + }; + run_harness(input, output, vid_state, false).await; } diff --git a/crates/testing/tests/view_sync_task.rs b/crates/testing/tests/view_sync_task.rs index 7ec279e60b..9f09e2a450 100644 --- a/crates/testing/tests/view_sync_task.rs +++ b/crates/testing/tests/view_sync_task.rs @@ -13,9 +13,12 @@ use std::collections::HashMap; async fn test_view_sync_task() { use hotshot::tasks::add_view_sync_task; use hotshot_task_impls::harness::run_harness; + use hotshot_task_impls::view_sync::ViewSyncTaskState; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_vote::ViewSyncPreCommitData; - + use std::time::Duration; + use hotshot_types::traits::consensus_api::ConsensusApi; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); @@ -56,8 +59,22 @@ async fn test_view_sync_task() { output.insert(HotShotEvent::Shutdown, 1); - let build_fn = - |task_runner, event_stream| add_view_sync_task(task_runner, event_stream, &handle); - - run_harness(input, output, None, build_fn, false).await; + let view_sync_state = ViewSyncTaskState { + current_view: ViewNumber::new(0), + next_view: ViewNumber::new(0), + network: api.inner.networks.quorum_network.clone().into(), + membership: api.inner.memberships.view_sync_membership.clone().into(), + public_key: api.public_key().clone(), + private_key: api.private_key().clone(), + api, + num_timeouts_tracked: 0, + replica_task_map: HashMap::default().into(), + pre_commit_relay_map: HashMap::default().into(), + commit_relay_map: HashMap::default().into(), + finalize_relay_map: HashMap::default().into(), + view_sync_timeout: Duration::new(10, 0), + id: handle.hotshot.inner.id, + last_garbage_collected_view: ViewNumber::new(0), + }; + run_harness(input, output, view_sync_state, false).await; } From 
76e15901ceebebf1b32092624342bf0f4bff5633 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 30 Jan 2024 16:49:54 -0500 Subject: [PATCH 06/28] some fixes --- Cargo.lock | 2 +- crates/hotshot/src/lib.rs | 12 ++- crates/hotshot/src/tasks/mod.rs | 5 +- .../src/traits/networking/memory_network.rs | 4 +- crates/task-impls/src/consensus.rs | 16 ++-- crates/task-impls/src/da.rs | 20 +---- crates/task-impls/src/network.rs | 79 ++++++++++++++----- crates/task-impls/src/transactions.rs | 6 +- crates/task-impls/src/vid.rs | 7 +- crates/task-impls/src/view_sync.rs | 37 +++++---- crates/testing/src/test_launcher.rs | 19 ----- crates/testing/src/test_runner.rs | 78 +++++++++--------- crates/testing/tests/consensus_task.rs | 9 ++- crates/testing/tests/view_sync_task.rs | 4 +- 14 files changed, 163 insertions(+), 135 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d49cc0c18..11fcf771ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6602,7 +6602,7 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "task" version = "0.1.0" -source = "git+https://github.com/EspressoSystems/HotShotTasks.git#377aab77694bf01c92488f8bff1af70c6a857955" +source = "git+https://github.com/EspressoSystems/HotShotTasks.git#a237e633587c7fe49fe1a45d5da9551cf5aab0a7" dependencies = [ "async-broadcast", "async-std", diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 140b4c88ea..3c8728ae8f 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -33,7 +33,8 @@ use commit::Committable; use custom_debug::Debug; use futures::join; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; -use hotshot_task_impls::{events::HotShotEvent, network::NetworkTaskKind}; +use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::network; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::node_implementation::ChannelMaps; @@ -260,8 +261,8 @@ impl> SystemContext { memberships: Arc::new(memberships), event_sender: 
RwLock::default(), _metrics: consensus_metrics.clone(), - internal_event_stream: broadcast(1024), - output_event_stream: broadcast(1024), + internal_event_stream: broadcast(100024), + output_event_stream: broadcast(100024), }); Ok(Self { inner }) @@ -269,6 +270,7 @@ impl> SystemContext { /// "Starts" consensus by sending a `QCFormed` event pub async fn start_consensus(&self) { + debug!("Starting Consensus"); self.inner .internal_event_stream .0 @@ -600,6 +602,7 @@ impl> SystemContext { event_rx.clone(), quorum_network.clone(), quorum_membership, + network::quorum_filter, ) .await; add_network_event_task( @@ -608,6 +611,7 @@ impl> SystemContext { event_rx.clone(), da_network.clone(), da_membership, + network::committee_filter, ) .await; add_network_event_task( @@ -616,6 +620,7 @@ impl> SystemContext { event_rx.clone(), quorum_network.clone(), view_sync_membership, + network::view_sync_filter, ) .await; add_network_event_task( @@ -624,6 +629,7 @@ impl> SystemContext { event_rx.clone(), quorum_network.clone(), vid_membership, + network::vid_filter, ) .await; let consensus_state = add_consensus_task(output_event_stream.0.clone(), &handle).await; diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index b7f8c440d3..da322c28d0 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -8,7 +8,7 @@ use hotshot_task_impls::{ consensus::{CommitmentAndMetadata, ConsensusTaskState}, da::DATaskState, events::HotShotEvent, - network::{NetworkEventTaskState, NetworkMessageTaskState, NetworkTaskKind}, + network::{NetworkEventTaskState, NetworkMessageTaskState}, transactions::TransactionTaskState, vid::VIDTaskState, view_sync::ViewSyncTaskState, @@ -105,11 +105,13 @@ pub async fn add_network_event_task>, channel: NET, membership: TYPES::Membership, + filter: fn(&HotShotEvent) -> bool, ) { let network_state: NetworkEventTaskState<_, _> = NetworkEventTaskState { channel, view: TYPES::Time::genesis(), membership, + filter, }; let 
task = Task::new(tx, rx, task_reg.clone(), network_state); task_reg.run_task(task).await; @@ -126,6 +128,7 @@ pub async fn add_consensus_task>( let c_api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; + let (payload, metadata) = ::genesis(); // Impossible for `unwrap` to fail on the genesis payload. let payload_commitment = vid_commitment( diff --git a/crates/hotshot/src/traits/networking/memory_network.rs b/crates/hotshot/src/traits/networking/memory_network.rs index 24ec99c623..ae9f1c53dc 100644 --- a/crates/hotshot/src/traits/networking/memory_network.rs +++ b/crates/hotshot/src/traits/networking/memory_network.rs @@ -301,7 +301,7 @@ impl ConnectedNetwork for Memory message: M, recipients: BTreeSet, ) -> Result<(), NetworkError> { - debug!(?message, "Broadcasting message"); + trace!(?message, "Broadcasting message"); // Bincode the message let vec = bincode_opts() .serialize(&message) @@ -349,7 +349,7 @@ impl ConnectedNetwork for Memory #[instrument(name = "MemoryNetwork::direct_message")] async fn direct_message(&self, message: M, recipient: K) -> Result<(), NetworkError> { - debug!(?message, ?recipient, "Sending direct message"); + // debug!(?message, ?recipient, "Sending direct message"); // Bincode the message let vec = bincode_opts() .serialize(&message) diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index b383f0a7ef..7818c46f89 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -1057,7 +1057,8 @@ impl, A: ConsensusApi + view_number: old_view_number, }, }) - .await; + .await + .unwrap(); } HotShotEvent::Timeout(view) => { // NOTE: We may optionally have the timeout task listen for view change events @@ -1094,7 +1095,8 @@ impl, A: ConsensusApi + event_stream .broadcast(HotShotEvent::TimeoutVoteSend(vote)) - .await; + .await + .unwrap(); debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view @@ 
-1104,7 +1106,8 @@ impl, A: ConsensusApi + view_number: view, event: EventType::ReplicaViewTimeout { view_number: view }, }) - .await; + .await + .unwrap(); let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } @@ -1257,7 +1260,8 @@ impl, A: ConsensusApi + message.clone(), self.public_key.clone(), )) - .await; + .await + .unwrap(); self.payload_commitment_and_metadata = None; return true; @@ -1272,8 +1276,8 @@ impl, A: ConsensusApi + { type Event = HotShotEvent; type Result = (); - fn filter(event: &HotShotEvent) -> bool { - matches!( + fn filter(&self, event: &HotShotEvent) -> bool { + !matches!( event, HotShotEvent::QuorumProposalRecv(_, _) | HotShotEvent::QuorumVoteRecv(_) diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index cb8f8b99ec..f1a7d8c31f 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -305,7 +305,8 @@ impl, A: ConsensusApi + message.clone(), self.public_key.clone(), )) - .await; + .await + .unwrap(); } HotShotEvent::Timeout(view) => { @@ -324,19 +325,6 @@ impl, A: ConsensusApi + } None } - - /// Filter the DA event. 
- pub fn filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::DAProposalRecv(_, _) - | HotShotEvent::DAVoteRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::TransactionsSequenced(_, _, _) - | HotShotEvent::Timeout(_) - | HotShotEvent::ViewChange(_) - ) - } } /// task state implementation for DA Task @@ -347,8 +335,8 @@ impl, A: ConsensusApi + type Result = HotShotTaskCompleted; - fn filter(event: &HotShotEvent) -> bool { - matches!( + fn filter(&self, event: &HotShotEvent) -> bool { + !matches!( event, HotShotEvent::DAProposalRecv(_, _) | HotShotEvent::DAVoteRecv(_) diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index e52786ca33..9d708d21a6 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -18,22 +18,56 @@ use task::task::{Task, TaskState}; use tracing::error; use tracing::instrument; -/// the type of network task -#[derive(Clone, Copy, Debug)] -pub enum NetworkTaskKind { - /// quorum: the normal "everyone" committee - Quorum, - /// da committee - Committee, - /// view sync - ViewSync, - /// vid - VID, +/// quorum filter +pub fn quorum_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::QuorumProposalSend(_, _) + | HotShotEvent::QuorumVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::DACSend(_, _) + | HotShotEvent::ViewChange(_) + | HotShotEvent::TimeoutVoteSend(_) + ) } +/// committee filter +pub fn committee_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::DAProposalSend(_, _) + | HotShotEvent::DAVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::ViewChange(_) + ) +} + +/// vid filter +pub fn vid_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + HotShotEvent::Shutdown | HotShotEvent::VidDisperseSend(_, _) | HotShotEvent::ViewChange(_) + ) +} + +/// view sync filter +pub fn view_sync_filter(event: &HotShotEvent) -> bool { + !matches!( + event, + 
HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) + | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncPreCommitVoteSend(_) + | HotShotEvent::ViewSyncCommitVoteSend(_) + | HotShotEvent::ViewSyncFinalizeVoteSend(_) + | HotShotEvent::Shutdown + | HotShotEvent::ViewChange(_) + ) +} /// the network message task state #[derive(Clone)] pub struct NetworkMessageTaskState { + /// Sender to send internal events this task generates to other tasks pub event_stream: Sender>, } @@ -49,6 +83,10 @@ impl TaskState for NetworkMessageTaskState { None } + fn filter(&self, _event: &Self::Event) -> bool { + return false; + } + fn should_shutdown(_event: &Self::Event) -> bool { false } @@ -153,6 +191,8 @@ pub struct NetworkEventTaskState) -> bool, } impl> TaskState @@ -171,16 +211,15 @@ impl> TaskState } fn should_shutdown(event: &Self::Event) -> bool { - matches!(event, HotShotEvent::Shutdown) - } - - fn filter(_event: &Self::Event) -> bool { - // default doesn't filter + if matches!(event, HotShotEvent::Shutdown) { + error!("Network Task received Shutdown event"); + return true; + } false } - fn shutdown(&mut self) -> impl std::future::Future + Send { - async {} + fn filter(&self, event: &Self::Event) -> bool { + (self.filter)(event) } } @@ -317,8 +356,8 @@ impl> error!("Networking task shutting down"); return Some(HotShotTaskCompleted); } - event => { - error!("Receieved unexpected message in network task {:?}", event); + _event => { + // error!("Receieved unexpected message in network task {:?}", event); return None; } }; diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 207b026af8..426b3627ca 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -176,6 +176,7 @@ impl, A: ConsensusApi + return None; } HotShotEvent::ViewChange(view) => { + debug!("view change in transactions to view {:?}", view); if 
*self.cur_view >= *view { return None; } @@ -189,6 +190,7 @@ impl, A: ConsensusApi + // return if we aren't the next leader or we skipped last view and aren't the current leader. if !make_block && self.membership.get_leader(self.cur_view + 1) != self.public_key { + debug!("Not next leader for view {:?}", self.cur_view); return None; } @@ -338,8 +340,8 @@ impl, A: ConsensusApi + type Result = HotShotTaskCompleted; - fn filter(event: &HotShotEvent) -> bool { - matches!( + fn filter(&self, event: &HotShotEvent) -> bool { + !matches!( event, HotShotEvent::TransactionsRecv(_) | HotShotEvent::LeafDecided(_) diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index d3056772da..9c1460b14f 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -130,7 +130,8 @@ impl, A: ConsensusApi + }, self.public_key.clone(), )) - .await; + .await + .unwrap(); } HotShotEvent::ViewChange(view) => { @@ -187,8 +188,8 @@ impl, A: ConsensusApi + task.state_mut().handle(event, sender).await; None } - fn filter(event: &Self::Event) -> bool { - matches!( + fn filter(&self, event: &Self::Event) -> bool { + !matches!( event, HotShotEvent::Shutdown | HotShotEvent::TransactionsSequenced(_, _, _) diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 6d8cb1e3a4..2e83fc5a9b 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -124,6 +124,22 @@ impl< None } + fn filter(&self, event: &Self::Event) -> bool { + !matches!( + event, + HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncCommitCertificate2Recv(_) + | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) + | HotShotEvent::ViewSyncPreCommitVoteRecv(_) + | HotShotEvent::ViewSyncCommitVoteRecv(_) + | HotShotEvent::ViewSyncFinalizeVoteRecv(_) + | HotShotEvent::Shutdown + | HotShotEvent::Timeout(_) + | HotShotEvent::ViewSyncTimeout(_, _, _) + | HotShotEvent::ViewChange(_) + ) + } + fn should_shutdown(event: 
&Self::Event) -> bool { matches!(event, HotShotEvent::Shutdown) } @@ -176,8 +192,8 @@ impl, A: ConsensusApi + task.state_mut().handle(event, sender).await; None } - fn filter(event: &Self::Event) -> bool { - matches!( + fn filter(&self, event: &Self::Event) -> bool { + !matches!( event, HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) | HotShotEvent::ViewSyncCommitCertificate2Recv(_) @@ -539,23 +555,6 @@ impl< _ => {} } } - - /// Filter view sync related events. - pub fn filter(event: &HotShotEvent) -> bool { - matches!( - event, - HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitVoteRecv(_) - | HotShotEvent::ViewSyncCommitVoteRecv(_) - | HotShotEvent::ViewSyncFinalizeVoteRecv(_) - | HotShotEvent::Shutdown - | HotShotEvent::Timeout(_) - | HotShotEvent::ViewSyncTimeout(_, _, _) - | HotShotEvent::ViewChange(_) - ) - } } impl, A: ConsensusApi + 'static> diff --git a/crates/testing/src/test_launcher.rs b/crates/testing/src/test_launcher.rs index 5f1ad08122..65d9a071ec 100644 --- a/crates/testing/src/test_launcher.rs +++ b/crates/testing/src/test_launcher.rs @@ -27,25 +27,6 @@ pub type CommitteeNetworkGenerator = Box) -> T + 'static>; /// Wrapper Type for view sync function that takes a `ConnectedNetwork` and returns a `CommunicationChannel` pub type ViewSyncNetworkGenerator = Box) -> T + 'static>; -/// Wrapper type for a task generator. -// pub type TaskGenerator = Box< -// dyn FnOnce( -// TASK, -// GlobalRegistry, -// ChannelStream, -// ) -// -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, -// >; - -// /// Wrapper type for a hook. 
-// pub type Hook = Box< -// dyn FnOnce( -// GlobalRegistry, -// ChannelStream, -// ) -// -> BoxFuture<'static, (HotShotTaskId, BoxFuture<'static, HotShotTaskCompleted>)>, -// >; - /// generators for resources used by each node pub struct ResourceGenerators> { /// generate channels diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 596b2c71a3..a8111a97ac 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -86,20 +86,7 @@ where /// if the test fails #[allow(clippy::too_many_lines)] pub async fn run_test(mut self) { - let (tx, rx) = broadcast(1024); - - let mut event_rxs = vec![]; - let mut internal_event_rxs = vec![]; - - for node in &self.nodes { - let r = node.handle.get_event_stream_known_impl().await; - event_rxs.push(r); - } - for node in &self.nodes { - let r = node.handle.get_internal_event_stream_known_impl().await; - internal_event_rxs.push(r); - } - + let (tx, rx) = broadcast(100024); let spinning_changes = self .launcher .metadata @@ -116,10 +103,21 @@ where } } - let reg = Arc::new(TaskRegistry::default()); - self.add_nodes(self.launcher.metadata.total_nodes, &late_start_nodes) .await; + let mut event_rxs = vec![]; + let mut internal_event_rxs = vec![]; + + for node in &self.nodes { + let r = node.handle.get_event_stream_known_impl().await; + event_rxs.push(r); + } + for node in &self.nodes { + let r = node.handle.get_internal_event_stream_known_impl().await; + internal_event_rxs.push(r); + } + + let reg = Arc::new(TaskRegistry::default()); let TestRunner { ref launcher, @@ -131,28 +129,28 @@ where let mut task_futs = vec![]; let meta = launcher.metadata.clone(); - if let TxnTaskDescription::RoundRobinTimeBased(duration) = meta.txn_description { - let txn_task = TxnTask { - handles: nodes.clone(), - next_node_idx: Some(0), - duration, - shutdown_chan: rx.clone(), + let txn_task = + if let TxnTaskDescription::RoundRobinTimeBased(duration) = meta.txn_description { + let txn_task = 
TxnTask { + handles: nodes.clone(), + next_node_idx: Some(0), + duration, + shutdown_chan: rx.clone(), + }; + Some(txn_task) + } else { + None }; - task_futs.push(txn_task.run()); - } // add completion task - if let CompletionTaskDescription::TimeBasedCompletionTaskBuilder(time_based) = - meta.completion_task_description - { - let completion_task = CompletionTask { - tx: tx.clone(), - rx: rx.clone(), - handles: nodes.clone(), - duration: time_based.duration, - }; - task_futs.push(completion_task.run()); - } + let CompletionTaskDescription::TimeBasedCompletionTaskBuilder(time_based) = + meta.completion_task_description; + let completion_task = CompletionTask { + tx: tx.clone(), + rx: rx.clone(), + handles: nodes.clone(), + duration: time_based.duration, + }; // add spinning task // map spinning to view @@ -191,7 +189,6 @@ where ), event_rxs.clone(), ); - task_futs.push(safety_task.run()); // add view sync task let view_sync_task_state = ViewSyncTask { @@ -204,7 +201,6 @@ where Task::new(tx.clone(), rx.clone(), reg.clone(), view_sync_task_state), internal_event_rxs, ); - task_futs.push(view_sync_task.run()); // wait for networks to be ready for node in &nodes { @@ -217,10 +213,16 @@ where node.handle.hotshot.start_consensus().await; } } + task_futs.push(safety_task.run()); + task_futs.push(view_sync_task.run()); + // if let Some(txn) = txn_task { + // task_futs.push(txn.run()); + // } + task_futs.push(completion_task.run()); let results = join_all(task_futs).await; let mut error_list = vec![]; - for (result) in results { + for result in results { match result.unwrap() { HotShotTaskCompleted::ShutDown => { info!("Task shut down successfully"); diff --git a/crates/testing/tests/consensus_task.rs b/crates/testing/tests/consensus_task.rs index b844584a43..aa01d86b09 100644 --- a/crates/testing/tests/consensus_task.rs +++ b/crates/testing/tests/consensus_task.rs @@ -127,7 +127,8 @@ async fn test_consensus_task() { output.insert(HotShotEvent::Shutdown, 1); - let 
consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + let consensus_state = + add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; run_harness(input, output, consensus_state, false).await; } @@ -175,7 +176,8 @@ async fn test_consensus_vote() { input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); - let consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + let consensus_state = + add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; run_harness(input, output, consensus_state, false).await; } @@ -303,7 +305,8 @@ async fn test_consensus_with_vid() { input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); - let consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + let consensus_state = + add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; run_harness(input, output, consensus_state, false).await; } diff --git a/crates/testing/tests/view_sync_task.rs b/crates/testing/tests/view_sync_task.rs index 9f09e2a450..88359a514f 100644 --- a/crates/testing/tests/view_sync_task.rs +++ b/crates/testing/tests/view_sync_task.rs @@ -16,9 +16,9 @@ async fn test_view_sync_task() { use hotshot_task_impls::view_sync::ViewSyncTaskState; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_vote::ViewSyncPreCommitData; - use std::time::Duration; use hotshot_types::traits::consensus_api::ConsensusApi; - + use std::time::Duration; + async_compatibility_layer::logging::setup_logging(); async_compatibility_layer::logging::setup_backtrace(); From da50e0ad83ab0a8d0c41e5b4a874a4782ed6c6cb Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 30 Jan 2024 16:51:10 -0500 Subject: [PATCH 07/28] cargo fix --- crates/hotshot/src/lib.rs | 2 +- crates/hotshot/src/tasks/mod.rs 
| 2 +- crates/hotshot/src/types/handle.rs | 2 +- crates/task-impls/src/harness.rs | 2 +- crates/testing/src/overall_safety_task.rs | 12 ++++++------ crates/testing/src/spinning_task.rs | 6 +++--- crates/testing/src/test_builder.rs | 10 +++++----- crates/testing/src/test_launcher.rs | 9 ++++----- crates/testing/src/test_runner.rs | 4 ++-- crates/testing/src/txn_task.rs | 2 +- crates/testing/src/view_sync_task.rs | 2 +- crates/testing/tests/da_task.rs | 2 +- crates/testing/tests/network_task.rs | 4 ++-- crates/testing/tests/vid_task.rs | 2 +- crates/testing/tests/view_sync_task.rs | 2 +- 15 files changed, 31 insertions(+), 32 deletions(-) diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 3c8728ae8f..53088908f0 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -69,7 +69,7 @@ use std::{ }; use task::task::{Task, TaskRegistry}; use tasks::add_vid_task; -use tracing::{debug, error, info, instrument, trace, warn}; +use tracing::{debug, error, instrument, trace, warn}; // -- Rexports // External diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index da322c28d0..764e4a68ba 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -3,7 +3,7 @@ use crate::{types::SystemContextHandle, HotShotConsensusApi}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; -use futures::FutureExt; + use hotshot_task_impls::{ consensus::{CommitmentAndMetadata, ConsensusTaskState}, da::DATaskState, diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 88df8cfe71..6b2b42c882 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -2,7 +2,7 @@ use crate::{traits::NodeImplementation, types::Event, SystemContext}; use async_broadcast::{Receiver, Sender}; -use async_compatibility_layer::channel::UnboundedStream; + use async_lock::RwLock; use commit::Committable; use 
futures::Stream; diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 58be8bbd37..906065fe2b 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -2,7 +2,7 @@ use crate::events::{HotShotEvent, HotShotTaskCompleted}; use async_broadcast::broadcast; use hotshot_types::traits::node_implementation::NodeType; -use std::{collections::HashMap, future::Future, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use task::task::{Task, TaskRegistry, TaskState}; /// The state for the test harness task. Keeps track of which events and how many we expect to get diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 8560b10dd0..f98cf776bc 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -88,13 +88,13 @@ impl> TaskState GlobalTestEvent::ShutDown => { let state = task.state_mut(); let OverallSafetyPropertiesDescription { - check_leaf, - check_state, - check_block, + check_leaf: _, + check_state: _, + check_block: _, num_failed_views: num_failed_rounds_total, num_successful_views, - threshold_calculator, - transaction_threshold, + threshold_calculator: _, + transaction_threshold: _, }: OverallSafetyPropertiesDescription = state.properties.clone(); let num_incomplete_views = state.ctx.round_results.len() @@ -123,7 +123,7 @@ impl> TaskState } } - fn should_shutdown(event: &Self::Event) -> bool { + fn should_shutdown(_event: &Self::Event) -> bool { false } } diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index 8797db8d46..0de67a7327 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use hotshot::{traits::TestableNodeImplementation, SystemContext}; -use hotshot_types::event::EventType; + use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{event::Event, 
traits::node_implementation::NodeType}; use snafu::Snafu; @@ -46,7 +46,7 @@ impl> TaskState for Spinni None } - fn should_shutdown(event: &Self::Event) -> bool { + fn should_shutdown(_event: &Self::Event) -> bool { false } } @@ -62,7 +62,7 @@ impl> TestTaskState async fn handle_message( message: Self::Message, - id: usize, + _id: usize, task: &mut task::task::TestTask, ) -> Option { let Event { diff --git a/crates/testing/src/test_builder.rs b/crates/testing/src/test_builder.rs index 4285189ed5..9bfe6fdb18 100644 --- a/crates/testing/src/test_builder.rs +++ b/crates/testing/src/test_builder.rs @@ -215,12 +215,12 @@ impl TestMetadata { min_transactions, timing_data, da_committee_size, - txn_description, - completion_task_description, - overall_safety_properties, - spinning_properties, + + + + unreliable_network, - view_sync_properties, + .. } = self.clone(); diff --git a/crates/testing/src/test_launcher.rs b/crates/testing/src/test_launcher.rs index 65d9a071ec..19527a39c7 100644 --- a/crates/testing/src/test_launcher.rs +++ b/crates/testing/src/test_launcher.rs @@ -1,15 +1,14 @@ use std::{collections::HashMap, sync::Arc}; -use futures::future::BoxFuture; + use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; use hotshot_types::{traits::node_implementation::NodeType, HotShotConfig}; -use task::task::TaskRegistry; -use crate::{spinning_task::SpinningTask, view_sync_task::ViewSyncTask}; + + use super::{ - completion_task::CompletionTask, overall_safety_task::OverallSafetyTask, - test_builder::TestMetadata, test_runner::TestRunner, txn_task::TxnTask, GlobalTestEvent, + test_builder::TestMetadata, test_runner::TestRunner, }; /// convience type alias for the networks available diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index a8111a97ac..823a752f21 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -12,7 +12,7 @@ use crate::{ view_sync_task::ViewSyncTask, }; use 
async_broadcast::broadcast; -use futures::{future::join_all, stream::FuturesUnordered}; +use futures::{future::join_all}; use hotshot::{types::SystemContextHandle, Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; @@ -129,7 +129,7 @@ where let mut task_futs = vec![]; let meta = launcher.metadata.clone(); - let txn_task = + let _txn_task = if let TxnTaskDescription::RoundRobinTimeBased(duration) = meta.txn_description { let txn_task = TxnTask { handles: nodes.clone(), diff --git a/crates/testing/src/txn_task.rs b/crates/testing/src/txn_task.rs index f8ca729859..8de94f4d8d 100644 --- a/crates/testing/src/txn_task.rs +++ b/crates/testing/src/txn_task.rs @@ -40,7 +40,7 @@ impl> TxnTask { loop { async_sleep(self.duration).await; match self.shutdown_chan.try_recv() { - Ok(event) => { + Ok(_event) => { return HotShotTaskCompleted::ShutDown; } Err(TryRecvError::Empty) => {} diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 578e9b58c0..8a1cefb07a 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -52,7 +52,7 @@ impl> TaskState for ViewSy } } - fn should_shutdown(event: &Self::Event) -> bool { + fn should_shutdown(_event: &Self::Event) -> bool { false } } diff --git a/crates/testing/tests/da_task.rs b/crates/testing/tests/da_task.rs index 71dcde0c38..31634fc0c5 100644 --- a/crates/testing/tests/da_task.rs +++ b/crates/testing/tests/da_task.rs @@ -21,7 +21,7 @@ use std::{collections::HashMap, marker::PhantomData}; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { - use hotshot::tasks::add_da_task; + use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::message::Proposal; diff --git a/crates/testing/tests/network_task.rs b/crates/testing/tests/network_task.rs index 2552512490..52458243dd 100644 --- 
a/crates/testing/tests/network_task.rs +++ b/crates/testing/tests/network_task.rs @@ -20,7 +20,7 @@ use std::{collections::HashMap, marker::PhantomData}; #[ignore] #[allow(clippy::too_many_lines)] async fn test_network_task() { - use hotshot_task_impls::harness::run_harness; + use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{data::VidDisperse, message::Proposal}; @@ -28,7 +28,7 @@ async fn test_network_task() { async_compatibility_layer::logging::setup_backtrace(); // Build the API for node 2. - let (handle, tx, rx) = build_system_handle(2).await; + let (handle, _tx, _rx) = build_system_handle(2).await; let api: HotShotConsensusApi = HotShotConsensusApi { inner: handle.hotshot.inner.clone(), }; diff --git a/crates/testing/tests/vid_task.rs b/crates/testing/tests/vid_task.rs index a9d5204ead..f4bb00dc54 100644 --- a/crates/testing/tests/vid_task.rs +++ b/crates/testing/tests/vid_task.rs @@ -1,4 +1,4 @@ -use hotshot::{tasks::add_vid_task, types::SignatureKey, HotShotConsensusApi}; +use hotshot::{types::SignatureKey, HotShotConsensusApi}; use hotshot_task_impls::{events::HotShotEvent, vid::VIDTaskState}; use hotshot_testing::{ block_types::TestTransaction, diff --git a/crates/testing/tests/view_sync_task.rs b/crates/testing/tests/view_sync_task.rs index 88359a514f..6fc3711432 100644 --- a/crates/testing/tests/view_sync_task.rs +++ b/crates/testing/tests/view_sync_task.rs @@ -11,7 +11,7 @@ use std::collections::HashMap; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { - use hotshot::tasks::add_view_sync_task; + use hotshot_task_impls::harness::run_harness; use hotshot_task_impls::view_sync::ViewSyncTaskState; use hotshot_testing::task_helpers::build_system_handle; From 224fdb1f792cebc53b55eba838ffc67fe1b145d1 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 30 Jan 2024 22:07:42 -0500 Subject: [PATCH 08/28] replace broadcast with broadcast_direct --- Cargo.lock | 31 +- Cargo.toml | 1 - 
crates/hotshot/src/lib.rs | 2 +- crates/hotshot/src/types/handle.rs | 2 +- crates/task-impls/src/consensus.rs | 45 +- crates/task-impls/src/da.rs | 7 +- crates/task-impls/src/events.rs | 1 + crates/task-impls/src/network.rs | 9 +- crates/task-impls/src/transactions.rs | 5 +- crates/task-impls/src/vid.rs | 12 +- crates/task-impls/src/view_sync.rs | 60 ++- crates/task-impls/src/vote.rs | 5 +- crates/task/Cargo.toml | 27 -- crates/task/src/event_stream.rs | 268 ----------- crates/task/src/global_registry.rs | 214 --------- crates/task/src/lib.rs | 385 --------------- crates/task/src/task.rs | 637 ------------------------- crates/task/src/task_impls.rs | 457 ------------------ crates/task/src/task_launcher.rs | 68 --- crates/task/src/task_state.rs | 182 ------- crates/testing/src/completion_task.rs | 9 +- crates/testing/src/spinning_task.rs | 125 ----- crates/testing/src/test_builder.rs | 6 +- crates/testing/src/test_launcher.rs | 8 +- crates/testing/src/test_runner.rs | 7 +- crates/testing/src/timeout_task.rs | 1 - crates/testing/src/view_sync_task.rs | 97 ---- crates/testing/tests/da_task.rs | 1 - crates/testing/tests/network_task.rs | 1 - crates/testing/tests/view_sync_task.rs | 1 - 30 files changed, 103 insertions(+), 2571 deletions(-) delete mode 100644 crates/task/Cargo.toml delete mode 100644 crates/task/src/event_stream.rs delete mode 100644 crates/task/src/global_registry.rs delete mode 100644 crates/task/src/lib.rs delete mode 100644 crates/task/src/task.rs delete mode 100644 crates/task/src/task_impls.rs delete mode 100644 crates/task/src/task_launcher.rs delete mode 100644 crates/task/src/task_state.rs delete mode 100644 crates/testing/src/timeout_task.rs diff --git a/Cargo.lock b/Cargo.lock index 11fcf771ef..13cb860b2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -908,17 +908,6 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name 
= "atomic_enum" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6227a8d6fdb862bcb100c4314d0d9579e5cd73fa6df31a2e6f6e1acd3c5f1207" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "attohttpc" version = "0.24.1" @@ -2794,24 +2783,6 @@ dependencies = [ "jf-utils", ] -[[package]] -name = "hotshot-task" -version = "0.1.0" -dependencies = [ - "async-compatibility-layer", - "async-lock 2.8.0", - "async-std", - "async-trait", - "atomic_enum", - "either", - "futures", - "pin-project", - "serde", - "snafu", - "tokio", - "tracing", -] - [[package]] name = "hotshot-task-impls" version = "0.1.0" @@ -6602,7 +6573,7 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "task" version = "0.1.0" -source = "git+https://github.com/EspressoSystems/HotShotTasks.git#a237e633587c7fe49fe1a45d5da9551cf5aab0a7" +source = "git+https://github.com/EspressoSystems/HotShotTasks.git#c55751758da8899d868cfc28bc95cd4e4d71584d" dependencies = [ "async-broadcast", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 3ab5841d62..796b7f457b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,6 @@ members = [ "crates/hotshot-state-prover", "crates/libp2p-networking", "crates/testing-macros", - "crates/task", "crates/task-impls", "crates/testing", "crates/types", diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 53088908f0..4472624041 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -274,7 +274,7 @@ impl> SystemContext { self.inner .internal_event_stream .0 - .broadcast(HotShotEvent::QCFormed(either::Left( + .broadcast_direct(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), ))) .await; diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 6b2b42c882..2a7d8de3ce 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -121,7 +121,7 @@ 
impl + 'static> SystemContextHandl block_size: None, }, }; - let _ = self.output_event_stream.0.broadcast(event).await; + let _ = self.output_event_stream.0.broadcast_direct(event).await; } } else { // TODO (justin) this seems bad. I think we should hard error in this case?? diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index 7818c46f89..496462de21 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -120,6 +120,7 @@ pub struct ConsensusTaskState< /// last Timeout Certificate this node formed pub timeout_cert: Option>, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, /// All the VID shares we've received for current and future views. @@ -235,8 +236,9 @@ impl, A: ConsensusApi + vote.get_view_number() + 1 ); event_stream - .broadcast(HotShotEvent::QuorumVoteSend(vote)) - .await; + .broadcast_direct(HotShotEvent::QuorumVoteSend(vote)) + .await + .unwrap(); if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { if commit_and_metadata.is_genesis { self.payload_commitment_and_metadata = None; @@ -339,8 +341,9 @@ impl, A: ConsensusApi + vote.get_view_number() + 1 ); event_stream - .broadcast(HotShotEvent::QuorumVoteSend(vote)) - .await; + .broadcast_direct(HotShotEvent::QuorumVoteSend(vote)) + .await + .unwrap(); return true; } } @@ -411,8 +414,9 @@ impl, A: ConsensusApi + } event_stream - .broadcast(HotShotEvent::ViewChange(new_view)) - .await; + .broadcast_direct(HotShotEvent::ViewChange(new_view)) + .await + .unwrap(); // Spawn a timeout task if we did actually update view let timeout = self.timeout; @@ -424,8 +428,9 @@ impl, A: ConsensusApi + async move { async_sleep(Duration::from_millis(timeout)).await; stream - .broadcast(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) - .await; + .broadcast_direct(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) + .await + .unwrap(); } })); let consensus = self.consensus.read().await; 
@@ -725,11 +730,12 @@ impl, A: ConsensusApi + ) { error!("publishing view error"); self.output_event_stream - .broadcast(Event { + .broadcast_direct(Event { view_number: view, event: EventType::Error { error: e.into() }, }) - .await; + .await + .unwrap(); } } @@ -754,9 +760,10 @@ impl, A: ConsensusApi + #[allow(clippy::cast_precision_loss)] if new_decide_reached { event_stream - .broadcast(HotShotEvent::LeafDecided(leaf_views.clone())) - .await; - let decide_sent = self.output_event_stream.broadcast(Event { + .broadcast_direct(HotShotEvent::LeafDecided(leaf_views.clone())) + .await + .unwrap(); + let decide_sent = self.output_event_stream.broadcast_direct(Event { view_number: consensus.last_decided_view, event: EventType::Decide { leaf_chain: Arc::new(leaf_views), @@ -788,7 +795,8 @@ impl, A: ConsensusApi + debug!("Sending Decide for view {:?}", consensus.last_decided_view); debug!("Decided txns len {:?}", included_txns_set.len()); - decide_sent.await; + decide_sent.await.unwrap(); + debug!("decide send succeeded"); } let new_view = self.current_proposal.clone().unwrap().view_number + 1; @@ -1051,7 +1059,7 @@ impl, A: ConsensusApi + } self.output_event_stream - .broadcast(Event { + .broadcast_direct(Event { view_number: old_view_number, event: EventType::ViewFinished { view_number: old_view_number, @@ -1094,7 +1102,7 @@ impl, A: ConsensusApi + }; event_stream - .broadcast(HotShotEvent::TimeoutVoteSend(vote)) + .broadcast_direct(HotShotEvent::TimeoutVoteSend(vote)) .await .unwrap(); debug!( @@ -1102,7 +1110,7 @@ impl, A: ConsensusApi + *view ); self.output_event_stream - .broadcast(Event { + .broadcast_direct(Event { view_number: view, event: EventType::ReplicaViewTimeout { view_number: view }, }) @@ -1256,7 +1264,7 @@ impl, A: ConsensusApi + ); event_stream - .broadcast(HotShotEvent::QuorumProposalSend( + .broadcast_direct(HotShotEvent::QuorumProposalSend( message.clone(), self.public_key.clone(), )) @@ -1297,6 +1305,7 @@ impl, A: ConsensusApi + { // TODO: Don't 
clone the sender let sender = task.clone_sender(); + info!("sender queue len {}", sender.len()); task.state_mut().handle(event, sender).await; None } diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index f1a7d8c31f..f90e1ff23c 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -169,7 +169,10 @@ impl, A: ConsensusApi + // self.cur_view = view; debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); - event_stream.broadcast(HotShotEvent::DAVoteSend(vote)).await; + event_stream + .broadcast_direct(HotShotEvent::DAVoteSend(vote)) + .await + .unwrap(); let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -301,7 +304,7 @@ impl, A: ConsensusApi + }; event_stream - .broadcast(HotShotEvent::DAProposalSend( + .broadcast_direct(HotShotEvent::DAProposalSend( message.clone(), self.public_key.clone(), )) diff --git a/crates/task-impls/src/events.rs b/crates/task-impls/src/events.rs index 902f886fd5..5181fc87d6 100644 --- a/crates/task-impls/src/events.rs +++ b/crates/task-impls/src/events.rs @@ -15,6 +15,7 @@ use hotshot_types::{ traits::{node_implementation::NodeType, BlockPayload}, }; +/// Marker that the task completed #[derive(Eq, Hash, PartialEq, Debug, Clone)] pub struct HotShotTaskCompleted; diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 9d708d21a6..740442e747 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -84,7 +84,7 @@ impl TaskState for NetworkMessageTaskState { } fn filter(&self, _event: &Self::Event) -> bool { - return false; + false } fn should_shutdown(_event: &Self::Event) -> bool { @@ -165,7 +165,7 @@ impl NetworkMessageTaskState { // TODO (Keyao benchmarking) Update these event variants (similar to the // `TransactionsRecv` event) so we can send one event for a vector of messages. 
// - self.event_stream.broadcast(event).await; + self.event_stream.broadcast(event).await.unwrap(); } MessageKind::Data(message) => match message { hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { @@ -176,8 +176,9 @@ impl NetworkMessageTaskState { } if !transactions.is_empty() { self.event_stream - .broadcast(HotShotEvent::TransactionsRecv(transactions)) - .await; + .broadcast_direct(HotShotEvent::TransactionsRecv(transactions)) + .await + .unwrap(); } } } diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 426b3627ca..c2fef8cb69 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -246,12 +246,13 @@ impl, A: ConsensusApi + // send the sequenced transactions to VID and DA tasks let block_view = if make_block { view } else { view + 1 }; event_stream - .broadcast(HotShotEvent::TransactionsSequenced( + .broadcast_direct(HotShotEvent::TransactionsSequenced( encoded_transactions, metadata, block_view, )) - .await; + .await + .unwrap(); return None; } diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 9c1460b14f..dd5a98ff3c 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -96,20 +96,22 @@ impl, A: ConsensusApi + let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building event_stream - .broadcast(HotShotEvent::SendPayloadCommitmentAndMetadata( + .broadcast_direct(HotShotEvent::SendPayloadCommitmentAndMetadata( vid_disperse.commit, metadata, view_number, )) - .await; + .await + .unwrap(); // send the block to the VID dispersal function event_stream - .broadcast(HotShotEvent::BlockReady( + .broadcast_direct(HotShotEvent::BlockReady( VidDisperse::from_membership(view_number, vid_disperse, &self.membership), view_number, )) - .await; + .await + .unwrap(); } HotShotEvent::BlockReady(vid_disperse, view_number) => { @@ -122,7 +124,7 @@ impl, A: 
ConsensusApi + }; debug!("publishing VID disperse for view {}", *view_number); event_stream - .broadcast(HotShotEvent::VidDisperseSend( + .broadcast_direct(HotShotEvent::VidDisperseSend( Proposal { signature, data: vid_disperse, diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 2e83fc5a9b..f526b1160a 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -545,10 +545,11 @@ impl< // If this is the first timeout we've seen advance to the next view self.current_view = view_number; event_stream - .broadcast(HotShotEvent::ViewChange(TYPES::Time::new( + .broadcast_direct(HotShotEvent::ViewChange(TYPES::Time::new( *self.current_view, ))) - .await; + .await + .unwrap(); } } @@ -611,8 +612,9 @@ impl, A: ConsensusApi + if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { event_stream - .broadcast(HotShotEvent::ViewSyncCommitVoteSend(vote)) - .await; + .broadcast_direct(HotShotEvent::ViewSyncCommitVoteSend(vote)) + .await + .unwrap(); } if let Some(timeout_task) = self.timeout_task.take() { @@ -629,12 +631,13 @@ impl, A: ConsensusApi + async_sleep(timeout).await; info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); stream - .broadcast(HotShotEvent::ViewSyncTimeout( + .broadcast_direct(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, phase, )) - .await; + .await + .unwrap(); } })); } @@ -682,8 +685,9 @@ impl, A: ConsensusApi + if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { event_stream - .broadcast(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) - .await; + .broadcast_direct(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) + .await + .unwrap(); } info!( @@ -692,12 +696,14 @@ impl, A: ConsensusApi + ); event_stream - .broadcast(HotShotEvent::ViewChange(self.next_view - 1)) - .await; + .broadcast_direct(HotShotEvent::ViewChange(self.next_view - 1)) + .await + .unwrap(); event_stream - 
.broadcast(HotShotEvent::ViewChange(self.next_view)) - .await; + .broadcast_direct(HotShotEvent::ViewChange(self.next_view)) + .await + .unwrap(); if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; @@ -715,12 +721,13 @@ impl, A: ConsensusApi + relay ); stream - .broadcast(HotShotEvent::ViewSyncTimeout( + .broadcast_direct(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, phase, )) - .await; + .await + .unwrap(); } })); } @@ -776,8 +783,9 @@ impl, A: ConsensusApi + } event_stream - .broadcast(HotShotEvent::ViewChange(self.next_view)) - .await; + .broadcast_direct(HotShotEvent::ViewChange(self.next_view)) + .await + .unwrap(); return Some(HotShotTaskCompleted); } @@ -803,8 +811,9 @@ impl, A: ConsensusApi + if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { event_stream - .broadcast(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) - .await; + .broadcast_direct(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + .await + .unwrap(); } self.timeout_task = Some(async_spawn({ @@ -816,12 +825,13 @@ impl, A: ConsensusApi + async_sleep(timeout).await; info!("Vote sending timed out in ViewSyncTrigger"); stream - .broadcast(HotShotEvent::ViewSyncTimeout( + .broadcast_direct(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, ViewSyncPhase::None, )) - .await; + .await + .unwrap(); } })); @@ -858,8 +868,9 @@ impl, A: ConsensusApi + if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { event_stream - .broadcast(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) - .await; + .broadcast_direct(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) + .await + .unwrap(); } } ViewSyncPhase::Finalize => { @@ -880,12 +891,13 @@ impl, A: ConsensusApi + relay ); stream - .broadcast(HotShotEvent::ViewSyncTimeout( + .broadcast_direct(HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, last_seen_certificate, )) - .await; + .await + .unwrap(); } })); diff --git 
a/crates/task-impls/src/vote.rs b/crates/task-impls/src/vote.rs index 1682c3dd17..0ec0b573da 100644 --- a/crates/task-impls/src/vote.rs +++ b/crates/task-impls/src/vote.rs @@ -95,8 +95,9 @@ impl< Either::Right(cert) => { debug!("Certificate Formed! {:?}", cert); event_stream - .broadcast(VOTE::make_cert_event(cert, &self.public_key)) - .await; + .broadcast_direct(VOTE::make_cert_event(cert, &self.public_key)) + .await + .unwrap(); self.accumulator = None; Some(HotShotTaskCompleted) } diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml deleted file mode 100644 index 03b65c1934..0000000000 --- a/crates/task/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -authors = ["Espresso Systems "] -description = "Async task abstraction for use in consensus" -edition = "2021" -name = "hotshot-task" -version = "0.1.0" - -[dependencies] -async-compatibility-layer = { workspace = true } -async-trait = { workspace = true } -either = { workspace = true } -futures = { workspace = true } -serde = { workspace = true } -snafu = { workspace = true } -async-lock = { workspace = true } -tracing = { workspace = true } -atomic_enum = "0.2.0" -pin-project = "1.1.3" - -[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { workspace = true } - -[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { workspace = true } - -[lints] -workspace = true diff --git a/crates/task/src/event_stream.rs b/crates/task/src/event_stream.rs deleted file mode 100644 index 5248fe4373..0000000000 --- a/crates/task/src/event_stream.rs +++ /dev/null @@ -1,268 +0,0 @@ -use async_compatibility_layer::channel::{unbounded, UnboundedSender, UnboundedStream}; -use async_lock::RwLock; -use std::{ - collections::HashMap, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use async_trait::async_trait; -use futures::Stream; - -use crate::task::{FilterEvent, PassType}; - -/// a stream that does nothing. 
-/// it's immediately closed -#[derive(Clone)] -pub struct DummyStream; - -impl Stream for DummyStream { - type Item = (); - - fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(None) - } -} - -#[async_trait] -impl EventStream for DummyStream { - type EventType = (); - - type StreamType = DummyStream; - - async fn publish(&self, _event: Self::EventType) {} - - async fn subscribe( - &self, - _filter: FilterEvent, - ) -> (Self::StreamType, StreamId) { - (DummyStream, 0) - } - - async fn unsubscribe(&self, _id: StreamId) {} - - async fn direct_message(&self, _id: StreamId, _event: Self::EventType) {} -} - -impl SendableStream for DummyStream {} - -/// this is only used for indexing -pub type StreamId = usize; - -/// a stream that plays nicely with async -pub trait SendableStream: Stream + Sync + Send + 'static {} - -/// Async pub sub event stream -/// NOTE: static bound indicates that if the type points to data, that data lives for the lifetime -/// of the program -#[async_trait] -pub trait EventStream: Clone + 'static + Sync + Send { - /// the type of event to process - type EventType: PassType; - /// the type of stream to use - type StreamType: SendableStream; - - /// publish an event to the event stream - async fn publish(&self, event: Self::EventType); - - /// subscribe to a particular set of events - /// specified by `filter`. Filter returns true if the event should be propagated - /// TODO (justin) rethink API, we might be able just to use `StreamExt::filter` and `Filter` - /// That would certainly be cleaner - async fn subscribe(&self, filter: FilterEvent) - -> (Self::StreamType, StreamId); - - /// unsubscribe from the stream - async fn unsubscribe(&self, id: StreamId); - - /// send direct message to node - async fn direct_message(&self, id: StreamId, event: Self::EventType); -} - -/// Event stream implementation using channels as the underlying primitive. 
-/// We want it to be cloneable -#[derive(Clone)] -pub struct ChannelStream { - /// inner field. Useful for having the stream itself - /// be clone - inner: Arc>>, -} - -/// trick to make the event stream clonable -struct ChannelStreamInner { - /// the subscribers to the channel - subscribers: HashMap, UnboundedSender)>, - /// the next unused assignable id - next_stream_id: StreamId, -} - -impl ChannelStream { - /// construct a new event stream - #[must_use] - pub fn new() -> Self { - Self { - inner: Arc::new(RwLock::new(ChannelStreamInner { - subscribers: HashMap::new(), - next_stream_id: 0, - })), - } - } -} - -impl Default for ChannelStream { - fn default() -> Self { - Self::new() - } -} - -impl SendableStream for UnboundedStream {} - -#[async_trait] -impl EventStream for ChannelStream { - type EventType = EVENT; - type StreamType = UnboundedStream; - - async fn direct_message(&self, id: StreamId, event: Self::EventType) { - let inner = self.inner.write().await; - match inner.subscribers.get(&id) { - Some((filter, sender)) => { - if filter(&event) { - match sender.send(event.clone()).await { - Ok(()) => (), - // error sending => stream is closed so remove it - Err(_) => self.unsubscribe(id).await, - } - } - } - None => { - tracing::debug!("Requested stream id not found"); - } - } - } - - /// publish an event to the event stream - async fn publish(&self, event: Self::EventType) { - let inner = self.inner.read().await; - for (uid, (filter, sender)) in &inner.subscribers { - if filter(&event) { - match sender.send(event.clone()).await { - Ok(()) => (), - // error sending => stream is closed so remove it - Err(_) => { - self.unsubscribe(*uid).await; - } - } - } - } - } - - async fn subscribe( - &self, - filter: FilterEvent, - ) -> (Self::StreamType, StreamId) { - let mut inner = self.inner.write().await; - let new_stream_id = inner.next_stream_id; - let (s, r) = unbounded(); - inner.next_stream_id += 1; - // NOTE: can never be already existing. 
- // so, this should always return `None` - inner.subscribers.insert(new_stream_id, (filter, s)); - (r.into_stream(), new_stream_id) - } - - async fn unsubscribe(&self, uid: StreamId) { - let mut inner = self.inner.write().await; - inner.subscribers.remove(&uid); - } -} - -#[cfg(test)] -pub mod test { - use crate::{event_stream::EventStream, StreamExt}; - use async_compatibility_layer::art::{async_sleep, async_spawn}; - use std::time::Duration; - - #[derive(Clone, Debug, PartialEq, Eq)] - enum TestMessage { - One, - Two, - Three, - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_channel_stream_basic() { - use crate::task::FilterEvent; - - use super::ChannelStream; - - let channel_stream = ChannelStream::::new(); - let (mut stream, _) = channel_stream.subscribe(FilterEvent::default()).await; - let dup_channel_stream = channel_stream.clone(); - - let dup_dup_channel_stream = channel_stream.clone(); - - async_spawn(async move { - let (mut stream, _) = dup_channel_stream.subscribe(FilterEvent::default()).await; - assert!(stream.next().await.unwrap() == TestMessage::Three); - assert!(stream.next().await.unwrap() == TestMessage::One); - assert!(stream.next().await.unwrap() == TestMessage::Two); - }); - - async_spawn(async move { - dup_dup_channel_stream.publish(TestMessage::Three).await; - dup_dup_channel_stream.publish(TestMessage::One).await; - dup_dup_channel_stream.publish(TestMessage::Two).await; - }); - async_sleep(Duration::new(3, 0)).await; - - assert!(stream.next().await.unwrap() == TestMessage::Three); - assert!(stream.next().await.unwrap() == TestMessage::One); - assert!(stream.next().await.unwrap() == TestMessage::Two); - } - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn 
test_channel_stream_xtreme() { - use crate::task::FilterEvent; - - use super::ChannelStream; - - let channel_stream = ChannelStream::::new(); - let mut streams = Vec::new(); - - for _i in 0..1000 { - let dup_channel_stream = channel_stream.clone(); - let (stream, _) = dup_channel_stream.subscribe(FilterEvent::default()).await; - streams.push(stream); - } - - let dup_dup_channel_stream = channel_stream.clone(); - - for _i in 0..1000 { - let mut stream = streams.pop().unwrap(); - async_spawn(async move { - for event in [TestMessage::One, TestMessage::Two, TestMessage::Three] { - for _ in 0..100 { - assert!(stream.next().await.unwrap() == event); - } - } - }); - } - - async_spawn(async move { - for event in [TestMessage::One, TestMessage::Two, TestMessage::Three] { - for _ in 0..100 { - dup_dup_channel_stream.publish(event.clone()).await; - } - } - }); - } -} diff --git a/crates/task/src/global_registry.rs b/crates/task/src/global_registry.rs deleted file mode 100644 index 1977c21c76..0000000000 --- a/crates/task/src/global_registry.rs +++ /dev/null @@ -1,214 +0,0 @@ -use async_lock::RwLock; -use either::Either; -use futures::{future::BoxFuture, FutureExt}; -use std::{ - collections::{BTreeMap, BTreeSet}, - ops::Deref, - sync::Arc, -}; - -use crate::task_state::{TaskState, TaskStatus}; - -/// function to shut down gobal registry -#[derive(Clone)] -pub struct ShutdownFn(pub Arc BoxFuture<'static, ()> + Sync + Send>); - -// TODO this might cleaner as `run()` -// but then this pattern should change everywhere -impl Deref for ShutdownFn { - type Target = dyn Fn() -> BoxFuture<'static, ()> + Sync + Send; - - fn deref(&self) -> &Self::Target { - &*self.0 - } -} - -/// id of task. 
Usize instead of u64 because -/// used for primarily for indexing -pub type HotShotTaskId = usize; - -/// the global registry provides a place to: -/// - inquire about the state of various tasks -/// - gracefully shut down tasks -#[derive(Debug, Clone)] -pub struct GlobalRegistry { - /// up-to-date shared list of statuses - /// only used if `state_cache` is out of date - /// or if appending - state_list: Arc>>, - /// possibly stale read version of state - /// NOTE: must include entire state in order to - /// support both incrementing and reading. - /// Writing to the status should gracefully shut down the task - state_cache: BTreeMap, -} - -/// function to modify state -#[allow(clippy::type_complexity)] -struct Modifier(Box Either + Send>); - -impl Default for GlobalRegistry { - fn default() -> Self { - Self::new() - } -} - -impl GlobalRegistry { - /// create new registry - #[must_use] - pub fn new() -> Self { - Self { - state_list: Arc::new(RwLock::new(BTreeMap::default())), - state_cache: BTreeMap::default(), - } - } - - /// register with the global registry - /// return a function to the caller (task) that can be used to deregister - /// returns a function to call to shut down the task - /// and the unique identifier of the task - pub async fn register(&mut self, name: &str, status: TaskState) -> (ShutdownFn, HotShotTaskId) { - let mut list = self.state_list.write().await; - let next_id = list - .last_key_value() - .map(|(k, _v)| k) - .copied() - .unwrap_or_default() - + 1; - let new_entry = (status.clone(), name.to_string()); - let new_entry_dup = new_entry.0.clone(); - list.insert(next_id, new_entry.clone()); - - self.state_cache.insert(next_id, new_entry); - - let shutdown_fn = ShutdownFn(Arc::new(move || { - new_entry_dup.set_state(TaskStatus::Completed); - async move {}.boxed() - })); - (shutdown_fn, next_id) - } - - /// update the cache - async fn update_cache(&mut self) { - // NOTE: this can be done much more cleverly - // avoid one intersection by 
comparing max keys (constant time op vs O(n + m)) - // and debatable how often the other op needs to be run - // probably much much less often - let list = self.state_list.read().await; - let list_keys: BTreeSet = list.keys().copied().collect(); - let cache_keys: BTreeSet = self.state_cache.keys().copied().collect(); - // bleh not as efficient - let missing_key_list = list_keys.difference(&cache_keys); - let expired_key_list = cache_keys.difference(&list_keys); - - for expired_key in expired_key_list { - self.state_cache.remove(expired_key); - } - - for key in missing_key_list { - // technically shouldn't be possible for this to be none since - // we have a read lock - // nevertheless, this seems easier - if let Some(val) = list.get(key) { - self.state_cache.insert(*key, val.clone()); - } - } - } - - /// internal function to run `modifier` on `uid` - /// if it exists - async fn operate_on_task( - &mut self, - uid: HotShotTaskId, - modifier: Modifier, - ) -> Either { - // the happy path - if let Some(ele) = self.state_cache.get(&uid) { - modifier.0(&ele.0) - } - // the sad path - else { - self.update_cache().await; - if let Some(ele) = self.state_cache.get(&uid) { - modifier.0(&ele.0) - } else { - Either::Right(false) - } - } - } - - /// set `uid`'s state to paused - /// returns true upon success and false if `uid` is not registered - pub async fn pause_task(&mut self, uid: HotShotTaskId) -> bool { - let modifier = Modifier(Box::new(|state| { - state.set_state(TaskStatus::Paused); - Either::Right(true) - })); - match self.operate_on_task(uid, modifier).await { - Either::Left(_) => unreachable!(), - Either::Right(b) => b, - } - } - - /// set `uid`'s state to running - /// returns true upon success and false if `uid` is not registered - pub async fn run_task(&mut self, uid: HotShotTaskId) -> bool { - let modifier = Modifier(Box::new(|state| { - state.set_state(TaskStatus::Running); - Either::Right(true) - })); - match self.operate_on_task(uid, modifier).await { - 
Either::Left(_) => unreachable!(), - Either::Right(b) => b, - } - } - - /// if the `uid` is registered with the global registry - /// return its task status - /// this is a way to subscribe to state changes from the taskstatus - /// since `HotShotTaskStatus` implements stream - pub async fn get_task_state(&mut self, uid: HotShotTaskId) -> Option { - let modifier = Modifier(Box::new(|state| Either::Left(state.get_status()))); - match self.operate_on_task(uid, modifier).await { - Either::Left(state) => Some(state), - Either::Right(false) => None, - Either::Right(true) => unreachable!(), - } - } - - /// shut down a task from a different thread - /// returns true if succeeded - /// returns false if the task does not exist - pub async fn shutdown_task(&mut self, uid: usize) -> bool { - let modifier = Modifier(Box::new(|state| { - state.set_state(TaskStatus::Completed); - Either::Right(true) - })); - let result = match self.operate_on_task(uid, modifier).await { - Either::Left(_) => unreachable!(), - Either::Right(b) => b, - }; - let mut list = self.state_list.write().await; - list.remove(&uid); - result - } - - /// checks if all registered tasks have completed - pub async fn is_shutdown(&mut self) -> bool { - let task_list = self.state_list.read().await; - for task in (*task_list).values() { - if task.0.get_status() != TaskStatus::Completed { - return false; - } - } - true - } - - /// shut down all tasks in registry - pub async fn shutdown_all(&mut self) { - let mut task_list = self.state_list.write().await; - while let Some((_uid, task)) = task_list.pop_last() { - task.0.set_state(TaskStatus::Completed); - } - } -} diff --git a/crates/task/src/lib.rs b/crates/task/src/lib.rs deleted file mode 100644 index 918a0eaded..0000000000 --- a/crates/task/src/lib.rs +++ /dev/null @@ -1,385 +0,0 @@ -//! Abstractions meant for usage with long running consensus tasks -//! 
and testing harness - -use crate::task::PassType; -use either::Either; -use event_stream::SendableStream; -use Poll::{Pending, Ready}; -// The spawner of the task should be able to fire and forget the task if it makes sense. -use futures::{stream::Fuse, Future, Stream, StreamExt}; -use std::{ - pin::Pin, - slice::SliceIndex, - sync::Arc, - task::{Context, Poll}, -}; -// NOTE use pin_project here because we're already bring in procedural macros elsewhere -// so there is no reason to use pin_project_lite -use pin_project::pin_project; - -/// Astractions over the state of a task and a stream -/// interface for task changes. Allows in the happy path -/// for lockless manipulation of tasks -/// and in the sad case, only the use of a `std::sync::mutex` -pub mod task_state; - -/// the global registry storing the status of all tasks -/// as well as the abiliity to terminate them -pub mod global_registry; - -/// mpmc streamable to all subscribed tasks -pub mod event_stream; - -/// The `HotShot` Task. The main point of this library. Uses all other abstractions -/// to create an abstraction over tasks -pub mod task; - -/// The hotshot task launcher. Useful for constructing tasks -pub mod task_launcher; - -/// the task implementations with different features -pub mod task_impls; - -/// merge `N` streams of the same type -#[pin_project] -pub struct MergeN { - /// Streams to be merged. - #[pin] - streams: Vec>, - /// idx to start polling - idx: usize, -} - -impl MergeN { - /// create a new stream - #[must_use] - pub fn new(streams: Vec) -> MergeN { - let fused_streams = streams.into_iter().map(StreamExt::fuse).collect(); - MergeN { - streams: fused_streams, - idx: 0, - } - } -} - -impl PassType for T {} - -impl SendableStream for MergeN {} - -// NOTE: yoinked from https://github.com/yoshuawuyts/futures-concurrency/ -// we should really just use `futures-concurrency`. I'm being lazy here -// and not bringing in yet another dependency. 
Note: their merge is implemented much -// more cleverly than this rather naive impl - -// NOTE: If this is implemented through the trait, this will work on both vecs and -// slices. -// -// From: https://github.com/rust-lang/rust/pull/78370/files -/// Get a pinned mutable pointer from a list. -pub(crate) fn get_pin_mut_from_vec( - slice: Pin<&mut Vec>, - index: I, -) -> Option> -where - I: SliceIndex<[T]>, -{ - // SAFETY: `get_unchecked_mut` is never used to move the slice inside `self` (`SliceIndex` - // is sealed and all `SliceIndex::get_mut` implementations never move elements). - // `x` is guaranteed to be pinned because it comes from `self` which is pinned. - unsafe { - slice - .get_unchecked_mut() - .get_mut(index) - .map(|x| Pin::new_unchecked(x)) - } -} - -impl Stream for MergeN { - // idx of the stream, item - type Item = (usize, ::Item); - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut me = self.project(); - - let idx = *me.idx; - *me.idx = (idx + 1) % me.streams.len(); - - let first_half = idx..me.streams.len(); - let second_half = 0..idx; - - let iterator = first_half.chain(second_half); - - let mut done = false; - - for i in iterator { - let stream = get_pin_mut_from_vec(me.streams.as_mut(), i).unwrap(); - - match stream.poll_next(cx) { - Ready(Some(val)) => return Ready(Some((i, val))), - Ready(None) => {} - Pending => done = false, - } - } - - if done { - Ready(None) - } else { - Pending - } - } -} - -// NOTE: yoinked /from async-std -// except this is executor agnostic (doesn't rely on async-std streamext/fuse) -// NOTE: usage of this is for combining streams into one main stream -// for usage with `MessageStream` -// TODO move this to async-compatibility-layer -#[pin_project] -/// Stream type that merges two underlying streams -pub struct Merge { - /// first stream to merge - #[pin] - a: Fuse, - /// second stream to merge - #[pin] - b: Fuse, - /// When `true`, poll `a` first, otherwise, `poll` b`. 
- a_first: bool, -} - -impl Merge { - /// create a new Merged stream - pub fn new(a: T, b: U) -> Merge - where - T: Stream, - U: Stream, - { - Merge { - a: a.fuse(), - b: b.fuse(), - a_first: true, - } - } -} - -impl Stream for Merge -where - T: Stream, - U: Stream, -{ - type Item = Either; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let me = self.project(); - let a_first = *me.a_first; - - // Toggle the flag - *me.a_first = !a_first; - - poll_next(me.a, me.b, cx, a_first) - } - - fn size_hint(&self) -> (usize, Option) { - let (a_lower, a_upper) = self.a.size_hint(); - let (b_lower, b_upper) = self.b.size_hint(); - - let upper = match (a_upper, b_upper) { - (Some(a_upper), Some(b_upper)) => Some(a_upper + b_upper), - _ => None, - }; - - (a_lower + b_lower, upper) - } -} - -impl SendableStream for Merge -where - T: Stream + Send + Sync + 'static, - U: Stream + Send + Sync + 'static, -{ -} - -/// poll the next item in the merged stream -fn poll_next( - first: Pin<&mut T>, - second: Pin<&mut U>, - cx: &mut Context<'_>, - order: bool, -) -> Poll>> -where - T: Stream, - U: Stream, -{ - let mut done = true; - - // there's definitely a better way to do this - if order { - match first.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Left(val))), - Ready(None) => {} - Pending => done = false, - } - - match second.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Right(val))), - Ready(None) => {} - Pending => done = false, - } - } else { - match second.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Right(val))), - Ready(None) => {} - Pending => done = false, - } - - match first.poll_next(cx) { - Ready(Some(val)) => return Ready(Some(Either::Left(val))), - Ready(None) => {} - Pending => done = false, - } - } - - if done { - Ready(None) - } else { - Pending - } -} - -/// gotta make the futures sync -pub type BoxSyncFuture<'a, T> = Pin + Send + Sync + 'a>>; - -/// may be treated as a stream 
-#[pin_project(project = ProjectedStreamableThing)] -pub struct GeneratedStream { - // todo maybe type wrapper is in order - /// Stream generator. - generator: Arc Option> + Sync + Send>, - /// Optional in-progress future. - in_progress_fut: Option>, -} - -impl GeneratedStream { - /// create a generator - pub fn new( - generator: Arc Option> + Sync + Send>, - ) -> Self { - GeneratedStream { - generator, - in_progress_fut: None, - } - } -} - -impl Stream for GeneratedStream { - type Item = O; - - fn poll_next( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let projection = self.project(); - match projection.in_progress_fut { - Some(fut) => { - // NOTE: this is entirely safe. - // We will ONLY poll if we've been awakened. - // otherwise, we won't poll. - match fut.as_mut().poll(cx) { - Ready(val) => { - *projection.in_progress_fut = None; - Poll::Ready(Some(val)) - } - Pending => Poll::Pending, - } - } - None => { - let wrapped_fut = (*projection.generator)(); - let Some(mut fut) = wrapped_fut else { - return Poll::Ready(None); - }; - match fut.as_mut().poll(cx) { - Ready(val) => { - *projection.in_progress_fut = None; - Poll::Ready(Some(val)) - } - Pending => { - *projection.in_progress_fut = Some(fut); - Poll::Pending - } - } - } - } - } -} - -/// yoinked from futures crate -pub fn assert_future(future: F) -> F -where - F: Future, -{ - future -} - -/// yoinked from futures crate, adds sync bound that we need -pub fn boxed_sync<'a, F>(fut: F) -> BoxSyncFuture<'a, F::Output> -where - F: Future + Sized + Send + Sync + 'a, -{ - assert_future::(Box::pin(fut)) -} - -impl SendableStream for GeneratedStream {} - -#[cfg(test)] -pub mod test { - use crate::{boxed_sync, Arc, GeneratedStream, StreamExt}; - - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_stream_basic() { - let mut 
stream = GeneratedStream:: { - generator: Arc::new(move || { - let closure = async move { 5 }; - Some(boxed_sync(closure)) - }), - in_progress_fut: None, - }; - assert!(stream.next().await == Some(5)); - assert!(stream.next().await == Some(5)); - assert!(stream.next().await == Some(5)); - assert!(stream.next().await == Some(5)); - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_stream_fancy() { - use async_compatibility_layer::art::async_sleep; - use std::{sync::atomic::Ordering, time::Duration}; - - let value = Arc::::default(); - let mut stream = GeneratedStream:: { - generator: Arc::new(move || { - let value = value.clone(); - let closure = async move { - let actual_value = value.load(Ordering::Relaxed); - value.store(actual_value + 1, Ordering::Relaxed); - async_sleep(Duration::new(0, 500)).await; - u32::from(actual_value) - }; - Some(boxed_sync(closure)) - }), - in_progress_fut: None, - }; - assert!(stream.next().await == Some(0)); - assert!(stream.next().await == Some(1)); - assert!(stream.next().await == Some(2)); - assert!(stream.next().await == Some(3)); - } -} diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs deleted file mode 100644 index 8435ff0fcf..0000000000 --- a/crates/task/src/task.rs +++ /dev/null @@ -1,637 +0,0 @@ -use std::{ - fmt::{Debug, Formatter}, - ops::Deref, - pin::Pin, - task::{Context, Poll}, -}; - -use async_compatibility_layer::art::async_yield_now; -use either::Either::{self, Left, Right}; -use futures::{future::BoxFuture, stream::Fuse, Future, FutureExt, Stream, StreamExt}; -use pin_project::pin_project; -use std::sync::Arc; - -use crate::{ - event_stream::{EventStream, SendableStream, StreamId}, - global_registry::{GlobalRegistry, HotShotTaskId, ShutdownFn}, - task_impls::TaskBuilder, - task_state::{TaskState, TaskStatus}, -}; - -/// restrictions on types we 
wish to pass around. -/// Includes messages and events -pub trait PassType: Clone + Debug + Sync + Send + 'static {} - -/// the task state -pub trait TS: Sync + Send + 'static {} - -/// a task error that has nice qualities -#[allow(clippy::module_name_repetitions)] -pub trait TaskErr: std::error::Error + Sync + Send + 'static {} - -impl TaskErr for T {} - -/// group of types needed for a hotshot task -pub trait HotShotTaskTypes: 'static { - /// the event type from the event stream - type Event: PassType; - /// the state of the task - type State: TS; - /// the global event stream - type EventStream: EventStream; - /// the message stream to receive - type Message: PassType; - /// the steam of messages from other tasks - type MessageStream: SendableStream; - /// the error to return - type Error: TaskErr + 'static + ?Sized; - - /// build a task - /// NOTE: done here and not on `TaskBuilder` because - /// we want specific checks done on each variant - /// NOTE: all generics implement `Sized`, but this bound is - /// NOT applied to `Self` unless we specify - fn build(builder: TaskBuilder) -> HST - where - Self: Sized; -} - -/// hot shot task -#[pin_project(project = ProjectedHST)] -#[allow(clippy::type_complexity)] -pub struct HST { - /// Optional ID of the stream. - pub(crate) stream_id: Option, - /// the eventual return value, post-cleanup - r_val: Option, - /// if we have a future for tracking shutdown progress - in_progress_shutdown_fut: Option>, - /// the in progress future - in_progress_fut: Option, HSTT::State)>>, - /// name of task - name: String, - /// state of the task - /// TODO make this boxed. We don't want to assume this is a small future. 
- /// since it concievably may be stored on the stack - #[pin] - status: TaskState, - /// functions performing cleanup - /// one should shut down the task - /// if we're tracking with a global registry - /// the other should unsubscribe from the stream - shutdown_fns: Vec, - /// shared stream - event_stream: MaybePinnedEventStream, - /// stream of messages - message_stream: Option>>>, - /// state - state: Option, - /// handler for events - handle_event: Option>, - /// handler for messages - handle_message: Option>, - /// task id - pub(crate) tid: Option, -} - -/// an option of a pinned boxed fused event stream -pub type MaybePinnedEventStream = - Option::EventStream as EventStream>::StreamType>>>>; - -/// ADT for wrapping all possible handler types -#[allow(dead_code)] -pub(crate) enum HotShotTaskHandler { - /// handle an event - HandleEvent(HandleEvent), - /// handle a message - HandleMessage(HandleMessage), - /// filter an event - FilterEvent(FilterEvent), - /// deregister with the registry - Shutdown(ShutdownFn), -} - -/// Type wrapper for handling an event -#[allow(clippy::type_complexity)] -pub struct HandleEvent( - pub Arc< - dyn Fn( - HSTT::Event, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)> - + Sync - + Send, - >, -); - -impl Default for HandleEvent { - fn default() -> Self { - Self(Arc::new(|_event, state| { - async move { (None, state) }.boxed() - })) - } -} - -impl Deref for HandleEvent { - type Target = dyn Fn( - HSTT::Event, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)>; - - fn deref(&self) -> &Self::Target { - &*self.0 - } -} - -/// Type wrapper for handling a message -#[allow(clippy::type_complexity)] -pub struct HandleMessage( - pub Arc< - dyn Fn( - HSTT::Message, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)> - + Sync - + Send, - >, -); -impl Deref for HandleMessage { - type Target = dyn Fn( - HSTT::Message, - HSTT::State, - ) -> BoxFuture<'static, (Option, HSTT::State)>; - - fn deref(&self) -> 
&Self::Target { - &*self.0 - } -} - -/// Return `true` if the event should be filtered -#[derive(Clone)] -pub struct FilterEvent(pub Arc bool + Send + 'static + Sync>); - -impl Default for FilterEvent { - fn default() -> Self { - Self(Arc::new(|_| true)) - } -} - -impl Deref for FilterEvent { - type Target = dyn Fn(&EVENT) -> bool + Send + 'static + Sync; - - fn deref(&self) -> &Self::Target { - &*self.0 - } -} - -impl HST { - /// Do a consistency check on the `HST` construction - pub(crate) fn base_check(&self) { - assert!(!self.shutdown_fns.is_empty(), "No shutdown functions"); - assert!( - self.in_progress_fut.is_none(), - "This future has already been polled" - ); - - assert!(self.state.is_some(), "Didn't register state"); - - assert!(self.tid.is_some(), "Didn't register global registry"); - } - - /// perform event sanity checks - pub(crate) fn event_check(&self) { - assert!( - self.shutdown_fns.len() == 2, - "Expected 2 shutdown functions" - ); - assert!(self.event_stream.is_some(), "Didn't register event stream"); - assert!(self.handle_event.is_some(), "Didn't register event handler"); - } - - /// perform message sanity checks - pub(crate) fn message_check(&self) { - assert!( - self.handle_message.is_some(), - "Didn't register message handler" - ); - assert!( - self.message_stream.is_some(), - "Didn't register message stream" - ); - } - - /// register a handler with the task - #[must_use] - pub(crate) fn register_handler(self, handler: HotShotTaskHandler) -> Self { - match handler { - HotShotTaskHandler::HandleEvent(handler) => Self { - handle_event: Some(handler), - ..self - }, - HotShotTaskHandler::HandleMessage(handler) => Self { - handle_message: Some(handler), - ..self - }, - HotShotTaskHandler::FilterEvent(_handler) => unimplemented!(), - HotShotTaskHandler::Shutdown(_handler) => unimplemented!(), - } - } - - /// register an event stream with the task - pub(crate) async fn register_event_stream( - self, - event_stream: HSTT::EventStream, - filter: 
FilterEvent, - ) -> Self { - let (stream, uid) = event_stream.subscribe(filter).await; - - let mut shutdown_fns = self.shutdown_fns; - { - let event_stream = event_stream.clone(); - shutdown_fns.push(ShutdownFn(Arc::new(move || -> BoxFuture<'static, ()> { - let event_stream = event_stream.clone(); - async move { - event_stream.clone().unsubscribe(uid).await; - } - .boxed() - }))); - } - // TODO perhaps GC the event stream - // (unsunscribe) - Self { - event_stream: Some(Box::pin(stream.fuse())), - shutdown_fns, - stream_id: Some(uid), - ..self - } - } - - /// register a message with the task - #[must_use] - pub(crate) fn register_message_stream(self, stream: HSTT::MessageStream) -> Self { - Self { - message_stream: Some(Box::pin(stream.fuse())), - ..self - } - } - - /// register state with the task - #[must_use] - pub(crate) fn register_state(self, state: HSTT::State) -> Self { - Self { - state: Some(state), - ..self - } - } - - /// register with the registry - pub(crate) async fn register_registry(self, registry: &mut GlobalRegistry) -> Self { - let (shutdown_fn, id) = registry.register(&self.name, self.status.clone()).await; - let mut shutdown_fns = self.shutdown_fns; - shutdown_fns.push(shutdown_fn); - Self { - shutdown_fns, - tid: Some(id), - ..self - } - } - - /// create a new task - pub(crate) fn new(name: String) -> Self { - Self { - stream_id: None, - r_val: None, - name, - status: TaskState::new(), - event_stream: None, - state: None, - handle_event: None, - handle_message: None, - shutdown_fns: vec![], - message_stream: None, - in_progress_fut: None, - in_progress_shutdown_fut: None, - tid: None, - } - } - - /// launch the task - /// NOTE: the only way to get a `HST` is by usage - /// of one of the impls. Those all have checks enabled. - /// So, it should be safe to launch. 
- pub fn launch(self) -> BoxFuture<'static, HotShotTaskCompleted> { - Box::pin(self) - } -} - -/// enum describing how the tasks completed -pub enum HotShotTaskCompleted { - /// the task shut down successfully - ShutDown, - /// the task encountered an error - Error(Box), - /// the streams the task was listening for died - StreamsDied, - /// we somehow lost the state - /// this is definitely a bug. - LostState, - /// lost the return value somehow - LostReturnValue, - /// Stream exists but missing handler - MissingHandler, -} - -impl std::fmt::Debug for HotShotTaskCompleted { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - HotShotTaskCompleted::ShutDown => f.write_str("HotShotTaskCompleted::ShutDown"), - HotShotTaskCompleted::Error(_) => f.write_str("HotShotTaskCompleted::Error"), - HotShotTaskCompleted::StreamsDied => f.write_str("HotShotTaskCompleted::StreamsDied"), - HotShotTaskCompleted::LostState => f.write_str("HotShotTaskCompleted::LostState"), - HotShotTaskCompleted::LostReturnValue => { - f.write_str("HotShotTaskCompleted::LostReturnValue") - } - HotShotTaskCompleted::MissingHandler => { - f.write_str("HotShotTaskCompleted::MissingHandler") - } - } - } -} - -impl PartialEq for HotShotTaskCompleted { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::Error(_l0), Self::Error(_r0)) => false, - _ => core::mem::discriminant(self) == core::mem::discriminant(other), - } - } -} - -impl<'pin, HSTT: HotShotTaskTypes> ProjectedHST<'pin, HSTT> { - /// launches the shutdown future - fn launch_shutdown_fut(&mut self, cx: &mut Context<'_>) -> Poll { - let fut = self.create_shutdown_fut(); - self.check_ip_shutdown_fut(fut, cx) - } - - /// checks the in progress shutdown future, `fut` - fn check_ip_shutdown_fut( - &mut self, - mut fut: Pin + Send>>, - cx: &mut Context<'_>, - ) -> Poll { - match fut.as_mut().poll(cx) { - Poll::Ready(()) => Poll::Ready( - self.r_val - .take() - .unwrap_or_else(|| 
HotShotTaskCompleted::LostReturnValue), - ), - Poll::Pending => { - *self.in_progress_shutdown_fut = Some(fut); - Poll::Pending - } - } - } - - /// creates the shutdown future and returns it - fn create_shutdown_fut(&mut self) -> Pin + Send>> { - let shutdown_fns = self.shutdown_fns.clone(); - let fut = async move { - for shutdown_fn in shutdown_fns { - shutdown_fn().await; - } - } - .boxed(); - fut - } - - /// check the event stream - /// returns either a poll if there's a future IP - /// or a bool stating whether or not the stream is finished - fn check_event_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Either, bool> { - let event_stream = self.event_stream.take(); - if let Some(mut inner_event_stream) = event_stream { - while let Poll::Ready(maybe_event) = inner_event_stream.as_mut().poll_next(cx) { - if let Some(event) = maybe_event { - if let Some(handle_event) = self.handle_event { - let maybe_state = self.state.take(); - if let Some(state) = maybe_state { - let mut fut = handle_event(event, state); - match fut.as_mut().poll(cx) { - Poll::Ready((result, state)) => { - if let Some(completed) = result { - *self.in_progress_fut = None; - *self.state = Some(state); - *self.r_val = Some(completed); - let result = self.launch_shutdown_fut(cx); - *self.event_stream = Some(inner_event_stream); - return Left(result); - } - // run a yield to tell the executor to go do work on other - // tasks if they are available - // this is necessary otherwise we could end up with one - // task that returns really quickly blocking the executor - // from dealing with other tasks. 
- let mut fut = async move { - async_yield_now().await; - (None, state) - } - .boxed(); - // if the executor has no extra work to do, - // continue to poll the event stream - if let Poll::Ready((_, state)) = fut.as_mut().poll(cx) { - *self.state = Some(state); - *self.in_progress_fut = None; - // NOTE: don't need to set event stream because - // that will be done on the next iteration - continue; - } - // otherwise, return pending and finish executing the - // yield later - *self.event_stream = Some(inner_event_stream); - *self.in_progress_fut = Some(fut); - return Left(Poll::Pending); - } - Poll::Pending => { - *self.in_progress_fut = Some(fut); - *self.event_stream = Some(inner_event_stream); - return Left(Poll::Pending); - } - } - } - // lost state case - *self.r_val = Some(HotShotTaskCompleted::LostState); - let result = self.launch_shutdown_fut(cx); - *self.event_stream = Some(inner_event_stream); - return Left(result); - } - // no handler case - *self.r_val = Some(HotShotTaskCompleted::MissingHandler); - let result = self.launch_shutdown_fut(cx); - *self.event_stream = Some(inner_event_stream); - return Left(result); - } - // this is a fused future so `None` will come every time after the stream - // finishes - *self.event_stream = Some(inner_event_stream); - return Right(true); - } - *self.event_stream = Some(inner_event_stream); - return Right(false); - } - // stream doesn't exist so trivially true - *self.event_stream = event_stream; - Right(true) - } - - /// check the message stream - /// returns either a poll if there's a future IP - /// or a bool stating whether or not the stream is finished - fn check_message_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Either, bool> { - let message_stream = self.message_stream.take(); - if let Some(mut inner_message_stream) = message_stream { - while let Poll::Ready(maybe_msg) = inner_message_stream.as_mut().poll_next(cx) { - if let Some(msg) = maybe_msg { - if let Some(handle_msg) = self.handle_message { - let 
maybe_state = self.state.take(); - if let Some(state) = maybe_state { - let mut fut = handle_msg(msg, state); - match fut.as_mut().poll(cx) { - Poll::Ready((result, state)) => { - if let Some(completed) = result { - *self.in_progress_fut = None; - *self.state = Some(state); - *self.r_val = Some(completed); - let result = self.launch_shutdown_fut(cx); - *self.message_stream = Some(inner_message_stream); - return Left(result); - } - // run a yield to tell the executor to go do work on other - // tasks if they are available - // this is necessary otherwise we could end up with one - // task that returns really quickly blocking the executor - // from dealing with other tasks. - let mut fut = async move { - async_yield_now().await; - (None, state) - } - .boxed(); - // if the executor has no extra work to do, - // continue to poll the event stream - if let Poll::Ready((_, state)) = fut.as_mut().poll(cx) { - *self.state = Some(state); - *self.in_progress_fut = None; - // NOTE: don't need to set event stream because - // that will be done on the next iteration - continue; - } - // otherwise, return pending and finish executing the - // yield later - *self.message_stream = Some(inner_message_stream); - *self.in_progress_fut = Some(fut); - return Left(Poll::Pending); - } - Poll::Pending => { - *self.in_progress_fut = Some(fut); - *self.message_stream = Some(inner_message_stream); - return Left(Poll::Pending); - } - } - } - // lost state case - *self.r_val = Some(HotShotTaskCompleted::LostState); - let result = self.launch_shutdown_fut(cx); - *self.message_stream = Some(inner_message_stream); - return Left(result); - } - // no handler case - *self.r_val = Some(HotShotTaskCompleted::MissingHandler); - let result = self.launch_shutdown_fut(cx); - *self.message_stream = Some(inner_message_stream); - return Left(result); - } - // this is a fused future so `None` will come every time after the stream - // finishes - *self.message_stream = Some(inner_message_stream); - return 
Right(true); - } - *self.message_stream = Some(inner_message_stream); - return Right(false); - } - // stream doesn't exist so trivially true - *self.message_stream = message_stream; - Right(true) - } -} - -// NOTE: this is a Future, but it could easily be a stream. -// but these are semantically equivalent because instead of -// returning when paused, we just return `Poll::Pending` -impl Future for HST { - type Output = HotShotTaskCompleted; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut projected = self.as_mut().project(); - - if let Some(fut) = projected.in_progress_shutdown_fut.take() { - return projected.check_ip_shutdown_fut(fut, cx); - } - - // check if task is complete - if let Some(state_change) = projected.status.as_mut().try_next() { - match state_change { - TaskStatus::NotStarted | TaskStatus::Paused => { - return Poll::Pending; - } - TaskStatus::Running => {} - TaskStatus::Completed => { - *projected.r_val = Some(HotShotTaskCompleted::ShutDown); - return projected.launch_shutdown_fut(cx); - } - } - } - - // check if there's an in progress future - if let Some(in_progress_fut) = projected.in_progress_fut { - match in_progress_fut.as_mut().poll(cx) { - Poll::Ready((result, state)) => { - *projected.in_progress_fut = None; - *projected.state = Some(state); - // if the future errored out, return it, we're done - if let Some(completed) = result { - *projected.r_val = Some(completed); - return projected.launch_shutdown_fut(cx); - } - } - Poll::Pending => { - return Poll::Pending; - } - } - } - - let event_stream_finished = match projected.check_event_stream(cx) { - Left(result) => return result, - Right(finished) => finished, - }; - - let message_stream_finished = match projected.check_message_stream(cx) { - Left(result) => return result, - Right(finished) => finished, - }; - - if message_stream_finished && event_stream_finished { - tracing::error!("Message and event stream both finished!"); - *projected.r_val = 
Some(HotShotTaskCompleted::StreamsDied); - let result = projected.launch_shutdown_fut(cx); - return result; - } - - Poll::Pending - } -} diff --git a/crates/task/src/task_impls.rs b/crates/task/src/task_impls.rs deleted file mode 100644 index 768e011775..0000000000 --- a/crates/task/src/task_impls.rs +++ /dev/null @@ -1,457 +0,0 @@ -use futures::Stream; -use std::marker::PhantomData; - -use crate::{ - event_stream::{DummyStream, EventStream, SendableStream, StreamId}, - global_registry::{GlobalRegistry, HotShotTaskId}, - task::{ - FilterEvent, HandleEvent, HandleMessage, HotShotTaskHandler, HotShotTaskTypes, PassType, - TaskErr, HST, TS, - }, -}; - -/// trait to specify features -pub trait ImplMessageStream {} - -/// trait to specify features -pub trait ImplEventStream {} - -/// builder for task -pub struct TaskBuilder(HST); - -impl TaskBuilder { - /// register an event handler - #[must_use] - pub fn register_event_handler(self, handler: HandleEvent) -> Self - where - HSTT: ImplEventStream, - { - Self( - self.0 - .register_handler(HotShotTaskHandler::HandleEvent(handler)), - ) - } - - /// obtains stream id if it exists - pub fn get_stream_id(&self) -> Option { - self.0.stream_id - } - - /// register a message handler - #[must_use] - pub fn register_message_handler(self, handler: HandleMessage) -> Self - where - HSTT: ImplMessageStream, - { - Self( - self.0 - .register_handler(HotShotTaskHandler::HandleMessage(handler)), - ) - } - - /// register a message stream - #[must_use] - pub fn register_message_stream(self, stream: HSTT::MessageStream) -> Self - where - HSTT: ImplMessageStream, - { - Self(self.0.register_message_stream(stream)) - } - - /// register an event stream - pub async fn register_event_stream( - self, - stream: HSTT::EventStream, - filter: FilterEvent, - ) -> Self - where - HSTT: ImplEventStream, - { - Self(self.0.register_event_stream(stream, filter).await) - } - - /// register the state - #[must_use] - pub fn register_state(self, state: HSTT::State) 
-> Self { - Self(self.0.register_state(state)) - } - - /// register with the global registry - pub async fn register_registry(self, registry: &mut GlobalRegistry) -> Self { - Self(self.0.register_registry(registry).await) - } - - /// get the task id in the global registry - pub fn get_task_id(&self) -> Option { - self.0.tid - } - - /// create a new task builder - #[must_use] - pub fn new(name: String) -> Self { - Self(HST::new(name)) - } -} - -/// a hotshot task with an event stream -pub struct HSTWithEvent< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - STATE: TS, -> { - /// phantom data - _pd: PhantomData<(ERR, EVENT, ESTREAM, STATE)>, -} - -impl< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - STATE: TS, - > ImplEventStream for HSTWithEvent -{ -} - -impl, STATE: TS> - ImplMessageStream for HSTWithMessage -{ -} - -impl, STATE: TS> - HotShotTaskTypes for HSTWithEvent -{ - type Event = EVENT; - type State = STATE; - type EventStream = ESTREAM; - type Message = (); - type MessageStream = DummyStream; - type Error = ERR; - - fn build(builder: TaskBuilder) -> HST - where - Self: Sized, - { - builder.0.base_check(); - builder.0.event_check(); - builder.0 - } -} - -/// a hotshot task with a message -pub struct HSTWithMessage< - ERR: std::error::Error, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, -> { - /// phantom data - _pd: PhantomData<(ERR, MSG, MSTREAM, STATE)>, -} - -impl, STATE: TS> HotShotTaskTypes - for HSTWithMessage -{ - type Event = (); - type State = STATE; - type EventStream = DummyStream; - type Message = MSG; - type MessageStream = MSTREAM; - type Error = ERR; - - fn build(builder: TaskBuilder) -> HST - where - Self: Sized, - { - builder.0.base_check(); - builder.0.message_check(); - builder.0 - } -} - -/// hotshot task with even and message -pub struct HSTWithEventAndMessage< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, -> { - /// 
phantom data - _pd: PhantomData<(ERR, EVENT, ESTREAM, MSG, MSTREAM, STATE)>, -} - -impl< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, - > ImplEventStream for HSTWithEventAndMessage -{ -} - -impl< - ERR: std::error::Error, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: Stream, - STATE: TS, - > ImplMessageStream for HSTWithEventAndMessage -{ -} - -impl< - ERR: TaskErr, - EVENT: PassType, - ESTREAM: EventStream, - MSG: PassType, - MSTREAM: SendableStream, - STATE: TS, - > HotShotTaskTypes for HSTWithEventAndMessage -{ - type Event = EVENT; - type State = STATE; - type EventStream = ESTREAM; - type Message = MSG; - type MessageStream = MSTREAM; - type Error = ERR; - - fn build(builder: TaskBuilder) -> HST - where - Self: Sized, - { - builder.0.base_check(); - builder.0.message_check(); - builder.0.event_check(); - builder.0 - } -} - -#[cfg(test)] -pub mod test { - use async_compatibility_layer::channel::{unbounded, UnboundedStream}; - use snafu::Snafu; - - use crate::{event_stream, event_stream::ChannelStream, task::TS}; - - use super::{HSTWithEvent, HSTWithEventAndMessage, HSTWithMessage}; - use crate::{event_stream::EventStream, task::HotShotTaskTypes, task_impls::TaskBuilder}; - use async_compatibility_layer::art::async_spawn; - use futures::FutureExt; - use std::sync::Arc; - - use crate::{ - global_registry::GlobalRegistry, - task::{FilterEvent, HandleEvent, HandleMessage, HotShotTaskCompleted}, - }; - use async_compatibility_layer::logging::setup_logging; - - #[derive(Snafu, Debug)] - pub struct Error {} - - #[derive(Clone, Debug, Eq, PartialEq, Hash)] - pub struct State {} - - #[derive(Clone, Debug, Eq, PartialEq, Hash, Default)] - pub struct CounterState { - num_events_recved: u64, - } - - #[derive(Clone, Debug, Eq, PartialEq, Hash)] - pub enum Event { - Finished, - Dummy, - } - - impl TS for State {} - - impl TS for CounterState {} - - #[derive(Clone, Debug, 
PartialEq, Eq, Hash)] - pub enum Message { - Finished, - Dummy, - } - - // TODO fill in generics for stream - - pub type AppliedHSTWithEvent = HSTWithEvent, State>; - pub type AppliedHSTWithEventCounterState = - HSTWithEvent, CounterState>; - pub type AppliedHSTWithMessage = - HSTWithMessage, State>; - pub type AppliedHSTWithEventMessage = HSTWithEventAndMessage< - Error, - Event, - ChannelStream, - Message, - UnboundedStream, - State, - >; - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - #[allow(clippy::should_panic_without_expect)] - #[should_panic] - async fn test_init_with_event_stream() { - setup_logging(); - let task = TaskBuilder::::new("Test Task".to_string()); - AppliedHSTWithEvent::build(task).launch().await; - } - - // TODO this should be moved to async-compatibility-layer - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_channel_stream() { - use futures::StreamExt; - let (s, r) = unbounded(); - let mut stream: UnboundedStream = r.into_stream(); - s.send(Message::Dummy).await.unwrap(); - s.send(Message::Finished).await.unwrap(); - assert!(stream.next().await.unwrap() == Message::Dummy); - assert!(stream.next().await.unwrap() == Message::Finished); - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_task_with_event_stream() { - setup_logging(); - let event_stream: event_stream::ChannelStream = event_stream::ChannelStream::new(); - let mut registry = GlobalRegistry::new(); - - let mut task_runner = crate::task_launcher::TaskRunner::default(); - - for i in 0..10000 { - let state = 
CounterState::default(); - let event_handler = HandleEvent(Arc::new(move |event, mut state: CounterState| { - async move { - if let Event::Dummy = event { - state.num_events_recved += 1; - } - - if state.num_events_recved == 100 { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - (None, state) - } - } - .boxed() - })); - let name = format!("Test Task {i:?}").to_string(); - let built_task = TaskBuilder::::new(name.clone()) - .register_event_stream(event_stream.clone(), FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler); - let id = built_task.get_task_id().unwrap(); - let result = AppliedHSTWithEventCounterState::build(built_task).launch(); - task_runner = task_runner.add_task(id, name, result); - } - - async_spawn(async move { - for _ in 0..100 { - event_stream.publish(Event::Dummy).await; - } - }); - - let results = task_runner.launch().await; - for result in results { - assert!(result.1 == HotShotTaskCompleted::ShutDown); - } - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_task_with_event_stream_xtreme() { - setup_logging(); - let event_stream: event_stream::ChannelStream = event_stream::ChannelStream::new(); - - let state = State {}; - - let mut registry = GlobalRegistry::new(); - - let event_handler = HandleEvent(Arc::new(move |event, state| { - async move { - if let Event::Finished = event { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - (None, state) - } - } - .boxed() - })); - - let built_task = TaskBuilder::::new("Test Task".to_string()) - .register_event_stream(event_stream.clone(), FilterEvent::default()) - .await - .register_registry(&mut registry) - .await - .register_state(state) - .register_event_handler(event_handler); - event_stream.publish(Event::Dummy).await; - 
event_stream.publish(Event::Dummy).await; - event_stream.publish(Event::Finished).await; - AppliedHSTWithEvent::build(built_task).launch().await; - } - - #[cfg(test)] - #[cfg_attr( - async_executor_impl = "tokio", - tokio::test(flavor = "multi_thread", worker_threads = 2) - )] - #[cfg_attr(async_executor_impl = "async-std", async_std::test)] - async fn test_task_with_message_stream() { - setup_logging(); - let state = State {}; - - let mut registry = GlobalRegistry::new(); - - let (s, r) = async_compatibility_layer::channel::unbounded(); - - let message_handler = HandleMessage(Arc::new(move |message, state| { - async move { - if let Message::Finished = message { - (Some(HotShotTaskCompleted::ShutDown), state) - } else { - (None, state) - } - } - .boxed() - })); - - let built_task = TaskBuilder::::new("Test Task".to_string()) - .register_message_handler(message_handler) - .register_message_stream(r.into_stream()) - .register_registry(&mut registry) - .await - .register_state(state); - async_spawn(async move { - s.send(Message::Dummy).await.unwrap(); - s.send(Message::Finished).await.unwrap(); - }); - let result = AppliedHSTWithMessage::build(built_task).launch().await; - assert!(result == HotShotTaskCompleted::ShutDown); - } -} diff --git a/crates/task/src/task_launcher.rs b/crates/task/src/task_launcher.rs deleted file mode 100644 index deff065af2..0000000000 --- a/crates/task/src/task_launcher.rs +++ /dev/null @@ -1,68 +0,0 @@ -use futures::future::{join_all, BoxFuture}; - -use crate::{ - global_registry::{GlobalRegistry, HotShotTaskId}, - task::HotShotTaskCompleted, -}; - -// TODO use genericarray + typenum to make this use the number of tasks as a parameter -/// runner for tasks -/// `N` specifies the number of tasks to ensure that the user -/// doesn't forget how many tasks they wished to add. 
-pub struct TaskRunner -// < -// const N: usize, -// > -{ - /// internal set of tasks to launch - tasks: Vec<( - HotShotTaskId, - String, - BoxFuture<'static, HotShotTaskCompleted>, - )>, - /// global registry - pub registry: GlobalRegistry, -} - -impl Default for TaskRunner { - fn default() -> Self { - Self::new() - } -} - -impl TaskRunner /* */ { - /// create new runner - #[must_use] - pub fn new() -> Self { - Self { - tasks: Vec::new(), - registry: GlobalRegistry::new(), - } - } - - // `name` is for logging purposes only and may be duplicated or inconsistent. - /// to support builder pattern - #[must_use] - pub fn add_task( - mut self, - id: HotShotTaskId, - name: String, - task: BoxFuture<'static, HotShotTaskCompleted>, - ) -> TaskRunner { - self.tasks.push((id, name, task)); - self - } - - /// returns a `Vec` because type isn't known - #[must_use] - pub async fn launch(self) -> Vec<(String, HotShotTaskCompleted)> { - let names = self - .tasks - .iter() - .map(|(_id, name, _)| name.clone()) - .collect::>(); - let result = join_all(self.tasks.into_iter().map(|(_, _, task)| task)).await; - - names.into_iter().zip(result).collect::>() - } -} diff --git a/crates/task/src/task_state.rs b/crates/task/src/task_state.rs deleted file mode 100644 index 01758965a1..0000000000 --- a/crates/task/src/task_state.rs +++ /dev/null @@ -1,182 +0,0 @@ -use atomic_enum::atomic_enum; -use serde::{Deserialize, Serialize}; -use std::{ - fmt::Debug, - sync::{atomic::Ordering, Arc}, -}; - -/// Nit: wish this was for u8 but sadly no -/// Represents the status of a hotshot task -#[atomic_enum] -#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub enum TaskStatus { - /// the task hasn't started running - NotStarted = 0, - /// the task is running - Running = 1, - /// NOTE: not useful generally, but VERY useful for byzantine nodes - /// and testing malfunctions - /// we'll have a granular way to, from the registry, stop a task momentarily - /// and inspect/modify its state 
- Paused = 2, - /// the task completed - Completed = 3, -} - -/// The state of a task -/// `AtomicTaskStatus` + book keeping to notify btwn tasks -#[derive(Clone)] -pub struct TaskState { - /// previous status - prev: Arc, - /// next status - next: Arc, - // using `std::sync::mutex` here because it's faster than async's version - // wakers: Arc>>, -} - -impl Debug for TaskState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TaskState") - .field("status", &self.get_status()) - .finish() - } -} -impl Default for TaskState { - fn default() -> Self { - Self::new() - } -} - -impl TaskState { - /// create a new state - #[must_use] - pub fn new() -> Self { - Self { - prev: Arc::new(TaskStatus::NotStarted.into()), - next: Arc::new(TaskStatus::NotStarted.into()), - // wakers: Arc::default(), - } - } - - /// create a task state from a task status - #[must_use] - pub fn from_status(state: Arc) -> Self { - let prev_state = AtomicTaskStatus::new(state.load(Ordering::SeqCst)); - Self { - prev: Arc::new(prev_state), - next: state, - // wakers: Arc::default(), - } - } - - /// sets the state - /// # Panics - /// should never panic unless internally a lock poison happens - /// this should NOT be possible - pub fn set_state(&self, state: TaskStatus) { - self.next.swap(state, Ordering::SeqCst); - // no panics, so can never be poisoned. - // let mut wakers = self.wakers.lock().unwrap(); - - // drain the wakers - // for waker in wakers.drain(..) 
{ - // waker.wake(); - // } - } - /// gets a possibly stale version of the state - #[must_use] - pub fn get_status(&self) -> TaskStatus { - self.next.load(Ordering::SeqCst) - } -} - -// GNARLY bug @jbearer found -// cx gets *really* large in some cases -// impl Stream for TaskState { -// type Item = TaskStatus; -// -// #[unstable] -// fn poll_next( -// self: std::pin::Pin<&mut Self>, -// cx: &mut std::task::Context<'_>, -// ) -> std::task::Poll> { -// let next = self.next.load(Ordering::SeqCst); -// let prev = self.prev.swap(next, Ordering::SeqCst); -// // a new value has been set -// if prev == next { -// // no panics, so impossible to be poisoned -// self.wakers.lock().unwrap().push(cx.waker().clone()); -// -// // no value has been set, poll again later -// std::task::Poll::Pending -// } else { -// std::task::Poll::Ready(Some(next)) -// } -// } -// } - -impl TaskState { - /// Try to get the next task status. - #[must_use] - pub fn try_next(self: std::pin::Pin<&mut Self>) -> Option { - let next = self.next.load(Ordering::SeqCst); - let prev = self.prev.swap(next, Ordering::SeqCst); - // a new value has been set - if prev == next { - None - } else { - // drain the wakers to wake up the stream. - // we did change value - // let mut wakers = self.wakers.lock().unwrap(); - // for waker in wakers.drain(..) 
{ - // waker.wake(); - // } - Some(next) - } - } -} - -#[cfg(test)] -pub mod test { - - // #[cfg(test)] - // #[cfg_attr( - // feature = "tokio-executor", - // tokio::test(flavor = "multi_thread", worker_threads = 2) - // )] - // #[cfg_attr(feature = "async-std-executor", async_std::test)] - // async fn test_state_stream() { - // setup_logging(); - // - // let mut task = crate::task_state::TaskState::new(); - // - // let task_dup = task.clone(); - // - // async_spawn(async move { - // async_sleep(std::time::Duration::from_secs(1)).await; - // task_dup.set_state(crate::task_state::TaskStatus::Running); - // async_sleep(std::time::Duration::from_secs(1)).await; - // task_dup.set_state(crate::task_state::TaskStatus::Paused); - // async_sleep(std::time::Duration::from_secs(1)).await; - // task_dup.set_state(crate::task_state::TaskStatus::Completed); - // }); - // - // // spawn new task that sleeps then increments - // - // assert_eq!( - // task.try_next().unwrap(), - // crate::task_state::TaskStatus::Running - // ); - // assert_eq!( - // task.next().unwrap(), - // crate::task_state::TaskStatus::Paused - // ); - // assert_eq!( - // task.next().unwrap(), - // crate::task_state::TaskStatus::Completed - // ); - // } - // TODO test global registry using either global + lazy_static - // or passing around global registry -} diff --git a/crates/testing/src/completion_task.rs b/crates/testing/src/completion_task.rs index 71c6fb8a31..79178ed72c 100644 --- a/crates/testing/src/completion_task.rs +++ b/crates/testing/src/completion_task.rs @@ -39,7 +39,11 @@ impl> CompletionTask> CompletionTask changes pub node_changes: Vec<(u64, Vec)>, } - -// impl SpinningTaskDescription { -// /// build a task -// /// # Panics -// /// If there is no latest view -// /// or if the node id is over `u32::MAX` -// #[must_use] -// pub fn build>( -// self, -// ) -> TaskGenerator> { -// Box::new(move |mut state, mut registry, test_event_stream| { -// async move { -// let event_handler = -// 
HandleEvent::>(Arc::new(move |event, state| { -// async move { -// match event { -// GlobalTestEvent::ShutDown => (Some(HotShotTaskCompleted), state), -// } -// } -// .boxed() -// })); - -// let message_handler = HandleMessage::>(Arc::new( -// move |msg, mut state| { -// async move { -// let Event { -// view_number, -// event: _, -// } = msg.1; - -// // if we have not seen this view before -// if state.latest_view.is_none() -// || view_number > state.latest_view.unwrap() -// { -// // perform operations on the nodes -// if let Some(operations) = state.changes.remove(&view_number) { -// for ChangeNode { idx, updown } in operations { -// match updown { -// UpDown::Up => { -// if let Some(node) = state -// .late_start -// .remove(&idx.try_into().unwrap()) -// { -// tracing::error!( -// "Node {} spinning up late", -// idx -// ); -// let handle = node.run_tasks().await; -// handle.hotshot.start_consensus().await; -// } -// } -// UpDown::Down => { -// if let Some(node) = state.handles.get_mut(idx) { -// tracing::error!("Node {} shutting down", idx); -// node.handle.shut_down().await; -// } -// } -// UpDown::NetworkUp => { -// if let Some(handle) = state.handles.get(idx) { -// tracing::error!( -// "Node {} networks resuming", -// idx -// ); -// handle.networks.0.resume(); -// handle.networks.1.resume(); -// } -// } -// UpDown::NetworkDown => { -// if let Some(handle) = state.handles.get(idx) { -// tracing::error!( -// "Node {} networks pausing", -// idx -// ); -// handle.networks.0.pause(); -// handle.networks.1.pause(); -// } -// } -// } -// } -// } - -// // update our latest view -// state.latest_view = Some(view_number); -// } - -// (None, state) -// } -// .boxed() -// }, -// )); - -// let mut streams = vec![]; -// for handle in &mut state.handles { -// let s1 = handle.handle.get_event_stream_known_impl().await; -// streams.push(s1); -// } -// let builder = TaskBuilder::>::new( -// "Test Spinning Task".to_string(), -// ) -// .register_event_stream(test_event_stream, 
FilterEvent::default()) -// .await -// .register_registry(&mut registry) -// .await -// .register_message_handler(message_handler) -// .register_message_stream(MergeN::new(streams)) -// .register_event_handler(event_handler) -// .register_state(state); -// let task_id = builder.get_task_id().unwrap(); -// (task_id, SpinningTaskTypes::build(builder).launch()) -// } -// .boxed() -// }) -// } -// } - -// /// types for safety task -// pub type SpinningTaskTypes = HSTWithEventAndMessage< -// SpinningTaskErr, -// GlobalTestEvent, -// ChannelStream, -// (usize, Event), -// MergeN>>, -// SpinningTask, -// >; diff --git a/crates/testing/src/test_builder.rs b/crates/testing/src/test_builder.rs index 9bfe6fdb18..9a4b0e99a2 100644 --- a/crates/testing/src/test_builder.rs +++ b/crates/testing/src/test_builder.rs @@ -215,12 +215,8 @@ impl TestMetadata { min_transactions, timing_data, da_committee_size, - - - - + unreliable_network, - .. } = self.clone(); diff --git a/crates/testing/src/test_launcher.rs b/crates/testing/src/test_launcher.rs index 19527a39c7..82a0e1d870 100644 --- a/crates/testing/src/test_launcher.rs +++ b/crates/testing/src/test_launcher.rs @@ -1,15 +1,9 @@ use std::{collections::HashMap, sync::Arc}; - use hotshot::traits::{NodeImplementation, TestableNodeImplementation}; use hotshot_types::{traits::node_implementation::NodeType, HotShotConfig}; - - - -use super::{ - test_builder::TestMetadata, test_runner::TestRunner, -}; +use super::{test_builder::TestMetadata, test_runner::TestRunner}; /// convience type alias for the networks available pub type Networks = ( diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 823a752f21..83f836332d 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -12,7 +12,7 @@ use crate::{ view_sync_task::ViewSyncTask, }; use async_broadcast::broadcast; -use futures::{future::join_all}; +use futures::future::join_all; use hotshot::{types::SystemContextHandle, 
Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; @@ -172,7 +172,6 @@ where Task::new(tx.clone(), rx.clone(), reg.clone(), spinning_task_state), event_rxs.clone(), ); - task_futs.push(spinning_task.run()); // add safety task let overall_safety_task_state = OverallSafetyTask { handles: nodes.clone(), @@ -214,13 +213,15 @@ where } } task_futs.push(safety_task.run()); - task_futs.push(view_sync_task.run()); + // task_futs.push(view_sync_task.run()); // if let Some(txn) = txn_task { // task_futs.push(txn.run()); // } task_futs.push(completion_task.run()); + task_futs.push(spinning_task.run()); let results = join_all(task_futs).await; + tracing::error!("test tasks joined"); let mut error_list = vec![]; for result in results { match result.unwrap() { diff --git a/crates/testing/src/timeout_task.rs b/crates/testing/src/timeout_task.rs deleted file mode 100644 index 8b13789179..0000000000 --- a/crates/testing/src/timeout_task.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 8a1cefb07a..5dbfcb7178 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -112,100 +112,3 @@ pub enum ViewSyncTaskDescription { /// (min, max) number nodes that may hit view sync, inclusive Threshold(usize, usize), } - -// impl ViewSyncTaskDescription { -// /// build a view sync task from its description -// /// # Panics -// /// if there is an violation of the view sync description -// #[must_use] -// pub fn build>( -// self, -// ) -> TaskGenerator> { -// Box::new(move |mut state, mut registry, test_event_stream| { -// async move { -// let event_handler = -// HandleEvent::>(Arc::new(move |event, state| { -// let self_dup = self.clone(); -// async move { -// match event { -// GlobalTestEvent::ShutDown => match self_dup.clone() { -// ViewSyncTaskDescription::Threshold(min, max) => { -// let num_hits = state.hit_view_sync.len(); -// if 
min <= num_hits && num_hits <= max { -// (Some(HotShotTaskCompleted), state) -// } else { -// ( -// Some(HotShotTaskCompleted::Error(Box::new( -// ViewSyncTaskErr { -// hit_view_sync: state.hit_view_sync.clone(), -// }, -// ))), -// state, -// ) -// } -// } -// }, -// } -// } -// .boxed() -// })); - -// let message_handler = HandleMessage::>(Arc::new( -// // NOTE: could short circuit on entering view sync if we're not supposed to -// // enter view sync. I opted not to do this just to gather more information -// // (since we'll fail the test later anyway) -// move |(id, msg), mut state| { -// async move { -// match msg { -// // all the view sync events -// HotShotEvent::ViewSyncTimeout(_, _, _) -// | HotShotEvent::ViewSyncPreCommitVoteRecv(_) -// | HotShotEvent::ViewSyncCommitVoteRecv(_) -// | HotShotEvent::ViewSyncFinalizeVoteRecv(_) -// | HotShotEvent::ViewSyncPreCommitVoteSend(_) -// | HotShotEvent::ViewSyncCommitVoteSend(_) -// | HotShotEvent::ViewSyncFinalizeVoteSend(_) -// | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) -// | HotShotEvent::ViewSyncCommitCertificate2Recv(_) -// | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) -// | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) -// | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) -// | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) -// | HotShotEvent::ViewSyncTrigger(_) => { -// state.hit_view_sync.insert(id); -// } -// _ => (), -// } -// (None, state) -// } -// .boxed() -// }, -// )); -// let mut streams = vec![]; -// for handle in &mut state.handles { -// let stream = handle -// .handle -// .get_internal_event_stream_known_impl(FilterEvent::default()) -// .await -// .0; -// streams.push(stream); -// } - -// let builder = TaskBuilder::>::new( -// "Test Completion Task".to_string(), -// ) -// .register_event_stream(test_event_stream, FilterEvent::default()) -// .await -// .register_registry(&mut registry) -// .await -// .register_state(state) -// .register_event_handler(event_handler) 
-// .register_message_handler(message_handler) -// .register_message_stream(MergeN::new(streams)); -// let task_id = builder.get_task_id().unwrap(); -// (task_id, ViewSyncTaskTypes::build(builder).launch()) -// } -// .boxed() -// }) -// } -// } diff --git a/crates/testing/tests/da_task.rs b/crates/testing/tests/da_task.rs index 31634fc0c5..ad71b39be6 100644 --- a/crates/testing/tests/da_task.rs +++ b/crates/testing/tests/da_task.rs @@ -21,7 +21,6 @@ use std::{collections::HashMap, marker::PhantomData}; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_da_task() { - use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::message::Proposal; diff --git a/crates/testing/tests/network_task.rs b/crates/testing/tests/network_task.rs index 52458243dd..d4b1893e2e 100644 --- a/crates/testing/tests/network_task.rs +++ b/crates/testing/tests/network_task.rs @@ -20,7 +20,6 @@ use std::{collections::HashMap, marker::PhantomData}; #[ignore] #[allow(clippy::too_many_lines)] async fn test_network_task() { - use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::{data::VidDisperse, message::Proposal}; diff --git a/crates/testing/tests/view_sync_task.rs b/crates/testing/tests/view_sync_task.rs index 6fc3711432..27e5dcc82d 100644 --- a/crates/testing/tests/view_sync_task.rs +++ b/crates/testing/tests/view_sync_task.rs @@ -11,7 +11,6 @@ use std::collections::HashMap; )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_view_sync_task() { - use hotshot_task_impls::harness::run_harness; use hotshot_task_impls::view_sync::ViewSyncTaskState; use hotshot_testing::task_helpers::build_system_handle; From 016b36acc64bcac50ad82cece2593b676a111942 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 30 Jan 2024 23:39:47 -0500 Subject: [PATCH 09/28] test almost running correctly, maybe not passing --- Cargo.lock | 2 +- crates/hotshot/src/lib.rs | 3 ++- 
crates/testing/src/test_runner.rs | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13cb860b2d..3d2b74512e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6573,7 +6573,7 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "task" version = "0.1.0" -source = "git+https://github.com/EspressoSystems/HotShotTasks.git#c55751758da8899d868cfc28bc95cd4e4d71584d" +source = "git+https://github.com/EspressoSystems/HotShotTasks.git#7eab11eaea7083e9fb31a9a202f5b95c673c50ad" dependencies = [ "async-broadcast", "async-std", diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 4472624041..879de6a035 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -277,7 +277,8 @@ impl> SystemContext { .broadcast_direct(HotShotEvent::QCFormed(either::Left( QuorumCertificate::genesis(), ))) - .await; + .await + .unwrap(); } /// Marks a given view number as timed out. This should be called a fixed period after a round is started. 
diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 83f836332d..9daf0bcd98 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -213,7 +213,7 @@ where } } task_futs.push(safety_task.run()); - // task_futs.push(view_sync_task.run()); + task_futs.push(view_sync_task.run()); // if let Some(txn) = txn_task { // task_futs.push(txn.run()); // } From b30effda625544c111c604cdd862e4e2a6862917 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 09:32:52 -0500 Subject: [PATCH 10/28] Add Tasks to project, success test passes --- Cargo.lock | 28 +- Cargo.toml | 1 + crates/hotshot/Cargo.toml | 2 +- crates/hotshot/src/lib.rs | 2 +- crates/hotshot/src/tasks/mod.rs | 2 +- crates/hotshot/src/types/handle.rs | 2 +- crates/task-impls/Cargo.toml | 2 +- crates/task-impls/src/consensus.rs | 2 +- crates/task-impls/src/da.rs | 2 +- crates/task-impls/src/harness.rs | 4 +- crates/task-impls/src/network.rs | 2 +- crates/task-impls/src/transactions.rs | 2 +- crates/task-impls/src/vid.rs | 4 +- crates/task-impls/src/view_sync.rs | 6 +- crates/task-impls/src/vote.rs | 7 +- crates/task/Cargo.lock | 542 ++++++++++++++++++++++ crates/task/Cargo.toml | 17 + crates/task/src/dependency.rs | 227 +++++++++ crates/task/src/dependency_task.rs | 129 +++++ crates/task/src/lib.rs | 5 + crates/task/src/task.rs | 360 ++++++++++++++ crates/testing/Cargo.toml | 2 +- crates/testing/src/overall_safety_task.rs | 10 +- crates/testing/src/spinning_task.rs | 9 +- crates/testing/src/test_runner.rs | 2 +- crates/testing/src/view_sync_task.rs | 9 +- 26 files changed, 1325 insertions(+), 55 deletions(-) create mode 100644 crates/task/Cargo.lock create mode 100644 crates/task/Cargo.toml create mode 100644 crates/task/src/dependency.rs create mode 100644 crates/task/src/dependency_task.rs create mode 100644 crates/task/src/lib.rs create mode 100644 crates/task/src/task.rs diff --git a/Cargo.lock b/Cargo.lock index 3d2b74512e..d7ed72d6f0 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2695,6 +2695,7 @@ dependencies = [ "futures", "hotshot-constants", "hotshot-orchestrator", + "hotshot-task", "hotshot-task-impls", "hotshot-testing", "hotshot-types", @@ -2709,7 +2710,6 @@ dependencies = [ "serde", "snafu", "surf-disco", - "task", "time 0.3.31", "tokio", "toml 0.8.8", @@ -2783,6 +2783,17 @@ dependencies = [ "jf-utils", ] +[[package]] +name = "hotshot-task" +version = "0.1.0" +dependencies = [ + "async-broadcast", + "async-std", + "futures", + "tokio", + "tracing", +] + [[package]] name = "hotshot-task-impls" version = "0.1.0" @@ -2798,11 +2809,11 @@ dependencies = [ "either", "futures", "hotshot-constants", + "hotshot-task", "hotshot-types", "hotshot-utils", "sha2 0.10.8", "snafu", - "task", "time 0.3.31", "tokio", "tracing", @@ -2825,6 +2836,7 @@ dependencies = [ "hotshot", "hotshot-constants", "hotshot-orchestrator", + "hotshot-task", "hotshot-task-impls", "hotshot-types", "rand 0.8.5", @@ -2832,7 +2844,6 @@ dependencies = [ "sha2 0.10.8", "sha3", "snafu", - "task", "tokio", "tracing", ] @@ -6570,17 +6581,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" -[[package]] -name = "task" -version = "0.1.0" -source = "git+https://github.com/EspressoSystems/HotShotTasks.git#7eab11eaea7083e9fb31a9a202f5b95c673c50ad" -dependencies = [ - "async-broadcast", - "async-std", - "futures", - "tokio", -] - [[package]] name = "tempfile" version = "3.9.0" diff --git a/Cargo.toml b/Cargo.toml index 796b7f457b..3ab5841d62 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ members = [ "crates/hotshot-state-prover", "crates/libp2p-networking", "crates/testing-macros", + "crates/task", "crates/task-impls", "crates/testing", "crates/types", diff --git a/crates/hotshot/Cargo.toml b/crates/hotshot/Cargo.toml index 203a61df4d..d9e6a86627 100644 --- a/crates/hotshot/Cargo.toml +++ b/crates/hotshot/Cargo.toml 
@@ -108,7 +108,7 @@ time = { workspace = true } derive_more = "0.99.17" portpicker = "0.1.1" lru = "0.12.1" -task = { workspace = true } +hotshot-task = { path = "../task" } tracing = { workspace = true } diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 879de6a035..a2dd3e0323 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -39,6 +39,7 @@ use hotshot_task_impls::network; #[cfg(feature = "hotshot-testing")] use hotshot_types::traits::node_implementation::ChannelMaps; +use hotshot_task::task::{Task, TaskRegistry}; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, data::Leaf, @@ -67,7 +68,6 @@ use std::{ sync::Arc, time::Duration, }; -use task::task::{Task, TaskRegistry}; use tasks::add_vid_task; use tracing::{debug, error, instrument, trace, warn}; diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index 764e4a68ba..e7bc4b5b11 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -4,6 +4,7 @@ use crate::{types::SystemContextHandle, HotShotConsensusApi}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; +use hotshot_task::task::{Task, TaskRegistry}; use hotshot_task_impls::{ consensus::{CommitmentAndMetadata, ConsensusTaskState}, da::DATaskState, @@ -32,7 +33,6 @@ use std::{ sync::Arc, time::Duration, }; -use task::task::{Task, TaskRegistry}; /// event for global event stream #[derive(Clone, Debug)] diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 2a7d8de3ce..5e977fad1a 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -14,6 +14,7 @@ use hotshot_types::{ traits::election::Membership, }; +use hotshot_task::task::TaskRegistry; use hotshot_types::{boxed_sync, simple_vote::QuorumData, BoxSyncFuture}; use hotshot_types::{ consensus::Consensus, @@ -24,7 +25,6 @@ use hotshot_types::{ 
traits::{node_implementation::NodeType, state::ConsensusTime, storage::Storage}, }; use std::sync::Arc; -use task::task::TaskRegistry; use tracing::error; /// Event streaming handle for a [`SystemContext`] instance running in the background diff --git a/crates/task-impls/Cargo.toml b/crates/task-impls/Cargo.toml index ce343fbdc5..b236dc23da 100644 --- a/crates/task-impls/Cargo.toml +++ b/crates/task-impls/Cargo.toml @@ -21,7 +21,7 @@ commit = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } sha2 = { workspace = true } -task = { workspace = true } +hotshot-task = { path = "../task" } async-broadcast = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index 496462de21..d5c110b499 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -10,7 +10,7 @@ use async_std::task::JoinHandle; use commit::Committable; use core::time::Duration; use hotshot_constants::LOOK_AHEAD; -use task::task::{Task, TaskState}; +use hotshot_task::task::{Task, TaskState}; use async_broadcast::Sender; diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index f90e1ff23c..6912091d79 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -5,6 +5,7 @@ use crate::{ use async_broadcast::Sender; use async_lock::RwLock; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::{Consensus, View}, data::DAProposal, @@ -25,7 +26,6 @@ use hotshot_types::{ vote::HasViewNumber, }; use sha2::{Digest, Sha256}; -use task::task::{Task, TaskState}; use crate::vote::HandleVoteEvent; use snafu::Snafu; diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 906065fe2b..a1c6f57014 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -1,9 +1,9 @@ use crate::events::{HotShotEvent, HotShotTaskCompleted}; use 
async_broadcast::broadcast; +use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; use std::{collections::HashMap, sync::Arc}; -use task::task::{Task, TaskRegistry, TaskState}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { @@ -19,7 +19,7 @@ impl TaskState for TestHarnessState { async fn handle_event( event: Self::Event, - task: &mut task::task::Task, + task: &mut Task, ) -> Option { let extra = task.state_mut().allow_extra_output; handle_event(event, task, extra) diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 740442e747..ee219de20a 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -3,6 +3,7 @@ use async_broadcast::Sender; use either::Either::{self, Left, Right}; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ message::{ CommitteeConsensusMessage, GeneralConsensusMessage, Message, MessageKind, SequencingMessage, @@ -14,7 +15,6 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use task::task::{Task, TaskState}; use tracing::error; use tracing::instrument; diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index c2fef8cb69..c3d3ef2f8b 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -8,6 +8,7 @@ use async_lock::RwLock; use bincode::config::Options; use commit::{Commitment, Committable}; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ consensus::Consensus, data::Leaf, @@ -28,7 +29,6 @@ use std::{ sync::Arc, time::Instant, }; -use task::task::{Task, TaskState}; use tracing::{debug, error, instrument, warn}; /// A type alias for `HashMap, T>` diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index dd5a98ff3c..3ad97572f0 100644 --- 
a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -4,6 +4,7 @@ use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::Consensus, @@ -20,7 +21,6 @@ use hotshot_types::{ data::{test_srs, VidScheme, VidSchemeTrait}, traits::network::ConsensusIntentEvent, }; -use task::task::TaskState; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; @@ -183,7 +183,7 @@ impl, A: ConsensusApi + async fn handle_event( event: Self::Event, - task: &mut task::task::Task, + task: &mut Task, ) -> Option { // TODO: Don't clone the sender let sender = task.clone_sender(); diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index f526b1160a..1e783f8b92 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -25,6 +25,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ message::GeneralConsensusMessage, traits::{ @@ -37,7 +38,6 @@ use hotshot_types::{ }; use snafu::Snafu; use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration}; -use task::task::TaskState; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; @@ -118,7 +118,7 @@ impl< type Result = (); - async fn handle_event(event: Self::Event, task: &mut task::task::Task) -> Option<()> { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> { let sender = task.clone_sender(); task.state_mut().handle(event, sender).await; None @@ -187,7 +187,7 @@ impl, A: ConsensusApi + type Result = (); - async fn handle_event(event: Self::Event, task: &mut task::task::Task) -> Option<()> { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> 
{ let sender = task.clone_sender(); task.state_mut().handle(event, sender).await; None diff --git a/crates/task-impls/src/vote.rs b/crates/task-impls/src/vote.rs index 0ec0b573da..ebb8126949 100644 --- a/crates/task-impls/src/vote.rs +++ b/crates/task-impls/src/vote.rs @@ -6,6 +6,7 @@ use async_trait::async_trait; use bitvec::prelude::*; use either::Either::{self, Left, Right}; +use hotshot_task::task::{Task, TaskState}; use hotshot_types::{ simple_certificate::{ DACertificate, QuorumCertificate, TimeoutCertificate, ViewSyncCommitCertificate2, @@ -19,7 +20,6 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; use snafu::Snafu; -use task::task::TaskState; use tracing::{debug, error}; #[derive(Snafu, Debug)] @@ -125,10 +125,7 @@ where type Result = HotShotTaskCompleted; - async fn handle_event( - event: Self::Event, - task: &mut task::task::Task, - ) -> Option { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { let sender = task.clone_sender(); task.state_mut().handle_event(event, &sender).await } diff --git a/crates/task/Cargo.lock b/crates/task/Cargo.lock new file mode 100644 index 0000000000..f8711d97b6 --- /dev/null +++ b/crates/task/Cargo.lock @@ -0,0 +1,542 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "async-channel" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +dependencies = [ + "concurrent-queue", + "event-listener", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "broadcast" +version = "0.1.0" +dependencies = [ + "async-channel", + "futures", + "futures-core", + "tokio", + "tokio-util", +] + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener", + "pin-project-lite", +] + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "hermit-abi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" + +[[package]] +name = "libc" +version = "0.2.152" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "memchr" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +dependencies = [ + "libc", + "wasi", + "windows-sys", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "proc-macro2" +version = "1.0.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b187f0231d56fe41bfb12034819dd2bf336422a5866de41bc3fec4b2e3883e8" + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "syn" +version = "2.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "task" +version = "0.1.0" +dependencies = [ + "broadcast", + "tokio", +] + +[[package]] +name = "tokio" +version = "1.35.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + 
+[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml new file mode 100644 index 0000000000..2b17ac25d0 --- /dev/null +++ b/crates/task/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "hotshot-task" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] + +futures = "0.3.30" +async-broadcast = "0.6.0" +tracing = { workspace = true } + +[target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] +tokio = { version = "1.35.1", features = ["time", "rt-multi-thread", "macros", "sync"] } +[target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] +async-std = { version = "1.12.0", features = ["attributes"] } diff --git a/crates/task/src/dependency.rs b/crates/task/src/dependency.rs new file mode 100644 index 0000000000..014f979fd5 --- /dev/null +++ b/crates/task/src/dependency.rs @@ -0,0 +1,227 @@ +use async_broadcast::Receiver; +use futures::future::BoxFuture; +use futures::stream::FuturesUnordered; +use futures::stream::StreamExt; +use futures::FutureExt; +use 
std::future::Future; + +pub trait Dependency { + fn completed(self) -> impl Future + Send; +} + +trait CombineDependencies: + Sized + Dependency + Send + 'static +{ + fn or + Send + 'static>(self, dep: D) -> OrDependency { + let mut or = OrDependency::from_deps(vec![self]); + or.add_dep(dep); + or + } + fn and + Send + 'static>(self, dep: D) -> AndDependency { + let mut and = AndDependency::from_deps(vec![self]); + and.add_dep(dep); + and + } +} + +pub struct AndDependency { + deps: Vec>, +} +impl Dependency> for AndDependency { + async fn completed(self) -> Vec { + let futures = FuturesUnordered::from_iter(self.deps); + futures.collect().await + } +} + +impl AndDependency { + pub fn from_deps(deps: Vec + Send + 'static>) -> Self { + let mut pinned = vec![]; + for dep in deps { + pinned.push(dep.completed().boxed()) + } + Self { deps: pinned } + } + pub fn add_dep(&mut self, dep: impl Dependency + Send + 'static) { + self.deps.push(dep.completed().boxed()); + } + pub fn add_deps(&mut self, deps: AndDependency) { + for dep in deps.deps { + self.deps.push(dep); + } + } +} + +pub struct OrDependency { + deps: Vec>, +} +impl Dependency for OrDependency { + async fn completed(self) -> T { + let mut futures = FuturesUnordered::from_iter(self.deps); + loop { + if let Some(val) = futures.next().await { + break val; + } + } + } +} + +impl OrDependency { + pub fn from_deps(deps: Vec + Send + 'static>) -> Self { + let mut pinned = vec![]; + for dep in deps { + pinned.push(dep.completed().boxed()) + } + Self { deps: pinned } + } + pub fn add_dep(&mut self, dep: impl Dependency + Send + 'static) { + self.deps.push(dep.completed().boxed()); + } +} + +pub struct EventDependency { + pub(crate) event_rx: Receiver, + pub(crate) match_fn: Box bool + Send>, +} + +impl EventDependency { + pub fn new(receiver: Receiver, match_fn: Box bool + Send>) -> Self { + Self { + event_rx: receiver, + match_fn: Box::new(match_fn), + } + } +} + +impl Dependency for EventDependency { + async fn 
completed(mut self) -> T { + loop { + let next = self.event_rx.recv().await.unwrap(); + if (self.match_fn)(&next) { + return next; + } + } + } +} + +// Impl Combine for all the basic dependency types +impl CombineDependencies for D where + D: Dependency + Send + 'static +{ +} + +#[cfg(test)] +mod tests { + use crate::dependency::CombineDependencies; + + use super::{AndDependency, Dependency, EventDependency, OrDependency}; + use async_broadcast::{broadcast, Receiver}; + + fn eq_dep(rx: Receiver, val: usize) -> EventDependency { + EventDependency { + event_rx: rx, + match_fn: Box::new(move |v| *v == val), + } + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn it_works() { + let (tx, rx) = broadcast(10); + + let mut deps = vec![]; + for i in 0..5 { + tx.broadcast(i).await.unwrap(); + deps.push(eq_dep(rx.clone(), 5)) + } + + let and = AndDependency::from_deps(deps); + tx.broadcast(5).await.unwrap(); + let result = and.completed().await; + assert_eq!(result, vec![5; 5]); + } + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn or_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(5).await.unwrap(); + let mut deps = vec![]; + for _ in 0..5 { + deps.push(eq_dep(rx.clone(), 5)) + } + let or = OrDependency::from_deps(deps); + let result = or.completed().await; + assert_eq!(result, 5); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn and_or_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(1).await.unwrap(); + tx.broadcast(2).await.unwrap(); + tx.broadcast(3).await.unwrap(); + tx.broadcast(5).await.unwrap(); + tx.broadcast(6).await.unwrap(); + + 
let or1 = OrDependency::from_deps([eq_dep(rx.clone(), 4), eq_dep(rx.clone(), 6)].into()); + let or2 = OrDependency::from_deps([eq_dep(rx.clone(), 4), eq_dep(rx.clone(), 5)].into()); + let and = AndDependency::from_deps([or1, or2].into()); + let result = and.completed().await; + assert_eq!(result, vec![6, 5]); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn or_and_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(1).await.unwrap(); + tx.broadcast(2).await.unwrap(); + tx.broadcast(3).await.unwrap(); + tx.broadcast(4).await.unwrap(); + tx.broadcast(5).await.unwrap(); + + let and1 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 6)); + let and2 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 5)); + let or = and1.or(and2); + let result = or.completed().await; + assert_eq!(result, vec![4, 5]); + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn many_and_dep() { + let (tx, rx) = broadcast(10); + + tx.broadcast(1).await.unwrap(); + tx.broadcast(2).await.unwrap(); + tx.broadcast(3).await.unwrap(); + tx.broadcast(4).await.unwrap(); + tx.broadcast(5).await.unwrap(); + tx.broadcast(6).await.unwrap(); + + let mut and1 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 6)); + let and2 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 5)); + and1.add_deps(and2); + let result = and1.completed().await; + assert_eq!(result, vec![4, 6, 4, 5]); + } +} diff --git a/crates/task/src/dependency_task.rs b/crates/task/src/dependency_task.rs new file mode 100644 index 0000000000..86daa095e1 --- /dev/null +++ b/crates/task/src/dependency_task.rs @@ -0,0 +1,129 @@ +#[cfg(async_executor_impl = "async-std")] +use async_std::task::{spawn, JoinHandle}; +#[cfg(async_executor_impl = "tokio")] +use tokio::task::{spawn, 
JoinHandle}; + +use futures::Future; + +use crate::dependency::Dependency; + +pub trait HandleDepResult: Send + Sized + Sync + 'static { + type Result: Send + Sync + 'static; + + /// Called once when the Dependency completes handles the results + fn handle_dep_result(self, res: Self::Result) -> impl Future + Send; +} + +pub struct DependencyTask + Send, H: HandleDepResult + Send> { + pub(crate) dep: D, + pub(crate) handle: H, +} + +impl + Send, H: HandleDepResult + Send> DependencyTask { + pub fn new(dep: D, handle: H) -> Self { + Self { dep, handle } + } +} + +impl + Send + 'static, H: HandleDepResult> DependencyTask { + pub fn run(self) -> JoinHandle<()> + where + Self: Sized, + { + spawn(async move { + self.handle + .handle_dep_result(self.dep.completed().await) + .await; + }) + } +} + +#[cfg(test)] +mod test { + + use std::time::Duration; + + use async_broadcast::{broadcast, Receiver, Sender}; + use futures::{stream::FuturesOrdered, StreamExt}; + + #[cfg(async_executor_impl = "async-std")] + use async_std::task::sleep; + #[cfg(async_executor_impl = "tokio")] + use tokio::time::sleep; + + use super::*; + use crate::dependency::*; + + #[derive(Clone, PartialEq, Eq, Debug)] + enum TaskResult { + Success(usize), + // Failure, + } + + struct DummyHandle { + sender: Sender, + } + impl HandleDepResult for DummyHandle { + type Result = usize; + async fn handle_dep_result(self, res: usize) { + self.sender + .broadcast(TaskResult::Success(res)) + .await + .unwrap(); + } + } + + fn eq_dep(rx: Receiver, val: usize) -> EventDependency { + EventDependency { + event_rx: rx, + match_fn: Box::new(move |v| *v == val), + } + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn it_works() { + let (tx, rx) = broadcast(10); + let (res_tx, mut res_rx) = broadcast(10); + let dep = eq_dep(rx, 2); + let handle = DummyHandle { sender: res_tx }; + let 
join_handle = DependencyTask { dep, handle }.run(); + tx.broadcast(2).await.unwrap(); + assert_eq!(res_rx.recv().await.unwrap(), TaskResult::Success(2)); + let _ = join_handle.await; + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn many_works() { + let (tx, rx) = broadcast(20); + let (res_tx, mut res_rx) = broadcast(20); + + let mut handles = vec![]; + for i in 0..10 { + let dep = eq_dep(rx.clone(), i); + let handle = DummyHandle { + sender: res_tx.clone(), + }; + handles.push(DependencyTask { dep, handle }.run()); + } + let tx2 = tx.clone(); + spawn(async move { + for i in 0..10 { + tx.broadcast(i).await.unwrap(); + sleep(Duration::from_millis(10)).await; + } + }); + for i in 0..10 { + assert_eq!(res_rx.recv().await.unwrap(), TaskResult::Success(i)); + } + tx2.broadcast(100).await.unwrap(); + FuturesOrdered::from_iter(handles).collect::>().await; + } +} diff --git a/crates/task/src/lib.rs b/crates/task/src/lib.rs new file mode 100644 index 0000000000..ab945c258f --- /dev/null +++ b/crates/task/src/lib.rs @@ -0,0 +1,5 @@ +pub mod task; + +pub mod dependency; + +pub mod dependency_task; diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs new file mode 100644 index 0000000000..037a03e0e2 --- /dev/null +++ b/crates/task/src/task.rs @@ -0,0 +1,360 @@ +use std::sync::Arc; + +use async_broadcast::{Receiver, SendError, Sender}; + +#[cfg(async_executor_impl = "async-std")] +use async_std::{ + sync::RwLock, + task::{spawn, JoinHandle}, +}; +use futures::{ + future::{select_all, try_join_all}, + Future, +}; + +#[cfg(async_executor_impl = "tokio")] +use tokio::{ + sync::RwLock, + task::{spawn, JoinHandle}, +}; + +use crate::{ + dependency::Dependency, + dependency_task::{DependencyTask, HandleDepResult}, +}; + +pub trait TaskState: Send { + type Event: Clone + Send + Sync + 'static; + type Result: Send; + /// Handle event 
and update state. Return true if the task is finished + /// false otherwise + fn handle_event( + event: Self::Event, + task: &mut Task, + ) -> impl Future> + Send + where + Self: Sized; + + /// Return true if the event should be filtered + fn filter(&self, _event: &Self::Event) -> bool { + // default doesn't filter + false + } + /// Do something with the result of the task before it shuts down + fn handle_result(&self, _res: &Self::Result) -> impl std::future::Future + Send { + async {} + } + /// Return true if the event should shut the task down + fn should_shutdown(event: &Self::Event) -> bool; + /// Handle anything before the task is completely shutdown + fn shutdown(&mut self) -> impl std::future::Future + Send { + async {} + } +} + +pub trait TestTaskState: Send { + type Message: Clone + Send + Sync + 'static; + type Result: Send; + type State: TaskState; + fn handle_message( + message: Self::Message, + id: usize, + task: &mut TestTask, + ) -> impl Future> + Send + where + Self: Sized; +} + +pub struct Task { + event_sender: Sender, + event_receiver: Receiver, + registry: Arc, + state: S, +} + +impl Task { + pub fn new( + tx: Sender, + rx: Receiver, + registry: Arc, + state: S, + ) -> Self { + Task { + event_sender: tx, + event_receiver: rx, + registry, + state, + } + } + fn run(mut self) -> JoinHandle<()> { + spawn(async move { + loop { + let event = self.event_receiver.recv_direct().await; + if S::should_shutdown(event.as_ref().unwrap()) { + self.state.shutdown().await; + break; + } + if self.state.filter(event.as_ref().unwrap()) { + continue; + } + if let Some(res) = S::handle_event(event.unwrap(), &mut self).await { + self.state.handle_result(&res).await; + self.state.shutdown().await; + break; + } + } + }) + } + pub fn subscribe(&self) -> Receiver { + self.event_receiver.clone() + } + pub fn sender(&self) -> &Sender { + &self.event_sender + } + pub fn clone_sender(&self) -> Sender { + self.event_sender.clone() + } + pub async fn send(&self, event: 
S::Event) -> Result, SendError> { + self.event_sender.broadcast(event).await + } + pub fn state_mut(&mut self) -> &mut S { + &mut self.state + } + pub async fn run_sub_task(&self, state: S) { + let task = Task { + event_sender: self.clone_sender(), + event_receiver: self.subscribe(), + registry: self.registry.clone(), + state, + }; + // Note: await here is only awaiting the task to be added to the + // registry, not for the task to run. + self.registry.run_task(task).await; + } +} + +pub struct TestTask { + task: Task, + message_receivers: Vec>, +} + +impl< + S: TaskState + Send + 'static, + T: TestTaskState + Send + Sync + 'static, + > TestTask +{ + pub fn new(task: Task, rxs: Vec>) -> Self { + Self { + task, + message_receivers: rxs, + } + } + pub fn run(mut self) -> JoinHandle { + spawn(async move { + loop { + let mut futs = vec![]; + for rx in self.message_receivers.iter_mut() { + futs.push(rx.recv()); + } + let (msg, id, _) = select_all(futs).await; + if let Ok(event) = self.task.event_receiver.try_recv() { + if S::should_shutdown(&event) { + self.task.state.shutdown().await; + // tracing::error!("Shutting down test task TODO!"); + todo!(); + } + if !self.state().filter(&event) { + if let Some(res) = S::handle_event(event, &mut self.task).await { + self.task.state.handle_result(&res).await; + self.task.state.shutdown().await; + return res; + } + } + } + + if let Some(res) = T::handle_message(msg.unwrap(), id, &mut self).await { + self.task.state.handle_result(&res).await; + self.task.state.shutdown().await; + return res; + } + } + }) + } + pub fn state(&self) -> &S { + &self.task.state + } + pub fn state_mut(&mut self) -> &mut S { + self.task.state_mut() + } + pub async fn send_event(&self, event: S::Event) { + self.task.send(event).await.unwrap(); + } +} + +#[derive(Default)] +pub struct TaskRegistry { + task_handles: RwLock>>, +} + +impl TaskRegistry { + pub async fn register(&self, handle: JoinHandle<()>) { + self.task_handles.write().await.push(handle); + 
} + pub async fn shutdown(&self) { + let mut handles = self.task_handles.write().await; + while let Some(handle) = handles.pop() { + #[cfg(async_executor_impl = "async-std")] + handle.cancel().await; + #[cfg(async_executor_impl = "tokio")] + handle.abort(); + } + } + pub async fn run_task(&self, task: Task) + where + S: TaskState + Send + 'static, + { + self.register(task.run()).await; + } + pub async fn spawn_dependency_task( + &self, + dep: impl Dependency + Send + 'static, + handle: impl HandleDepResult, + ) { + let join_handle = DependencyTask { dep, handle }.run(); + self.register(join_handle).await; + } + pub async fn join_all(self) -> Vec<()> { + #[cfg(async_executor_impl = "async-std")] + let ret = join_all(self.task_handles.into_inner()).await; + #[cfg(async_executor_impl = "tokio")] + let ret = try_join_all(self.task_handles.into_inner()).await.unwrap(); + ret + } +} + +#[cfg(test)] +mod tests { + use super::*; + use async_broadcast::broadcast; + #[cfg(async_executor_impl = "async-std")] + use async_std::task::sleep; + use std::{collections::HashSet, time::Duration}; + #[cfg(async_executor_impl = "tokio")] + use tokio::time::sleep; + + #[derive(Default)] + pub struct DummyHandle { + val: usize, + seen: HashSet, + } + + impl TaskState for DummyHandle { + type Event = usize; + type Result = (); + async fn handle_event(event: usize, task: &mut Task) -> Option<()> { + sleep(Duration::from_millis(10)).await; + let state = task.state_mut(); + state.seen.insert(event); + if event > state.val { + state.val = event; + if state.val >= 100 { + panic!("Test should shutdown before getting an event for 100") + } + task.send(event + 1).await.unwrap(); + } + None + } + fn should_shutdown(event: &usize) -> bool { + *event >= 98 + } + async fn shutdown(&mut self) { + for i in 1..98 { + assert!(self.seen.contains(&i)); + } + } + } + + impl TestTaskState for DummyHandle { + type Message = String; + type Result = (); + type State = Self; + + async fn handle_message( + 
message: Self::Message, + _id: usize, + _task: &mut TestTask, + ) -> Option<()> { + println!("got message {}", message); + if message == *"done".to_string() { + return Some(()); + } + None + } + } + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 2) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + async fn it_works() { + let reg = Arc::new(TaskRegistry::default()); + let (tx, rx) = broadcast(10); + let task1 = Task:: { + event_sender: tx.clone(), + event_receiver: rx.clone(), + registry: reg.clone(), + state: Default::default(), + }; + tx.broadcast(1).await.unwrap(); + let task2 = Task:: { + event_sender: tx.clone(), + event_receiver: rx, + registry: reg, + state: Default::default(), + }; + let handle = task2.run(); + let _res = task1.run().await; + let _ = handle.await; + } + + #[cfg_attr( + async_executor_impl = "tokio", + tokio::test(flavor = "multi_thread", worker_threads = 10) + )] + #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[allow(clippy::should_panic_without_expect)] + #[should_panic] + async fn test_works() { + let reg = Arc::new(TaskRegistry::default()); + let (tx, rx) = broadcast(10); + let (msg_tx, msg_rx) = broadcast(10); + let task1 = Task:: { + event_sender: tx.clone(), + event_receiver: rx.clone(), + registry: reg.clone(), + state: Default::default(), + }; + tx.broadcast(1).await.unwrap(); + let task2 = Task:: { + event_sender: tx.clone(), + event_receiver: rx, + registry: reg, + state: Default::default(), + }; + let test1 = TestTask::<_, DummyHandle> { + task: task1, + message_receivers: vec![msg_rx.clone()], + }; + let test2 = TestTask::<_, DummyHandle> { + task: task2, + message_receivers: vec![msg_rx.clone()], + }; + + let handle = test1.run(); + let handle2 = test2.run(); + sleep(Duration::from_millis(30)).await; + msg_tx.broadcast("done".into()).await.unwrap(); + handle.await.unwrap(); + handle2.await.unwrap(); + } +} diff --git 
a/crates/testing/Cargo.toml b/crates/testing/Cargo.toml index 0ae4418cf9..8640d6d886 100644 --- a/crates/testing/Cargo.toml +++ b/crates/testing/Cargo.toml @@ -33,7 +33,7 @@ sha2 = { workspace = true } async-lock = { workspace = true } bitvec = { workspace = true } ethereum-types = { workspace = true } -task = { workspace = true } +hotshot-task = { path = "../task" } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index f98cf776bc..4e8a7db884 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -1,6 +1,7 @@ use futures::FutureExt; use hotshot::{traits::TestableNodeImplementation, HotShotError}; +use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::{ data::{Leaf, VidCommitment}, error::RoundTimedoutState, @@ -13,7 +14,6 @@ use std::{ collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, }; -use task::task::{TaskState, TestTaskState}; use tracing::error; use crate::test_runner::{HotShotTaskCompleted, Node}; @@ -80,12 +80,10 @@ impl> TaskState type Result = HotShotTaskCompleted; - async fn handle_event( - event: Self::Event, - task: &mut task::task::Task, - ) -> Option { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { match event { GlobalTestEvent::ShutDown => { + tracing::error!("Shutting down SafetyTask"); let state = task.state_mut(); let OverallSafetyPropertiesDescription { check_leaf: _, @@ -140,7 +138,7 @@ impl> TestTaskState async fn handle_message( message: Self::Message, idx: usize, - task: &mut task::task::TestTask, + task: &mut hotshot_task::task::TestTask, ) -> Option { let OverallSafetyPropertiesDescription { check_leaf, diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index 0567be7631..d5c621287a 100644 --- a/crates/testing/src/spinning_task.rs +++ 
b/crates/testing/src/spinning_task.rs @@ -2,10 +2,10 @@ use std::collections::HashMap; use hotshot::{traits::TestableNodeImplementation, SystemContext}; +use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; -use task::task::{TaskState, TestTaskState}; use crate::test_runner::HotShotTaskCompleted; use crate::test_runner::Node; @@ -35,10 +35,7 @@ impl> TaskState for Spinni type Result = HotShotTaskCompleted; - async fn handle_event( - event: Self::Event, - _task: &mut task::task::Task, - ) -> Option { + async fn handle_event(event: Self::Event, _task: &mut Task) -> Option { if matches!(event, GlobalTestEvent::ShutDown) { return Some(HotShotTaskCompleted::ShutDown); } @@ -62,7 +59,7 @@ impl> TestTaskState async fn handle_message( message: Self::Message, _id: usize, - task: &mut task::task::TestTask, + task: &mut hotshot_task::task::TestTask, ) -> Option { let Event { view_number, diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 9daf0bcd98..7bc917e216 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -17,6 +17,7 @@ use hotshot::{types::SystemContextHandle, Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; +use hotshot_task::task::{Task, TaskRegistry, TestTask}; use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -28,7 +29,6 @@ use std::{ marker::PhantomData, sync::Arc, }; -use task::task::{Task, TaskRegistry, TestTask}; #[allow(deprecated)] use tracing::info; diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 5dbfcb7178..08ca0e8aed 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -1,8 +1,8 @@ +use hotshot_task::task::{Task, TaskState, 
TestTaskState}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; use std::collections::HashSet; -use task::task::{TaskState, TestTaskState}; use crate::{ test_runner::{HotShotTaskCompleted, Node}, @@ -31,10 +31,7 @@ impl> TaskState for ViewSy type Result = HotShotTaskCompleted; - async fn handle_event( - event: Self::Event, - task: &mut task::task::Task, - ) -> Option { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { let state = task.state_mut(); match event { GlobalTestEvent::ShutDown => match state.description.clone() { @@ -69,7 +66,7 @@ impl> TestTaskState async fn handle_message( message: Self::Message, id: usize, - task: &mut task::task::TestTask, + task: &mut hotshot_task::task::TestTask, ) -> Option { match message { // all the view sync events From 81a8e4d6852c918d9444bb8dcd5c1b48fa0b0d60 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 11:29:03 -0500 Subject: [PATCH 11/28] test can pass --- crates/hotshot/src/lib.rs | 34 +++++++++++++-------------- crates/testing/src/completion_task.rs | 2 -- crates/testing/src/test_runner.rs | 10 ++++---- crates/testing/src/txn_task.rs | 4 ++++ crates/testing/src/view_sync_task.rs | 6 ++--- 5 files changed, 28 insertions(+), 28 deletions(-) diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index a2dd3e0323..a21a0cc54a 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -142,11 +142,6 @@ pub struct SystemContextInner> { /// Memberships used by consensus pub memberships: Arc>, - // pub quorum_network: Arc; - // pub committee_network: Arc; - /// Sender for [`Event`]s - event_sender: RwLock>>>, - /// the metrics that the implementor is using. 
_metrics: Arc, @@ -259,7 +254,6 @@ impl> SystemContext { storage, networks: Arc::new(networks), memberships: Arc::new(memberships), - event_sender: RwLock::default(), _metrics: consensus_metrics.clone(), internal_event_stream: broadcast(100024), output_event_stream: broadcast(100024), @@ -317,12 +311,14 @@ impl> SystemContext { // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 async fn send_external_event(&self, event: Event) { debug!(?event, "send_external_event"); - let mut event_sender = self.inner.event_sender.write().await; - if let Some(sender) = &*event_sender { - if let Err(e) = sender.send_async(event).await { - error!(?e, "Could not send event to event_sender"); - *event_sender = None; - } + if let Err(e) = self + .inner + .output_event_stream + .0 + .broadcast_direct(event) + .await + { + error!(?e, "Could not send event to event_sender"); } } @@ -710,12 +706,14 @@ impl> ConsensusApi async fn send_event(&self, event: Event) { debug!(?event, "send_event"); - let mut event_sender = self.inner.event_sender.write().await; - if let Some(sender) = &*event_sender { - if let Err(e) = sender.send_async(event).await { - error!(?e, "Could not send event to event_sender"); - *event_sender = None; - } + if let Err(e) = self + .inner + .output_event_stream + .0 + .broadcast_direct(event) + .await + { + error!(?e, "Could not send event to event_sender"); } } diff --git a/crates/testing/src/completion_task.rs b/crates/testing/src/completion_task.rs index 79178ed72c..8bd3c2a635 100644 --- a/crates/testing/src/completion_task.rs +++ b/crates/testing/src/completion_task.rs @@ -38,8 +38,6 @@ impl> CompletionTask, ViewSyncTask>::new( @@ -214,9 +214,9 @@ where } task_futs.push(safety_task.run()); task_futs.push(view_sync_task.run()); - // if let Some(txn) = txn_task { - // task_futs.push(txn.run()); - // } + if let Some(txn) = txn_task { + task_futs.push(txn.run()); + } task_futs.push(completion_task.run()); task_futs.push(spinning_task.run()); diff 
--git a/crates/testing/src/txn_task.rs b/crates/testing/src/txn_task.rs index 8de94f4d8d..d3ce9ee85b 100644 --- a/crates/testing/src/txn_task.rs +++ b/crates/testing/src/txn_task.rs @@ -36,6 +36,7 @@ pub struct TxnTask> { impl> TxnTask { pub fn run(mut self) -> JoinHandle { + async_sleep(Duration::from_millis(100)); async_spawn(async move { loop { async_sleep(self.duration).await; @@ -59,10 +60,12 @@ impl> TxnTask { self.next_node_idx = Some((idx + 1) % self.handles.len()); match self.handles.get(idx) { None => { + tracing::error!("couldn't get node in txn task"); // should do error unimplemented!() } Some(node) => { + tracing::error!("sending txn to node {}", idx); // use rand::seq::IteratorRandom; // we're assuming all nodes have the same leaf. // If they don't match, this is probably fine since @@ -73,6 +76,7 @@ impl> TxnTask { .submit_transaction(txn.clone()) .await .expect("Could not send transaction"); + tracing::error!("txn sent"); } } } diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 08ca0e8aed..6e4e59cefe 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -2,7 +2,7 @@ use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_task_impls::events::HotShotEvent; use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplementation}; use snafu::Snafu; -use std::collections::HashSet; +use std::{collections::HashSet, marker::PhantomData}; use crate::{ test_runner::{HotShotTaskCompleted, Node}, @@ -18,12 +18,12 @@ pub struct ViewSyncTaskErr { /// `ViewSync` task state pub struct ViewSyncTask> { - /// the node handles - pub(crate) handles: Vec>, /// nodes that hit view sync pub(crate) hit_view_sync: HashSet, /// properties of task pub(crate) description: ViewSyncTaskDescription, + /// Phantom data for TYPES and I + pub(crate) _pd: PhantomData<(TYPES, I)>, } impl> TaskState for ViewSyncTask { From 4e1718fafce8b3014210818f7b6dc22f093dc748 Mon Sep 17 
00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 13:54:18 -0500 Subject: [PATCH 12/28] fixing tests, basic passes except unit tests --- Cargo.lock | 1 + crates/task-impls/src/harness.rs | 8 +++++--- crates/task/Cargo.toml | 1 + crates/task/src/task.rs | 27 ++++++++++++++++----------- crates/testing/src/test_runner.rs | 19 ++++++++++++------- crates/testing/src/txn_task.rs | 4 +--- 6 files changed, 36 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7ed72d6f0..142e15c9a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2788,6 +2788,7 @@ name = "hotshot-task" version = "0.1.0" dependencies = [ "async-broadcast", + "async-compatibility-layer", "async-std", "futures", "tokio", diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index a1c6f57014..77718c9f05 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -51,6 +51,7 @@ pub async fn run_harness>>( S: Send + 'static, { let registry = Arc::new(TaskRegistry::default()); + let mut tasks = vec![]; // set up two broadcast channels so the test sends to the task and the task back to the test let (to_task, from_test) = broadcast(1024); let (to_test, from_task) = broadcast(1024); @@ -66,14 +67,15 @@ pub async fn run_harness>>( test_state, ); let task = Task::new(to_task.clone(), from_test.clone(), registry.clone(), state); - registry.run_task(test_task).await; - registry.run_task(task).await; + + tasks.push(task.run()); + tasks.push(test_task.run()); for event in input { let _ = to_task.broadcast(event).await.unwrap(); } - let _ = Arc::into_inner(registry).unwrap().join_all().await; + let _ = futures::future::join_all(tasks).await; } /// Handles an event for the Test Harness Task. 
If the event is expected, remove it from diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 2b17ac25d0..4c6722600f 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -10,6 +10,7 @@ edition = "2021" futures = "0.3.30" async-broadcast = "0.6.0" tracing = { workspace = true } +async-compatibility-layer = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { version = "1.35.1", features = ["time", "rt-multi-thread", "macros", "sync"] } diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index 037a03e0e2..0ae39d8fd4 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -1,7 +1,8 @@ use std::sync::Arc; +use std::time::Duration; use async_broadcast::{Receiver, SendError, Sender}; - +use async_compatibility_layer::art::async_timeout; #[cfg(async_executor_impl = "async-std")] use async_std::{ sync::RwLock, @@ -86,7 +87,7 @@ impl Task { state, } } - fn run(mut self) -> JoinHandle<()> { + pub fn run(mut self) -> JoinHandle<()> { spawn(async move { loop { let event = self.event_receiver.recv_direct().await; @@ -153,14 +154,11 @@ impl< spawn(async move { loop { let mut futs = vec![]; - for rx in self.message_receivers.iter_mut() { - futs.push(rx.recv()); - } - let (msg, id, _) = select_all(futs).await; + if let Ok(event) = self.task.event_receiver.try_recv() { if S::should_shutdown(&event) { self.task.state.shutdown().await; - // tracing::error!("Shutting down test task TODO!"); + tracing::error!("Shutting down test task TODO!"); todo!(); } if !self.state().filter(&event) { @@ -172,10 +170,17 @@ impl< } } - if let Some(res) = T::handle_message(msg.unwrap(), id, &mut self).await { - self.task.state.handle_result(&res).await; - self.task.state.shutdown().await; - return res; + for rx in self.message_receivers.iter_mut() { + futs.push(rx.recv()); + } + if let Ok((Ok(msg), id, _)) = + async_timeout(Duration::from_secs(1), select_all(futs)).await + { + if let Some(res) = 
T::handle_message(msg, id, &mut self).await { + self.task.state.handle_result(&res).await; + self.task.state.shutdown().await; + return res; + } } } }) diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 4743895392..8a512140c1 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -224,13 +224,18 @@ where tracing::error!("test tasks joined"); let mut error_list = vec![]; for result in results { - match result.unwrap() { - HotShotTaskCompleted::ShutDown => { - info!("Task shut down successfully"); - } - HotShotTaskCompleted::Error(e) => error_list.push(e), - _ => { - panic!("Future impl for task abstraction failed! This should never happen"); + match result { + Ok(res) => match res { + HotShotTaskCompleted::ShutDown => { + info!("Task shut down successfully"); + } + HotShotTaskCompleted::Error(e) => error_list.push(e), + _ => { + panic!("Future impl for task abstraction failed! This should never happen"); + } + }, + Err(e) => { + panic!("Error Joining the test task {:?}", e); } } } diff --git a/crates/testing/src/txn_task.rs b/crates/testing/src/txn_task.rs index d3ce9ee85b..4e510cc50a 100644 --- a/crates/testing/src/txn_task.rs +++ b/crates/testing/src/txn_task.rs @@ -36,8 +36,8 @@ pub struct TxnTask> { impl> TxnTask { pub fn run(mut self) -> JoinHandle { - async_sleep(Duration::from_millis(100)); async_spawn(async move { + async_sleep(Duration::from_millis(100)).await; loop { async_sleep(self.duration).await; match self.shutdown_chan.try_recv() { @@ -65,7 +65,6 @@ impl> TxnTask { unimplemented!() } Some(node) => { - tracing::error!("sending txn to node {}", idx); // use rand::seq::IteratorRandom; // we're assuming all nodes have the same leaf. 
// If they don't match, this is probably fine since @@ -76,7 +75,6 @@ impl> TxnTask { .submit_transaction(txn.clone()) .await .expect("Could not send transaction"); - tracing::error!("txn sent"); } } } From d37c03d84f43bee6d40908f7d68f20b64fcda33d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 14:07:24 -0500 Subject: [PATCH 13/28] fixed consensus task test --- crates/task-impls/src/harness.rs | 12 +++++++--- crates/testing/tests/consensus_task.rs | 33 ++++---------------------- 2 files changed, 13 insertions(+), 32 deletions(-) diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 77718c9f05..2aea46dc2e 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -1,9 +1,10 @@ use crate::events::{HotShotEvent, HotShotTaskCompleted}; use async_broadcast::broadcast; +use async_compatibility_layer::art::async_timeout; use hotshot_task::task::{Task, TaskRegistry, TaskState}; use hotshot_types::traits::node_implementation::NodeType; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, sync::Arc, time::Duration}; /// The state for the test harness task. Keeps track of which events and how many we expect to get pub struct TestHarnessState { @@ -66,7 +67,7 @@ pub async fn run_harness>>( registry.clone(), test_state, ); - let task = Task::new(to_task.clone(), from_test.clone(), registry.clone(), state); + let task = Task::new(to_test.clone(), from_test.clone(), registry.clone(), state); tasks.push(task.run()); tasks.push(test_task.run()); @@ -75,7 +76,12 @@ pub async fn run_harness>>( let _ = to_task.broadcast(event).await.unwrap(); } - let _ = futures::future::join_all(tasks).await; + if async_timeout(Duration::from_secs(2), futures::future::join_all(tasks)) + .await + .is_err() + { + panic!("Test timeout out before all all expected outputs received"); + } } /// Handles an event for the Test Harness Task. 
If the event is expected, remove it from diff --git a/crates/testing/tests/consensus_task.rs b/crates/testing/tests/consensus_task.rs index aa01d86b09..75b2141066 100644 --- a/crates/testing/tests/consensus_task.rs +++ b/crates/testing/tests/consensus_task.rs @@ -108,25 +108,18 @@ async fn test_consensus_task() { input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::QCFormed(either::Left(qc)), 1); output.insert( HotShotEvent::QuorumProposalSend(proposal.clone(), public_key), 1, ); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), - 1, - ); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } - output.insert(HotShotEvent::Shutdown, 1); - let consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; @@ -160,21 +153,16 @@ async fn test_consensus_vote() { proposal.clone(), public_key, )); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal.clone(), public_key), - 1, - ); + let proposal = proposal.data; if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); input.push(HotShotEvent::QuorumVoteRecv(vote.clone())); - output.insert(HotShotEvent::QuorumVoteRecv(vote), 1); } output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::Shutdown, 1); let consensus_state = add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; @@ -282,25 +270,12 @@ async fn test_consensus_with_vid() { public_key_view2, )); - output.insert( - HotShotEvent::QuorumProposalRecv(proposal_view2.clone(), public_key_view2), - 1, - ); - 
output.insert(HotShotEvent::DACRecv(created_dac_view2), 1); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - if let GeneralConsensusMessage::Vote(vote) = build_vote(&handle, proposal_view2.data).await { output.insert(HotShotEvent::QuorumVoteSend(vote.clone()), 1); } - output.insert( - HotShotEvent::ViewChange(ViewNumber::new(1)), - 2, // 2 occurrences: 1 from `QuorumProposalRecv`, 1 from input - ); - output.insert( - HotShotEvent::ViewChange(ViewNumber::new(2)), - 2, // 2 occurrences: 1 from `QuorumProposalRecv`?, 1 from input - ); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); + output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); From 31dfea75aa7ca8c772ab973fca00b6159d04fb16 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 14:12:25 -0500 Subject: [PATCH 14/28] fixing other task tests --- crates/testing/tests/da_task.rs | 10 ---------- crates/testing/tests/vid_task.rs | 15 ++------------- crates/testing/tests/view_sync_task.rs | 6 ------ 3 files changed, 2 insertions(+), 29 deletions(-) diff --git a/crates/testing/tests/da_task.rs b/crates/testing/tests/da_task.rs index ad71b39be6..81a73945b8 100644 --- a/crates/testing/tests/da_task.rs +++ b/crates/testing/tests/da_task.rs @@ -80,11 +80,6 @@ async fn test_da_task() { input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert( - HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), - 1, - ); output.insert(HotShotEvent::DAProposalSend(message.clone(), pub_key), 1); let da_vote = DAVote::create_signed_vote( DAData { @@ -97,11 +92,6 @@ async fn test_da_task() { .expect("Failed to sign DAData"); output.insert(HotShotEvent::DAVoteSend(da_vote), 1); - output.insert(HotShotEvent::DAProposalRecv(message, pub_key), 1); - - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); 
- output.insert(HotShotEvent::Shutdown, 1); - let da_state = DATaskState { api: api.clone(), consensus: handle.hotshot.get_consensus(), diff --git a/crates/testing/tests/vid_task.rs b/crates/testing/tests/vid_task.rs index f4bb00dc54..58fa821a30 100644 --- a/crates/testing/tests/vid_task.rs +++ b/crates/testing/tests/vid_task.rs @@ -68,7 +68,6 @@ async fn test_vid_task() { _pd: PhantomData, }; - // Every event input is seen on the event stream in the output. let mut input = Vec::new(); let mut output = HashMap::new(); @@ -88,15 +87,9 @@ async fn test_vid_task() { input.push(HotShotEvent::VidDisperseRecv(vid_proposal.clone(), pub_key)); input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(1)), 1); - output.insert( - HotShotEvent::TransactionsSequenced(encoded_transactions, (), ViewNumber::new(2)), - 1, - ); - output.insert( HotShotEvent::BlockReady(vid_disperse, ViewNumber::new(2)), - 2, + 1, ); output.insert( @@ -105,13 +98,9 @@ async fn test_vid_task() { ); output.insert( HotShotEvent::VidDisperseSend(vid_proposal.clone(), pub_key), - 2, // 2 occurrences: 1 from `input`, 1 from the DA task + 1, ); - output.insert(HotShotEvent::VidDisperseRecv(vid_proposal, pub_key), 1); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); - output.insert(HotShotEvent::Shutdown, 1); - let vid_state = VIDTaskState { api: api.clone(), consensus: handle.hotshot.get_consensus(), diff --git a/crates/testing/tests/view_sync_task.rs b/crates/testing/tests/view_sync_task.rs index 27e5dcc82d..308c271d3c 100644 --- a/crates/testing/tests/view_sync_task.rs +++ b/crates/testing/tests/view_sync_task.rs @@ -41,7 +41,6 @@ async fn test_view_sync_task() { tracing::error!("Vote in test is {:?}", vote.clone()); - // Every event input is seen on the event stream in the output. 
let mut input = Vec::new(); let mut output = HashMap::new(); @@ -50,14 +49,9 @@ async fn test_view_sync_task() { input.push(HotShotEvent::Shutdown); - output.insert(HotShotEvent::Timeout(ViewNumber::new(2)), 1); - output.insert(HotShotEvent::Timeout(ViewNumber::new(3)), 1); - output.insert(HotShotEvent::ViewChange(ViewNumber::new(2)), 1); output.insert(HotShotEvent::ViewSyncPreCommitVoteSend(vote.clone()), 1); - output.insert(HotShotEvent::Shutdown, 1); - let view_sync_state = ViewSyncTaskState { current_view: ViewNumber::new(0), next_view: ViewNumber::new(0), From 2b39c50456e43c39b2fbf3cd9b105f0646d1cd3d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 14:33:22 -0500 Subject: [PATCH 15/28] Remove more cruft --- crates/hotshot/examples/combined/types.rs | 8 +- crates/hotshot/examples/infra/mod.rs | 2 +- crates/hotshot/examples/libp2p/types.rs | 8 +- crates/hotshot/examples/webserver/types.rs | 8 +- crates/hotshot/src/lib.rs | 51 +--- crates/hotshot/src/types/handle.rs | 36 --- crates/testing/src/node_types.rs | 38 +-- crates/testing/src/overall_safety_task.rs | 232 ------------------ crates/testing/tests/memory_network.rs | 8 +- crates/types/src/consensus.rs | 1 - .../types/src/traits/node_implementation.rs | 75 +----- 11 files changed, 10 insertions(+), 457 deletions(-) diff --git a/crates/hotshot/examples/combined/types.rs b/crates/hotshot/examples/combined/types.rs index 4e5d342275..4d98d71f8e 100644 --- a/crates/hotshot/examples/combined/types.rs +++ b/crates/hotshot/examples/combined/types.rs @@ -1,7 +1,7 @@ use crate::infra::CombinedDARun; use hotshot::traits::implementations::{CombinedCommChannel, MemoryStorage}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,12 +22,6 @@ impl NodeImplementation for NodeImpl 
{ type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// convenience type alias pub type ThisRun = CombinedDARun; diff --git a/crates/hotshot/examples/infra/mod.rs b/crates/hotshot/examples/infra/mod.rs index 10a980c4f0..7300272be3 100644 --- a/crates/hotshot/examples/infra/mod.rs +++ b/crates/hotshot/examples/infra/mod.rs @@ -388,7 +388,7 @@ pub trait RunDA< /// Starts HotShot consensus, returns when consensus has finished async fn run_hotshot( &self, - mut context: SystemContextHandle, + context: SystemContextHandle, transactions: &mut Vec, transactions_to_send_per_round: u64, ) { diff --git a/crates/hotshot/examples/libp2p/types.rs b/crates/hotshot/examples/libp2p/types.rs index 449e518f48..e9d662e707 100644 --- a/crates/hotshot/examples/libp2p/types.rs +++ b/crates/hotshot/examples/libp2p/types.rs @@ -1,7 +1,7 @@ use crate::infra::Libp2pDARun; use hotshot::traits::implementations::{Libp2pCommChannel, MemoryStorage}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,12 +22,6 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// convenience type alias pub type ThisRun = Libp2pDARun; diff --git a/crates/hotshot/examples/webserver/types.rs b/crates/hotshot/examples/webserver/types.rs index 03d8fc36ed..8c0b323329 100644 --- a/crates/hotshot/examples/webserver/types.rs +++ b/crates/hotshot/examples/webserver/types.rs @@ -1,7 +1,7 @@ use 
crate::infra::WebServerDARun; use hotshot::traits::implementations::{MemoryStorage, WebCommChannel}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -22,12 +22,6 @@ impl NodeImplementation for NodeImpl { type Storage = MemoryStorage; type CommitteeNetwork = DANetwork; type QuorumNetwork = QuorumNetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// convenience type alias pub type ThisRun = WebServerDARun; diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index a21a0cc54a..4d0356dbd4 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -24,10 +24,9 @@ use crate::{ use async_broadcast::{broadcast, Receiver, Sender}; use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, - async_primitives::broadcast::BroadcastSender, channel::UnboundedSender, }; -use async_lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; +use async_lock::RwLock; use async_trait::async_trait; use commit::Committable; use custom_debug::Debug; @@ -36,12 +35,9 @@ use hotshot_constants::PROGRAM_PROTOCOL_VERSION; use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::network; -#[cfg(feature = "hotshot-testing")] -use hotshot_types::traits::node_implementation::ChannelMaps; - use hotshot_task::task::{Task, TaskRegistry}; use hotshot_types::{ - consensus::{Consensus, ConsensusMetricsValue, View, ViewInner, ViewQueue}, + consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, data::Leaf, error::StorageSnafu, event::EventType, @@ -52,7 +48,7 @@ use hotshot_types::{ traits::{ consensus_api::ConsensusApi, network::{CommunicationChannel, NetworkError}, - node_implementation::{NodeType, SendToTasks}, + 
node_implementation::NodeType, signature_key::SignatureKey, state::ConsensusTime, storage::StoredView, @@ -148,11 +144,6 @@ pub struct SystemContextInner> { /// The hotstuff implementation consensus: Arc>>, - /// Channels for sending/recv-ing proposals and votes for quorum and committee exchanges, the - /// latter of which is only applicable for sequencing consensus. - #[cfg(feature = "hotshot-testing")] - channel_maps: (ChannelMaps, Option>), - // global_registry: GlobalRegistry, /// Access to the output event stream. pub output_event_stream: (Sender>, Receiver>), @@ -246,7 +237,6 @@ impl> SystemContext { let inner: Arc> = Arc::new(SystemContextInner { id: nonce, #[cfg(feature = "hotshot-testing")] - channel_maps: I::new_channel_maps(start_view), consensus, public_key, private_key, @@ -518,41 +508,6 @@ impl> SystemContext { pub fn get_next_view_timeout(&self) -> u64 { self.inner.config.next_view_timeout } - - /// given a view number and a upgradable read lock on a channel map, inserts entry into map if it - /// doesn't exist, or creates entry. Then returns a clone of the entry - pub async fn create_or_obtain_chan_from_read( - view_num: TYPES::Time, - channel_map: RwLockUpgradableReadGuard<'_, SendToTasks>, - ) -> ViewQueue { - // check if we have the entry - // if we don't, insert - if let Some(vq) = channel_map.channel_map.get(&view_num) { - vq.clone() - } else { - let mut channel_map = - RwLockUpgradableReadGuard::<'_, SendToTasks>::upgrade(channel_map).await; - let new_view_queue = ViewQueue::default(); - let vq = new_view_queue.clone(); - // NOTE: the read lock is held until all other read locks are DROPPED and - // the read lock may be turned into a write lock. - // This means that the `channel_map` will not change. 
So we don't need - // to check again to see if a channel was added - - channel_map.channel_map.insert(view_num, new_view_queue); - vq - } - } - - /// given a view number and a write lock on a channel map, inserts entry into map if it - /// doesn't exist, or creates entry. Then returns a clone of the entry - #[allow(clippy::unused_async)] // async for API compatibility reasons - pub async fn create_or_obtain_chan_from_write( - view_num: TYPES::Time, - mut channel_map: RwLockWriteGuard<'_, SendToTasks>, - ) -> ViewQueue { - channel_map.channel_map.entry(view_num).or_default().clone() - } } impl> SystemContext { diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 5e977fad1a..dbc5572282 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -218,40 +218,4 @@ impl + 'static> SystemContextHandl .send_direct_message(MessageKind::from_consensus_message(msg), recipient) .await; } - - /// Get length of the replica's receiver channel - #[cfg(feature = "hotshot-testing")] - pub async fn get_replica_receiver_channel_len( - &self, - view_number: TYPES::Time, - ) -> Option { - use async_compatibility_layer::channel::UnboundedReceiver; - - let channel_map = self.hotshot.inner.channel_maps.0.vote_channel.read().await; - let chan = channel_map.channel_map.get(&view_number)?; - let receiver = chan.receiver_chan.lock().await; - UnboundedReceiver::len(&*receiver) - } - - /// Get length of the next leaders's receiver channel - #[cfg(feature = "hotshot-testing")] - pub async fn get_next_leader_receiver_channel_len( - &self, - view_number: TYPES::Time, - ) -> Option { - use async_compatibility_layer::channel::UnboundedReceiver; - - let channel_map = self - .hotshot - .inner - .channel_maps - .0 - .proposal_channel - .read() - .await; - let chan = channel_map.channel_map.get(&view_number)?; - - let receiver = chan.receiver_chan.lock().await; - UnboundedReceiver::len(&*receiver) - } } diff --git 
a/crates/testing/src/node_types.rs b/crates/testing/src/node_types.rs index 92b1b3d010..340a35743d 100644 --- a/crates/testing/src/node_types.rs +++ b/crates/testing/src/node_types.rs @@ -15,7 +15,7 @@ use hotshot::traits::{ use hotshot_types::{ data::ViewNumber, signature_key::BLSPubKey, - traits::node_implementation::{ChannelMaps, NodeType}, + traits::node_implementation::NodeType, }; use serde::{Deserialize, Serialize}; @@ -99,58 +99,22 @@ impl NodeImplementation for Libp2pImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticLibp2pQuorumComm; type CommitteeNetwork = StaticLibp2pDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } impl NodeImplementation for MemoryImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticMemoryQuorumComm; type CommitteeNetwork = StaticMemoryDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } impl NodeImplementation for WebImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticWebQuorumComm; type CommitteeNetwork = StaticWebDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } impl NodeImplementation for CombinedImpl { type Storage = MemoryStorage; type QuorumNetwork = StaticCombinedQuorumComm; type CommitteeNetwork = StaticCombinedDAComm; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - ( - ChannelMaps::new(start_view), - Some(ChannelMaps::new(start_view)), - ) - } } diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 4e8a7db884..7bbe0ddc18 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -564,235 +564,3 @@ impl 
Default for OverallSafetyPropertiesDescription { } } } - -// impl OverallSafetyPropertiesDescription { -// /// build a task -// /// # Panics -// /// if an internal variant that the prior views are filled is violated -// #[must_use] -// #[allow(clippy::too_many_lines)] -// pub fn build>( -// self, -// ) -> TaskGenerator> { -// let Self { -// check_leaf, -// check_state, -// check_block, -// num_failed_views: num_failed_rounds_total, -// num_successful_views, -// threshold_calculator, -// transaction_threshold, -// }: Self = self; - -// Box::new(move |mut state, mut registry, test_event_stream| { -// async move { -// let event_handler = HandleEvent::>(Arc::new( -// move |event, state| { -// async move { -// match event { -// GlobalTestEvent::ShutDown => { -// let num_incomplete_views = state.ctx.round_results.len() -// - state.ctx.successful_views.len() -// - state.ctx.failed_views.len(); - -// if state.ctx.successful_views.len() < num_successful_views { -// return ( -// Some(HotShotTaskCompleted::Error(Box::new( -// OverallSafetyTaskErr::::NotEnoughDecides { -// got: state.ctx.successful_views.len(), -// expected: num_successful_views, -// }, -// ))), -// state, -// ); -// } - -// if state.ctx.failed_views.len() + num_incomplete_views -// >= num_failed_rounds_total -// { -// return ( -// Some(HotShotTaskCompleted::Error(Box::new( -// OverallSafetyTaskErr::::TooManyFailures { -// failed_views: state.ctx.failed_views.clone(), -// }, -// ))), -// state, -// ); -// } -// // TODO check if we got enough successful views -// (Some(HotShotTaskCompleted), state) -// } -// } -// } -// .boxed() -// }, -// )); - -// let message_handler = HandleMessage::>(Arc::new( -// move |msg, mut state| { -// let threshold_calculator = threshold_calculator.clone(); -// async move { - -// let (idx, maybe_event ) : (usize, Either<_, _>)= msg; -// if let Either::Left(Event { view_number, event }) = maybe_event { -// let key = match event { -// EventType::Error { error } => { -// 
state.ctx.insert_error_to_context(view_number, idx, error); -// None -// } -// EventType::Decide { -// leaf_chain, -// qc, -// block_size: maybe_block_size, -// } => { -// let paired_up = (leaf_chain.to_vec(), (*qc).clone()); -// match state.ctx.round_results.entry(view_number) { -// Entry::Occupied(mut o) => o.get_mut().insert_into_result( -// idx, -// paired_up, -// maybe_block_size, -// ), -// Entry::Vacant(v) => { -// let mut round_result = RoundResult::default(); -// let key = round_result.insert_into_result( -// idx, -// paired_up, -// maybe_block_size, -// ); -// v.insert(round_result); -// key -// } -// } -// } -// EventType::ReplicaViewTimeout { view_number } => { -// let error = Arc::new(HotShotError::::ViewTimeoutError { -// view_number, -// state: RoundTimedoutState::TestCollectRoundEventsTimedOut, -// }); -// state.ctx.insert_error_to_context(view_number, idx, error); -// None -// } -// _ => return (None, state), -// }; - -// // update view count -// let threshold = -// (threshold_calculator)(state.handles.len(), state.handles.len()); - -// let view = state.ctx.round_results.get_mut(&view_number).unwrap(); - -// if let Some(key) = key { -// view.update_status( -// threshold, -// state.handles.len(), -// &key, -// check_leaf, -// check_state, -// check_block, -// transaction_threshold, -// ); -// match view.status.clone() { -// ViewStatus::Ok => { -// state.ctx.successful_views.insert(view_number); -// if state.ctx.successful_views.len() -// >= self.num_successful_views -// { -// state -// .test_event_stream -// .publish(GlobalTestEvent::ShutDown) -// .await; -// return (Some(HotShotTaskCompleted), state); -// } -// return (None, state); -// } -// ViewStatus::Failed => { -// state.ctx.failed_views.insert(view_number); -// if state.ctx.failed_views.len() > self.num_failed_views { -// state -// .test_event_stream -// .publish(GlobalTestEvent::ShutDown) -// .await; -// return ( -// Some(HotShotTaskCompleted::Error(Box::new( -// 
OverallSafetyTaskErr::::TooManyFailures { -// failed_views: state.ctx.failed_views.clone(), -// }, -// ))), -// state, -// ); -// } -// return (None, state); -// } -// ViewStatus::Err(e) => { -// return ( -// Some(HotShotTaskCompleted::Error(Box::new(e))), -// state, -// ); -// } -// ViewStatus::InProgress => { -// return (None, state); -// } -// } -// } -// else if view.check_if_failed(threshold, state.handles.len()) { -// view.status = ViewStatus::Failed; -// state.ctx.failed_views.insert(view_number); -// if state.ctx.failed_views.len() > self.num_failed_views { -// state -// .test_event_stream -// .publish(GlobalTestEvent::ShutDown) -// .await; -// return ( -// Some(HotShotTaskCompleted::Error(Box::new( -// OverallSafetyTaskErr::::TooManyFailures { -// failed_views: state.ctx.failed_views.clone(), -// }, -// ))), -// state, -// ); -// } -// return (None, state); -// } - -// } - -// (None, state) -// } -// .boxed() -// }, -// )); - -// let mut streams = vec![]; -// for handle in &mut state.handles { -// let s1 = -// handle -// .handle -// .get_event_stream_known_impl() -// .await; -// let s2 = -// handle -// .handle -// .get_internal_event_stream_known_impl() -// .await; -// streams.push( -// Merge::new(s1, s2) -// ); -// } -// let builder = TaskBuilder::>::new( -// "Test Overall Safety Task".to_string(), -// ) -// .register_event_stream(test_event_stream, FilterEvent::default()) -// .await -// .register_registry(&mut registry) -// .await -// .register_message_handler(message_handler) -// .register_message_stream(MergeN::new(streams)) -// .register_event_handler(event_handler) -// .register_state(state); -// let task_id = builder.get_task_id().unwrap(); -// (task_id, OverallSafetyTaskTypes::build(builder).launch()) -// } -// .boxed() -// }) -// } -// } diff --git a/crates/testing/tests/memory_network.rs b/crates/testing/tests/memory_network.rs index b548a736e8..77414d9223 100644 --- a/crates/testing/tests/memory_network.rs +++ 
b/crates/testing/tests/memory_network.rs @@ -18,7 +18,7 @@ use hotshot_types::message::Message; use hotshot_types::signature_key::BLSPubKey; use hotshot_types::traits::network::TestableNetworkingImplementation; use hotshot_types::traits::network::{ConnectedNetwork, TransmitType}; -use hotshot_types::traits::node_implementation::{ChannelMaps, NodeType}; +use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::ViewNumber, message::{DataMessage, MessageKind}, @@ -69,12 +69,6 @@ impl NodeImplementation for TestImpl { type Storage = MemoryStorage; type QuorumNetwork = QuorumNetwork; type CommitteeNetwork = DANetwork; - - fn new_channel_maps( - start_view: ::Time, - ) -> (ChannelMaps, Option>) { - (ChannelMaps::new(start_view), None) - } } /// fake Eq diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index 80bea89de4..c8525969d3 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -1,7 +1,6 @@ //! Provides the core consensus types pub use crate::{ - traits::node_implementation::ViewQueue, utils::{View, ViewInner}, }; use displaydoc::Display; diff --git a/crates/types/src/traits/node_implementation.rs b/crates/types/src/traits/node_implementation.rs index 82a7aa2afc..de351eec70 100644 --- a/crates/types/src/traits/node_implementation.rs +++ b/crates/types/src/traits/node_implementation.rs @@ -20,7 +20,7 @@ use crate::{ }, }; use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; -use async_lock::{Mutex, RwLock}; +use async_lock::Mutex; use async_trait::async_trait; use serde::{Deserialize, Serialize}; use std::{ @@ -30,73 +30,6 @@ use std::{ sync::{atomic::AtomicBool, Arc}, }; -/// struct containing messages for a view to send to a replica or DA committee member. -#[derive(Clone)] -pub struct ViewQueue { - /// to send networking events to a replica or DA committee member. 
- pub sender_chan: UnboundedSender>, - - /// to recv networking events for a replica or DA committee member. - pub receiver_chan: Arc>>>, - - /// `true` if this queue has already received a proposal - pub has_received_proposal: Arc, -} - -impl Default for ViewQueue { - /// create new view queue - fn default() -> Self { - let (s, r) = unbounded(); - ViewQueue { - sender_chan: s, - receiver_chan: Arc::new(Mutex::new(r)), - has_received_proposal: Arc::new(AtomicBool::new(false)), - } - } -} - -/// metadata for sending information to the leader, replica, or DA committee member. -pub struct SendToTasks { - /// the current view number - /// this should always be in sync with `Consensus` - pub cur_view: TYPES::Time, - - /// a map from view number to ViewQueue - /// one of (replica|next leader)'s' task for view i will be listening on the channel in here - pub channel_map: BTreeMap>, -} - -impl SendToTasks { - /// create new sendtosasks - #[must_use] - pub fn new(view_num: TYPES::Time) -> Self { - SendToTasks { - cur_view: view_num, - channel_map: BTreeMap::default(), - } - } -} - -/// Channels for sending/recv-ing proposals and votes. -#[derive(Clone)] -pub struct ChannelMaps { - /// Channel for the next consensus leader or DA leader. - pub proposal_channel: Arc>>, - - /// Channel for the replica or DA committee member. - pub vote_channel: Arc>>, -} - -impl ChannelMaps { - /// Create channels starting from a given view. 
- pub fn new(start_view: TYPES::Time) -> Self { - Self { - proposal_channel: Arc::new(RwLock::new(SendToTasks::new(start_view))), - vote_channel: Arc::new(RwLock::new(SendToTasks::new(start_view))), - } - } -} - /// Node implementation aggregate trait /// /// This trait exists to collect multiple behavior implementations into one type, to allow @@ -115,12 +48,6 @@ pub trait NodeImplementation: type QuorumNetwork: CommunicationChannel; /// Network for those in the DA committee type CommitteeNetwork: CommunicationChannel; - - /// Create channels for sending/recv-ing proposals and votes for quorum and committee - /// exchanges, the latter of which is only applicable for sequencing consensus. - fn new_channel_maps( - start_view: TYPES::Time, - ) -> (ChannelMaps, Option>); } /// extra functions required on a node implementation to be usable by hotshot-testing From a7cdad919546c1da10898c611f9d7a8157106cfd Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 16:02:47 -0500 Subject: [PATCH 16/28] replace event sending with helper for better errors --- crates/hotshot/examples/combined/types.rs | 2 +- crates/hotshot/examples/infra/mod.rs | 2 +- crates/hotshot/examples/libp2p/types.rs | 2 +- crates/hotshot/examples/webserver/types.rs | 2 +- crates/hotshot/src/lib.rs | 68 +++++------- crates/hotshot/src/tasks/mod.rs | 2 +- crates/hotshot/src/types/handle.rs | 23 ++-- crates/task-impls/src/consensus.rs | 100 ++++++++---------- crates/task-impls/src/da.rs | 19 ++-- crates/task-impls/src/harness.rs | 1 + crates/task-impls/src/helpers.rs | 20 ++++ crates/task-impls/src/lib.rs | 2 +- crates/task-impls/src/network.rs | 16 +-- crates/task-impls/src/transactions.rs | 18 ++-- crates/task-impls/src/vid.rs | 34 +++--- crates/task-impls/src/view_sync.rs | 100 ++++++++---------- crates/task-impls/src/vote.rs | 27 +++-- crates/testing/src/completion_task.rs | 8 +- crates/testing/src/node_types.rs | 4 +- crates/testing/src/overall_safety_task.rs | 1 - 
crates/testing/src/test_runner.rs | 8 +- crates/testing/src/view_sync_task.rs | 5 +- crates/testing/tests/da_task.rs | 2 +- crates/testing/tests/vid_task.rs | 2 +- crates/testing/tests/view_sync_task.rs | 2 +- crates/types/src/consensus.rs | 4 +- .../types/src/traits/node_implementation.rs | 10 +- 27 files changed, 232 insertions(+), 252 deletions(-) diff --git a/crates/hotshot/examples/combined/types.rs b/crates/hotshot/examples/combined/types.rs index 4d98d71f8e..94980e0925 100644 --- a/crates/hotshot/examples/combined/types.rs +++ b/crates/hotshot/examples/combined/types.rs @@ -1,7 +1,7 @@ use crate::infra::CombinedDARun; use hotshot::traits::implementations::{CombinedCommChannel, MemoryStorage}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; diff --git a/crates/hotshot/examples/infra/mod.rs b/crates/hotshot/examples/infra/mod.rs index 7300272be3..d9af9102ca 100644 --- a/crates/hotshot/examples/infra/mod.rs +++ b/crates/hotshot/examples/infra/mod.rs @@ -408,7 +408,7 @@ pub trait RunDA< error!("Starting HotShot example!"); let start = Instant::now(); - let mut event_stream = context.get_event_stream().await; + let mut event_stream = context.get_event_stream(); let mut anchor_view: TYPES::Time = ::genesis(); let mut num_successful_commits = 0; diff --git a/crates/hotshot/examples/libp2p/types.rs b/crates/hotshot/examples/libp2p/types.rs index e9d662e707..446905bab6 100644 --- a/crates/hotshot/examples/libp2p/types.rs +++ b/crates/hotshot/examples/libp2p/types.rs @@ -1,7 +1,7 @@ use crate::infra::Libp2pDARun; use hotshot::traits::implementations::{Libp2pCommChannel, MemoryStorage}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use 
hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; diff --git a/crates/hotshot/examples/webserver/types.rs b/crates/hotshot/examples/webserver/types.rs index 8c0b323329..46a466ed5f 100644 --- a/crates/hotshot/examples/webserver/types.rs +++ b/crates/hotshot/examples/webserver/types.rs @@ -1,7 +1,7 @@ use crate::infra::WebServerDARun; use hotshot::traits::implementations::{MemoryStorage, WebCommChannel}; use hotshot_testing::state_types::TestTypes; -use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType}; +use hotshot_types::traits::node_implementation::NodeImplementation; use serde::{Deserialize, Serialize}; use std::fmt::Debug; diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 4d0356dbd4..3cee346279 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -21,7 +21,7 @@ use crate::{ traits::{NodeImplementation, Storage}, types::{Event, SystemContextHandle}, }; -use async_broadcast::{broadcast, Receiver, Sender}; +use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; use async_compatibility_layer::{ art::{async_spawn, async_spawn_local}, channel::UnboundedSender, @@ -33,6 +33,7 @@ use custom_debug::Debug; use futures::join; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; use hotshot_task_impls::events::HotShotEvent; +use hotshot_task_impls::helpers::broadcast_event; use hotshot_task_impls::network; use hotshot_task::task::{Task, TaskRegistry}; @@ -65,7 +66,7 @@ use std::{ time::Duration, }; use tasks::add_vid_task; -use tracing::{debug, error, instrument, trace, warn}; +use tracing::{debug, instrument, trace, warn}; // -- Rexports // External @@ -146,10 +147,13 @@ pub struct SystemContextInner> { // global_registry: GlobalRegistry, /// Access to the output event stream. 
- pub output_event_stream: (Sender>, Receiver>), + pub output_event_stream: (Sender>, InactiveReceiver>), /// access to the internal event stream, in case we need to, say, shut something down - internal_event_stream: (Sender>, Receiver>), + internal_event_stream: ( + Sender>, + InactiveReceiver>, + ), /// uid for instrumentation pub id: u64, @@ -234,6 +238,9 @@ impl> SystemContext { }; let consensus = Arc::new(RwLock::new(consensus)); + let (internal_tx, internal_rx) = broadcast(100_000); + let (external_tx, external_rx) = broadcast(100_000); + let inner: Arc> = Arc::new(SystemContextInner { id: nonce, #[cfg(feature = "hotshot-testing")] @@ -245,14 +252,17 @@ impl> SystemContext { networks: Arc::new(networks), memberships: Arc::new(memberships), _metrics: consensus_metrics.clone(), - internal_event_stream: broadcast(100024), - output_event_stream: broadcast(100024), + internal_event_stream: (internal_tx, internal_rx.deactivate()), + output_event_stream: (external_tx, external_rx.deactivate()), }); Ok(Self { inner }) } /// "Starts" consensus by sending a `QCFormed` event + /// + /// # Panics + /// Panics if sending genesis fails pub async fn start_consensus(&self) { debug!("Starting Consensus"); self.inner @@ -262,7 +272,7 @@ impl> SystemContext { QuorumCertificate::genesis(), ))) .await - .unwrap(); + .expect("Genesis Broadcast failed"); } /// Marks a given view number as timed out. This should be called a fixed period after a round is started. 
@@ -301,15 +311,7 @@ impl> SystemContext { // TODO: remove with https://github.com/EspressoSystems/HotShot/issues/2407 async fn send_external_event(&self, event: Event) { debug!(?event, "send_external_event"); - if let Err(e) = self - .inner - .output_event_stream - .0 - .broadcast_direct(event) - .await - { - error!(?e, "Could not send event to event_sender"); - } + broadcast_event(event, &self.inner.output_event_stream.0).await; } /// Publishes a transaction asynchronously to the network @@ -434,7 +436,7 @@ impl> SystemContext { let handle = hotshot.clone().run_tasks().await; let (tx, rx) = hotshot.inner.internal_event_stream.clone(); - Ok((handle, tx, rx)) + Ok((handle, tx, rx.activate())) } /// Send a broadcast message. @@ -551,7 +553,7 @@ impl> SystemContext { add_network_event_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), quorum_network.clone(), quorum_membership, network::quorum_filter, @@ -560,7 +562,7 @@ impl> SystemContext { add_network_event_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), da_network.clone(), da_membership, network::committee_filter, @@ -569,7 +571,7 @@ impl> SystemContext { add_network_event_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), quorum_network.clone(), view_sync_membership, network::view_sync_filter, @@ -578,7 +580,7 @@ impl> SystemContext { add_network_event_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), quorum_network.clone(), vid_membership, network::vid_filter, @@ -587,7 +589,7 @@ impl> SystemContext { let consensus_state = add_consensus_task(output_event_stream.0.clone(), &handle).await; let task = Task::new( event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), registry.clone(), consensus_state, ); @@ -595,35 +597,31 @@ impl> SystemContext { add_da_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + 
event_rx.activate_cloned(), &handle, ) .await; add_vid_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), &handle, ) .await; add_transaction_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), &handle, ) .await; add_view_sync_task( registry.clone(), event_tx.clone(), - event_rx.clone(), + event_rx.activate_cloned(), &handle, ) .await; - // async_spawn(async move { - // let _ = registry.join_all().await; - // info!("Task runner exited!"); - // }); handle } } @@ -661,15 +659,7 @@ impl> ConsensusApi async fn send_event(&self, event: Event) { debug!(?event, "send_event"); - if let Err(e) = self - .inner - .output_event_stream - .0 - .broadcast_direct(event) - .await - { - error!(?e, "Could not send event to event_sender"); - } + broadcast_event(event, &self.inner.output_event_stream.0).await; } fn public_key(&self) -> &TYPES::SignatureKey { diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index e7bc4b5b11..24e5e44f38 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -14,7 +14,7 @@ use hotshot_task_impls::{ vid::VIDTaskState, view_sync::ViewSyncTaskState, }; -use hotshot_types::traits::{election::Membership, stake_table::StakeTableScheme}; +use hotshot_types::traits::election::Membership; use hotshot_types::{ event::Event, message::Messages, diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index dbc5572282..37e3bec2e4 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -1,7 +1,7 @@ //! 
Provides an event-streaming handle for a [`SystemContext`] running in the background use crate::{traits::NodeImplementation, types::Event, SystemContext}; -use async_broadcast::{Receiver, Sender}; +use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use commit::Committable; @@ -35,9 +35,12 @@ use tracing::error; #[derive(Clone)] pub struct SystemContextHandle> { /// The [sender](ChannelStream) for the output stream from the background process - pub(crate) output_event_stream: (Sender>, Receiver>), + pub(crate) output_event_stream: (Sender>, InactiveReceiver>), /// access to the internal ev ent stream, in case we need to, say, shut something down - pub(crate) internal_event_stream: (Sender>, Receiver>), + pub(crate) internal_event_stream: ( + Sender>, + InactiveReceiver>, + ), /// registry for controlling tasks pub(crate) registry: Arc, @@ -50,16 +53,16 @@ pub struct SystemContextHandle> { impl + 'static> SystemContextHandle { /// obtains a stream to expose to the user - pub async fn get_event_stream(&self) -> impl Stream> { - self.output_event_stream.1.clone() + pub fn get_event_stream(&self) -> impl Stream> { + self.output_event_stream.1.activate_cloned() } /// HACK so we can know the types when running tests... /// there are two cleaner solutions: /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper - pub async fn get_event_stream_known_impl(&self) -> Receiver> { - self.output_event_stream.1.clone() + pub fn get_event_stream_known_impl(&self) -> Receiver> { + self.output_event_stream.1.activate_cloned() } /// HACK so we can know the types when running tests... 
@@ -67,8 +70,8 @@ impl + 'static> SystemContextHandl /// - make the stream generic and in nodetypes or nodeimpelmentation /// - type wrapper /// NOTE: this is only used for sanity checks in our tests - pub async fn get_internal_event_stream_known_impl(&self) -> Receiver> { - self.internal_event_stream.1.clone() + pub fn get_internal_event_stream_known_impl(&self) -> Receiver> { + self.internal_event_stream.1.activate_cloned() } /// Gets the current committed state of the [`SystemContext`] instance @@ -121,7 +124,7 @@ impl + 'static> SystemContextHandl block_size: None, }, }; - let _ = self.output_event_stream.0.broadcast_direct(event).await; + crate::broadcast_event(event, &self.output_event_stream.0).await; } } else { // TODO (justin) this seems bad. I think we should hard error in this case?? diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index d5c110b499..1d30467374 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -1,6 +1,6 @@ use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, - helpers::cancel_task, + helpers::{broadcast_event, cancel_task}, vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_compatibility_layer::art::{async_sleep, async_spawn}; @@ -235,10 +235,7 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); - event_stream - .broadcast_direct(HotShotEvent::QuorumVoteSend(vote)) - .await - .unwrap(); + broadcast_event(HotShotEvent::QuorumVoteSend(vote), event_stream).await; if let Some(commit_and_metadata) = &self.payload_commitment_and_metadata { if commit_and_metadata.is_genesis { self.payload_commitment_and_metadata = None; @@ -340,10 +337,7 @@ impl, A: ConsensusApi + "Sending vote to next quorum leader {:?}", vote.get_view_number() + 1 ); - event_stream - .broadcast_direct(HotShotEvent::QuorumVoteSend(vote)) - .await - .unwrap(); + broadcast_event(HotShotEvent::QuorumVoteSend(vote), 
event_stream).await; return true; } } @@ -413,10 +407,7 @@ impl, A: ConsensusApi + .await; } - event_stream - .broadcast_direct(HotShotEvent::ViewChange(new_view)) - .await - .unwrap(); + broadcast_event(HotShotEvent::ViewChange(new_view), event_stream).await; // Spawn a timeout task if we did actually update view let timeout = self.timeout; @@ -427,10 +418,11 @@ impl, A: ConsensusApi + let view_number = self.cur_view + 1; async move { async_sleep(Duration::from_millis(timeout)).await; - stream - .broadcast_direct(HotShotEvent::Timeout(TYPES::Time::new(*view_number))) - .await - .unwrap(); + broadcast_event( + HotShotEvent::Timeout(TYPES::Time::new(*view_number)), + &stream, + ) + .await; } })); let consensus = self.consensus.read().await; @@ -729,13 +721,14 @@ impl, A: ConsensusApi + }, ) { error!("publishing view error"); - self.output_event_stream - .broadcast_direct(Event { + broadcast_event( + Event { view_number: view, event: EventType::Error { error: e.into() }, - }) - .await - .unwrap(); + }, + &self.output_event_stream, + ) + .await; } } @@ -759,18 +752,17 @@ impl, A: ConsensusApi + } #[allow(clippy::cast_precision_loss)] if new_decide_reached { - event_stream - .broadcast_direct(HotShotEvent::LeafDecided(leaf_views.clone())) - .await - .unwrap(); - let decide_sent = self.output_event_stream.broadcast_direct(Event { - view_number: consensus.last_decided_view, - event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - qc: Arc::new(new_decide_qc.unwrap()), - block_size: Some(included_txns_set.len().try_into().unwrap()), + let decide_sent = broadcast_event( + Event { + view_number: consensus.last_decided_view, + event: EventType::Decide { + leaf_chain: Arc::new(leaf_views), + qc: Arc::new(new_decide_qc.unwrap()), + block_size: Some(included_txns_set.len().try_into().unwrap()), + }, }, - }); + &self.output_event_stream, + ); let old_anchor_view = consensus.last_decided_view; consensus .collect_garbage(old_anchor_view, new_anchor_view) @@ -795,7 +787,7 
@@ impl, A: ConsensusApi + debug!("Sending Decide for view {:?}", consensus.last_decided_view); debug!("Decided txns len {:?}", included_txns_set.len()); - decide_sent.await.unwrap(); + decide_sent.await; debug!("decide send succeeded"); } @@ -1058,15 +1050,16 @@ impl, A: ConsensusApi + return; } - self.output_event_stream - .broadcast_direct(Event { + broadcast_event( + Event { view_number: old_view_number, event: EventType::ViewFinished { view_number: old_view_number, }, - }) - .await - .unwrap(); + }, + &self.output_event_stream, + ) + .await; } HotShotEvent::Timeout(view) => { // NOTE: We may optionally have the timeout task listen for view change events @@ -1101,21 +1094,20 @@ impl, A: ConsensusApi + return; }; - event_stream - .broadcast_direct(HotShotEvent::TimeoutVoteSend(vote)) - .await - .unwrap(); + broadcast_event(HotShotEvent::TimeoutVoteSend(vote), &event_stream).await; debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view ); - self.output_event_stream - .broadcast_direct(Event { + + broadcast_event( + Event { view_number: view, event: EventType::ReplicaViewTimeout { view_number: view }, - }) - .await - .unwrap(); + }, + &self.output_event_stream, + ) + .await; let consensus = self.consensus.read().await; consensus.metrics.number_of_timeouts.add(1); } @@ -1263,13 +1255,11 @@ impl, A: ConsensusApi + leaf.view_number, "" ); - event_stream - .broadcast_direct(HotShotEvent::QuorumProposalSend( - message.clone(), - self.public_key.clone(), - )) - .await - .unwrap(); + broadcast_event( + HotShotEvent::QuorumProposalSend(message.clone(), self.public_key.clone()), + event_stream, + ) + .await; self.payload_commitment_and_metadata = None; return true; diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 6912091d79..bd9ae9cb9d 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -1,5 +1,6 @@ use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, + 
helpers::broadcast_event, vote::{create_vote_accumulator, AccumulatorInfo, VoteCollectionTaskState}, }; use async_broadcast::Sender; @@ -169,10 +170,8 @@ impl, A: ConsensusApi + // self.cur_view = view; debug!("Sending vote to the DA leader {:?}", vote.get_view_number()); - event_stream - .broadcast_direct(HotShotEvent::DAVoteSend(vote)) - .await - .unwrap(); + + broadcast_event(HotShotEvent::DAVoteSend(vote), &event_stream).await; let mut consensus = self.consensus.write().await; // Ensure this view is in the view map for garbage collection, but do not overwrite if @@ -303,13 +302,11 @@ impl, A: ConsensusApi + _pd: PhantomData, }; - event_stream - .broadcast_direct(HotShotEvent::DAProposalSend( - message.clone(), - self.public_key.clone(), - )) - .await - .unwrap(); + broadcast_event( + HotShotEvent::DAProposalSend(message.clone(), self.public_key.clone()), + &event_stream, + ) + .await; } HotShotEvent::Timeout(view) => { diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 2aea46dc2e..fe807187b6 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -42,6 +42,7 @@ impl TaskState for TestHarnessState { /// # Panics /// Panics if any state the test expects is not set. 
Panicing causes a test failure #[allow(clippy::implicit_hasher)] +#[allow(clippy::panic)] pub async fn run_harness>>( input: Vec>, expected_output: HashMap, usize>, diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index c50f776500..7e7b63c830 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -1,3 +1,4 @@ +use async_broadcast::{SendError, Sender}; #[cfg(async_executor_impl = "async-std")] use async_std::task::JoinHandle; #[cfg(async_executor_impl = "tokio")] @@ -10,3 +11,22 @@ pub async fn cancel_task(task: JoinHandle) { #[cfg(async_executor_impl = "tokio")] task.abort(); } + +/// Helper function to send events and log errors +pub async fn broadcast_event(event: E, sender: &Sender) { + match sender.broadcast_direct(event).await { + Ok(None) => (), + Ok(Some(overflowed)) => { + tracing::error!( + "Event sender queue overflow, Oldest event removed form queue: {:?}", + overflowed + ); + } + Err(SendError(e)) => { + tracing::error!( + "Event: {:?}\n Sending failed, event stream probably shutdown", + e + ); + } + } +} diff --git a/crates/task-impls/src/lib.rs b/crates/task-impls/src/lib.rs index 8521b525d4..ce9f3655ea 100644 --- a/crates/task-impls/src/lib.rs +++ b/crates/task-impls/src/lib.rs @@ -29,4 +29,4 @@ pub mod vid; pub mod vote; /// Helper functions used by any task -mod helpers; +pub mod helpers; diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index ee219de20a..648d2994dd 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -1,4 +1,7 @@ -use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; use async_broadcast::Sender; use either::Either::{self, Left, Right}; use hotshot_constants::PROGRAM_PROTOCOL_VERSION; @@ -165,7 +168,7 @@ impl NetworkMessageTaskState { // TODO (Keyao benchmarking) Update these event variants (similar to the 
// `TransactionsRecv` event) so we can send one event for a vector of messages. // - self.event_stream.broadcast(event).await.unwrap(); + broadcast_event(event, &self.event_stream).await; } MessageKind::Data(message) => match message { hotshot_types::message::DataMessage::SubmitTransaction(transaction, _) => { @@ -175,10 +178,11 @@ impl NetworkMessageTaskState { }; } if !transactions.is_empty() { - self.event_stream - .broadcast_direct(HotShotEvent::TransactionsRecv(transactions)) - .await - .unwrap(); + broadcast_event( + HotShotEvent::TransactionsRecv(transactions), + &self.event_stream, + ) + .await; } } } diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index c3d3ef2f8b..d07cfafec1 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -1,4 +1,7 @@ -use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; use async_broadcast::Sender; use async_compatibility_layer::{ art::async_timeout, @@ -245,14 +248,11 @@ impl, A: ConsensusApi + // send the sequenced transactions to VID and DA tasks let block_view = if make_block { view } else { view + 1 }; - event_stream - .broadcast_direct(HotShotEvent::TransactionsSequenced( - encoded_transactions, - metadata, - block_view, - )) - .await - .unwrap(); + broadcast_event( + HotShotEvent::TransactionsSequenced(encoded_transactions, metadata, block_view), + &event_stream, + ) + .await; return None; } diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 3ad97572f0..714c502e31 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -1,4 +1,5 @@ use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use crate::helpers::broadcast_event; use async_broadcast::Sender; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -95,23 +96,25 @@ impl, A: ConsensusApi + // Unwrap here will just 
propogate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); // send the commitment and metadata to consensus for block building - event_stream - .broadcast_direct(HotShotEvent::SendPayloadCommitmentAndMetadata( + broadcast_event( + HotShotEvent::SendPayloadCommitmentAndMetadata( vid_disperse.commit, metadata, view_number, - )) - .await - .unwrap(); + ), + &event_stream, + ) + .await; // send the block to the VID dispersal function - event_stream - .broadcast_direct(HotShotEvent::BlockReady( + broadcast_event( + HotShotEvent::BlockReady( VidDisperse::from_membership(view_number, vid_disperse, &self.membership), view_number, - )) - .await - .unwrap(); + ), + &event_stream, + ) + .await; } HotShotEvent::BlockReady(vid_disperse, view_number) => { @@ -123,17 +126,18 @@ impl, A: ConsensusApi + return None; }; debug!("publishing VID disperse for view {}", *view_number); - event_stream - .broadcast_direct(HotShotEvent::VidDisperseSend( + broadcast_event( + HotShotEvent::VidDisperseSend( Proposal { signature, data: vid_disperse, _pd: PhantomData, }, self.public_key.clone(), - )) - .await - .unwrap(); + ), + &event_stream, + ) + .await; } HotShotEvent::ViewChange(view) => { diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 1e783f8b92..e23e03bc3f 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -1,7 +1,7 @@ #![allow(clippy::module_name_repetitions)] use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, - helpers::cancel_task, + helpers::{broadcast_event, cancel_task}, vote::{create_vote_accumulator, AccumulatorInfo, HandleVoteEvent, VoteCollectionTaskState}, }; use async_broadcast::Sender; @@ -544,12 +544,11 @@ impl< } else { // If this is the first timeout we've seen advance to the next view self.current_view = view_number; - event_stream - .broadcast_direct(HotShotEvent::ViewChange(TYPES::Time::new( - *self.current_view, - ))) 
- .await - .unwrap(); + broadcast_event( + HotShotEvent::ViewChange(TYPES::Time::new(*self.current_view)), + &event_stream, + ) + .await; } } @@ -611,10 +610,8 @@ impl, A: ConsensusApi + let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); if let GeneralConsensusMessage::ViewSyncCommitVote(vote) = message { - event_stream - .broadcast_direct(HotShotEvent::ViewSyncCommitVoteSend(vote)) - .await - .unwrap(); + broadcast_event(HotShotEvent::ViewSyncCommitVoteSend(vote), &event_stream) + .await; } if let Some(timeout_task) = self.timeout_task.take() { @@ -630,14 +627,16 @@ impl, A: ConsensusApi + async move { async_sleep(timeout).await; info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); - stream - .broadcast_direct(HotShotEvent::ViewSyncTimeout( + + broadcast_event( + HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, phase, - )) - .await - .unwrap(); + ), + &stream, + ) + .await; } })); } @@ -684,10 +683,8 @@ impl, A: ConsensusApi + let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); if let GeneralConsensusMessage::ViewSyncFinalizeVote(vote) = message { - event_stream - .broadcast_direct(HotShotEvent::ViewSyncFinalizeVoteSend(vote)) - .await - .unwrap(); + broadcast_event(HotShotEvent::ViewSyncFinalizeVoteSend(vote), &event_stream) + .await; } info!( @@ -695,15 +692,9 @@ impl, A: ConsensusApi + *self.next_view ); - event_stream - .broadcast_direct(HotShotEvent::ViewChange(self.next_view - 1)) - .await - .unwrap(); + broadcast_event(HotShotEvent::ViewChange(self.next_view - 1), &event_stream).await; - event_stream - .broadcast_direct(HotShotEvent::ViewChange(self.next_view)) - .await - .unwrap(); + broadcast_event(HotShotEvent::ViewChange(self.next_view), &event_stream).await; if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; @@ -720,14 +711,15 @@ impl, A: ConsensusApi + "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", relay 
); - stream - .broadcast_direct(HotShotEvent::ViewSyncTimeout( + broadcast_event( + HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, phase, - )) - .await - .unwrap(); + ), + &stream, + ) + .await; } })); } @@ -782,10 +774,7 @@ impl, A: ConsensusApi + cancel_task(timeout_task).await; } - event_stream - .broadcast_direct(HotShotEvent::ViewChange(self.next_view)) - .await - .unwrap(); + broadcast_event(HotShotEvent::ViewChange(self.next_view), &event_stream).await; return Some(HotShotTaskCompleted); } @@ -810,10 +799,8 @@ impl, A: ConsensusApi + let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - event_stream - .broadcast_direct(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) - .await - .unwrap(); + broadcast_event(HotShotEvent::ViewSyncPreCommitVoteSend(vote), &event_stream) + .await; } self.timeout_task = Some(async_spawn({ @@ -824,14 +811,15 @@ impl, A: ConsensusApi + async move { async_sleep(timeout).await; info!("Vote sending timed out in ViewSyncTrigger"); - stream - .broadcast_direct(HotShotEvent::ViewSyncTimeout( + broadcast_event( + HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, ViewSyncPhase::None, - )) - .await - .unwrap(); + ), + &stream, + ) + .await; } })); @@ -867,10 +855,11 @@ impl, A: ConsensusApi + GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); if let GeneralConsensusMessage::ViewSyncPreCommitVote(vote) = message { - event_stream - .broadcast_direct(HotShotEvent::ViewSyncPreCommitVoteSend(vote)) - .await - .unwrap(); + broadcast_event( + HotShotEvent::ViewSyncPreCommitVoteSend(vote), + &event_stream, + ) + .await; } } ViewSyncPhase::Finalize => { @@ -890,14 +879,15 @@ impl, A: ConsensusApi + "Vote sending timed out in ViewSyncTimeout relay = {}", relay ); - stream - .broadcast_direct(HotShotEvent::ViewSyncTimeout( + broadcast_event( + HotShotEvent::ViewSyncTimeout( TYPES::Time::new(*next_view), relay, 
last_seen_certificate, - )) - .await - .unwrap(); + ), + &stream, + ) + .await; } })); diff --git a/crates/task-impls/src/vote.rs b/crates/task-impls/src/vote.rs index ebb8126949..39775ccd55 100644 --- a/crates/task-impls/src/vote.rs +++ b/crates/task-impls/src/vote.rs @@ -1,6 +1,9 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; -use crate::events::{HotShotEvent, HotShotTaskCompleted}; +use crate::{ + events::{HotShotEvent, HotShotTaskCompleted}, + helpers::broadcast_event, +}; use async_broadcast::Sender; use async_trait::async_trait; use bitvec::prelude::*; @@ -73,7 +76,7 @@ impl< pub async fn accumulate_vote( &mut self, vote: &VOTE, - event_stream: &&Sender>, + event_stream: &Sender>, ) -> Option { if vote.get_leader(&self.membership) != self.public_key { return None; @@ -94,10 +97,8 @@ impl< Either::Left(()) => None, Either::Right(cert) => { debug!("Certificate Formed! {:?}", cert); - event_stream - .broadcast_direct(VOTE::make_cert_event(cert, &self.public_key)) - .await - .unwrap(); + + broadcast_event(VOTE::make_cert_event(cert, &self.public_key), event_stream).await; self.accumulator = None; Some(HotShotTaskCompleted) } @@ -344,7 +345,7 @@ impl HandleVoteEvent, QuorumCertificat sender: &Sender>, ) -> Option { match event { - HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote, &sender).await, + HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, _ => None, } } @@ -363,7 +364,7 @@ impl HandleVoteEvent, DACertificate sender: &Sender>, ) -> Option { match event { - HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote, &sender).await, + HotShotEvent::DAVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, _ => None, } } @@ -382,7 +383,7 @@ impl HandleVoteEvent, TimeoutCertific sender: &Sender>, ) -> Option { match event { - HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote, &sender).await, + HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(&vote, 
sender).await, _ => None, } } @@ -403,7 +404,7 @@ impl ) -> Option { match event { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - self.accumulate_vote(&vote, &sender).await + self.accumulate_vote(&vote, sender).await } _ => None, } @@ -424,9 +425,7 @@ impl sender: &Sender>, ) -> Option { match event { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => { - self.accumulate_vote(&vote, &sender).await - } + HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(&vote, sender).await, _ => None, } } @@ -447,7 +446,7 @@ impl ) -> Option { match event { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - self.accumulate_vote(&vote, &sender).await + self.accumulate_vote(&vote, sender).await } _ => None, } diff --git a/crates/testing/src/completion_task.rs b/crates/testing/src/completion_task.rs index 8bd3c2a635..94efb83b3c 100644 --- a/crates/testing/src/completion_task.rs +++ b/crates/testing/src/completion_task.rs @@ -7,6 +7,7 @@ use tokio::task::JoinHandle; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_spawn, async_timeout}; use hotshot::traits::TestableNodeImplementation; +use hotshot_task_impls::helpers::broadcast_event; use hotshot_types::traits::node_implementation::NodeType; use snafu::Snafu; @@ -38,15 +39,12 @@ impl> CompletionTask, ViewSyncTask>::new( diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 6e4e59cefe..bb77407f68 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -4,10 +4,7 @@ use hotshot_types::traits::node_implementation::{NodeType, TestableNodeImplement use snafu::Snafu; use std::{collections::HashSet, marker::PhantomData}; -use crate::{ - test_runner::{HotShotTaskCompleted, Node}, - GlobalTestEvent, -}; +use crate::{test_runner::HotShotTaskCompleted, GlobalTestEvent}; /// `ViewSync` Task error #[derive(Snafu, Debug, Clone)] diff --git a/crates/testing/tests/da_task.rs b/crates/testing/tests/da_task.rs index 
81a73945b8..8660e45159 100644 --- a/crates/testing/tests/da_task.rs +++ b/crates/testing/tests/da_task.rs @@ -100,7 +100,7 @@ async fn test_da_task() { quorum_membership: api.inner.memberships.quorum_membership.clone().into(), cur_view: ViewNumber::new(0), vote_collector: None.into(), - public_key: api.public_key().clone(), + public_key: *api.public_key(), private_key: api.private_key().clone(), id: handle.hotshot.inner.id, }; diff --git a/crates/testing/tests/vid_task.rs b/crates/testing/tests/vid_task.rs index 58fa821a30..ed1607be9d 100644 --- a/crates/testing/tests/vid_task.rs +++ b/crates/testing/tests/vid_task.rs @@ -108,7 +108,7 @@ async fn test_vid_task() { vote_collector: None, network: api.inner.networks.quorum_network.clone().into(), membership: api.inner.memberships.vid_membership.clone().into(), - public_key: api.public_key().clone(), + public_key: *api.public_key(), private_key: api.private_key().clone(), id: handle.hotshot.inner.id, }; diff --git a/crates/testing/tests/view_sync_task.rs b/crates/testing/tests/view_sync_task.rs index 308c271d3c..f12c47ee54 100644 --- a/crates/testing/tests/view_sync_task.rs +++ b/crates/testing/tests/view_sync_task.rs @@ -57,7 +57,7 @@ async fn test_view_sync_task() { next_view: ViewNumber::new(0), network: api.inner.networks.quorum_network.clone().into(), membership: api.inner.memberships.view_sync_membership.clone().into(), - public_key: api.public_key().clone(), + public_key: *api.public_key(), private_key: api.private_key().clone(), api, num_timeouts_tracked: 0, diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index c8525969d3..995faa8853 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -1,8 +1,6 @@ //! 
Provides the core consensus types -pub use crate::{ - utils::{View, ViewInner}, -}; +pub use crate::utils::{View, ViewInner}; use displaydoc::Display; use crate::{ diff --git a/crates/types/src/traits/node_implementation.rs b/crates/types/src/traits/node_implementation.rs index de351eec70..60a10271c8 100644 --- a/crates/types/src/traits/node_implementation.rs +++ b/crates/types/src/traits/node_implementation.rs @@ -13,22 +13,14 @@ use super::{ }; use crate::{ data::{Leaf, TestableLeaf}, - message::ProcessedSequencingMessage, traits::{ election::Membership, network::TestableChannelImplementation, signature_key::SignatureKey, storage::Storage, BlockPayload, }, }; -use async_compatibility_layer::channel::{unbounded, UnboundedReceiver, UnboundedSender}; -use async_lock::Mutex; use async_trait::async_trait; use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - fmt::Debug, - hash::Hash, - sync::{atomic::AtomicBool, Arc}, -}; +use std::{fmt::Debug, hash::Hash, sync::Arc}; /// Node implementation aggregate trait /// From b56f870d3dab059438b0440f0b221b4d55ef4afd Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 16:25:58 -0500 Subject: [PATCH 17/28] lint after merge --- crates/hotshot/examples/infra/mod.rs | 1 - crates/hotshot/src/lib.rs | 2 +- crates/hotshot/src/tasks/mod.rs | 1 - crates/testing/src/spinning_task.rs | 7 ++----- crates/types/src/traits/node_implementation.rs | 3 +-- 5 files changed, 4 insertions(+), 10 deletions(-) diff --git a/crates/hotshot/examples/infra/mod.rs b/crates/hotshot/examples/infra/mod.rs index b589656a94..3d791a063f 100644 --- a/crates/hotshot/examples/infra/mod.rs +++ b/crates/hotshot/examples/infra/mod.rs @@ -23,7 +23,6 @@ use hotshot_orchestrator::{ client::{OrchestratorClient, ValidatorArgs}, config::{NetworkConfig, NetworkConfigFile, WebServerConfig}, }; -use hotshot_task::task::FilterEvent; use hotshot_testing::{ block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, 
state_types::TestInstanceState, diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 2ee1d47520..5ecd28c3ef 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -66,7 +66,7 @@ use std::{ time::Duration, }; use tasks::add_vid_task; -use tracing::{debug, info, instrument, trace, warn}; +use tracing::{debug, instrument, trace, warn}; // -- Rexports // External diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index ae8b78b09e..8f848e7b17 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -32,7 +32,6 @@ use std::{ sync::Arc, time::Duration, }; -use tracing::error; /// event for global event stream #[derive(Clone, Debug)] diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index dd691b0854..3c978ab4b1 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -1,16 +1,13 @@ use std::collections::HashMap; -use hotshot::{traits::TestableNodeImplementation, SystemContext}; +use hotshot::traits::TestableNodeImplementation; use crate::test_runner::HotShotTaskCompleted; use crate::test_runner::LateStartNode; use crate::test_runner::Node; use hotshot_task::task::{Task, TaskState, TestTaskState}; use hotshot_types::traits::network::CommunicationChannel; -use hotshot_types::{ - event::Event, - traits::node_implementation::{ConsensusTime, NodeType}, -}; +use hotshot_types::{event::Event, traits::node_implementation::NodeType}; use snafu::Snafu; use std::collections::BTreeMap; /// convience type for state and block diff --git a/crates/types/src/traits/node_implementation.rs b/crates/types/src/traits/node_implementation.rs index 80f8c20fa1..d2b9f6cb99 100644 --- a/crates/types/src/traits/node_implementation.rs +++ b/crates/types/src/traits/node_implementation.rs @@ -22,12 +22,11 @@ use async_trait::async_trait; use commit::Committable; use serde::{Deserialize, Serialize}; use std::{ - collections::BTreeMap, 
fmt::Debug, hash::Hash, ops, ops::{Deref, Sub}, - sync::{atomic::AtomicBool, Arc}, + sync::Arc, }; /// Node implementation aggregate trait /// From ca58069557b3a96095c1b02ea8332b81074693ed Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 17:55:42 -0500 Subject: [PATCH 18/28] fixing unit tests and lints for both async types --- crates/hotshot/src/lib.rs | 6 ++++- crates/hotshot/src/tasks/mod.rs | 35 +++++++++++++++---------- crates/task-impls/src/consensus.rs | 1 + crates/task-impls/src/harness.rs | 6 +++-- crates/task/src/task.rs | 23 +++++++++++----- crates/testing/src/test_runner.rs | 42 +++++++++++++++++++++++------- 6 files changed, 81 insertions(+), 32 deletions(-) diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 5ecd28c3ef..86e987ea56 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -246,7 +246,11 @@ impl> SystemContext { let consensus = Arc::new(RwLock::new(consensus)); let (internal_tx, internal_rx) = broadcast(100_000); - let (external_tx, external_rx) = broadcast(100_000); + let (mut external_tx, external_rx) = broadcast(100_000); + + // This makes it so we won't block on broadcasting if there is not a receiver + // Our own copy of the receiver is inactive so it doesn't count. 
+ external_tx.set_await_active(false); let inner: Arc> = Arc::new(SystemContextInner { id: nonce, diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index 8f848e7b17..e0a4ff023c 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -32,6 +32,7 @@ use std::{ sync::Arc, time::Duration, }; +use tracing::error; /// event for global event stream #[derive(Clone, Debug)] @@ -59,14 +60,17 @@ pub async fn add_network_message_task Messages(msgs), + Err(err) => { + error!("failed to receive broadcast messages: {err}"); + + // return zero messages so we sleep and try again + Messages(vec![]) + } + }; if msgs.0.is_empty() { async_sleep(Duration::from_millis(100)).await; } else { @@ -76,14 +80,17 @@ pub async fn add_network_message_task Messages(msgs), + Err(err) => { + error!("failed to receive direct messages: {err}"); + + // return zero messages so we sleep and try again + Messages(vec![]) + } + }; if msgs.0.is_empty() { async_sleep(Duration::from_millis(100)).await; } else { diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index 20ed69054b..40f4be43bd 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -1307,6 +1307,7 @@ impl, A: ConsensusApi + where Self: Sized, { + tracing::error!("consensus get event {:?}", event); // TODO: Don't clone the sender let sender = task.clone_sender(); info!("sender queue len {}", sender.len()); diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index fe807187b6..9c7fa44e03 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -70,11 +70,11 @@ pub async fn run_harness>>( ); let task = Task::new(to_test.clone(), from_test.clone(), registry.clone(), state); - tasks.push(task.run()); tasks.push(test_task.run()); + tasks.push(task.run()); for event in input { - let _ = to_task.broadcast(event).await.unwrap(); + 
to_task.broadcast_direct(event).await.unwrap(); } if async_timeout(Duration::from_secs(2), futures::future::join_all(tasks)) @@ -100,6 +100,7 @@ pub fn handle_event( task: &mut Task>, allow_extra_output: bool, ) -> Option { + tracing::error!("got event {:?}", event); let state = task.state_mut(); // Check the output in either case: // * We allow outputs only in our expected output set. @@ -119,6 +120,7 @@ pub fn handle_event( } if state.expected_output.is_empty() { + tracing::error!("test harness task completed"); return Some(HotShotTaskCompleted); } diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index 0ae39d8fd4..c5737a30f9 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -8,10 +8,13 @@ use async_std::{ sync::RwLock, task::{spawn, JoinHandle}, }; -use futures::{ - future::{select_all, try_join_all}, - Future, -}; +use futures::{future::select_all, Future}; + +#[cfg(async_executor_impl = "async-std")] +use futures::future::join_all; + +#[cfg(async_executor_impl = "tokio")] +use futures::future::try_join_all; #[cfg(async_executor_impl = "tokio")] use tokio::{ @@ -359,7 +362,15 @@ mod tests { let handle2 = test2.run(); sleep(Duration::from_millis(30)).await; msg_tx.broadcast("done".into()).await.unwrap(); - handle.await.unwrap(); - handle2.await.unwrap(); + #[cfg(async_executor_impl = "tokio")] + { + handle.await.unwrap(); + handle2.await.unwrap(); + } + #[cfg(async_executor_impl = "async-std")] + { + handle.await; + handle2.await; + } } } diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 0edf8147c2..38eff9b1d2 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -233,13 +233,14 @@ where } task_futs.push(completion_task.run()); task_futs.push(spinning_task.run()); - - let results = join_all(task_futs).await; - tracing::error!("test tasks joined"); let mut error_list = vec![]; - for result in results { - match result { - Ok(res) => match res { + + 
#[cfg(async_executor_impl = "async-std")] + { + let results = join_all(task_futs).await; + tracing::error!("test tasks joined"); + for result in results { + match result { HotShotTaskCompleted::ShutDown => { info!("Task shut down successfully"); } @@ -247,12 +248,35 @@ where _ => { panic!("Future impl for task abstraction failed! This should never happen"); } - }, - Err(e) => { - panic!("Error Joining the test task {:?}", e); } } } + + #[cfg(async_executor_impl = "tokio")] + { + let results = join_all(task_futs).await; + + tracing::error!("test tasks joined"); + for result in results { + match result { + Ok(res) => { + match res { + HotShotTaskCompleted::ShutDown => { + info!("Task shut down successfully"); + } + HotShotTaskCompleted::Error(e) => error_list.push(e), + _ => { + panic!("Future impl for task abstraction failed! This should never happen"); + } + } + } + Err(e) => { + panic!("Error Joining the test task {:?}", e); + } + } + } + } + assert!( error_list.is_empty(), "TEST FAILED! Results: {error_list:?}" From 7350604bb9c62fcbbc621a5b33c2de977a5028dc Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 21:47:21 -0500 Subject: [PATCH 19/28] remove superfluous log and fix doc --- crates/hotshot/src/types/handle.rs | 6 ++++-- crates/task-impls/src/consensus.rs | 3 +-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 9c96052562..ea9f9bda53 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -37,9 +37,11 @@ use tracing::error; /// the underlying storage. #[derive(Clone)] pub struct SystemContextHandle> { - /// The [sender](ChannelStream) for the output stream from the background process + /// The [sender](Sender) and an `InactiveReceiver` to keep the channel open. + /// The Channel will output all the events. Subscribers will get an activated + /// clone of the `Receiver` when they get output stream. 
pub(crate) output_event_stream: (Sender>, InactiveReceiver>), - /// access to the internal ev ent stream, in case we need to, say, shut something down + /// access to the internal event stream, in case we need to, say, shut something down pub(crate) internal_event_stream: ( Sender>, InactiveReceiver>, diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index 40f4be43bd..f9114bcc6d 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -1307,10 +1307,9 @@ impl, A: ConsensusApi + where Self: Sized, { - tracing::error!("consensus get event {:?}", event); // TODO: Don't clone the sender let sender = task.clone_sender(); - info!("sender queue len {}", sender.len()); + tracing::trace!("sender queue len {}", sender.len()); task.state_mut().handle(event, sender).await; None } From 1c2494200ae41fcd88fe7204f7c0838b1bf73f11 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 21:49:08 -0500 Subject: [PATCH 20/28] Remove Cargo.lock from tasks --- crates/task/Cargo.lock | 542 ----------------------------------------- 1 file changed, 542 deletions(-) delete mode 100644 crates/task/Cargo.lock diff --git a/crates/task/Cargo.lock b/crates/task/Cargo.lock deleted file mode 100644 index f8711d97b6..0000000000 --- a/crates/task/Cargo.lock +++ /dev/null @@ -1,542 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "async-channel" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" -dependencies = [ - "concurrent-queue", - "event-listener", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "backtrace" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "broadcast" -version = "0.1.0" -dependencies = [ - "async-channel", - "futures", - "futures-core", - "tokio", - "tokio-util", -] - -[[package]] -name = "bytes" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" - -[[package]] -name = "cc" -version = "1.0.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "concurrent-queue" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" - -[[package]] -name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener", - "pin-project-lite", -] - -[[package]] -name = "futures" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.30" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" - -[[package]] -name = "futures-executor" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" - -[[package]] -name = "futures-macro" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" - -[[package]] -name = "futures-task" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" - -[[package]] -name = "futures-util" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "gimli" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" - -[[package]] -name = "hermit-abi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" - -[[package]] -name = "libc" -version = "0.2.152" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" - -[[package]] -name = "lock_api" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "memchr" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" - -[[package]] -name = "miniz_oxide" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" -dependencies = [ - "libc", - "wasi", - "windows-sys", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.32.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" -dependencies = [ - "memchr", -] - -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - -[[package]] -name = "parking_lot" -version = "0.12.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "proc-macro2" -version = "1.0.76" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "signal-hook-registry" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" -dependencies = [ - "libc", -] - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b187f0231d56fe41bfb12034819dd2bf336422a5866de41bc3fec4b2e3883e8" - -[[package]] -name = "socket2" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" -dependencies = [ - "libc", - "windows-sys", -] - -[[package]] -name = "syn" -version = "2.0.48" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "task" -version = "0.1.0" -dependencies = [ - "broadcast", - "tokio", -] - -[[package]] -name = "tokio" -version = "1.35.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" -dependencies = [ - "backtrace", - "bytes", - "libc", - "mio", - "num_cpus", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "windows-sys", -] - -[[package]] -name = "tokio-macros" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - 
-[[package]] -name = "tokio-util" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "unicode-ident" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" From 992f6b6929af83367cd9d5442d60302b1e83c90b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 31 Jan 2024 21:54:59 -0500 Subject: [PATCH 21/28] cleanup --- crates/task-impls/src/harness.rs | 1 - crates/testing/src/overall_safety_task.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 9c7fa44e03..3c0926e412 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -100,7 +100,6 @@ pub fn handle_event( task: &mut Task>, allow_extra_output: bool, ) -> Option { - tracing::error!("got event {:?}", event); let state = task.state_mut(); // Check the output in either case: // * We allow outputs only in our expected output set. 
diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 453e625719..d800e7c99d 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -113,7 +113,6 @@ impl> TaskState }, ))); } - // TODO check if we got enough successful views Some(HotShotTaskCompleted::ShutDown) } } From 8a0dec00f72df3a6e0e46a911de04081eac48717 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Tue, 6 Feb 2024 15:09:01 -0500 Subject: [PATCH 22/28] Addressing comments --- crates/hotshot/src/lib.rs | 13 ++++++------- crates/hotshot/src/tasks/mod.rs | 21 +++++++++++++++++---- crates/task-impls/src/harness.rs | 2 +- crates/task-impls/src/network.rs | 4 ++-- crates/testing/tests/consensus_task.rs | 15 ++++++++++----- 5 files changed, 36 insertions(+), 19 deletions(-) diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 92dd96aaac..8818d084f3 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -33,7 +33,7 @@ use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::helpers::broadcast_event; use hotshot_task_impls::network; -use hotshot_task::task::{Task, TaskRegistry}; +use hotshot_task::task::TaskRegistry; use hotshot_types::{ consensus::{Consensus, ConsensusMetricsValue, View, ViewInner}, data::Leaf, @@ -509,14 +509,13 @@ impl> SystemContext { network::vid_filter, ) .await; - let consensus_state = add_consensus_task(output_event_stream.0.clone(), &handle).await; - let task = Task::new( + add_consensus_task( + registry.clone(), event_tx.clone(), event_rx.activate_cloned(), - registry.clone(), - consensus_state, - ); - registry.run_task(task).await; + &handle, + ) + .await; add_da_task( registry.clone(), event_tx.clone(), diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index e0a4ff023c..000bc5adf8 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -123,10 +123,10 @@ pub async fn 
add_network_event_task>( +pub async fn create_consensus_state>( output_stream: Sender>, handle: &SystemContextHandle, ) -> ConsensusTaskState> { @@ -184,8 +184,21 @@ pub async fn add_consensus_task>( .inject_consensus_info(ConsensusIntentEvent::PollForLatestViewSyncCertificate) .await; consensus_state - // let task = Task::new(tx, rx, task_reg.clone(), consensus_state); - // task_reg.run_task(task).await; +} + +/// add the consensus task +/// # Panics +/// Is unable to panic. This section here is just to satisfy clippy +pub async fn add_consensus_task>( + task_reg: Arc, + tx: Sender>, + rx: Receiver>, + handle: &SystemContextHandle, +) { + let state = + create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), handle).await; + let task = Task::new(tx, rx, task_reg.clone(), state); + task_reg.run_task(task).await; } /// add the VID task diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 3c0926e412..7cb9ce7d4d 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -10,7 +10,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; pub struct TestHarnessState { /// The expected events we get from the test. 
Maps an event to the number of times we expect to see it expected_output: HashMap, usize>, - /// + /// If true we won't fail the test if extra events come in allow_extra_output: bool, } diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 48695c076d..7fb7ae00ca 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -357,8 +357,8 @@ impl> error!("Networking task shutting down"); return Some(HotShotTaskCompleted); } - _event => { - // error!("Receieved unexpected message in network task {:?}", event); + event => { + error!("Receieved unexpected message in network task {:?}", event); return None; } }; diff --git a/crates/testing/tests/consensus_task.rs b/crates/testing/tests/consensus_task.rs index fe3569f25f..8ed52d6aea 100644 --- a/crates/testing/tests/consensus_task.rs +++ b/crates/testing/tests/consensus_task.rs @@ -1,6 +1,6 @@ #![allow(clippy::panic)] use commit::Committable; -use hotshot::{tasks::add_consensus_task, types::SystemContextHandle, HotShotConsensusApi}; +use hotshot::{types::SystemContextHandle, HotShotConsensusApi}; use hotshot_task_impls::events::HotShotEvent; use hotshot_testing::{ node_types::{MemoryImpl, TestTypes}, @@ -81,6 +81,7 @@ async fn build_vote( )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] async fn test_consensus_task() { + use hotshot::tasks::create_consensus_state; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; use hotshot_types::simple_certificate::QuorumCertificate; @@ -120,7 +121,7 @@ async fn test_consensus_task() { } let consensus_state = - add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; run_harness(input, output, consensus_state, false).await; } @@ -132,6 +133,7 @@ async fn test_consensus_task() { )] #[cfg_attr(async_executor_impl = "async-std", 
async_std::test)] async fn test_consensus_vote() { + use hotshot::tasks::create_consensus_state; use hotshot_task_impls::harness::run_harness; use hotshot_testing::task_helpers::build_system_handle; @@ -164,7 +166,7 @@ async fn test_consensus_vote() { input.push(HotShotEvent::Shutdown); let consensus_state = - add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + create_consensus_state(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; run_harness(input, output, consensus_state, false).await; } @@ -278,8 +280,11 @@ async fn test_consensus_with_vid() { input.push(HotShotEvent::Shutdown); output.insert(HotShotEvent::Shutdown, 1); - let consensus_state = - add_consensus_task(handle.hotshot.inner.output_event_stream.0.clone(), &handle).await; + let consensus_state = hotshot::tasks::create_consensus_state( + handle.hotshot.inner.output_event_stream.0.clone(), + &handle, + ) + .await; run_harness(input, output, consensus_state, false).await; } From e0065a0c03bd4c8e44cb8578094140fea2e3c06b Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 7 Feb 2024 13:32:22 -0500 Subject: [PATCH 23/28] Update tasks based on feedback --- crates/task-impls/src/consensus.rs | 2 +- crates/task-impls/src/da.rs | 2 +- crates/task-impls/src/harness.rs | 2 +- crates/task-impls/src/network.rs | 4 +- crates/task-impls/src/transactions.rs | 2 +- crates/task-impls/src/vid.rs | 2 +- crates/task-impls/src/view_sync.rs | 4 +- crates/task-impls/src/vote.rs | 4 +- crates/task/Cargo.toml | 4 + crates/task/src/dependency.rs | 117 ++++++++++++------ crates/task/src/dependency_task.rs | 35 ++++-- crates/task/src/lib.rs | 7 +- crates/task/src/task.rs | 138 ++++++++++++++++------ crates/testing/src/overall_safety_task.rs | 8 +- crates/testing/src/spinning_task.rs | 8 +- crates/testing/src/view_sync_task.rs | 6 +- 16 files changed, 238 insertions(+), 107 deletions(-) diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs 
index 834839bf9c..2bcdea2a23 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -1311,7 +1311,7 @@ impl, A: ConsensusApi + for ConsensusTaskState { type Event = HotShotEvent; - type Result = (); + type Output = (); fn filter(&self, event: &HotShotEvent) -> bool { !matches!( event, diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 352aa9723c..577afbaca6 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -332,7 +332,7 @@ impl, A: ConsensusApi + { type Event = HotShotEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; fn filter(&self, event: &HotShotEvent) -> bool { !matches!( diff --git a/crates/task-impls/src/harness.rs b/crates/task-impls/src/harness.rs index 7cb9ce7d4d..509a664751 100644 --- a/crates/task-impls/src/harness.rs +++ b/crates/task-impls/src/harness.rs @@ -16,7 +16,7 @@ pub struct TestHarnessState { impl TaskState for TestHarnessState { type Event = HotShotEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; async fn handle_event( event: Self::Event, diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index ec43d037d3..6ad62e32cb 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -76,7 +76,7 @@ pub struct NetworkMessageTaskState { impl TaskState for NetworkMessageTaskState { type Event = Vec>; - type Result = (); + type Output = (); async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> where @@ -201,7 +201,7 @@ impl> TaskState { type Event = HotShotEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; async fn handle_event( event: Self::Event, diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 6e83a43eec..5cdb8d4af8 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -340,7 +340,7 @@ impl, A: 
ConsensusApi + { type Event = HotShotEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; fn filter(&self, event: &HotShotEvent) -> bool { !matches!( diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 714c502e31..27bf7d34dd 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -183,7 +183,7 @@ impl, A: ConsensusApi + { type Event = HotShotEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; async fn handle_event( event: Self::Event, diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 9d6dee8af7..04d47cda08 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -115,7 +115,7 @@ impl< { type Event = HotShotEvent; - type Result = (); + type Output = (); async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> { let sender = task.clone_sender(); @@ -184,7 +184,7 @@ impl, A: ConsensusApi + { type Event = HotShotEvent; - type Result = (); + type Output = (); async fn handle_event(event: Self::Event, task: &mut Task) -> Option<()> { let sender = task.clone_sender(); diff --git a/crates/task-impls/src/vote.rs b/crates/task-impls/src/vote.rs index 107dc6d250..38f26a01b8 100644 --- a/crates/task-impls/src/vote.rs +++ b/crates/task-impls/src/vote.rs @@ -123,9 +123,9 @@ where { type Event = HotShotEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; - async fn handle_event(event: Self::Event, task: &mut Task) -> Option { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { let sender = task.clone_sender(); task.state_mut().handle_event(event, &sender).await } diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 4c6722600f..1207618627 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -1,4 +1,5 @@ [package] +authors = ["Espresso Systems "] name = "hotshot-task" version = "0.1.0" edition = "2021" 
@@ -16,3 +17,6 @@ async-compatibility-layer = { workspace = true } tokio = { version = "1.35.1", features = ["time", "rt-multi-thread", "macros", "sync"] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] async-std = { version = "1.12.0", features = ["attributes"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/crates/task/src/dependency.rs b/crates/task/src/dependency.rs index 014f979fd5..6ae793a7a4 100644 --- a/crates/task/src/dependency.rs +++ b/crates/task/src/dependency.rs @@ -1,50 +1,75 @@ -use async_broadcast::Receiver; +use async_broadcast::{Receiver, RecvError}; use futures::future::BoxFuture; use futures::stream::FuturesUnordered; use futures::stream::StreamExt; use futures::FutureExt; use std::future::Future; +/// Type which describes the idea of waiting for a dependency to complete pub trait Dependency { - fn completed(self) -> impl Future + Send; -} - -trait CombineDependencies: - Sized + Dependency + Send + 'static -{ - fn or + Send + 'static>(self, dep: D) -> OrDependency { + /// Complete will wait until it gets some value `T` then return the value + fn completed(self) -> impl Future> + Send; + /// Create an or dependency from this dependency and another + fn or + Send + 'static>(self, dep: D) -> OrDependency + where + T: Send + Sync + Clone + 'static, + Self: Sized + Send + 'static, + { let mut or = OrDependency::from_deps(vec![self]); or.add_dep(dep); or } - fn and + Send + 'static>(self, dep: D) -> AndDependency { + /// Create an and dependency from this dependency and another + fn and + Send + 'static>(self, dep: D) -> AndDependency + where + T: Send + Sync + Clone + 'static, + Self: Sized + Send + 'static, + { let mut and = AndDependency::from_deps(vec![self]); and.add_dep(dep); and } } +/// Used to combine dependencies to create `AndDependency`s or `OrDependency`s +trait CombineDependencies: + Sized + Dependency + Send + 'static +{ +} + +/// Defines a dependency that completes when all of its deps 
complete pub struct AndDependency { - deps: Vec>, + /// Dependencies being combined + deps: Vec>>, } impl Dependency> for AndDependency { - async fn completed(self) -> Vec { + /// Returns a vector of all of the results from it's dependencies. + /// The results will be in a random order + async fn completed(self) -> Option> { let futures = FuturesUnordered::from_iter(self.deps); - futures.collect().await + futures + .collect::>>() + .await + .into_iter() + .collect() } } impl AndDependency { + /// Create from a vec of deps + #[must_use] pub fn from_deps(deps: Vec + Send + 'static>) -> Self { let mut pinned = vec![]; for dep in deps { - pinned.push(dep.completed().boxed()) + pinned.push(dep.completed().boxed()); } Self { deps: pinned } } + /// Add another dependency pub fn add_dep(&mut self, dep: impl Dependency + Send + 'static) { self.deps.push(dep.completed().boxed()); } + /// Add multiple dependencies pub fn add_deps(&mut self, deps: AndDependency) { for dep in deps.deps { self.deps.push(dep); @@ -52,39 +77,56 @@ impl AndDependency { } } +/// Defines a dependency that complets when one of it's dependencies compeltes pub struct OrDependency { - deps: Vec>, + /// Dependencies being combined + deps: Vec>>, } impl Dependency for OrDependency { - async fn completed(self) -> T { + /// Returns the value of the first completed dependency + async fn completed(self) -> Option { let mut futures = FuturesUnordered::from_iter(self.deps); loop { - if let Some(val) = futures.next().await { - break val; + if let Some(maybe) = futures.next().await { + if maybe.is_some() { + return maybe; + } + } else { + return None; } } } } impl OrDependency { + /// Creat an `OrDependency` from a vec of dependencies + #[must_use] pub fn from_deps(deps: Vec + Send + 'static>) -> Self { let mut pinned = vec![]; for dep in deps { - pinned.push(dep.completed().boxed()) + pinned.push(dep.completed().boxed()); } Self { deps: pinned } } + /// Add another dependecy pub fn add_dep(&mut self, dep: impl 
Dependency + Send + 'static) { self.deps.push(dep.completed().boxed()); } } +/// A dependency that listens on a chanel for an event +/// that matches what some value it wants. pub struct EventDependency { + /// Channel of incomming events pub(crate) event_rx: Receiver, + /// Closure which returns true if the incoming `T` is the + /// thing that completes this dependency pub(crate) match_fn: Box bool + Send>, } impl EventDependency { + /// Create a new `EventDependency` + #[must_use] pub fn new(receiver: Receiver, match_fn: Box bool + Send>) -> Self { Self { event_rx: receiver, @@ -94,26 +136,27 @@ impl EventDependency { } impl Dependency for EventDependency { - async fn completed(mut self) -> T { + async fn completed(mut self) -> Option { loop { - let next = self.event_rx.recv().await.unwrap(); - if (self.match_fn)(&next) { - return next; + match self.event_rx.recv_direct().await { + Ok(event) => { + if (self.match_fn)(&event) { + return Some(event); + } + } + Err(RecvError::Overflowed(n)) => { + tracing::error!("Dependency Task overloaded, skipping {} events", n); + } + Err(RecvError::Closed) => { + return None; + } } } } } -// Impl Combine for all the basic dependency types -impl CombineDependencies for D where - D: Dependency + Send + 'static -{ -} - #[cfg(test)] mod tests { - use crate::dependency::CombineDependencies; - use super::{AndDependency, Dependency, EventDependency, OrDependency}; use async_broadcast::{broadcast, Receiver}; @@ -135,13 +178,13 @@ mod tests { let mut deps = vec![]; for i in 0..5 { tx.broadcast(i).await.unwrap(); - deps.push(eq_dep(rx.clone(), 5)) + deps.push(eq_dep(rx.clone(), 5)); } let and = AndDependency::from_deps(deps); tx.broadcast(5).await.unwrap(); let result = and.completed().await; - assert_eq!(result, vec![5; 5]); + assert_eq!(result, Some(vec![5; 5])); } #[cfg_attr( async_executor_impl = "tokio", @@ -154,11 +197,11 @@ mod tests { tx.broadcast(5).await.unwrap(); let mut deps = vec![]; for _ in 0..5 { - 
deps.push(eq_dep(rx.clone(), 5)) + deps.push(eq_dep(rx.clone(), 5)); } let or = OrDependency::from_deps(deps); let result = or.completed().await; - assert_eq!(result, 5); + assert_eq!(result, Some(5)); } #[cfg_attr( @@ -179,7 +222,7 @@ mod tests { let or2 = OrDependency::from_deps([eq_dep(rx.clone(), 4), eq_dep(rx.clone(), 5)].into()); let and = AndDependency::from_deps([or1, or2].into()); let result = and.completed().await; - assert_eq!(result, vec![6, 5]); + assert_eq!(result, Some(vec![6, 5])); } #[cfg_attr( @@ -200,7 +243,7 @@ mod tests { let and2 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 5)); let or = and1.or(and2); let result = or.completed().await; - assert_eq!(result, vec![4, 5]); + assert_eq!(result, Some(vec![4, 5])); } #[cfg_attr( @@ -222,6 +265,6 @@ mod tests { let and2 = eq_dep(rx.clone(), 4).and(eq_dep(rx.clone(), 5)); and1.add_deps(and2); let result = and1.completed().await; - assert_eq!(result, vec![4, 6, 4, 5]); + assert_eq!(result, Some(vec![4, 6, 4, 5])); } } diff --git a/crates/task/src/dependency_task.rs b/crates/task/src/dependency_task.rs index 86daa095e1..9db6786637 100644 --- a/crates/task/src/dependency_task.rs +++ b/crates/task/src/dependency_task.rs @@ -7,33 +7,41 @@ use futures::Future; use crate::dependency::Dependency; -pub trait HandleDepResult: Send + Sized + Sync + 'static { - type Result: Send + Sync + 'static; +/// Defines a type that can handle the result of a dependency +pub trait HandleDepOutput: Send + Sized + Sync + 'static { + /// Type we expect from completed dependency + type Output: Send + Sync + 'static; /// Called once when the Dependency completes handles the results - fn handle_dep_result(self, res: Self::Result) -> impl Future + Send; + fn handle_dep_result(self, res: Self::Output) -> impl Future + Send; } -pub struct DependencyTask + Send, H: HandleDepResult + Send> { +/// A task that runs until it's dependency completes and it handles the result +pub struct DependencyTask + Send, H: HandleDepOutput + Send> { 
+ /// Dependency this taks waits for pub(crate) dep: D, + /// Handles the results returned from `self.dep.completed().await` pub(crate) handle: H, } -impl + Send, H: HandleDepResult + Send> DependencyTask { +impl + Send, H: HandleDepOutput + Send> DependencyTask { + /// Create a new `DependencyTask` + #[must_use] pub fn new(dep: D, handle: H) -> Self { Self { dep, handle } } } -impl + Send + 'static, H: HandleDepResult> DependencyTask { +impl + Send + 'static, H: HandleDepOutput> DependencyTask { + /// Spawn the dependency task pub fn run(self) -> JoinHandle<()> where Self: Sized, { spawn(async move { - self.handle - .handle_dep_result(self.dep.completed().await) - .await; + if let Some(completed) = self.dep.completed().await { + self.handle.handle_dep_result(completed).await; + } }) } } @@ -63,8 +71,8 @@ mod test { struct DummyHandle { sender: Sender, } - impl HandleDepResult for DummyHandle { - type Result = usize; + impl HandleDepOutput for DummyHandle { + type Output = usize; async fn handle_dep_result(self, res: usize) { self.sender .broadcast(TaskResult::Success(res)) @@ -85,6 +93,8 @@ mod test { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + // allow unused for tokio because it's a test + #[allow(unused_must_use)] async fn it_works() { let (tx, rx) = broadcast(10); let (res_tx, mut res_rx) = broadcast(10); @@ -93,7 +103,8 @@ mod test { let join_handle = DependencyTask { dep, handle }.run(); tx.broadcast(2).await.unwrap(); assert_eq!(res_rx.recv().await.unwrap(), TaskResult::Success(2)); - let _ = join_handle.await; + + join_handle.await; } #[cfg_attr( diff --git a/crates/task/src/lib.rs b/crates/task/src/lib.rs index ab945c258f..cf71eb7090 100644 --- a/crates/task/src/lib.rs +++ b/crates/task/src/lib.rs @@ -1,5 +1,8 @@ -pub mod task; +//! 
Task primatives for `HotShot` +/// Simple Dependecy types pub mod dependency; - +/// Task which can uses dependencies pub mod dependency_task; +/// Basic task types +pub mod task; diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index c5737a30f9..e17061157e 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -24,18 +24,21 @@ use tokio::{ use crate::{ dependency::Dependency, - dependency_task::{DependencyTask, HandleDepResult}, + dependency_task::{DependencyTask, HandleDepOutput}, }; +/// Type for mutable task state that can be used as the state for a `Task` pub trait TaskState: Send { + /// Type of event sent and received by the task type Event: Clone + Send + Sync + 'static; - type Result: Send; + /// The result returned when this task compeltes + type Output: Send; /// Handle event and update state. Return true if the task is finished - /// false otherwise + /// false otherwise. The handler can access the state through `Task::state_mut` fn handle_event( event: Self::Event, task: &mut Task, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: Sized; @@ -45,7 +48,7 @@ pub trait TaskState: Send { false } /// Do something with the result of the task before it shuts down - fn handle_result(&self, _res: &Self::Result) -> impl std::future::Future + Send { + fn handle_result(&self, _res: &Self::Output) -> impl std::future::Future + Send { async {} } /// Return true if the event should shut the task down @@ -56,27 +59,48 @@ pub trait TaskState: Send { } } +/// Task state for a test. Similar to `TaskState` but it handles +/// messages as well as events. Messages are events that are +/// external to this task. (i.e. a test message would be an event from non test task) +/// This is used as state for `TestTask` and messages can come from many +/// different input streams. 
pub trait TestTaskState: Send { + /// Message type handled by the task type Message: Clone + Send + Sync + 'static; - type Result: Send; + /// Result returned by the test task on completion + type Output: Send; + /// The state type type State: TaskState; + /// Handle and incoming message and return `Some` if the task is finished fn handle_message( message: Self::Message, id: usize, task: &mut TestTask, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: Sized; } +/// A basic task which loops waiting for events to come from `event_receiver` +/// and then handles them using it's state +/// It sends events to other `Task`s through `event_sender` +/// This should be used as the primary building block for long running +/// or medium running tasks (i.e. anything that can't be described as a dependency task) pub struct Task { + /// Sends events all tasks including itself event_sender: Sender, + /// Receives events that are broadcast from any task, including itself event_receiver: Receiver, + /// Contains this task, used to register any spawned tasks registry: Arc, + /// The state of the task. It is fed events from `event_sender` + /// and mutates it state ocordingly. Also it signals the task + /// if it is complete/should shutdown state: S, } impl Task { + /// Create a new task pub fn new( tx: Sender, rx: Receiver, @@ -90,40 +114,59 @@ impl Task { state, } } + /// Spawn the task loop, consuming self. 
Will continue until + /// the task reaches some shutdown condition pub fn run(mut self) -> JoinHandle<()> { spawn(async move { loop { - let event = self.event_receiver.recv_direct().await; - if S::should_shutdown(event.as_ref().unwrap()) { - self.state.shutdown().await; - break; - } - if self.state.filter(event.as_ref().unwrap()) { - continue; - } - if let Some(res) = S::handle_event(event.unwrap(), &mut self).await { - self.state.handle_result(&res).await; - self.state.shutdown().await; - break; + match self.event_receiver.recv_direct().await { + Ok(event) => { + if S::should_shutdown(&event) { + self.state.shutdown().await; + break; + } + if self.state.filter(&event) { + continue; + } + if let Some(res) = S::handle_event(event, &mut self).await { + self.state.handle_result(&res).await; + self.state.shutdown().await; + break; + } + } + Err(e) => { + tracing::error!("Failed to receiving from event stream Error: {}", e); + } } } }) } + + /// Create a new event `Receiver` from this Task's receiver. + /// The returned receiver will get all messages not yet seen by this task pub fn subscribe(&self) -> Receiver { self.event_receiver.clone() } + /// Get a new sender handle for events pub fn sender(&self) -> &Sender { &self.event_sender } + /// Clone the sender handle pub fn clone_sender(&self) -> Sender { self.event_sender.clone() } + /// Broadcast a message to all listening tasks + /// # Errors + /// Errors if the broadcast fails pub async fn send(&self, event: S::Event) -> Result, SendError> { self.event_sender.broadcast(event).await } + /// Get a mutable reference to this tasks state pub fn state_mut(&mut self) -> &mut S { &mut self.state } + /// Spawn a new task adn register it. It will get all events not seend + /// by the task creating it. pub async fn run_sub_task(&self, state: S) { let task = Task { event_sender: self.clone_sender(), @@ -137,23 +180,32 @@ impl Task { } } +/// Similar to `Task` but adds functionality for testing. 
Notably +/// it adds message receivers to collect events from many non-test tasks pub struct TestTask { + /// Task which handles test events task: Task, + /// Receivers for outside events message_receivers: Vec>, } impl< S: TaskState + Send + 'static, - T: TestTaskState + Send + Sync + 'static, + T: TestTaskState + Send + Sync + 'static, > TestTask { + /// Create a test task pub fn new(task: Task, rxs: Vec>) -> Self { Self { task, message_receivers: rxs, } } - pub fn run(mut self) -> JoinHandle { + /// Runs the task, taking events from the the test events and the message receivers. + /// Consumes self and runs until some shutdown condition is met. + /// The join handle will return the result of the task, useful for deciding if the test + /// passed or not. + pub fn run(mut self) -> JoinHandle { spawn(async move { loop { let mut futs = vec![]; @@ -173,7 +225,7 @@ impl< } } - for rx in self.message_receivers.iter_mut() { + for rx in &mut self.message_receivers { futs.push(rx.recv()); } if let Ok((Ok(msg), id, _)) = @@ -188,26 +240,37 @@ impl< } }) } + + /// Get a ref to state pub fn state(&self) -> &S { &self.task.state } + /// Get a mutable ref to state pub fn state_mut(&mut self) -> &mut S { self.task.state_mut() } + /// Send an event to other listening test tasks + /// + /// # Panics + /// panics if the event can't be sent (ok to panic in test) pub async fn send_event(&self, event: S::Event) { self.task.send(event).await.unwrap(); } } #[derive(Default)] +/// A collection of tasks which can handle shutdown pub struct TaskRegistry { + /// Tasks this registry controls task_handles: RwLock>>, } impl TaskRegistry { + /// Add a task to the registry pub async fn register(&self, handle: JoinHandle<()>) { self.task_handles.write().await.push(handle); } + /// Try to cancel/abort the task this registry has pub async fn shutdown(&self) { let mut handles = self.task_handles.write().await; while let Some(handle) = handles.pop() { @@ -217,20 +280,25 @@ impl TaskRegistry { 
handle.abort(); } } + /// Take a task, run it, and register it pub async fn run_task(&self, task: Task) where S: TaskState + Send + 'static, { self.register(task.run()).await; } + /// Create a new `DependencyTask` run it, and register it pub async fn spawn_dependency_task( &self, dep: impl Dependency + Send + 'static, - handle: impl HandleDepResult, + handle: impl HandleDepOutput, ) { let join_handle = DependencyTask { dep, handle }.run(); self.register(join_handle).await; } + /// Wait for the results of all the tasks registered + /// # Panics + /// Panics if one of the tasks paniced pub async fn join_all(self) -> Vec<()> { #[cfg(async_executor_impl = "async-std")] let ret = join_all(self.task_handles.into_inner()).await; @@ -256,18 +324,20 @@ mod tests { seen: HashSet, } + #[allow(clippy::panic)] impl TaskState for DummyHandle { type Event = usize; - type Result = (); + type Output = (); async fn handle_event(event: usize, task: &mut Task) -> Option<()> { sleep(Duration::from_millis(10)).await; let state = task.state_mut(); state.seen.insert(event); if event > state.val { state.val = event; - if state.val >= 100 { - panic!("Test should shutdown before getting an event for 100") - } + assert!( + state.val < 100, + "Test should shutdown before getting an event for 100" + ); task.send(event + 1).await.unwrap(); } None @@ -284,7 +354,7 @@ mod tests { impl TestTaskState for DummyHandle { type Message = String; - type Result = (); + type Output = (); type State = Self; async fn handle_message( @@ -292,7 +362,6 @@ mod tests { _id: usize, _task: &mut TestTask, ) -> Option<()> { - println!("got message {}", message); if message == *"done".to_string() { return Some(()); } @@ -304,6 +373,7 @@ mod tests { tokio::test(flavor = "multi_thread", worker_threads = 2) )] #[cfg_attr(async_executor_impl = "async-std", async_std::test)] + #[allow(unused_must_use)] async fn it_works() { let reg = Arc::new(TaskRegistry::default()); let (tx, rx) = broadcast(10); @@ -311,18 +381,18 @@ mod 
tests { event_sender: tx.clone(), event_receiver: rx.clone(), registry: reg.clone(), - state: Default::default(), + state: DummyHandle::default(), }; tx.broadcast(1).await.unwrap(); let task2 = Task:: { event_sender: tx.clone(), event_receiver: rx, registry: reg, - state: Default::default(), + state: DummyHandle::default(), }; let handle = task2.run(); let _res = task1.run().await; - let _ = handle.await; + handle.await; } #[cfg_attr( @@ -340,14 +410,14 @@ mod tests { event_sender: tx.clone(), event_receiver: rx.clone(), registry: reg.clone(), - state: Default::default(), + state: DummyHandle::default(), }; tx.broadcast(1).await.unwrap(); let task2 = Task:: { event_sender: tx.clone(), event_receiver: rx, registry: reg, - state: Default::default(), + state: DummyHandle::default(), }; let test1 = TestTask::<_, DummyHandle> { task: task1, diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 3287c91abd..693a0d46dd 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -77,9 +77,9 @@ impl> TaskState { type Event = GlobalTestEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; - async fn handle_event(event: Self::Event, task: &mut Task) -> Option { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { match event { GlobalTestEvent::ShutDown => { tracing::error!("Shutting down SafetyTask"); @@ -128,7 +128,7 @@ impl> TestTaskState { type Message = Event; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; type State = Self; @@ -136,7 +136,7 @@ impl> TestTaskState message: Self::Message, idx: usize, task: &mut hotshot_task::task::TestTask, - ) -> Option { + ) -> Option { let OverallSafetyPropertiesDescription { check_leaf, check_block, diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index 3c978ab4b1..017e1497a0 100644 --- a/crates/testing/src/spinning_task.rs +++ 
b/crates/testing/src/spinning_task.rs @@ -34,9 +34,9 @@ pub struct SpinningTask> { impl> TaskState for SpinningTask { type Event = GlobalTestEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; - async fn handle_event(event: Self::Event, _task: &mut Task) -> Option { + async fn handle_event(event: Self::Event, _task: &mut Task) -> Option { if matches!(event, GlobalTestEvent::ShutDown) { return Some(HotShotTaskCompleted::ShutDown); } @@ -53,7 +53,7 @@ impl> TestTaskState { type Message = Event; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; type State = Self; @@ -61,7 +61,7 @@ impl> TestTaskState message: Self::Message, _id: usize, task: &mut hotshot_task::task::TestTask, - ) -> Option { + ) -> Option { let Event { view_number, event: _, diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index bb77407f68..139e6b73fd 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -26,9 +26,9 @@ pub struct ViewSyncTask> { impl> TaskState for ViewSyncTask { type Event = GlobalTestEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; - async fn handle_event(event: Self::Event, task: &mut Task) -> Option { + async fn handle_event(event: Self::Event, task: &mut Task) -> Option { let state = task.state_mut(); match event { GlobalTestEvent::ShutDown => match state.description.clone() { @@ -56,7 +56,7 @@ impl> TestTaskState { type Message = HotShotEvent; - type Result = HotShotTaskCompleted; + type Output = HotShotTaskCompleted; type State = Self; From a02f0eca86d980e5a00cfda9839f3681786c7ca2 Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 7 Feb 2024 13:35:06 -0500 Subject: [PATCH 24/28] remove outdated # Panics doc comments --- crates/hotshot/src/tasks/mod.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index 
4ced622456..fb43e67a26 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -103,8 +103,6 @@ pub async fn add_network_message_task>( task_reg: Arc, tx: Sender>, @@ -125,7 +123,7 @@ pub async fn add_network_event_task>( output_stream: Sender>, handle: &SystemContextHandle, @@ -205,8 +203,6 @@ pub async fn create_consensus_state>( task_reg: Arc, tx: Sender>, @@ -220,8 +216,6 @@ pub async fn add_consensus_task>( } /// add the VID task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_vid_task>( task_reg: Arc, tx: Sender>, @@ -249,8 +243,6 @@ pub async fn add_vid_task>( } /// add the Data Availability task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_da_task>( task_reg: Arc, tx: Sender>, @@ -279,8 +271,6 @@ pub async fn add_da_task>( } /// add the Transaction Handling task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_transaction_task>( task_reg: Arc, tx: Sender>, @@ -308,8 +298,6 @@ pub async fn add_transaction_task> task_reg.run_task(task).await; } /// add the view sync task -/// # Panics -/// Is unable to panic. This section here is just to satisfy clippy pub async fn add_view_sync_task>( task_reg: Arc, tx: Sender>, From 93e7913f3495f95f24cac16023ce82b35e17f63d Mon Sep 17 00:00:00 2001 From: Brendon Fish Date: Wed, 7 Feb 2024 13:36:31 -0500 Subject: [PATCH 25/28] missed on # Panics --- crates/hotshot/src/tasks/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index fb43e67a26..fdb8c7ecfc 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -44,8 +44,6 @@ pub enum GlobalEvent { } /// Add the network task to handle messages and publish events. -/// # Panics -/// Is unable to panic. 
This section here is just to satisfy clippy pub async fn add_network_message_task>( task_reg: Arc, event_stream: Sender>, From e8cba7f55322f99fd91ddfdf8df9d8082f71e2df Mon Sep 17 00:00:00 2001 From: = Date: Fri, 9 Feb 2024 05:56:42 -0500 Subject: [PATCH 26/28] Review comments minus filters --- crates/constants/src/lib.rs | 3 +++ crates/hotshot/src/lib.rs | 6 ++--- crates/hotshot/src/tasks/mod.rs | 3 +++ .../traits/networking/web_server_network.rs | 9 +------ crates/orchestrator/src/lib.rs | 25 +----------------- crates/task-impls/src/consensus.rs | 1 - crates/task-impls/src/transactions.rs | 5 +--- crates/task/src/task.rs | 26 ++++++++++++------- crates/testing/src/test_runner.rs | 3 ++- 9 files changed, 31 insertions(+), 50 deletions(-) diff --git a/crates/constants/src/lib.rs b/crates/constants/src/lib.rs index 3a44c0902a..621df3cdfe 100644 --- a/crates/constants/src/lib.rs +++ b/crates/constants/src/lib.rs @@ -28,3 +28,6 @@ pub struct Version { /// Constant for protocol version 0.1. pub const VERSION_0_1: Version = Version { major: 0, minor: 1 }; + +/// Default Channel Size for consensus event sharing +pub const EVENT_CHANNEL_SIZE: usize = 100_000; diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index 8b2008edab..d06d1d162c 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -27,7 +27,7 @@ use async_trait::async_trait; use commit::Committable; use custom_debug::Debug; use futures::join; -use hotshot_constants::VERSION_0_1; +use hotshot_constants::{EVENT_CHANNEL_SIZE, VERSION_0_1}; use hotshot_task_impls::events::HotShotEvent; use hotshot_task_impls::helpers::broadcast_event; use hotshot_task_impls::network; @@ -239,8 +239,8 @@ impl> SystemContext { }; let consensus = Arc::new(RwLock::new(consensus)); - let (internal_tx, internal_rx) = broadcast(100_000); - let (mut external_tx, external_rx) = broadcast(100_000); + let (internal_tx, internal_rx) = broadcast(EVENT_CHANNEL_SIZE); + let (mut external_tx, external_rx) =
broadcast(EVENT_CHANNEL_SIZE); // This makes it so we won't block on broadcasting if there is not a receiver // Our own copy of the receiver is inactive so it doesn't count. diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index fdb8c7ecfc..2952898ca1 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -56,6 +56,7 @@ pub async fn add_network_message_task Inner { } return false; } - MessagePurpose::Vote => { + MessagePurpose::Vote | MessagePurpose::ViewSyncVote => { let vote = deserialized_message.clone(); *vote_index += 1; direct_poll_queue.write().await.push(vote); @@ -351,13 +351,6 @@ impl Inner { // Only pushing the first proposal since we will soon only be allowing 1 proposal per view return true; } - MessagePurpose::ViewSyncVote => { - let vote = deserialized_message.clone(); - *vote_index += 1; - direct_poll_queue.write().await.push(vote); - - return false; - } MessagePurpose::ViewSyncCertificate => { // TODO ED Special case this for view sync // TODO ED Need to add vote indexing to web server for view sync certs diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs index 0453fecbd5..5da1d96b48 100644 --- a/crates/orchestrator/src/lib.rs +++ b/crates/orchestrator/src/lib.rs @@ -14,7 +14,7 @@ use std::{ }; use tide_disco::{Api, App}; -use surf_disco::{error::ClientError, Url}; +use surf_disco::Url; use tide_disco::{ api::ApiError, error::ServerError, @@ -55,8 +55,6 @@ struct OrchestratorState { start: bool, /// The total nodes that have posted they are ready to start pub nodes_connected: u64, - /// connection to the web server - client: Option>, } impl @@ -64,17 +62,11 @@ impl { /// create a new [`OrchestratorState`] pub fn new(network_config: NetworkConfig) -> Self { - let mut web_client = None; - if network_config.web_server_config.is_some() { - let base_url = "http://0.0.0.0/9000".to_string().parse().unwrap(); - web_client = Some(surf_disco::Client::::new(base_url)); - } 
OrchestratorState { latest_index: 0, config: network_config, start: false, nodes_connected: 0, - client: web_client, } } } @@ -123,21 +115,6 @@ where }); } - //add new node's key to stake table - if self.config.web_server_config.clone().is_some() { - let new_key = &self.config.config.my_own_validator_config.public_key; - let client_clone = self.client.clone().unwrap(); - async move { - client_clone - .post::<()>("api/staketable") - .body_binary(&new_key) - .unwrap() - .send() - .await - } - .boxed(); - } - if self.config.libp2p_config.clone().is_some() { let libp2p_config_clone = self.config.libp2p_config.clone().unwrap(); // Designate node as bootstrap node and store its identity information diff --git a/crates/task-impls/src/consensus.rs b/crates/task-impls/src/consensus.rs index 2bcdea2a23..ffe19d0a37 100644 --- a/crates/task-impls/src/consensus.rs +++ b/crates/task-impls/src/consensus.rs @@ -1331,7 +1331,6 @@ impl, A: ConsensusApi + where Self: Sized, { - // TODO: Don't clone the sender let sender = task.clone_sender(); tracing::trace!("sender queue len {}", sender.len()); task.state_mut().handle(event, sender).await; diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 5cdb8d4af8..d29142a5a2 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -266,10 +266,7 @@ impl, A: ConsensusApi + } #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), name = "Transaction Handling Task", level = "error")] - async fn wait_for_transactions( - &self, - _parent_leaf: Leaf, - ) -> Option> { + async fn wait_for_transactions(&self, _: Leaf) -> Option> { let task_start_time = Instant::now(); // TODO (Keyao) Investigate the use of transaction hash diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index e17061157e..e87f3465d5 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -21,6 +21,7 @@ use tokio::{ sync::RwLock, task::{spawn, JoinHandle}, }; 
+use tracing::error; use crate::{ dependency::Dependency, @@ -228,13 +229,20 @@ impl< for rx in &mut self.message_receivers { futs.push(rx.recv()); } - if let Ok((Ok(msg), id, _)) = - async_timeout(Duration::from_secs(1), select_all(futs)).await - { - if let Some(res) = T::handle_message(msg, id, &mut self).await { - self.task.state.handle_result(&res).await; - self.task.state.shutdown().await; - return res; + // if let Ok((Ok(msg), id, _)) = + match async_timeout(Duration::from_secs(1), select_all(futs)).await { + Ok((Ok(msg), id, _)) => { + if let Some(res) = T::handle_message(msg, id, &mut self).await { + self.task.state.handle_result(&res).await; + self.task.state.shutdown().await; + return res; + } + } + Err(e) => { + error!("Failed to get event from task. Error: {:?}", e); + } + Ok((Err(e), _, _)) => { + error!("A task channel returned an Error: {:?}", e); } } } @@ -359,8 +367,8 @@ mod tests { async fn handle_message( message: Self::Message, - _id: usize, - _task: &mut TestTask, + _: usize, + _: &mut TestTask, ) -> Option<()> { if message == *"done".to_string() { return Some(()); diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 38eff9b1d2..e5a592c72f 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -18,6 +18,7 @@ use hotshot::{types::SystemContextHandle, Memberships}; use hotshot::{traits::TestableNodeImplementation, HotShotInitializer, SystemContext}; +use hotshot_constants::EVENT_CHANNEL_SIZE; use hotshot_task::task::{Task, TaskRegistry, TestTask}; use hotshot_types::traits::network::CommunicationChannel; use hotshot_types::{ @@ -100,7 +101,7 @@ where /// if the test fails #[allow(clippy::too_many_lines)] pub async fn run_test(mut self) { - let (tx, rx) = broadcast(100_000); + let (tx, rx) = broadcast(EVENT_CHANNEL_SIZE); let spinning_changes = self .launcher .metadata From f200a95c7a4772d316bf8537b802c7173d501199 Mon Sep 17 00:00:00 2001 From: = Date: Fri, 9 Feb 2024 06:06:31 
-0500 Subject: [PATCH 27/28] Lower a log level, fix task/cargo.toml --- crates/task-impls/src/helpers.rs | 2 +- crates/task/Cargo.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index 7e7b63c830..93376f7086 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -23,7 +23,7 @@ pub async fn broadcast_event(event: E, sender: &Send ); } Err(SendError(e)) => { - tracing::error!( + tracing::warn!( "Event: {:?}\n Sending failed, event stream probably shutdown", e ); diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 1207618627..39c531a637 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -14,9 +14,9 @@ tracing = { workspace = true } async-compatibility-layer = { workspace = true } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] -tokio = { version = "1.35.1", features = ["time", "rt-multi-thread", "macros", "sync"] } +tokio = { workspace= true, features = ["time", "rt-multi-thread", "macros", "sync"] } [target.'cfg(all(async_executor_impl = "async-std"))'.dependencies] -async-std = { version = "1.12.0", features = ["attributes"] } +async-std = { workspace= true, features = ["attributes"] } [lints] workspace = true \ No newline at end of file From 2fefff54445b92ff6ed081051efece096ef56fb8 Mon Sep 17 00:00:00 2001 From: = Date: Fri, 9 Feb 2024 10:32:29 -0500 Subject: [PATCH 28/28] Remove todo --- crates/task-impls/src/vid.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 27bf7d34dd..d07aeb2c10 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -189,7 +189,6 @@ impl, A: ConsensusApi + event: Self::Event, task: &mut Task, ) -> Option { - // TODO: Don't clone the sender let sender = task.clone_sender(); task.state_mut().handle(event, sender).await; None