From 299b8b1005822e76492671977d4a3bed73844974 Mon Sep 17 00:00:00 2001
From: Brendon Fish
Date: Tue, 17 Sep 2024 16:37:36 -0400
Subject: [PATCH 1/7] create restart context, and allow restarting after a
 number of views

---
 crates/testing/src/spinning_task.rs     | 58 ++++++++++++++++++---
 crates/testing/src/test_runner.rs       |  5 +-
 crates/testing/tests/tests_2/catchup.rs | 67 +++++++++++++++++++++++--
 3 files changed, 117 insertions(+), 13 deletions(-)

diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs
index ec9bda7660..2911d923b9 100644
--- a/crates/testing/src/spinning_task.rs
+++ b/crates/testing/src/spinning_task.rs
@@ -13,7 +13,9 @@ use anyhow::Result;
 use async_lock::RwLock;
 use async_trait::async_trait;
 use futures::future::join_all;
-use hotshot::{traits::TestableNodeImplementation, types::EventType, HotShotInitializer};
+use hotshot::{
+    traits::TestableNodeImplementation, types::EventType, HotShotInitializer, SystemContext,
+};
 use hotshot_example_types::{
     auction_results_provider_types::TestAuctionResultsProvider,
     state_types::{TestInstanceState, TestValidatedState},
@@ -46,7 +48,12 @@ pub type StateAndBlock<S, B> = (Vec<S>, Vec<B>);
 pub struct SpinningTaskErr {}
 
 /// Spinning task state
-pub struct SpinningTask<TYPES: NodeType, I: TestableNodeImplementation<TYPES>, V: Versions> {
+pub struct SpinningTask<
+    TYPES: NodeType,
+    N: ConnectedNetwork<TYPES::SignatureKey>,
+    I: TestableNodeImplementation<TYPES>,
+    V: Versions,
+> {
     /// handle to the nodes
     pub(crate) handles: Arc<RwLock<Vec<Node<TYPES, I, V>>>>,
     /// late start nodes
@@ -61,6 +68,8 @@ pub struct SpinningTask<TYPES: NodeType, I: TestableNodeImplementation<TYPES>, V
     pub(crate) high_qc: QuorumCertificate<TYPES>,
     /// Add specified delay to async calls
     pub(crate) async_delay_config: DelayConfig,
+    /// Context stored for nodes to be restarted with
+    pub(crate) restart_contexts: HashMap<usize, RestartContext<TYPES, N, I, V>>,
 }
 
 #[async_trait]
@@ -68,8 +77,8 @@ impl<
         TYPES: NodeType,
         I: TestableNodeImplementation<TYPES>,
         N: ConnectedNetwork<TYPES::SignatureKey>,
         V: Versions,
-    > TestTaskState for SpinningTask<TYPES, I, V>
+    > TestTaskState for SpinningTask<TYPES, N, I, V>
 where
     I: TestableNodeImplementation<TYPES>,
     I: NodeImplementation<
@@ -188,7 +197,7 @@ where
                             node.handle.shut_down().await;
                         }
                     }
-                    UpDown::Restart => {
+                    UpDown::RestartDown(delay_views) => {
                        let node_id = idx.try_into().unwrap();
                        if let Some(node) = self.handles.write().await.get_mut(idx) {
                            tracing::error!("Node {} shutting down", idx);
@@ -253,8 +262,28 @@ where
                            ),
                        )
                        .await;
-                    new_nodes.push((context, idx));
-                    new_networks.push(network.clone());
+                    if delay_views == 0 {
+                        new_nodes.push((context, idx));
+                        new_networks.push(network.clone());
+                    } else {
+                        let up_view = view_number + delay_views;
+                        let change = ChangeNode {
+                            idx,
+                            updown: UpDown::RestartUp,
+                        };
+                        self.changes.entry(up_view).or_default().push(change);
+                        let new_ctx = RestartContext {
+                            context,
+                            network: network.clone(),
+                        };
+                        self.restart_contexts.insert(idx, new_ctx);
+                    }
+                }
+            }
+            UpDown::RestartUp => {
+                if let Some(ctx) = self.restart_contexts.remove(&idx) {
+                    new_nodes.push((ctx.context, idx));
+                    new_networks.push(ctx.network.clone());
                 }
             }
             UpDown::NetworkUp => {
@@ -307,6 +336,17 @@ where
     }
 }
 
+#[derive(Clone)]
+pub(crate) struct RestartContext<
+    TYPES: NodeType,
+    N: ConnectedNetwork<TYPES::SignatureKey>,
+    I: TestableNodeImplementation<TYPES>,
+    V: Versions,
+> {
+    context: Arc<SystemContext<TYPES, I, V>>,
+    network: Arc<N>,
+}
+
 /// Spin the node up or down
 #[derive(Clone, Debug)]
 pub enum UpDown {
@@ -318,8 +358,10 @@ pub enum UpDown {
     NetworkUp,
     /// spin the node's network down
     NetworkDown,
-    /// restart the node
-    Restart,
+    /// Take a node down to be restarted after a number of views
+    RestartDown(u64),
+    /// Start a node up again after it's been shutdown for restart.  This
+    RestartUp,
 }
 
 /// denotes a change in node state

diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs
index 0f6faffae3..68e3b816f0 100644
--- a/crates/testing/src/test_runner.rs
+++ b/crates/testing/src/test_runner.rs
@@ -103,7 +103,7 @@ where
                 if matches!(change.updown, UpDown::Up) {
                     late_start_nodes.insert(change.idx.try_into().unwrap());
                 }
-                if matches!(change.updown, UpDown::Restart) {
+                if matches!(change.updown, UpDown::RestartDown(_)) {
                     restart_nodes.insert(change.idx.try_into().unwrap());
                 }
             }
@@ -190,8 +190,9 @@ where
             )
             .await,
             async_delay_config: self.launcher.metadata.async_delay_config,
+            restart_contexts: HashMap::new(),
         };
-        let spinning_task = TestTask::<SpinningTask<TYPES, I, V>>::new(
+        let spinning_task = TestTask::<SpinningTask<TYPES, N, I, V>>::new(
            spinning_task_state,
            event_rxs.clone(),
            test_receiver.clone(),

diff --git a/crates/testing/tests/tests_2/catchup.rs b/crates/testing/tests/tests_2/catchup.rs
index 8253ae8084..7eb7c0a658 100644
--- a/crates/testing/tests/tests_2/catchup.rs
+++ b/crates/testing/tests/tests_2/catchup.rs
@@ -328,7 +328,7 @@ async fn test_all_restart() {
     for i in 0..20 {
         catchup_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::Restart,
+            updown: UpDown::RestartDown(0),
         })
     }
 
@@ -390,7 +390,7 @@ async fn test_all_restart_cdn() {
     for i in 0..20 {
         catchup_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::Restart,
+            updown: UpDown::RestartDown(0),
         })
     }
 
@@ -457,7 +457,7 @@ async fn test_all_restart_one_da() {
     for i in 0..20 {
         catchup_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::Restart,
+            updown: UpDown::RestartDown(0),
         })
     }
 
@@ -494,3 +494,64 @@ async fn test_all_restart_one_da() {
         .run_test::<SimpleBuilderImplementation>()
         .await;
 }
+
+#[cfg(test)]
+#[cfg_attr(async_executor_impl = "tokio", tokio::test(flavor = "multi_thread"))]
+#[cfg_attr(async_executor_impl = "async-std", async_std::test)]
+async fn test_staggered_restart() {
+    use std::time::Duration;
+
+    use hotshot_example_types::node_types::{CombinedImpl, TestTypes, TestVersions};
+    use hotshot_testing::{
+        block_builder::SimpleBuilderImplementation,
+        completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
+        overall_safety_task::OverallSafetyPropertiesDescription,
+        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        test_builder::TestDescription,
+    };
+
+    async_compatibility_layer::logging::setup_logging();
+    async_compatibility_layer::logging::setup_backtrace();
+    let mut metadata: TestDescription<TestTypes, CombinedImpl, TestVersions> =
+        TestDescription::default();
+
+    let mut catchup_nodes = vec![];
+    for i in 0..20 {
+        catchup_nodes.push(ChangeNode {
+            idx: i,
+            updown: UpDown::RestartDown(0),
+        })
+    }
+
+    metadata.start_nodes = 10;
+    metadata.num_nodes_with_stake = 10;
+
+    // Explicitly make the DA small to simulate real network.
+    metadata.da_staked_committee_size = 4;
+
+    metadata.spinning_properties = SpinningTaskDescription {
+        // Restart all the nodes in view 13
+        node_changes: vec![(13, catchup_nodes)],
+    };
+    metadata.view_sync_properties =
+        hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20);
+
+    metadata.completion_task_description =
+        CompletionTaskDescription::TimeBasedCompletionTaskBuilder(
+            TimeBasedCompletionTaskDescription {
+                duration: Duration::from_secs(60),
+            },
+        );
+    metadata.overall_safety_properties = OverallSafetyPropertiesDescription {
+        // Make sure we keep committing rounds after the catchup, but not the full 50.
+        num_successful_views: 22,
+        num_failed_views: 15,
+        ..Default::default()
+    };
+
+    metadata
+        .gen_launcher(0)
+        .launch()
+        .run_test::<SimpleBuilderImplementation>()
+        .await;
+}
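A note on the mechanism this patch introduces: `RestartDown(n)` tears a node down, rebuilds its `SystemContext` from storage, and either restarts it immediately (`n == 0`) or parks the rebuilt context in `restart_contexts` and schedules a `RestartUp` change `n` views later. A minimal sketch of how a test description drives this, using only the types added above (node indices and view numbers are illustrative, not taken from the patch):

    use hotshot_testing::spinning_task::{ChangeNode, SpinningTaskDescription, UpDown};

    fn staggered_restarts() -> SpinningTaskDescription {
        SpinningTaskDescription {
            // At view 13, node 0 restarts immediately, while node 1 is held
            // down and spun back up automatically at view 13 + 20 = 33.
            node_changes: vec![(
                13,
                vec![
                    ChangeNode { idx: 0, updown: UpDown::RestartDown(0) },
                    ChangeNode { idx: 1, updown: UpDown::RestartDown(20) },
                ],
            )],
        }
    }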
From 8991b9cf0fd839b0111c83c8bceafaff5c1bc688 Mon Sep 17 00:00:00 2001
From: Brendon Fish
Date: Wed, 18 Sep 2024 15:24:26 -0400
Subject: [PATCH 2/7] repros the issue

---
 crates/hotshot/src/tasks/task_state.rs            |  2 ++
 crates/hotshot/src/types/handle.rs                |  1 +
 crates/task-impls/src/consensus/handlers.rs       |  5 +--
 crates/task-impls/src/consensus/view_change.rs    |  2 +-
 crates/task-impls/src/consensus2/handlers.rs      |  2 +-
 crates/task-impls/src/network.rs                  |  6 ++--
 crates/task-impls/src/quorum_proposal/handlers.rs |  2 +-
 crates/task-impls/src/view_sync.rs                | 10 +++---
 crates/testing/src/spinning_task.rs               | 13 ++++----
 crates/testing/tests/tests_2/catchup.rs           | 32 +++++++++++++------
 10 files changed, 48 insertions(+), 27 deletions(-)

diff --git a/crates/hotshot/src/tasks/task_state.rs b/crates/hotshot/src/tasks/task_state.rs
index ab7f68fdc4..e03c58dd57 100644
--- a/crates/hotshot/src/tasks/task_state.rs
+++ b/crates/hotshot/src/tasks/task_state.rs
@@ -25,6 +25,7 @@ use hotshot_types::{
         node_implementation::{ConsensusTime, NodeImplementation, NodeType},
     },
 };
+use tracing::error;
 
 use crate::{types::SystemContextHandle, Versions};
 
@@ -221,6 +222,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> CreateTaskState
     async fn create_from(handle: &SystemContextHandle<TYPES, I, V>) -> Self {
         let consensus = handle.hotshot.consensus();
         let timeout_task = handle.spawn_initial_timeout_task();
+        error!("Starting consensus with curr view {:?}", handle.cur_view().await);
 
         Self {
             consensus: OuterConsensus::new(consensus),

diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs
index 0b285b593f..6c974659bf 100644
--- a/crates/hotshot/src/types/handle.rs
+++ b/crates/hotshot/src/types/handle.rs
@@ -245,6 +245,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions>
         let next_view_timeout = self.hotshot.config.next_view_timeout;
         let start_view = self.hotshot.start_view;
 
+        tracing::error!("spawning timeout task with start view {:?}", start_view);
         // Spawn a task that will sleep for the next view timeout and then send a timeout event
         // if not cancelled
         async_spawn({

diff --git a/crates/task-impls/src/consensus/handlers.rs b/crates/task-impls/src/consensus/handlers.rs
index 36f6831e5a..d4f2ee5d59 100644
--- a/crates/task-impls/src/consensus/handlers.rs
+++ b/crates/task-impls/src/consensus/handlers.rs
@@ -139,9 +139,10 @@ pub async fn create_and_send_proposal<TYPES: NodeType, V: Versions>(
         _pd: PhantomData,
     };
 
-    debug!(
-        "Sending proposal for view {:?}",
+    error!(
+        "Sending proposal for view {:?} ID: {}",
         proposed_leaf.view_number(),
+        id,
     );
 
     async_sleep(Duration::from_millis(round_start_delay)).await;

diff --git a/crates/task-impls/src/consensus/view_change.rs b/crates/task-impls/src/consensus/view_change.rs
index 82928afcfc..846630ed72 100644
--- a/crates/task-impls/src/consensus/view_change.rs
+++ b/crates/task-impls/src/consensus/view_change.rs
@@ -60,7 +60,7 @@ pub(crate) async fn update_view<TYPES: NodeType>(
 
     let old_view = *cur_view;
 
-    debug!("Updating view from {} to {}", *old_view, *new_view);
+    error!("Updating view from {} to {}", *old_view, *new_view);
 
     if *old_view / 100 != *new_view / 100 {
         // TODO (https://github.com/EspressoSystems/HotShot/issues/2296):

diff --git a/crates/task-impls/src/consensus2/handlers.rs b/crates/task-impls/src/consensus2/handlers.rs
index c766334471..ec26bd7bdb 100644
--- a/crates/task-impls/src/consensus2/handlers.rs
+++ b/crates/task-impls/src/consensus2/handlers.rs
@@ -239,7 +239,7 @@ pub(crate) async fn handle_timeout<TYPES: NodeType, I: NodeImplementation<TYPES>
     )
     .await;
 
-    debug!(
+    error!(
         "We did not receive evidence for view {} in time, sending timeout vote for that view!",
         *view_number
     );

diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs
index 2042a9fc04..ac2ce43f6b 100644
--- a/crates/task-impls/src/network.rs
+++ b/crates/task-impls/src/network.rs
@@ -488,13 +488,15 @@ impl<
                 )),
                 TransmitType::Broadcast,
             )),
-            HotShotEvent::TimeoutVoteSend(vote) => Some((
+            HotShotEvent::TimeoutVoteSend(vote) => {
+                error!("sending timeout vote");
+                Some((
                 vote.signing_key(),
                 MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                     GeneralConsensusMessage::TimeoutVote(vote.clone()),
                 )),
                 TransmitType::Direct(membership.leader(vote.view_number() + 1)),
-            )),
+            ))},
             HotShotEvent::UpgradeProposalSend(proposal, sender) => Some((
                 sender,
                 MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(

diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs
index 867b6298a9..db75d66437 100644
--- a/crates/task-impls/src/quorum_proposal/handlers.rs
+++ b/crates/task-impls/src/quorum_proposal/handlers.rs
@@ -225,7 +225,7 @@ impl<TYPES: NodeType, V: Versions> ProposalDependencyHandle<TYPES, V> {
             signature,
             _pd: PhantomData,
         };
-        debug!(
+        error!(
             "Sending proposal for view {:?}",
             proposed_leaf.view_number(),
         );

diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs
index 134766283b..592f201a97 100644
--- a/crates/task-impls/src/view_sync.rs
+++ b/crates/task-impls/src/view_sync.rs
@@ -560,7 +560,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let timeout = self.view_sync_timeout;
                 async move {
                     async_sleep(timeout).await;
-                    info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay);
+                    error!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay);
 
                     broadcast_event(
                         Arc::new(HotShotEvent::ViewSyncTimeout(
@@ -630,7 +630,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 .await;
             }
 
-            info!(
+            error!(
                 "View sync protocol has received view sync evidence to update the view to {}",
                 *self.next_view
             );
@@ -652,7 +652,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let timeout = self.view_sync_timeout;
                 async move {
                     async_sleep(timeout).await;
-                    info!(
+                    error!(
                         "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}",
                         relay
                     );
@@ -748,7 +748,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let timeout = self.view_sync_timeout;
                 async move {
                     async_sleep(timeout).await;
-                    info!("Vote sending timed out in ViewSyncTrigger");
+                    error!("Vote sending timed out in ViewSyncTrigger");
                     broadcast_event(
                         Arc::new(HotShotEvent::ViewSyncTimeout(
                             TYPES::Time::new(*next_view),
@@ -814,7 +814,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let last_cert = last_seen_certificate.clone();
                 async move {
                     async_sleep(timeout).await;
-                    info!(
+                    error!(
                         "Vote sending timed out in ViewSyncTimeout relay = {}",
                         relay
                     );

diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs
index 2911d923b9..bbcedbf1cc 100644
--- a/crates/testing/src/spinning_task.rs
+++ b/crates/testing/src/spinning_task.rs
@@ -10,6 +10,7 @@ use std::{
 };
 
 use anyhow::Result;
+use async_broadcast::broadcast;
 use async_lock::RwLock;
 use async_trait::async_trait;
 use futures::future::join_all;
@@ -23,6 +24,7 @@ use hotshot_example_types::{
     testable_delay::DelayConfig,
 };
 use hotshot_types::{
+    constants::EVENT_CHANNEL_SIZE,
     data::Leaf,
     event::Event,
     simple_certificate::QuorumCertificate,
@@ -221,7 +223,7 @@ where
                         self.last_decided_leaf.clone(),
                         TestInstanceState::new(self.async_delay_config.clone()),
                         None,
-                        view_number,
+                        read_storage.last_actioned_view().await,
                         read_storage.last_actioned_view().await,
                         read_storage.proposals_cloned().await,
                         read_storage.high_qc_cloned().await.unwrap_or(
@@ -242,6 +244,7 @@ where
                         // For tests, make the node DA based on its index
                         node_id < config.da_staked_committee_size as u64,
                     );
+                    let internal_chan = broadcast(EVENT_CHANNEL_SIZE);
                     let context =
                         TestRunner::<TYPES, I, V, N>::add_node_with_config_and_channels(
                             node_id,
@@ -252,13 +255,10 @@ where
                             validator_config,
                             (*read_storage).clone(),
                             marketplace_config.clone(),
-                            (
-                                node.handle.internal_channel_sender(),
-                                node.handle.internal_event_stream_receiver_known_impl(),
-                            ),
+                            internal_chan,
                             (
                                 node.handle.external_channel_sender(),
-                                node.handle.event_stream_known_impl(),
+                                node.handle.event_stream_known_impl().new_receiver(),
                             ),
                         )
                         .await;
@@ -310,6 +310,7 @@ where
         join_all(ready_futs).await;
 
         while let Some((node, id)) = new_nodes.pop() {
+            tracing::error!("starting node {} back up", id);
             let handle = node.run_tasks().await;
 
             // Create the node and add it to the state, so we can shut them

diff --git a/crates/testing/tests/tests_2/catchup.rs b/crates/testing/tests/tests_2/catchup.rs
index 7eb7c0a658..6b559dc382 100644
--- a/crates/testing/tests/tests_2/catchup.rs
+++ b/crates/testing/tests/tests_2/catchup.rs
@@ -515,13 +515,26 @@ async fn test_staggered_restart() {
     let mut metadata: TestDescription<TestTypes, CombinedImpl, TestVersions> =
         TestDescription::default();
 
-    let mut catchup_nodes = vec![];
-    for i in 0..20 {
-        catchup_nodes.push(ChangeNode {
+    let mut down_da_nodes = vec![];
+    for i in 1..4 {
+        down_da_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::RestartDown(0),
-        })
+            updown: UpDown::RestartDown(20),
+        });
+    }
+
+    let mut down_regular_nodes = vec![];
+    for i in 4..10 {
+        down_regular_nodes.push(ChangeNode {
+            idx: i,
+            updown: UpDown::RestartDown(0)
+        });
     }
+    // restart the last da so it gets the new libp2p routing table
+    down_regular_nodes.push(ChangeNode{
+        idx: 0,
+        updown: UpDown::RestartDown(0)
+    });
 
     metadata.start_nodes = 10;
     metadata.num_nodes_with_stake = 10;
@@ -531,21 +544,22 @@ async fn test_staggered_restart() {
     metadata.da_staked_committee_size = 4;
 
     metadata.spinning_properties = SpinningTaskDescription {
         // Restart all the nodes in view 13
-        node_changes: vec![(13, catchup_nodes)],
+        node_changes: vec![(13, down_da_nodes), (33, down_regular_nodes)],
     };
     metadata.view_sync_properties =
-        hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20);
+        hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 50);
 
+    // Give the test some extra time because we are purposely timing out views
     metadata.completion_task_description =
         CompletionTaskDescription::TimeBasedCompletionTaskBuilder(
             TimeBasedCompletionTaskDescription {
-                duration: Duration::from_secs(60),
+                duration: Duration::from_secs(240),
            },
        );
     metadata.overall_safety_properties = OverallSafetyPropertiesDescription {
         // Make sure we keep committing rounds after the catchup, but not the full 50.
         num_successful_views: 22,
-        num_failed_views: 15,
+        num_failed_views: 30,
         ..Default::default()
     };

From 1313952b289efa9f62b2695dbc2a431b944dbe18 Mon Sep 17 00:00:00 2001
From: Brendon Fish
Date: Wed, 18 Sep 2024 15:57:54 -0400
Subject: [PATCH 3/7] Fix the issue by saving the timeout vote as an action

---
 crates/task-impls/src/network.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs
index ac2ce43f6b..916efcfdec 100644
--- a/crates/task-impls/src/network.rs
+++ b/crates/task-impls/src/network.rs
@@ -489,7 +489,7 @@ impl<
                 TransmitType::Broadcast,
             )),
             HotShotEvent::TimeoutVoteSend(vote) => {
-                error!("sending timeout vote");
+                *maybe_action = Some(HotShotAction::Vote);
                 Some((
                 vote.signing_key(),
                 MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
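Why this one-line change fixes the restart failure reproduced in patch 2: the restart path initializes a node from `read_storage.last_actioned_view().await`, and storage only advances that view when an outgoing message is recorded as a `HotShotAction`. Timeout votes were not recorded, so a node that had been timing out could come back in a stale view. A self-contained sketch of the invariant (a simplified model, not HotShot's actual types or API):

    #[derive(Clone, Copy, PartialEq, PartialOrd)]
    struct View(u64);

    enum Outgoing {
        QuorumVote(View),
        TimeoutVote(View),
    }

    struct Storage {
        last_actioned_view: View,
    }

    impl Storage {
        // Persist the highest view this node has acted in; a restarted
        // node resumes from this view, not just its last decided view.
        fn record_action(&mut self, view: View) {
            if view > self.last_actioned_view {
                self.last_actioned_view = view;
            }
        }
    }

    fn before_send(storage: &mut Storage, msg: &Outgoing) {
        match msg {
            // Quorum votes were already saved as actions...
            Outgoing::QuorumVote(v) => storage.record_action(*v),
            // ...and patch 3 makes timeout votes count as actions too, so
            // timing out of a view is never forgotten across a restart.
            Outgoing::TimeoutVote(v) => storage.record_action(*v),
        }
    }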
From 8848bb733a77b7698770cabac9b5d4b6d07d9b87 Mon Sep 17 00:00:00 2001
From: Brendon Fish
Date: Wed, 18 Sep 2024 16:02:16 -0400
Subject: [PATCH 4/7] cleanup/lint

---
 crates/hotshot/src/tasks/task_state.rs            |  2 --
 crates/hotshot/src/types/handle.rs                |  1 -
 crates/task-impls/src/consensus/handlers.rs       |  2 +-
 crates/task-impls/src/network.rs                  | 13 +++++++------
 crates/task-impls/src/quorum_proposal/handlers.rs |  2 +-
 crates/task-impls/src/view_sync.rs                |  8 ++++----
 crates/testing/src/spinning_task.rs               |  2 +-
 crates/testing/tests/tests_2/catchup.rs           |  6 +++---
 8 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/crates/hotshot/src/tasks/task_state.rs b/crates/hotshot/src/tasks/task_state.rs
index e03c58dd57..ab7f68fdc4 100644
--- a/crates/hotshot/src/tasks/task_state.rs
+++ b/crates/hotshot/src/tasks/task_state.rs
@@ -25,7 +25,6 @@ use hotshot_types::{
         node_implementation::{ConsensusTime, NodeImplementation, NodeType},
     },
 };
-use tracing::error;
 
 use crate::{types::SystemContextHandle, Versions};
 
@@ -222,7 +221,6 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> CreateTaskState
     async fn create_from(handle: &SystemContextHandle<TYPES, I, V>) -> Self {
         let consensus = handle.hotshot.consensus();
         let timeout_task = handle.spawn_initial_timeout_task();
-        error!("Starting consensus with curr view {:?}", handle.cur_view().await);
 
         Self {
             consensus: OuterConsensus::new(consensus),

diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs
index 6c974659bf..0b285b593f 100644
--- a/crates/hotshot/src/types/handle.rs
+++ b/crates/hotshot/src/types/handle.rs
@@ -245,7 +245,6 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions>
         let next_view_timeout = self.hotshot.config.next_view_timeout;
         let start_view = self.hotshot.start_view;
 
-        tracing::error!("spawning timeout task with start view {:?}", start_view);
         // Spawn a task that will sleep for the next view timeout and then send a timeout event
         // if not cancelled
         async_spawn({

diff --git a/crates/task-impls/src/consensus/handlers.rs b/crates/task-impls/src/consensus/handlers.rs
index d4f2ee5d59..9ef7f69702 100644
--- a/crates/task-impls/src/consensus/handlers.rs
+++ b/crates/task-impls/src/consensus/handlers.rs
@@ -139,7 +139,7 @@ pub async fn create_and_send_proposal<TYPES: NodeType, V: Versions>(
         _pd: PhantomData,
     };
 
-    error!(
+    debug!(
         "Sending proposal for view {:?} ID: {}",
         proposed_leaf.view_number(),
         id,

diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs
index 916efcfdec..40597b75b2 100644
--- a/crates/task-impls/src/network.rs
+++ b/crates/task-impls/src/network.rs
@@ -491,12 +491,13 @@ impl<
             HotShotEvent::TimeoutVoteSend(vote) => {
                 *maybe_action = Some(HotShotAction::Vote);
                 Some((
-                vote.signing_key(),
-                MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
-                    GeneralConsensusMessage::TimeoutVote(vote.clone()),
-                )),
-                TransmitType::Direct(membership.leader(vote.view_number() + 1)),
-            ))},
+                    vote.signing_key(),
+                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
+                        GeneralConsensusMessage::TimeoutVote(vote.clone()),
+                    )),
+                    TransmitType::Direct(membership.leader(vote.view_number() + 1)),
+                ))
+            }
             HotShotEvent::UpgradeProposalSend(proposal, sender) => Some((
                 sender,
                 MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(

diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs
index db75d66437..867b6298a9 100644
--- a/crates/task-impls/src/quorum_proposal/handlers.rs
+++ b/crates/task-impls/src/quorum_proposal/handlers.rs
@@ -225,7 +225,7 @@ impl<TYPES: NodeType, V: Versions> ProposalDependencyHandle<TYPES, V> {
             signature,
             _pd: PhantomData,
         };
-        error!(
+        debug!(
             "Sending proposal for view {:?}",
             proposed_leaf.view_number(),
         );

diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs
index 592f201a97..8f023af983 100644
--- a/crates/task-impls/src/view_sync.rs
+++ b/crates/task-impls/src/view_sync.rs
@@ -560,7 +560,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let timeout = self.view_sync_timeout;
                 async move {
                     async_sleep(timeout).await;
-                    error!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay);
+                    info!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay);
 
                     broadcast_event(
                         Arc::new(HotShotEvent::ViewSyncTimeout(
@@ -652,7 +652,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let timeout = self.view_sync_timeout;
                 async move {
                     async_sleep(timeout).await;
-                    error!(
+                    info!(
                         "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}",
                         relay
                     );
@@ -748,7 +748,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let timeout = self.view_sync_timeout;
                 async move {
                     async_sleep(timeout).await;
-                    error!("Vote sending timed out in ViewSyncTrigger");
+                    info!("Vote sending timed out in ViewSyncTrigger");
                     broadcast_event(
                         Arc::new(HotShotEvent::ViewSyncTimeout(
                             TYPES::Time::new(*next_view),
@@ -814,7 +814,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 let last_cert = last_seen_certificate.clone();
                 async move {
                     async_sleep(timeout).await;
-                    error!(
+                    info!(
                         "Vote sending timed out in ViewSyncTimeout relay = {}",
                         relay
                     );

diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs
index bbcedbf1cc..3e7b9d747c 100644
--- a/crates/testing/src/spinning_task.rs
+++ b/crates/testing/src/spinning_task.rs
@@ -310,7 +310,7 @@ where
         join_all(ready_futs).await;
 
         while let Some((node, id)) = new_nodes.pop() {
-            tracing::error!("starting node {} back up", id);
+            tracing::error!("Starting node {} back up", id);
             let handle = node.run_tasks().await;
 
             // Create the node and add it to the state, so we can shut them

diff --git a/crates/testing/tests/tests_2/catchup.rs b/crates/testing/tests/tests_2/catchup.rs
index 6b559dc382..04ea28b787 100644
--- a/crates/testing/tests/tests_2/catchup.rs
+++ b/crates/testing/tests/tests_2/catchup.rs
@@ -527,13 +527,13 @@ async fn test_staggered_restart() {
     for i in 4..10 {
         down_regular_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::RestartDown(0)
+            updown: UpDown::RestartDown(0),
         });
     }
     // restart the last da so it gets the new libp2p routing table
-    down_regular_nodes.push(ChangeNode{
+    down_regular_nodes.push(ChangeNode {
         idx: 0,
-        updown: UpDown::RestartDown(0)
+        updown: UpDown::RestartDown(0),
     });
 
     metadata.start_nodes = 10;
From c3cab622bd47368ce563ebf9ec991c51c446141a Mon Sep 17 00:00:00 2001
From: Brendon Fish
Date: Wed, 18 Sep 2024 16:31:29 -0400
Subject: [PATCH 5/7] revert some logging; remove unused file

---
 crates/task-impls/src/consensus/view_change.rs | 143 ------------------
 1 file changed, 143 deletions(-)
 delete mode 100644 crates/task-impls/src/consensus/view_change.rs

diff --git a/crates/task-impls/src/consensus/view_change.rs b/crates/task-impls/src/consensus/view_change.rs
deleted file mode 100644
index 846630ed72..0000000000
--- a/crates/task-impls/src/consensus/view_change.rs
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (c) 2021-2024 Espresso Systems (espressosys.com)
-// This file is part of the HotShot repository.
-
-// You should have received a copy of the MIT License
-// along with the HotShot repository. If not, see <https://mit-license.org/>.
-
-use core::time::Duration;
-use std::sync::Arc;
-
-use anyhow::{ensure, Result};
-use async_broadcast::Sender;
-use async_compatibility_layer::art::{async_sleep, async_spawn};
-#[cfg(async_executor_impl = "async-std")]
-use async_std::task::JoinHandle;
-use chrono::Utc;
-use hotshot_types::{
-    consensus::{ConsensusUpgradableReadLockGuard, OuterConsensus},
-    event::{Event, EventType},
-    traits::node_implementation::{ConsensusTime, NodeType},
-};
-#[cfg(async_executor_impl = "tokio")]
-use tokio::task::JoinHandle;
-use tracing::{debug, error, instrument};
-
-use crate::{
-    events::HotShotEvent,
-    helpers::{broadcast_event, cancel_task},
-};
-
-/// Constant which tells [`update_view`] to send a view change event when called.
-pub(crate) const SEND_VIEW_CHANGE_EVENT: bool = true;
-
-/// Constant which tells [`update_view`] to not send a view change event when called.
-pub(crate) const DONT_SEND_VIEW_CHANGE_EVENT: bool = false;
-
-/// Update the view if it actually changed, takes a mutable reference to the `cur_view` and the
-/// `timeout_task` which are updated during the operation of the function.
-///
-/// # Errors
-/// Returns an [`anyhow::Error`] when the new view is not greater than the current view.
-/// TODO: Remove args when we merge dependency tasks.
-#[allow(clippy::too_many_arguments)]
-#[instrument(skip_all)]
-pub(crate) async fn update_view<TYPES: NodeType>(
-    new_view: TYPES::Time,
-    event_stream: &Sender<Arc<HotShotEvent<TYPES>>>,
-    timeout: u64,
-    consensus: OuterConsensus<TYPES>,
-    cur_view: &mut TYPES::Time,
-    cur_view_time: &mut i64,
-    timeout_task: &mut JoinHandle<()>,
-    output_event_stream: &Sender<Event<TYPES>>,
-    send_view_change_event: bool,
-    is_old_view_leader: bool,
-) -> Result<()> {
-    ensure!(
-        new_view > *cur_view,
-        "New view is not greater than our current view"
-    );
-
-    let old_view = *cur_view;
-
-    error!("Updating view from {} to {}", *old_view, *new_view);
-
-    if *old_view / 100 != *new_view / 100 {
-        // TODO (https://github.com/EspressoSystems/HotShot/issues/2296):
-        // switch to info! when INFO logs become less cluttered
-        error!("Progress: entered view {:>6}", *new_view);
-    }
-
-    *cur_view = new_view;
-
-    // The next view is just the current view + 1
-    let next_view = *cur_view + 1;
-
-    if send_view_change_event {
-        futures::join! {
-            broadcast_event(Arc::new(HotShotEvent::ViewChange(new_view)), event_stream),
-            broadcast_event(
-                Event {
-                    view_number: old_view,
-                    event: EventType::ViewFinished {
-                        view_number: old_view,
-                    },
-                },
-                output_event_stream,
-            )
-        };
-    }
-
-    // Spawn a timeout task if we did actually update view
-    let new_timeout_task = async_spawn({
-        let stream = event_stream.clone();
-        // Nuance: We timeout on the view + 1 here because that means that we have
-        // not seen evidence to transition to this new view
-        let view_number = next_view;
-        let timeout = Duration::from_millis(timeout);
-        async move {
-            async_sleep(timeout).await;
-            broadcast_event(
-                Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))),
-                &stream,
-            )
-            .await;
-        }
-    });
-
-    // cancel the old timeout task
-    cancel_task(std::mem::replace(timeout_task, new_timeout_task)).await;
-
-    let consensus = consensus.upgradable_read().await;
-    consensus
-        .metrics
-        .current_view
-        .set(usize::try_from(cur_view.u64()).unwrap());
-    let new_view_time = Utc::now().timestamp();
-    if is_old_view_leader {
-        #[allow(clippy::cast_precision_loss)]
-        consensus
-            .metrics
-            .view_duration_as_leader
-            .add_point((new_view_time - *cur_view_time) as f64);
-    }
-    *cur_view_time = new_view_time;
-
-    // Do the comparison before the subtraction to avoid potential overflow, since
-    // `last_decided_view` may be greater than `cur_view` if the node is catching up.
-    if usize::try_from(cur_view.u64()).unwrap()
-        > usize::try_from(consensus.last_decided_view().u64()).unwrap()
-    {
-        consensus.metrics.number_of_views_since_last_decide.set(
-            usize::try_from(cur_view.u64()).unwrap()
-                - usize::try_from(consensus.last_decided_view().u64()).unwrap(),
-        );
-    }
-    let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await;
-    if let Err(e) = consensus.update_view(new_view) {
-        tracing::trace!("{e:?}");
-    }
-    tracing::trace!("View updated successfully");
-
-    Ok(())
-}

From 896ff21531567606c87aa740975138182ccfed89 Mon Sep 17 00:00:00 2001
From: Brendon Fish
Date: Wed, 18 Sep 2024 16:32:16 -0400
Subject: [PATCH 6/7] logging

---
 crates/task-impls/src/consensus2/handlers.rs | 2 +-
 crates/task-impls/src/view_sync.rs           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/task-impls/src/consensus2/handlers.rs b/crates/task-impls/src/consensus2/handlers.rs
index ec26bd7bdb..c766334471 100644
--- a/crates/task-impls/src/consensus2/handlers.rs
+++ b/crates/task-impls/src/consensus2/handlers.rs
@@ -239,7 +239,7 @@ pub(crate) async fn handle_timeout<TYPES: NodeType, I: NodeImplementation<TYPES>
     )
     .await;
 
-    error!(
+    debug!(
         "We did not receive evidence for view {} in time, sending timeout vote for that view!",
         *view_number
     );

diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs
index 8f023af983..134766283b 100644
--- a/crates/task-impls/src/view_sync.rs
+++ b/crates/task-impls/src/view_sync.rs
@@ -630,7 +630,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 .await;
             }
 
-            error!(
+            info!(
                 "View sync protocol has received view sync evidence to update the view to {}",
                 *self.next_view
            );
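For reference before the final rename patch below, this is the spinning-task enum as it stands at the end of the series, consolidated from the diffs above (with the doc-comment typo corrected):

    /// Spin the node up or down
    #[derive(Clone, Debug)]
    pub enum NodeAction {
        /// spin the node up
        Up,
        /// spin the node down
        Down,
        /// spin the node's network up
        NetworkUp,
        /// spin the node's network down
        NetworkDown,
        /// Take a node down to be restarted after a number of views
        RestartDown(u64),
        /// Start a node up again after it's been shutdown for restart.  This
        /// should only be created following a `RestartDown`
        RestartUp,
    }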
From 3f2506ae8d9ec979feab963878b31e8924f2b404 Mon Sep 17 00:00:00 2001
From: Brendon Fish
Date: Thu, 19 Sep 2024 09:56:03 -0400
Subject: [PATCH 7/7] Rename UpDown enum

---
 crates/testing/src/spinning_task.rs           | 19 +++++----
 crates/testing/src/test_runner.rs             |  6 +--
 crates/testing/tests/tests_1/libp2p.rs        |  4 +-
 .../tests/tests_1/test_with_failures_2.rs     |  8 ++--
 crates/testing/tests/tests_2/catchup.rs       | 42 +++++++++----------
 .../tests/tests_2/test_with_failures_one.rs   |  4 +-
 .../tests_3/test_with_failures_half_f.rs      |  8 ++--
 .../tests/tests_4/test_with_failures_f.rs     | 14 +++----
 .../testing/tests/tests_5/combined_network.rs | 14 +++----
 crates/testing/tests/tests_5/timeout.rs       |  8 ++--
 10 files changed, 64 insertions(+), 63 deletions(-)

diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs
index d88379b67e..ef4709af68 100644
--- a/crates/testing/src/spinning_task.rs
+++ b/crates/testing/src/spinning_task.rs
@@ -128,7 +128,7 @@ where
         if let Some(operations) = self.changes.remove(&view_number) {
             for ChangeNode { idx, updown } in operations {
                 match updown {
-                    UpDown::Up => {
+                    NodeAction::Up => {
                         let node_id = idx.try_into().unwrap();
                         if let Some(node) = self.late_start.remove(&node_id) {
                             tracing::error!("Node {} spinning up late", idx);
@@ -198,13 +198,13 @@ where
                             self.handles.write().await.push(node);
                         }
                     }
-                    UpDown::Down => {
+                    NodeAction::Down => {
                         if let Some(node) = self.handles.write().await.get_mut(idx) {
                             tracing::error!("Node {} shutting down", idx);
                             node.handle.shut_down().await;
                         }
                     }
-                    UpDown::RestartDown(delay_views) => {
+                    NodeAction::RestartDown(delay_views) => {
                         let node_id = idx.try_into().unwrap();
                         if let Some(node) = self.handles.write().await.get_mut(idx) {
                             tracing::error!("Node {} shutting down", idx);
@@ -274,7 +274,7 @@ where
                         let up_view = view_number + delay_views;
                         let change = ChangeNode {
                             idx,
-                            updown: UpDown::RestartUp,
+                            updown: NodeAction::RestartUp,
                         };
                         self.changes.entry(up_view).or_default().push(change);
                         let new_ctx = RestartContext {
@@ -285,19 +285,19 @@ where
                         }
                     }
-                    UpDown::RestartUp => {
+                    NodeAction::RestartUp => {
                         if let Some(ctx) = self.restart_contexts.remove(&idx) {
                             new_nodes.push((ctx.context, idx));
                             new_networks.push(ctx.network.clone());
                         }
                     }
-                    UpDown::NetworkUp => {
+                    NodeAction::NetworkUp => {
                         if let Some(handle) = self.handles.write().await.get(idx) {
                             tracing::error!("Node {} networks resuming", idx);
                             handle.network.resume();
                         }
                     }
-                    UpDown::NetworkDown => {
+                    NodeAction::NetworkDown => {
                         if let Some(handle) = self.handles.write().await.get(idx) {
                             tracing::error!("Node {} networks pausing", idx);
                             handle.network.pause();
@@ -355,7 +355,7 @@ pub(crate) struct RestartContext<
 
 /// Spin the node up or down
 #[derive(Clone, Debug)]
-pub enum UpDown {
+pub enum NodeAction {
     /// spin the node up
     Up,
     /// spin the node down
     Down,
@@ -367,6 +367,7 @@ pub enum UpDown {
     /// Take a node down to be restarted after a number of views
     RestartDown(u64),
     /// Start a node up again after it's been shutdown for restart.  This
+    /// should only be created following a `RestartDown`
     RestartUp,
 }
 
@@ -376,7 +377,7 @@ pub struct ChangeNode {
     /// the index of the node
     pub idx: usize,
     /// spin the node or node's network up or down
-    pub updown: UpDown,
+    pub updown: NodeAction,
 }
 
 /// description of the spinning task

diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs
index 011aa2ed42..6a24730d31 100644
--- a/crates/testing/src/test_runner.rs
+++ b/crates/testing/src/test_runner.rs
@@ -57,7 +57,7 @@ use super::{
 use crate::{
     block_builder::{BuilderTask, TestBuilderImplementation},
     completion_task::CompletionTaskDescription,
-    spinning_task::{ChangeNode, SpinningTask, UpDown},
+    spinning_task::{ChangeNode, NodeAction, SpinningTask},
     test_builder::create_test_handle,
     test_launcher::{Network, TestLauncher},
     test_task::{TestResult, TestTask},
@@ -105,10 +105,10 @@ where
         let mut restart_nodes: HashSet<u64> = HashSet::new();
         for (_, changes) in &spinning_changes {
             for change in changes {
-                if matches!(change.updown, UpDown::Up) {
+                if matches!(change.updown, NodeAction::Up) {
                     late_start_nodes.insert(change.idx.try_into().unwrap());
                 }
-                if matches!(change.updown, UpDown::RestartDown(_)) {
+                if matches!(change.updown, NodeAction::RestartDown(_)) {
                     restart_nodes.insert(change.idx.try_into().unwrap());
                 }
             }

diff --git a/crates/testing/tests/tests_1/libp2p.rs b/crates/testing/tests/tests_1/libp2p.rs
index 32f0878999..f19e3b0798 100644
--- a/crates/testing/tests/tests_1/libp2p.rs
+++ b/crates/testing/tests/tests_1/libp2p.rs
@@ -11,7 +11,7 @@ use hotshot_testing::{
     block_builder::SimpleBuilderImplementation,
     completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
     overall_safety_task::OverallSafetyPropertiesDescription,
-    spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+    spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
     test_builder::{TestDescription, TimingData},
 };
 use tracing::instrument;
@@ -73,7 +73,7 @@ async fn libp2p_network_failures_2() {
 
     let dead_nodes = vec![ChangeNode {
         idx: 11,
-        updown: UpDown::Down,
+        updown: NodeAction::Down,
    }];
 
     metadata.spinning_properties = SpinningTaskDescription {

diff --git a/crates/testing/tests/tests_1/test_with_failures_2.rs b/crates/testing/tests/tests_1/test_with_failures_2.rs
index c757b5d1b7..8573af51f5 100644
--- a/crates/testing/tests/tests_1/test_with_failures_2.rs
+++ b/crates/testing/tests/tests_1/test_with_failures_2.rs
@@ -15,7 +15,7 @@ use hotshot_example_types::{
 use hotshot_macros::cross_tests;
 use hotshot_testing::{
     block_builder::SimpleBuilderImplementation,
-    spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+    spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
     test_builder::TestDescription,
     view_sync_task::ViewSyncTaskDescription,
 };
@@ -51,11 +51,11 @@ cross_tests!(
         let dead_nodes = vec![
             ChangeNode {
                 idx: 10,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 11,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
         ];
 
@@ -87,7 +87,7 @@ cross_tests!(
         let dead_nodes = vec![
             ChangeNode {
                 idx: 3,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
         ];
 

diff --git a/crates/testing/tests/tests_2/catchup.rs b/crates/testing/tests/tests_2/catchup.rs
index 04ea28b787..0b8fcbfdb9 100644
--- a/crates/testing/tests/tests_2/catchup.rs
+++ b/crates/testing/tests/tests_2/catchup.rs
@@ -15,7 +15,7 @@ async fn test_catchup() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
 
@@ -32,7 +32,7 @@ async fn test_catchup() {
         TestDescription::default();
     let catchup_node = vec![ChangeNode {
         idx: 19,
-        updown: UpDown::Up,
+        updown: NodeAction::Up,
    }];
 
     metadata.timing_data = timing_data;
@@ -78,7 +78,7 @@ async fn test_catchup_cdn() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
 
@@ -92,7 +92,7 @@ async fn test_catchup_cdn() {
         TestDescription::default();
     let catchup_nodes = vec![ChangeNode {
         idx: 18,
-        updown: UpDown::Up,
+        updown: NodeAction::Up,
    }];
     metadata.timing_data = timing_data;
     metadata.start_nodes = 19;
@@ -133,7 +133,7 @@ async fn test_catchup_one_node() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
     async_compatibility_layer::logging::setup_logging();
@@ -146,7 +146,7 @@ async fn test_catchup_one_node() {
         TestDescription::default();
     let catchup_nodes = vec![ChangeNode {
         idx: 18,
-        updown: UpDown::Up,
+        updown: NodeAction::Up,
    }];
     metadata.timing_data = timing_data;
     metadata.start_nodes = 19;
@@ -189,7 +189,7 @@ async fn test_catchup_in_view_sync() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
     async_compatibility_layer::logging::setup_logging();
@@ -203,11 +203,11 @@ async fn test_catchup_in_view_sync() {
     let catchup_nodes = vec![
         ChangeNode {
             idx: 18,
-            updown: UpDown::Up,
+            updown: NodeAction::Up,
         },
         ChangeNode {
             idx: 19,
-            updown: UpDown::Up,
+            updown: NodeAction::Up,
         },
     ];
 
@@ -252,7 +252,7 @@ async fn test_catchup_reload() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
 
@@ -266,7 +266,7 @@ async fn test_catchup_reload() {
         TestDescription::default();
     let catchup_node = vec![ChangeNode {
         idx: 19,
-        updown: UpDown::Up,
+        updown: NodeAction::Up,
    }];
 
     metadata.timing_data = timing_data;
@@ -312,7 +312,7 @@ async fn test_all_restart() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
 
@@ -328,7 +328,7 @@ async fn test_all_restart() {
     for i in 0..20 {
         catchup_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::RestartDown(0),
+            updown: NodeAction::RestartDown(0),
         })
     }
 
@@ -374,7 +374,7 @@ async fn test_all_restart_cdn() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
 
@@ -390,7 +390,7 @@ async fn test_all_restart_cdn() {
     for i in 0..20 {
         catchup_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::RestartDown(0),
+            updown: NodeAction::RestartDown(0),
         })
     }
 
@@ -440,7 +440,7 @@ async fn test_all_restart_one_da() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
 
@@ -457,7 +457,7 @@ async fn test_all_restart_one_da() {
     for i in 0..20 {
         catchup_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::RestartDown(0),
+            updown: NodeAction::RestartDown(0),
         })
     }
 
@@ -506,7 +506,7 @@ async fn test_staggered_restart() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::TestDescription,
     };
 
@@ -519,7 +519,7 @@ async fn test_staggered_restart() {
     for i in 1..4 {
         down_da_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::RestartDown(20),
+            updown: NodeAction::RestartDown(20),
         });
     }
 
@@ -527,13 +527,13 @@ async fn test_staggered_restart() {
     for i in 4..10 {
         down_regular_nodes.push(ChangeNode {
             idx: i,
-            updown: UpDown::RestartDown(0),
+            updown: NodeAction::RestartDown(0),
         });
     }
     // restart the last da so it gets the new libp2p routing table
     down_regular_nodes.push(ChangeNode {
         idx: 0,
-        updown: UpDown::RestartDown(0),
+        updown: NodeAction::RestartDown(0),
     });
 
     metadata.start_nodes = 10;

diff --git a/crates/testing/tests/tests_2/test_with_failures_one.rs b/crates/testing/tests/tests_2/test_with_failures_one.rs
index 5408b57eb5..c540c9cbfc 100644
--- a/crates/testing/tests/tests_2/test_with_failures_one.rs
+++ b/crates/testing/tests/tests_2/test_with_failures_one.rs
@@ -11,7 +11,7 @@ use hotshot_example_types::{
 use hotshot_macros::cross_tests;
 use hotshot_testing::{
     block_builder::SimpleBuilderImplementation,
-    spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+    spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
     test_builder::TestDescription,
 };
 
@@ -32,7 +32,7 @@ cross_tests!(
         //
         let dead_nodes = vec![ChangeNode {
             idx: 19,
-            updown: UpDown::Down,
+            updown: NodeAction::Down,
        }];
 
         metadata.spinning_properties = SpinningTaskDescription {

diff --git a/crates/testing/tests/tests_3/test_with_failures_half_f.rs b/crates/testing/tests/tests_3/test_with_failures_half_f.rs
index e4d7f58d68..8b1eb531a2 100644
--- a/crates/testing/tests/tests_3/test_with_failures_half_f.rs
+++ b/crates/testing/tests/tests_3/test_with_failures_half_f.rs
@@ -11,7 +11,7 @@ use hotshot_example_types::{
 use hotshot_macros::cross_tests;
 use hotshot_testing::{
     block_builder::SimpleBuilderImplementation,
-    spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+    spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
     test_builder::TestDescription,
 };
 // Test f/2 nodes leaving the network.
@@ -32,15 +32,15 @@ cross_tests!(
         let dead_nodes = vec![
             ChangeNode {
                 idx: 17,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 18,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 19,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
         ];
 

diff --git a/crates/testing/tests/tests_4/test_with_failures_f.rs b/crates/testing/tests/tests_4/test_with_failures_f.rs
index c36b043294..931d7eaf5e 100644
--- a/crates/testing/tests/tests_4/test_with_failures_f.rs
+++ b/crates/testing/tests/tests_4/test_with_failures_f.rs
@@ -11,7 +11,7 @@ use hotshot_example_types::{
 use hotshot_macros::cross_tests;
 use hotshot_testing::{
     block_builder::SimpleBuilderImplementation,
-    spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+    spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
     test_builder::TestDescription,
 };
 // Test f nodes leaving the network.
@@ -35,27 +35,27 @@ cross_tests!(
         let dead_nodes = vec![
             ChangeNode {
                 idx: 14,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 15,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 16,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 17,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 18,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
             ChangeNode {
                 idx: 19,
-                updown: UpDown::Down,
+                updown: NodeAction::Down,
             },
         ];
 

diff --git a/crates/testing/tests/tests_5/combined_network.rs b/crates/testing/tests/tests_5/combined_network.rs
index bc35b9ad3c..e4d0fb4625 100644
--- a/crates/testing/tests/tests_5/combined_network.rs
+++ b/crates/testing/tests/tests_5/combined_network.rs
@@ -11,7 +11,7 @@ use hotshot_testing::{
     block_builder::SimpleBuilderImplementation,
     completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
     overall_safety_task::OverallSafetyPropertiesDescription,
-    spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+    spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
     test_builder::{TestDescription, TimingData},
 };
 use rand::Rng;
@@ -86,7 +86,7 @@ async fn test_combined_network_cdn_crash() {
     for node in 0..metadata.num_nodes_with_stake {
         all_nodes.push(ChangeNode {
             idx: node,
-            updown: UpDown::NetworkDown,
+            updown: NodeAction::NetworkDown,
         });
     }
 
@@ -136,11 +136,11 @@ async fn test_combined_network_reup() {
     for node in 0..metadata.num_nodes_with_stake {
         all_down.push(ChangeNode {
             idx: node,
-            updown: UpDown::NetworkDown,
+            updown: NodeAction::NetworkDown,
         });
         all_up.push(ChangeNode {
            idx: node,
-            updown: UpDown::NetworkUp,
+            updown: NodeAction::NetworkUp,
        });
     }
 
@@ -188,7 +188,7 @@ async fn test_combined_network_half_dc() {
     for node in 0..metadata.num_nodes_with_stake / 2 {
         half.push(ChangeNode {
             idx: node,
-            updown: UpDown::NetworkDown,
+            updown: NodeAction::NetworkDown,
         });
     }
 
@@ -212,9 +212,9 @@ fn generate_random_node_changes(
 
     for _ in 0..total_nodes * 2 {
         let updown = if rng.gen::<bool>() {
-            UpDown::NetworkUp
+            NodeAction::NetworkUp
         } else {
-            UpDown::NetworkDown
+            NodeAction::NetworkDown
         };
 
         let node_change = ChangeNode {

diff --git a/crates/testing/tests/tests_5/timeout.rs b/crates/testing/tests/tests_5/timeout.rs
index 2a9cd4e73a..2269d5000d 100644
--- a/crates/testing/tests/tests_5/timeout.rs
+++ b/crates/testing/tests/tests_5/timeout.rs
@@ -17,7 +17,7 @@ async fn test_timeout() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
     async_compatibility_layer::logging::setup_logging();
@@ -34,7 +34,7 @@ async fn test_timeout() {
     };
     let dead_nodes = vec![ChangeNode {
         idx: 0,
-        updown: UpDown::Down,
+        updown: NodeAction::Down,
    }];
 
     metadata.timing_data = timing_data;
@@ -77,7 +77,7 @@ async fn test_timeout_libp2p() {
         block_builder::SimpleBuilderImplementation,
         completion_task::{CompletionTaskDescription, TimeBasedCompletionTaskDescription},
         overall_safety_task::OverallSafetyPropertiesDescription,
-        spinning_task::{ChangeNode, SpinningTaskDescription, UpDown},
+        spinning_task::{ChangeNode, NodeAction, SpinningTaskDescription},
         test_builder::{TestDescription, TimingData},
     };
 
@@ -98,7 +98,7 @@ async fn test_timeout_libp2p() {
     };
     let dead_nodes = vec![ChangeNode {
         idx: 9,
-        updown: UpDown::Down,
+        updown: NodeAction::Down,
    }];
 
     metadata.timing_data = timing_data;