From 401cdba28c6ac69f67edbe6fe6ae9da1dc5ad4f5 Mon Sep 17 00:00:00 2001 From: Lukasz Rzasik Date: Thu, 10 Oct 2024 15:29:09 -0600 Subject: [PATCH 01/16] Add epoch parameter in `Membership` trait's methods --- .../src/auction_results_provider_types.rs | 5 +- crates/example-types/src/node_types.rs | 27 +-- crates/example-types/src/storage_types.rs | 18 +- crates/examples/infra/mod.rs | 4 +- crates/hotshot/src/lib.rs | 36 ++-- crates/hotshot/src/tasks/mod.rs | 3 +- crates/hotshot/src/tasks/task_state.rs | 11 +- .../traits/election/randomized_committee.rs | 22 +- .../src/traits/election/static_committee.rs | 22 +- .../static_committee_leader_two_views.rs | 22 +- .../src/traits/networking/combined_network.rs | 7 +- .../src/traits/networking/libp2p_network.rs | 7 +- crates/hotshot/src/types/handle.rs | 18 +- crates/orchestrator/src/config.rs | 4 + crates/task-impls/src/consensus/handlers.rs | 35 +++- crates/task-impls/src/consensus/mod.rs | 87 ++++++-- crates/task-impls/src/consensus2/handlers.rs | 32 ++- crates/task-impls/src/consensus2/mod.rs | 7 +- crates/task-impls/src/da.rs | 25 ++- crates/task-impls/src/events.rs | 26 +-- crates/task-impls/src/helpers.rs | 34 ++-- crates/task-impls/src/network.rs | 44 ++-- .../src/quorum_proposal/handlers.rs | 4 +- crates/task-impls/src/quorum_proposal/mod.rs | 37 +++- .../src/quorum_proposal_recv/handlers.rs | 12 +- .../src/quorum_proposal_recv/mod.rs | 9 +- crates/task-impls/src/quorum_vote/mod.rs | 57 ++++-- crates/task-impls/src/request.rs | 24 ++- crates/task-impls/src/response.rs | 12 +- crates/task-impls/src/transactions.rs | 59 +++--- crates/task-impls/src/upgrade.rs | 31 +-- crates/task-impls/src/vid.rs | 7 +- crates/task-impls/src/view_sync.rs | 88 +++++--- crates/task-impls/src/vote_collection.rs | 85 ++++++-- .../src/byzantine/byzantine_behaviour.rs | 11 +- crates/testing/src/consistency_task.rs | 10 +- crates/testing/src/helpers.rs | 54 +++-- crates/testing/src/overall_safety_task.rs | 20 +- 
crates/testing/src/spinning_task.rs | 8 +- crates/testing/src/test_builder.rs | 1 + crates/testing/src/test_runner.rs | 4 +- crates/testing/src/view_generator.rs | 34 +++- .../testing/tests/tests_1/consensus_task.rs | 15 +- crates/testing/tests/tests_1/da_task.rs | 17 +- crates/testing/tests/tests_1/network_task.rs | 4 + .../testing/tests/tests_1/transaction_task.rs | 6 +- .../tests_1/upgrade_task_with_consensus.rs | 10 +- crates/testing/tests/tests_1/vid_task.rs | 19 +- .../testing/tests/tests_1/view_sync_task.rs | 6 +- .../testing/tests/tests_3/byzantine_tests.rs | 4 +- .../testing/tests/tests_3/memory_network.rs | 4 +- crates/types/src/consensus.rs | 114 +++++++---- crates/types/src/data.rs | 189 +++++++++++------- crates/types/src/error.rs | 2 +- crates/types/src/event.rs | 8 +- crates/types/src/lib.rs | 2 + crates/types/src/message.rs | 19 +- crates/types/src/request_response.rs | 2 +- crates/types/src/simple_certificate.rs | 21 +- crates/types/src/simple_vote.rs | 28 +-- .../src/traits/auction_results_provider.rs | 5 +- crates/types/src/traits/election.rs | 38 ++-- crates/types/src/traits/network.rs | 18 +- .../types/src/traits/node_implementation.rs | 6 +- crates/types/src/traits/storage.rs | 4 +- crates/types/src/utils.rs | 2 +- crates/types/src/vote.rs | 17 +- 67 files changed, 1073 insertions(+), 550 deletions(-) diff --git a/crates/example-types/src/auction_results_provider_types.rs b/crates/example-types/src/auction_results_provider_types.rs index af2f8b7026..d6b6490189 100644 --- a/crates/example-types/src/auction_results_provider_types.rs +++ b/crates/example-types/src/auction_results_provider_types.rs @@ -48,7 +48,10 @@ pub struct TestAuctionResultsProvider { impl AuctionResultsProvider for TestAuctionResultsProvider { /// Mock fetching the auction results, with optional error injection to simulate failure cases /// in the solver. 
- async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result { + async fn fetch_auction_result( + &self, + view_number: TYPES::ViewTime, + ) -> Result { if let Some(url) = &self.broadcast_url { let resp = reqwest::get(url.join(&format!("/v0/api/auction_results/{}", *view_number))?) diff --git a/crates/example-types/src/node_types.rs b/crates/example-types/src/node_types.rs index a8f06976c3..d1dff916ca 100644 --- a/crates/example-types/src/node_types.rs +++ b/crates/example-types/src/node_types.rs @@ -4,6 +4,12 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use crate::{ + auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::{TestInstanceState, TestValidatedState}, + storage_types::TestStorage, +}; use hotshot::traits::{ election::{ randomized_committee::RandomizedCommittee, static_committee::StaticCommittee, @@ -12,6 +18,7 @@ use hotshot::traits::{ implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, NodeImplementation, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, signature_key::{BLSPubKey, BuilderKey}, @@ -20,13 +27,6 @@ use hotshot_types::{ use serde::{Deserialize, Serialize}; use vbs::version::StaticVersion; -use crate::{ - auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::{TestInstanceState, TestValidatedState}, - storage_types::TestStorage, -}; - #[derive( Copy, Clone, @@ -45,7 +45,8 @@ use crate::{ pub struct TestTypes; impl NodeType for TestTypes { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type ViewTime = ViewNumber; + type EpochTime = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -74,7 +75,8 @@ 
impl NodeType for TestTypes { pub struct TestTypesRandomizedLeader; impl NodeType for TestTypesRandomizedLeader { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type ViewTime = ViewNumber; + type EpochTime = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -103,7 +105,8 @@ impl NodeType for TestTypesRandomizedLeader { pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type ViewTime = ViewNumber; + type EpochTime = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -235,8 +238,8 @@ mod tests { let data = TestData { data: 10 }; - let view_0 = ::Time::new(0); - let view_1 = ::Time::new(1); + let view_0 = ::ViewTime::new(0); + let view_1 = ::ViewTime::new(1); let versioned_data_0 = VersionedVoteData::::new( diff --git a/crates/example-types/src/storage_types.rs b/crates/example-types/src/storage_types.rs index 5c6277f35c..6749326746 100644 --- a/crates/example-types/src/storage_types.rs +++ b/crates/example-types/src/storage_types.rs @@ -29,17 +29,17 @@ use hotshot_types::{ use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; type VidShares = HashMap< - ::Time, + ::ViewTime, HashMap<::SignatureKey, Proposal>>, >; #[derive(Clone, Debug)] pub struct TestStorageState { vids: VidShares, - das: HashMap>>, - proposals: BTreeMap>>, + das: HashMap>>, + proposals: BTreeMap>>, high_qc: Option>, - action: TYPES::Time, + action: TYPES::ViewTime, } impl Default for TestStorageState { @@ -49,7 +49,7 @@ impl Default for TestStorageState { das: HashMap::new(), proposals: BTreeMap::new(), high_qc: None, - action: TYPES::Time::genesis(), + action: TYPES::ViewTime::genesis(), } } } @@ -87,7 +87,7 @@ impl TestableDelay for TestStorage { impl TestStorage { pub async fn proposals_cloned( 
&self, - ) -> BTreeMap>> { + ) -> BTreeMap>> { self.inner.read().await.proposals.clone() } pub async fn high_qc_cloned(&self) -> Option> { @@ -96,7 +96,7 @@ impl TestStorage { pub async fn decided_upgrade_certificate(&self) -> Option> { self.decided_upgrade_certificate.read().await.clone() } - pub async fn last_actioned_view(&self) -> TYPES::Time { + pub async fn last_actioned_view(&self) -> TYPES::ViewTime { self.inner.read().await.action } } @@ -145,7 +145,7 @@ impl Storage for TestStorage { async fn record_action( &self, - view: ::Time, + view: ::ViewTime, action: hotshot_types::event::HotShotAction, ) -> Result<()> { if self.should_return_err { @@ -180,7 +180,7 @@ impl Storage for TestStorage { async fn update_undecided_state( &self, _leafs: CommitmentMap>, - _state: BTreeMap>, + _state: BTreeMap>, ) -> Result<()> { if self.should_return_err { bail!("Failed to update high qc to storage"); diff --git a/crates/examples/infra/mod.rs b/crates/examples/infra/mod.rs index 056eb92782..e7bd04bef3 100755 --- a/crates/examples/infra/mod.rs +++ b/crates/examples/infra/mod.rs @@ -463,7 +463,7 @@ pub trait RunDa< let start = Instant::now(); let mut event_stream = context.event_stream(); - let mut anchor_view: TYPES::Time = ::genesis(); + let mut anchor_view: TYPES::ViewTime = ::genesis(); let mut num_successful_commits = 0; context.hotshot.start_consensus().await; @@ -563,7 +563,7 @@ pub trait RunDa< .hotshot .memberships .quorum_membership - .committee_leaders(TYPES::Time::genesis()) + .committee_leaders(TYPES::ViewTime::genesis(), TYPES::EpochTime::genesis()) .len(); let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); // `failed_num_views` could include uncommitted views diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index fa85394aeb..d89462f276 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -123,7 +123,7 @@ pub struct SystemContext, V: Versi instance_state: Arc, /// The view to enter when first 
starting consensus - start_view: TYPES::Time, + start_view: TYPES::ViewTime, /// Access to the output event stream. output_event_stream: (Sender>, InactiveReceiver>), @@ -302,9 +302,17 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext Option> { + pub async fn state(&self, view: TYPES::ViewTime) -> Option> { self.consensus.read().await.state(view).cloned() } @@ -968,10 +976,10 @@ pub struct HotShotInitializer { state_delta: Option>::Delta>>, /// Starting view number that should be equivelant to the view the node shut down with last. - start_view: TYPES::Time, + start_view: TYPES::ViewTime, /// The view we last performed an action in. An action is Proposing or voting for /// Either the quorum or DA. - actioned_view: TYPES::Time, + actioned_view: TYPES::ViewTime, /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. @@ -982,9 +990,9 @@ pub struct HotShotInitializer { /// to vote and propose right away if they didn't miss anything while down. 
undecided_leafs: Vec>, /// Not yet decided state - undecided_state: BTreeMap>, + undecided_state: BTreeMap>, /// Proposals we have sent out to provide to others for catchup - saved_proposals: BTreeMap>>, + saved_proposals: BTreeMap>>, } impl HotShotInitializer { @@ -1001,8 +1009,8 @@ impl HotShotInitializer { inner: Leaf::genesis(&validated_state, &instance_state).await, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), - start_view: TYPES::Time::new(0), - actioned_view: TYPES::Time::new(0), + start_view: TYPES::ViewTime::new(0), + actioned_view: TYPES::ViewTime::new(0), saved_proposals: BTreeMap::new(), high_qc, decided_upgrade_certificate: None, @@ -1024,13 +1032,13 @@ impl HotShotInitializer { anchor_leaf: Leaf, instance_state: TYPES::InstanceState, validated_state: Option>, - start_view: TYPES::Time, - actioned_view: TYPES::Time, - saved_proposals: BTreeMap>>, + start_view: TYPES::ViewTime, + actioned_view: TYPES::ViewTime, + saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, decided_upgrade_certificate: Option>, undecided_leafs: Vec>, - undecided_state: BTreeMap>, + undecided_state: BTreeMap>, ) -> Self { Self { inner: anchor_leaf, diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index e9981b8b5f..a25c760f12 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -169,7 +169,8 @@ pub fn add_network_event_task< ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, - view: TYPES::Time::genesis(), + view: TYPES::ViewTime::genesis(), + epoch: TYPES::EpochTime::genesis(), quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), diff --git a/crates/hotshot/src/tasks/task_state.rs b/crates/hotshot/src/tasks/task_state.rs index c1a10f4646..895e8a7eca 100644 --- a/crates/hotshot/src/tasks/task_state.rs +++ b/crates/hotshot/src/tasks/task_state.rs @@ -70,6 +70,7 @@ impl, V: Versions> CreateTaskState return 
Self { output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), network: Arc::clone(&handle.hotshot.network), vote_collectors: BTreeMap::default(), @@ -91,6 +92,7 @@ impl, V: Versions> CreateTaskState return Self { output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), network: Arc::clone(&handle.hotshot.network), vote_collector: None.into(), @@ -118,6 +120,7 @@ impl, V: Versions> CreateTaskState Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, vote_collector: None, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -140,6 +143,7 @@ impl, V: Versions> CreateTaskState network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -160,6 +164,7 @@ impl, V: Versions> CreateTaskState Self { current_view: cur_view, next_view: cur_view, + current_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), @@ -171,7 +176,7 @@ impl, V: Versions> CreateTaskState finalize_relay_map: HashMap::default().into(), view_sync_timeout: handle.hotshot.config.view_sync_timeout, id: handle.hotshot.id, - last_garbage_collected_view: TYPES::Time::new(0), + last_garbage_collected_view: TYPES::ViewTime::new(0), 
upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } @@ -187,6 +192,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, + cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), membership: handle.hotshot.memberships.quorum_membership.clone().into(), public_key: handle.public_key().clone(), @@ -228,6 +234,7 @@ impl, V: Versions> CreateTaskState round_start_delay: handle.hotshot.config.round_start_delay, cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), + cur_epoch: handle.cur_epoch().await, payload_commitment_and_metadata: None, vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), @@ -316,6 +323,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), + cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), timeout_membership: handle.hotshot.memberships.quorum_membership.clone().into(), @@ -353,6 +361,7 @@ impl, V: Versions> CreateTaskState storage: Arc::clone(&handle.storage), cur_view: handle.cur_view().await, cur_view_time: Utc::now().timestamp(), + cur_epoch: handle.cur_epoch().await, output_event_stream: handle.hotshot.external_event_stream.0.clone(), timeout_task: async_spawn(async {}), timeout: handle.hotshot.config.next_view_timeout, diff --git a/crates/hotshot/src/traits/election/randomized_committee.rs b/crates/hotshot/src/traits/election/randomized_committee.rs index b761eb1ee2..073fe63f5b 100644 --- a/crates/hotshot/src/traits/election/randomized_committee.rs +++ b/crates/hotshot/src/traits/election/randomized_committee.rs @@ -82,6 +82,7 @@ impl Membership for RandomizedCommittee { /// Get the stake table for the 
current view fn stake_table( &self, + _epoch: ::EpochTime, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -89,7 +90,8 @@ impl Membership for RandomizedCommittee { /// Get all members of the committee for the current view fn committee_members( &self, - _view_number: ::Time, + _view_number: ::ViewTime, + _epoch: ::EpochTime, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -100,7 +102,8 @@ impl Membership for RandomizedCommittee { /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, - _view_number: ::Time, + _view_number: ::ViewTime, + _epoch: ::EpochTime, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -112,13 +115,18 @@ impl Membership for RandomizedCommittee { fn stake( &self, pub_key: &::SignatureKey, + _epoch: ::EpochTime, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() } /// Check if a node has stake in the committee - fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + fn has_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::EpochTime, + ) -> bool { self.indexed_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) @@ -130,7 +138,11 @@ impl Membership for RandomizedCommittee { } /// Index the vector of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + fn leader( + &self, + view_number: TYPES::ViewTime, + _epoch: ::EpochTime, + ) -> TYPES::SignatureKey { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); @@ -143,7 +155,7 @@ impl Membership for RandomizedCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self) -> usize { + fn total_nodes(&self, _epoch: ::EpochTime) -> usize { self.stake_table.len() } diff --git 
a/crates/hotshot/src/traits/election/static_committee.rs b/crates/hotshot/src/traits/election/static_committee.rs index 69cc5ce9c2..054fc0aae9 100644 --- a/crates/hotshot/src/traits/election/static_committee.rs +++ b/crates/hotshot/src/traits/election/static_committee.rs @@ -80,6 +80,7 @@ impl Membership for StaticCommittee { /// Get the stake table for the current view fn stake_table( &self, + _epoch: ::EpochTime, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -87,7 +88,8 @@ impl Membership for StaticCommittee { /// Get all members of the committee for the current view fn committee_members( &self, - _view_number: ::Time, + _view_number: ::ViewTime, + _epoch: ::EpochTime, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -98,7 +100,8 @@ impl Membership for StaticCommittee { /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, - _view_number: ::Time, + _view_number: ::ViewTime, + _epoch: ::EpochTime, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -110,13 +113,18 @@ impl Membership for StaticCommittee { fn stake( &self, pub_key: &::SignatureKey, + _epoch: ::EpochTime, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() } /// Check if a node has stake in the committee - fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + fn has_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::EpochTime, + ) -> bool { self.indexed_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) @@ -128,7 +136,11 @@ impl Membership for StaticCommittee { } /// Index the vector of public keys with the current view number - fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + fn leader( + &self, + view_number: TYPES::ViewTime, + _epoch: ::EpochTime, + ) -> TYPES::SignatureKey { #[allow(clippy::cast_possible_truncation)] let index = 
*view_number as usize % self.eligible_leaders.len(); let res = self.eligible_leaders[index].clone(); @@ -136,7 +148,7 @@ impl Membership for StaticCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self) -> usize { + fn total_nodes(&self, _epoch: ::EpochTime) -> usize { self.stake_table.len() } diff --git a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs index 9ce83c14a0..9104bce6cb 100644 --- a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -80,6 +80,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::EpochTime, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -87,7 +88,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Time, + _view_number: ::ViewTime, + _epoch: ::EpochTime, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -98,7 +100,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Time, + _view_number: ::ViewTime, + _epoch: ::EpochTime, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -110,13 +113,18 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, + _epoch: ::EpochTime, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() } /// Check if a node has stake in the committee - fn has_stake(&self, pub_key: &::SignatureKey) -> bool { + fn has_stake( + &self, + pub_key: &::SignatureKey, + _epoch: ::EpochTime, + ) -> bool { self.indexed_stake_table .get(pub_key) .is_some_and(|x| x.stake() > U256::zero()) @@ -128,7 +136,11 @@ impl Membership for StaticCommitteeLeaderForTwoViews TYPES::SignatureKey { + fn leader( + &self, + view_number: TYPES::ViewTime, + _epoch: ::EpochTime, + ) -> TYPES::SignatureKey { let index = 
usize::try_from((*view_number / 2) % self.eligible_leaders.len() as u64).unwrap(); let res = self.eligible_leaders[index].clone(); @@ -136,7 +148,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews usize { + fn total_nodes(&self, _epoch: ::EpochTime) -> usize { self.stake_table.len() } diff --git a/crates/hotshot/src/traits/networking/combined_network.rs b/crates/hotshot/src/traits/networking/combined_network.rs index 2a4bd88f8f..90872b9fd0 100644 --- a/crates/hotshot/src/traits/networking/combined_network.rs +++ b/crates/hotshot/src/traits/networking/combined_network.rs @@ -501,7 +501,7 @@ impl ConnectedNetwork for CombinedNetworks self.secondary().queue_node_lookup(view_number, pk) } - async fn update_view<'a, T>(&'a self, view: u64, membership: &T::Membership) + async fn update_view<'a, T>(&'a self, view: u64, epoch: u64, membership: &T::Membership) where T: NodeType + 'a, { @@ -522,7 +522,10 @@ impl ConnectedNetwork for CombinedNetworks } }); // Run `update_view` logic for the libp2p network - self.networks.1.update_view::(view, membership).await; + self.networks + .1 + .update_view::(view, epoch, membership) + .await; } fn is_primary_down(&self) -> bool { diff --git a/crates/hotshot/src/traits/networking/libp2p_network.rs b/crates/hotshot/src/traits/networking/libp2p_network.rs index 8dd97b6aeb..d2638d47b9 100644 --- a/crates/hotshot/src/traits/networking/libp2p_network.rs +++ b/crates/hotshot/src/traits/networking/libp2p_network.rs @@ -1131,12 +1131,13 @@ impl ConnectedNetwork for Libp2pNetwork { /// So the logic with libp2p is to prefetch upcomming leaders libp2p address to /// save time when we later need to direct message the leader our vote. Hence the /// use of the future view and leader to queue the lookups. 
- async fn update_view<'a, TYPES>(&'a self, view: u64, membership: &TYPES::Membership) + async fn update_view<'a, TYPES>(&'a self, view: u64, epoch: u64, membership: &TYPES::Membership) where TYPES: NodeType + 'a, { - let future_view = ::Time::new(view) + LOOK_AHEAD; - let future_leader = membership.leader(future_view); + let future_view = ::ViewTime::new(view) + LOOK_AHEAD; + let epoch = ::EpochTime::new(epoch); + let future_leader = membership.leader(future_view, epoch); let _ = self .queue_node_lookup(ViewNumber::new(*future_view), future_leader) diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 0879a349fe..b3881c6f3f 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -117,7 +117,7 @@ impl + 'static, V: Versions> /// return [`None`] if the requested view has already been decided (but see /// [`decided_state`](Self::decided_state)) or if there is no path for the requested /// view to ever be decided. - pub async fn state(&self, view: TYPES::Time) -> Option> { + pub async fn state(&self, view: TYPES::ViewTime) -> Option> { self.hotshot.state(view).await } @@ -190,11 +190,15 @@ impl + 'static, V: Versions> /// Wrapper for `HotShotConsensusApi`'s `leader` function #[allow(clippy::unused_async)] // async for API compatibility reasons - pub async fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey { + pub async fn leader( + &self, + view_number: TYPES::ViewTime, + epoch_number: TYPES::EpochTime, + ) -> TYPES::SignatureKey { self.hotshot .memberships .quorum_membership - .leader(view_number) + .leader(view_number, epoch_number) } // Below is for testing only: @@ -221,10 +225,16 @@ impl + 'static, V: Versions> /// Wrapper to get the view number this node is on. 
#[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] - pub async fn cur_view(&self) -> TYPES::Time { + pub async fn cur_view(&self) -> TYPES::ViewTime { self.hotshot.consensus.read().await.cur_view() } + /// Wrapper to get the epoch number this node is on. + #[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] + pub async fn cur_epoch(&self) -> TYPES::EpochTime { + self.hotshot.consensus.read().await.cur_epoch() + } + /// Provides a reference to the underlying storage for this [`SystemContext`], allowing access to /// historical data #[must_use] diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 81bd03cf9d..c2d1e15187 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -555,6 +555,8 @@ pub struct HotShotConfigFile { pub builder_urls: Vec1, /// Upgrade config pub upgrade: UpgradeConfig, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] @@ -681,6 +683,7 @@ impl From> for HotShotConfig { stop_proposing_time: val.upgrade.stop_proposing_time, start_voting_time: val.upgrade.start_voting_time, stop_voting_time: val.upgrade.stop_voting_time, + epoch_height: val.epoch_height, } } } @@ -749,6 +752,7 @@ impl Default for HotShotConfigFile { data_request_delay: Some(Duration::from_millis(REQUEST_DATA_DELAY)), builder_urls: default_builder_urls(), upgrade: UpgradeConfig::default(), + epoch_height: 0, } } } diff --git a/crates/task-impls/src/consensus/handlers.rs b/crates/task-impls/src/consensus/handlers.rs index 193ebbd257..c956fbdacd 100644 --- a/crates/task-impls/src/consensus/handlers.rs +++ b/crates/task-impls/src/consensus/handlers.rs @@ -58,7 +58,7 @@ pub async fn create_and_send_proposal( private_key: ::PrivateKey, consensus: OuterConsensus, event_stream: Sender>>, - view: TYPES::Time, + view: TYPES::ViewTime, 
commitment_and_metadata: CommitmentAndMetadata, parent_leaf: Leaf, state: Arc, @@ -164,7 +164,7 @@ pub async fn create_and_send_proposal( #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] pub async fn publish_proposal_from_commitment_and_metadata( - view: TYPES::Time, + view: TYPES::ViewTime, sender: Sender>>, receiver: Receiver>>, quorum_membership: Arc, @@ -287,6 +287,7 @@ pub(crate) async fn handle_quorum_proposal_recv< validate_proposal_view_and_certs( proposal, task_state.cur_view, + task_state.cur_epoch, &task_state.quorum_membership, &task_state.timeout_membership, &task_state.upgrade_lock, @@ -300,6 +301,7 @@ pub(crate) async fn handle_quorum_proposal_recv< if !justify_qc .is_valid_cert( task_state.quorum_membership.as_ref(), + task_state.cur_epoch, &task_state.upgrade_lock, ) .await @@ -320,7 +322,10 @@ pub(crate) async fn handle_quorum_proposal_recv< &mut task_state.timeout_task, &task_state.output_event_stream, SEND_VIEW_CHANGE_EVENT, - task_state.quorum_membership.leader(cur_view) == task_state.public_key, + task_state + .quorum_membership + .leader(cur_view, task_state.cur_epoch) + == task_state.public_key, ) .await { @@ -443,7 +448,9 @@ pub(crate) async fn handle_quorum_proposal_recv< let new_view = proposal.data.view_number + 1; // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) + let should_propose = task_state + .quorum_membership + .leader(new_view, task_state.cur_epoch) == task_state.public_key && high_qc.view_number == current_proposal.clone().unwrap().view_number; @@ -556,12 +563,15 @@ pub async fn handle_quorum_proposal_validated< } } + let current_epoch = consensus.cur_epoch(); + drop(consensus); let new_view = task_state.current_proposal.clone().unwrap().view_number + 1; // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it here // This is for the 
case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = task_state.quorum_membership.leader(new_view) == task_state.public_key + let should_propose = task_state.quorum_membership.leader(new_view, current_epoch) + == task_state.public_key && task_state.consensus.read().await.high_qc().view_number == task_state.current_proposal.clone().unwrap().view_number; @@ -670,7 +680,7 @@ pub async fn update_state_and_vote_if_able< I: NodeImplementation, V: Versions, >( - cur_view: TYPES::Time, + cur_view: TYPES::ViewTime, proposal: QuorumProposal, public_key: TYPES::SignatureKey, private_key: ::PrivateKey, @@ -684,7 +694,8 @@ pub async fn update_state_and_vote_if_able< ) -> bool { use hotshot_types::simple_vote::QuorumVote; - if !quorum_membership.has_stake(&public_key) { + let current_epoch = consensus.read().await.cur_epoch(); + if !quorum_membership.has_stake(&public_key, current_epoch) { debug!("We were not chosen for quorum committee on {:?}", cur_view); return false; } @@ -712,9 +723,9 @@ pub async fn update_state_and_vote_if_able< { if upgrade_cert.upgrading_in(cur_view) && Some(proposal.block_header.payload_commitment()) - != null_block::commitment(quorum_membership.total_nodes()) + != null_block::commitment(quorum_membership.total_nodes(current_epoch)) { - info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes()), Some(proposal.block_header.payload_commitment())); + info!("Refusing to vote on proposal because it does not have a null commitment, and we are between versions. Expected:\n\n{:?}\n\nActual:{:?}", null_block::commitment(quorum_membership.total_nodes(current_epoch)), Some(proposal.block_header.payload_commitment())); return false; } } @@ -800,7 +811,11 @@ pub async fn update_state_and_vote_if_able< // Validate the DAC. 
let message = if cert - .is_valid_cert(vote_info.da_membership.as_ref(), upgrade_lock) + .is_valid_cert( + vote_info.da_membership.as_ref(), + current_epoch, + upgrade_lock, + ) .await { // Validate the block payload commitment for non-genesis DAC. diff --git a/crates/task-impls/src/consensus/mod.rs b/crates/task-impls/src/consensus/mod.rs index b8fad36a08..8f52645f57 100644 --- a/crates/task-impls/src/consensus/mod.rs +++ b/crates/task-impls/src/consensus/mod.rs @@ -66,11 +66,14 @@ pub struct ConsensusTaskState, V: /// Round start delay from config, in milliseconds. pub round_start_delay: u64, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::ViewTime, /// Timestamp this view starts at. pub cur_view_time: i64, + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::EpochTime, + /// The commitment to the current block payload and its metadata submitted to DA. pub payload_commitment_and_metadata: Option>, @@ -98,7 +101,7 @@ pub struct ConsensusTaskState, V: /// Spawned tasks related to a specific view, so we can cancel them when /// they are stale - pub spawned_tasks: BTreeMap>>, + pub spawned_tasks: BTreeMap>>, /// The most recent upgrade certificate this node formed. 
/// Note: this is ONLY for certificates that have been formed internally, @@ -131,7 +134,7 @@ pub struct ConsensusTaskState, V: impl, V: Versions> ConsensusTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::Time) { + pub async fn cancel_tasks(&mut self, view: TYPES::ViewTime) { let keep = self.spawned_tasks.split_off(&view); let mut cancel = Vec::new(); while let Some((_, tasks)) = self.spawned_tasks.pop_first() { @@ -153,7 +156,10 @@ impl, V: Versions> ConsensusTaskSt // Check sender of VID disperse share is signed by DA committee member let validate_sender = sender.validate(&disperse.signature, payload_commitment.as_ref()) - && self.da_membership.committee_members(view).contains(sender); + && self + .da_membership + .committee_members(view, self.cur_epoch) + .contains(sender); // Check whether the data satisfies one of the following. // * From the right leader for this view. @@ -163,7 +169,7 @@ impl, V: Versions> ConsensusTaskSt .validate(&disperse.signature, payload_commitment.as_ref()) || self .quorum_membership - .leader(view) + .leader(view, self.cur_epoch) .validate(&disperse.signature, payload_commitment.as_ref()); if !validate_sender && !validated { return false; @@ -173,7 +179,7 @@ impl, V: Versions> ConsensusTaskSt // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results matches!( - vid_scheme(self.quorum_membership.total_nodes()).verify_share( + vid_scheme(self.quorum_membership.total_nodes(self.cur_epoch)).verify_share( &disperse.data.share, &disperse.data.common, &payload_commitment, @@ -186,7 +192,7 @@ impl, V: Versions> ConsensusTaskSt #[instrument(skip_all, target = "ConsensusTaskState", fields(id = self.id, view = *self.cur_view))] async fn publish_proposal( &mut self, - view: TYPES::Time, + view: TYPES::ViewTime, event_sender: Sender>>, event_receiver: Receiver>>, ) -> Result<()> { @@ -221,7 +227,7 @@ impl, V: 
Versions> ConsensusTaskSt #[instrument(skip_all, fields(id = self.id, view = *self.cur_view), target = "ConsensusTaskState")] async fn spawn_vote_task( &mut self, - view: TYPES::Time, + view: TYPES::ViewTime, event_sender: Sender>>, event_receiver: Receiver>>, ) { @@ -311,11 +317,18 @@ impl, V: Versions> ConsensusTaskSt } HotShotEvent::QuorumVoteRecv(ref vote) => { debug!("Received quorum vote: {:?}", vote.view_number()); - if self.quorum_membership.leader(vote.view_number() + 1) != self.public_key { + let current_epoch = self.consensus.read().await.cur_epoch(); + if self + .quorum_membership + .leader(vote.view_number() + 1, current_epoch) + != self.public_key + { error!( "We are not the leader for view {} are we the leader for view + 1? {}", *vote.view_number() + 1, - self.quorum_membership.leader(vote.view_number() + 2) == self.public_key + self.quorum_membership + .leader(vote.view_number() + 2, current_epoch) + == self.public_key ); return; } @@ -325,6 +338,7 @@ impl, V: Versions> ConsensusTaskSt vote, self.public_key.clone(), &self.quorum_membership, + self.cur_epoch, self.id, &event, &event_sender, @@ -333,11 +347,18 @@ impl, V: Versions> ConsensusTaskSt .await; } HotShotEvent::TimeoutVoteRecv(ref vote) => { - if self.timeout_membership.leader(vote.view_number() + 1) != self.public_key { + let current_epoch = self.consensus.read().await.cur_epoch(); + if self + .timeout_membership + .leader(vote.view_number() + 1, current_epoch) + != self.public_key + { error!( "We are not the leader for view {} are we the leader for view + 1? 
{}", *vote.view_number() + 1, - self.timeout_membership.leader(vote.view_number() + 2) == self.public_key + self.timeout_membership + .leader(vote.view_number() + 2, current_epoch) + == self.public_key ); return; } @@ -347,6 +368,7 @@ impl, V: Versions> ConsensusTaskSt vote, self.public_key.clone(), &self.quorum_membership, + self.cur_epoch, self.id, &event, &event_sender, @@ -491,6 +513,7 @@ impl, V: Versions> ConsensusTaskSt } } + let current_epoch = self.consensus.read().await.cur_epoch(); // update the view in state to the one in the message // Publish a view change event to the application // Returns if the view does not need updating. @@ -504,7 +527,9 @@ impl, V: Versions> ConsensusTaskSt &mut self.timeout_task, &self.output_event_stream, DONT_SEND_VIEW_CHANGE_EVENT, - self.quorum_membership.leader(old_view_number) == self.public_key, + self.quorum_membership + .leader(old_view_number, current_epoch) + == self.public_key, ) .await { @@ -518,7 +543,11 @@ impl, V: Versions> ConsensusTaskSt if self.cur_view >= view { return; } - if !self.timeout_membership.has_stake(&self.public_key) { + let current_epoch = self.consensus.read().await.cur_epoch(); + if !self + .timeout_membership + .has_stake(&self.public_key, current_epoch) + { debug!( "We were not chosen for consensus committee on {:?}", self.cur_view @@ -562,8 +591,9 @@ impl, V: Versions> ConsensusTaskSt ) .await; let consensus = self.consensus.read().await; + let current_epoch = consensus.cur_epoch(); consensus.metrics.number_of_timeouts.add(1); - if self.quorum_membership.leader(view) == self.public_key { + if self.quorum_membership.leader(view, current_epoch) == self.public_key { consensus.metrics.number_of_timeouts_as_leader.add(1); } } @@ -588,7 +618,8 @@ impl, V: Versions> ConsensusTaskSt block_view: view, auction_result: auction_result.clone(), }); - if self.quorum_membership.leader(view) == self.public_key + let current_epoch = self.consensus.read().await.cur_epoch(); + if 
self.quorum_membership.leader(view, current_epoch) == self.public_key && self.consensus.read().await.high_qc().view_number() + 1 == view { if let Err(e) = self @@ -605,9 +636,12 @@ impl, V: Versions> ConsensusTaskSt info!("Failed to propose off SendPayloadCommitmentAndMetadata because we had view change evidence, but it was not current."); return; } + let current_epoch = self.consensus.read().await.cur_epoch(); match cert { ViewChangeEvidence::Timeout(tc) => { - if self.quorum_membership.leader(tc.view_number() + 1) + if self + .quorum_membership + .leader(tc.view_number() + 1, current_epoch) == self.public_key { if let Err(e) = self @@ -619,7 +653,11 @@ impl, V: Versions> ConsensusTaskSt } } ViewChangeEvidence::ViewSync(vsc) => { - if self.quorum_membership.leader(vsc.view_number()) == self.public_key { + if self + .quorum_membership + .leader(vsc.view_number(), current_epoch) + == self.public_key + { if let Err(e) = self .publish_proposal(view, event_sender, event_receiver) .await @@ -632,8 +670,13 @@ impl, V: Versions> ConsensusTaskSt } } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + let current_epoch = self.consensus.read().await.cur_epoch(); if !certificate - .is_valid_cert(self.quorum_membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.quorum_membership.as_ref(), + current_epoch, + &self.upgrade_lock, + ) .await { error!( @@ -645,7 +688,7 @@ impl, V: Versions> ConsensusTaskSt let view = certificate.view_number; - if self.quorum_membership.leader(view) == self.public_key { + if self.quorum_membership.leader(view, current_epoch) == self.public_key { self.proposal_cert = Some(ViewChangeEvidence::ViewSync(certificate.clone())); debug!( @@ -666,9 +709,11 @@ impl, V: Versions> ConsensusTaskSt return; }; let new_view = proposal.view_number() + 1; + let current_epoch = self.consensus.read().await.cur_epoch(); // In future we can use the mempool model where we fetch the proposal if we don't have it, instead of having to wait for it 
here // This is for the case where we form a QC but have not yet seen the previous proposal ourselves - let should_propose = self.quorum_membership.leader(new_view) == self.public_key + let should_propose = self.quorum_membership.leader(new_view, current_epoch) + == self.public_key && self.consensus.read().await.high_qc().view_number == proposal.view_number(); if should_propose { diff --git a/crates/task-impls/src/consensus2/handlers.rs b/crates/task-impls/src/consensus2/handlers.rs index c766334471..ae0ac3d67f 100644 --- a/crates/task-impls/src/consensus2/handlers.rs +++ b/crates/task-impls/src/consensus2/handlers.rs @@ -42,7 +42,10 @@ pub(crate) async fn handle_quorum_vote_recv< ) -> Result<()> { // Are we the leader for this view? ensure!( - task_state.quorum_membership.leader(vote.view_number() + 1) == task_state.public_key, + task_state + .quorum_membership + .leader(vote.view_number() + 1, task_state.cur_epoch) + == task_state.public_key, format!( "We are not the leader for view {:?}", vote.view_number() + 1 @@ -54,6 +57,7 @@ pub(crate) async fn handle_quorum_vote_recv< vote, task_state.public_key.clone(), &task_state.quorum_membership, + task_state.cur_epoch, task_state.id, &event, sender, @@ -77,7 +81,10 @@ pub(crate) async fn handle_timeout_vote_recv< ) -> Result<()> { // Are we the leader for this view? 
ensure!( - task_state.timeout_membership.leader(vote.view_number() + 1) == task_state.public_key, + task_state + .timeout_membership + .leader(vote.view_number() + 1, task_state.cur_epoch) + == task_state.public_key, format!( "We are not the leader for view {:?}", vote.view_number() + 1 @@ -89,6 +96,7 @@ pub(crate) async fn handle_timeout_vote_recv< vote, task_state.public_key.clone(), &task_state.quorum_membership, + task_state.cur_epoch, task_state.id, &event, sender, @@ -106,7 +114,7 @@ pub(crate) async fn handle_view_change< I: NodeImplementation, V: Versions, >( - new_view_number: TYPES::Time, + new_view_number: TYPES::ViewTime, sender: &Sender>>, task_state: &mut Consensus2TaskState, ) -> Result<()> { @@ -147,7 +155,7 @@ pub(crate) async fn handle_view_change< async move { async_sleep(Duration::from_millis(timeout)).await; broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + Arc::new(HotShotEvent::Timeout(TYPES::ViewTime::new(*view_number))), &stream, ) .await; @@ -167,7 +175,11 @@ pub(crate) async fn handle_view_change< .current_view .set(usize::try_from(task_state.cur_view.u64()).unwrap()); let cur_view_time = Utc::now().timestamp(); - if task_state.quorum_membership.leader(old_view_number) == task_state.public_key { + if task_state + .quorum_membership + .leader(old_view_number, task_state.cur_epoch) + == task_state.public_key + { #[allow(clippy::cast_precision_loss)] consensus .metrics @@ -203,7 +215,7 @@ pub(crate) async fn handle_view_change< /// Handle a `Timeout` event. 
#[instrument(skip_all)] pub(crate) async fn handle_timeout, V: Versions>( - view_number: TYPES::Time, + view_number: TYPES::ViewTime, sender: &Sender>>, task_state: &mut Consensus2TaskState, ) -> Result<()> { @@ -215,7 +227,7 @@ pub(crate) async fn handle_timeout ensure!( task_state .timeout_membership - .has_stake(&task_state.public_key), + .has_stake(&task_state.public_key, task_state.cur_epoch), format!("We were not chosen for the consensus committee for view {view_number:?}") ); @@ -260,7 +272,11 @@ pub(crate) async fn handle_timeout .metrics .number_of_timeouts .add(1); - if task_state.quorum_membership.leader(view_number) == task_state.public_key { + if task_state + .quorum_membership + .leader(view_number, task_state.cur_epoch) + == task_state.public_key + { task_state .consensus .read() diff --git a/crates/task-impls/src/consensus2/mod.rs b/crates/task-impls/src/consensus2/mod.rs index 7b139842f9..ee69f17734 100644 --- a/crates/task-impls/src/consensus2/mod.rs +++ b/crates/task-impls/src/consensus2/mod.rs @@ -70,11 +70,14 @@ pub struct Consensus2TaskState, V: pub storage: Arc>, /// The view number that this node is currently executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::ViewTime, /// Timestamp this view starts at. pub cur_view_time: i64, + /// The epoch number that this node is currently executing in. 
+ pub cur_epoch: TYPES::EpochTime, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -88,7 +91,7 @@ pub struct Consensus2TaskState, V: pub consensus: OuterConsensus, /// The last decided view - pub last_decided_view: TYPES::Time, + pub last_decided_view: TYPES::ViewTime, /// The node's id pub id: u64, diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 19452752d6..7f886997ac 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -49,7 +49,10 @@ pub struct DaTaskState, V: Version pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::ViewTime, + + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::EpochTime, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, @@ -108,7 +111,7 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState TaskEvent for HotShotEvent { #[derive(Debug, Clone)] pub struct ProposalMissing { /// View of missing proposal - pub view: TYPES::Time, + pub view: TYPES::ViewTime, /// Channel to send the response back to pub response_chan: Sender>>>, } @@ -93,7 +93,7 @@ pub enum HotShotEvent { /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), /// All dependencies for the quorum vote are validated. - QuorumVoteDependenciesValidated(TYPES::Time), + QuorumVoteDependenciesValidated(TYPES::ViewTime), /// A quorum proposal with the given parent leaf is validated. /// The full validation checks include: /// 1. 
The proposal is not for an old view @@ -124,9 +124,9 @@ pub enum HotShotEvent { /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks - ViewChange(TYPES::Time), + ViewChange(TYPES::ViewTime), /// Timeout for the view sync protocol; emitted by a replica in the view sync task - ViewSyncTimeout(TYPES::Time, u64, ViewSyncPhase), + ViewSyncTimeout(TYPES::ViewTime, u64, ViewSyncPhase), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote), @@ -157,9 +157,9 @@ pub enum HotShotEvent { ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2, TYPES::SignatureKey), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only - ViewSyncTrigger(TYPES::Time), + ViewSyncTrigger(TYPES::ViewTime), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only - Timeout(TYPES::Time), + Timeout(TYPES::ViewTime), /// Receive transactions from the network TransactionsRecv(Vec), /// Send transactions to the network @@ -169,14 +169,14 @@ pub enum HotShotEvent { VidCommitment, BuilderCommitment, >::Metadata, - TYPES::Time, + TYPES::ViewTime, Vec1>, Option, ), /// Event when the transactions task has sequenced transactions. 
Contains the encoded transactions, the metadata, and the view number BlockRecv(PackedBundle), /// Event when the transactions task has a block formed - BlockReady(VidDisperse, TYPES::Time), + BlockReady(VidDisperse, TYPES::ViewTime), /// Event when consensus decided on a leaf LeafDecided(Vec>), /// Send VID shares to VID storage nodes; emitted by the DA leader @@ -205,13 +205,13 @@ pub enum HotShotEvent { /* Consensus State Update Events */ /// A undecided view has been created and added to the validated state storage. - ValidatedStateUpdated(TYPES::Time, View), + ValidatedStateUpdated(TYPES::ViewTime, View), /// A new locked view has been created (2-chain) - LockedViewUpdated(TYPES::Time), + LockedViewUpdated(TYPES::ViewTime), /// A new anchor view has been successfully reached by this node (3-chain). - LastDecidedViewUpdated(TYPES::Time), + LastDecidedViewUpdated(TYPES::ViewTime), /// A new high_qc has been reached by this node. UpdateHighQc(QuorumCertificate), @@ -260,7 +260,7 @@ pub enum HotShotEvent { impl HotShotEvent { #[allow(clippy::too_many_lines)] /// Return the view number for a hotshot event if present - pub fn view_number(&self) -> Option { + pub fn view_number(&self) -> Option { match self { HotShotEvent::QuorumVoteRecv(v) => Some(v.view_number()), HotShotEvent::TimeoutVoteRecv(v) | HotShotEvent::TimeoutVoteSend(v) => { @@ -512,7 +512,7 @@ impl Display for HotShotEvent { write!(f, "BlockReady(view_number={view_number:?})") } HotShotEvent::LeafDecided(leaves) => { - let view_numbers: Vec<::Time> = + let view_numbers: Vec<::ViewTime> = leaves.iter().map(Leaf::view_number).collect(); write!(f, "LeafDecided({view_numbers:?})") } diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index 1c266029c9..671f5f7337 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -47,7 +47,7 @@ use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; #[instrument(skip_all)] 
#[allow(clippy::too_many_arguments)] pub(crate) async fn fetch_proposal( - view_number: TYPES::Time, + view_number: TYPES::ViewTime, event_sender: Sender>>, event_receiver: Receiver>>, quorum_membership: Arc, @@ -77,6 +77,7 @@ pub(crate) async fn fetch_proposal( .await; let mem = Arc::clone(&quorum_membership); + let current_epoch = consensus.read().await.cur_epoch(); // Make a background task to await the arrival of the event data. let Ok(Some(proposal)) = // We want to explicitly timeout here so we aren't waiting around for the data. @@ -108,7 +109,7 @@ pub(crate) async fn fetch_proposal( hs_event.as_ref() { // Make sure that the quorum_proposal is valid - if quorum_proposal.validate_signature(&mem, upgrade_lock).await.is_ok() { + if quorum_proposal.validate_signature(&mem, current_epoch, upgrade_lock).await.is_ok() { proposal = Some(quorum_proposal.clone()); } @@ -127,7 +128,7 @@ pub(crate) async fn fetch_proposal( let justify_qc = proposal.data.justify_qc.clone(); if !justify_qc - .is_valid_cert(quorum_membership.as_ref(), upgrade_lock) + .is_valid_cert(quorum_membership.as_ref(), current_epoch, upgrade_lock) .await { bail!("Invalid justify_qc in proposal for view {}", *view_number); @@ -164,10 +165,10 @@ pub(crate) async fn fetch_proposal( #[derive(Debug)] pub struct LeafChainTraversalOutcome { /// The new locked view obtained from a 2 chain starting from the proposal's parent. - pub new_locked_view_number: Option, + pub new_locked_view_number: Option, /// The new decided view obtained from a 3 chain starting from the proposal's parent. - pub new_decided_view_number: Option, + pub new_decided_view_number: Option, /// The qc for the decided chain. 
pub new_decide_qc: Option>, @@ -352,7 +353,7 @@ pub async fn decide_from_proposal( #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( - next_proposal_view_number: TYPES::Time, + next_proposal_view_number: TYPES::ViewTime, event_sender: &Sender>>, event_receiver: &Receiver>>, quorum_membership: Arc, @@ -361,8 +362,9 @@ pub(crate) async fn parent_leaf_and_state( consensus: OuterConsensus, upgrade_lock: &UpgradeLock, ) -> Result<(Leaf, Arc<::ValidatedState>)> { + let current_epoch = consensus.read().await.cur_epoch(); ensure!( - quorum_membership.leader(next_proposal_view_number) == public_key, + quorum_membership.leader(next_proposal_view_number, current_epoch) == public_key, "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", ); let parent_view_number = consensus.read().await.high_qc().view_number(); @@ -507,9 +509,11 @@ pub async fn validate_proposal_safety_and_liveness< .await; } + let current_epoch = consensus.read().await.cur_epoch(); UpgradeCertificate::validate( &proposal.data.upgrade_certificate, &quorum_membership, + current_epoch, &upgrade_lock, ) .await?; @@ -593,7 +597,8 @@ pub async fn validate_proposal_safety_and_liveness< /// If any validation or view number check fails. pub async fn validate_proposal_view_and_certs( proposal: &Proposal>, - cur_view: TYPES::Time, + cur_view: TYPES::ViewTime, + cur_epoch: TYPES::EpochTime, quorum_membership: &Arc, timeout_membership: &Arc, upgrade_lock: &UpgradeLock, @@ -607,7 +612,7 @@ pub async fn validate_proposal_view_and_certs( // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment proposal - .validate_signature(quorum_membership, upgrade_lock) + .validate_signature(quorum_membership, cur_epoch, upgrade_lock) .await?; // Verify a timeout certificate OR a view sync certificate exists and is valid. 
@@ -627,7 +632,7 @@ pub async fn validate_proposal_view_and_certs( ); ensure!( timeout_cert - .is_valid_cert(timeout_membership.as_ref(), upgrade_lock) + .is_valid_cert(timeout_membership.as_ref(), cur_epoch, upgrade_lock) .await, "Timeout certificate for view {} was invalid", *view @@ -644,7 +649,7 @@ pub async fn validate_proposal_view_and_certs( // View sync certs must also be valid. ensure!( view_sync_cert - .is_valid_cert(quorum_membership.as_ref(), upgrade_lock) + .is_valid_cert(quorum_membership.as_ref(), cur_epoch, upgrade_lock) .await, "Invalid view sync finalize cert provided" ); @@ -657,6 +662,7 @@ pub async fn validate_proposal_view_and_certs( UpgradeCertificate::validate( &proposal.data.upgrade_certificate, quorum_membership, + cur_epoch, upgrade_lock, ) .await?; @@ -678,11 +684,11 @@ pub const DONT_SEND_VIEW_CHANGE_EVENT: bool = false; /// TODO: Remove args when we merge dependency tasks. #[allow(clippy::too_many_arguments)] pub(crate) async fn update_view( - new_view: TYPES::Time, + new_view: TYPES::ViewTime, event_stream: &Sender>>, timeout: u64, consensus: OuterConsensus, - cur_view: &mut TYPES::Time, + cur_view: &mut TYPES::ViewTime, cur_view_time: &mut i64, timeout_task: &mut JoinHandle<()>, output_event_stream: &Sender>, @@ -732,7 +738,7 @@ pub(crate) async fn update_view( async move { async_sleep(timeout).await; broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + Arc::new(HotShotEvent::Timeout(TYPES::ViewTime::new(*view_number))), &stream, ) .await; diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index dcd7e85945..5d513dcec4 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -163,7 +163,7 @@ impl NetworkMessageTaskState { // Send the external message to the external event stream so it can be processed broadcast_event( Event { - view_number: TYPES::Time::new(1), + view_number: TYPES::ViewTime::new(1), event: 
EventType::ExternalMessageReceived(data), }, &self.external_event_stream, @@ -184,7 +184,9 @@ pub struct NetworkEventTaskState< /// comm network pub network: Arc, /// view number - pub view: TYPES::Time, + pub view: TYPES::ViewTime, + /// epoch number + pub epoch: TYPES::EpochTime, /// quorum for the network pub quorum_membership: TYPES::Membership, /// da for the network @@ -299,7 +301,7 @@ impl< maybe_action: Option, storage: Arc>, state: Arc>>, - view: ::Time, + view: ::ViewTime, ) -> Result<(), ()> { if let Some(action) = maybe_action { if !state.write().await.update_action(action, view) { @@ -352,7 +354,10 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number() + 1)), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number() + 1, self.epoch), + ), )) } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( @@ -361,7 +366,7 @@ impl< GeneralConsensusMessage::ProposalRequested(req.clone(), signature), )), TransmitType::DaCommitteeAndLeaderBroadcast( - self.quorum_membership.leader(req.view_number), + self.quorum_membership.leader(req.view_number, self.epoch), ), )), HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( @@ -392,7 +397,10 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaVote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number())), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number(), self.epoch), + ), )) } HotShotEvent::DacSend(certificate, sender) => { @@ -412,7 +420,7 @@ impl< )), TransmitType::Direct( self.quorum_membership - .leader(vote.view_number() + vote.date().relay), + .leader(vote.view_number() + vote.date().relay, self.epoch), ), )), HotShotEvent::ViewSyncCommitVoteSend(vote) => Some(( @@ -422,7 +430,7 @@ impl< )), TransmitType::Direct( 
self.quorum_membership - .leader(vote.view_number() + vote.date().relay), + .leader(vote.view_number() + vote.date().relay, self.epoch), ), )), HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(( @@ -432,7 +440,7 @@ impl< )), TransmitType::Direct( self.quorum_membership - .leader(vote.view_number() + vote.date().relay), + .leader(vote.view_number() + vote.date().relay, self.epoch), ), )), HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( @@ -463,7 +471,10 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::TimeoutVote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number() + 1)), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number() + 1, self.epoch), + ), )) } HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( @@ -480,13 +491,20 @@ impl< MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::UpgradeVote(vote.clone()), )), - TransmitType::Direct(self.quorum_membership.leader(vote.view_number())), + TransmitType::Direct( + self.quorum_membership + .leader(vote.view_number(), self.epoch), + ), )) } HotShotEvent::ViewChange(view) => { self.view = view; self.network - .update_view::(self.view.u64(), &self.quorum_membership) + .update_view::( + self.view.u64(), + self.epoch.u64(), + &self.quorum_membership, + ) .await; None } @@ -530,7 +548,7 @@ impl< }; let view = message.kind.view_number(); let committee_topic = self.quorum_membership.committee_topic(); - let da_committee = self.da_membership.committee_members(view); + let da_committee = self.da_membership.committee_members(view, self.epoch); let net = Arc::clone(&self.network); let storage = Arc::clone(&self.storage); let state = Arc::clone(&self.consensus); diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs index e53a618320..862481434b 100644 --- 
a/crates/task-impls/src/quorum_proposal/handlers.rs +++ b/crates/task-impls/src/quorum_proposal/handlers.rs @@ -60,10 +60,10 @@ pub(crate) enum ProposalDependency { /// Handler for the proposal dependency pub struct ProposalDependencyHandle { /// Latest view number that has been proposed for (proxy for cur_view). - pub latest_proposed_view: TYPES::Time, + pub latest_proposed_view: TYPES::ViewTime, /// The view number to propose for. - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// The event sender. pub sender: Sender>>, diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index 0d845f2070..45f761d322 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -46,10 +46,10 @@ mod handlers; /// The state for the quorum proposal task. pub struct QuorumProposalTaskState, V: Versions> { /// Latest view number that has been proposed for. - pub latest_proposed_view: TYPES::Time, + pub latest_proposed_view: TYPES::ViewTime, /// Table for the in-progress proposal dependency tasks. - pub proposal_dependencies: HashMap>, + pub proposal_dependencies: HashMap>, /// The underlying network pub network: Arc, @@ -107,7 +107,7 @@ impl, V: Versions> fn create_event_dependency( &self, dependency_type: ProposalDependency, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, event_receiver: Receiver>>, ) -> EventDependency>> { EventDependency::new( @@ -181,7 +181,7 @@ impl, V: Versions> /// Creates the requisite dependencies for the Quorum Proposal task. It also handles any event forwarding. 
fn create_and_complete_dependencies( &self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, event_receiver: &Receiver>>, event: Arc>, ) -> AndDependency>>>> { @@ -283,13 +283,14 @@ impl, V: Versions> #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Create dependency task", level = "error")] fn create_dependency_task_if_new( &mut self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, + epoch_number: TYPES::EpochTime, event_receiver: Receiver>>, event_sender: Sender>>, event: Arc>, ) { // Don't even bother making the task if we are not entitled to propose anyway. - if self.quorum_membership.leader(view_number) != self.public_key { + if self.quorum_membership.leader(view_number, epoch_number) != self.public_key { tracing::trace!("We are not the leader of the next view"); return; } @@ -333,7 +334,7 @@ impl, V: Versions> /// Update the latest proposed view number. #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Update latest proposed view", level = "error")] - async fn update_latest_proposed_view(&mut self, new_view: TYPES::Time) -> bool { + async fn update_latest_proposed_view(&mut self, new_view: TYPES::ViewTime) -> bool { if *self.latest_proposed_view < *new_view { debug!( "Updating latest proposed view from {} to {}", @@ -342,7 +343,9 @@ impl, V: Versions> // Cancel the old dependency tasks. 
for view in (*self.latest_proposed_view + 1)..=(*new_view) { - if let Some(dependency) = self.proposal_dependencies.remove(&TYPES::Time::new(view)) + if let Some(dependency) = self + .proposal_dependencies + .remove(&TYPES::ViewTime::new(view)) { cancel_task(dependency).await; } @@ -380,9 +383,11 @@ impl, V: Versions> HotShotEvent::QcFormed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), @@ -411,9 +416,11 @@ impl, V: Versions> _auction_result, ) => { let view_number = *view_number; + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), @@ -421,7 +428,11 @@ impl, V: Versions> } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { if !certificate - .is_valid_cert(self.quorum_membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.quorum_membership.as_ref(), + self.consensus.read().await.cur_epoch(), + &self.upgrade_lock, + ) .await { warn!( @@ -432,9 +443,11 @@ impl, V: Versions> } let view_number = certificate.view_number; + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, event, @@ -447,9 +460,11 @@ impl, V: Versions> if !self.update_latest_proposed_view(view_number).await { tracing::trace!("Failed to update latest proposed view"); } + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number + 1, + epoch_number, event_receiver, event_sender, Arc::clone(&event), @@ -464,9 +479,11 @@ impl, V: Versions> } HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); + let epoch_number = 
self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), @@ -490,8 +507,10 @@ impl, V: Versions> } HotShotEvent::HighQcUpdated(qc) => { let view_number = qc.view_number() + 1; + let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, + epoch_number, event_receiver, event_sender, Arc::clone(&event), diff --git a/crates/task-impls/src/quorum_proposal_recv/handlers.rs b/crates/task-impls/src/quorum_proposal_recv/handlers.rs index 7145c3bde8..50047bf583 100644 --- a/crates/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/crates/task-impls/src/quorum_proposal_recv/handlers.rs @@ -104,7 +104,10 @@ async fn validate_proposal_liveness, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::ViewTime, /// Timestamp this view starts at. pub cur_view_time: i64, + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::EpochTime, + /// The underlying network pub network: Arc, @@ -89,7 +92,7 @@ pub struct QuorumProposalRecvTaskState>>, + pub spawned_tasks: BTreeMap>>, /// Immutable instance state pub instance_state: Arc, @@ -105,7 +108,7 @@ impl, V: Versions> QuorumProposalRecvTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::Time) { + pub async fn cancel_tasks(&mut self, view: TYPES::ViewTime) { let keep = self.spawned_tasks.split_off(&view); let mut cancel = Vec::new(); while let Some((_, tasks)) = self.spawned_tasks.pop_first() { diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index e7dd9c1699..a9c00536f5 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -75,7 +75,9 @@ pub struct VoteDependencyHandle, V /// Reference to the storage. pub storage: Arc>, /// View number to vote on. 
- pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, + /// Epoch number to vote on. + pub epoch_number: TYPES::EpochTime, /// Event sender. pub sender: Sender>>, /// Event receiver. @@ -200,7 +202,8 @@ impl + 'static, V: Versions> vid_share: Proposal>, ) -> Result<()> { ensure!( - self.quorum_membership.has_stake(&self.public_key), + self.quorum_membership + .has_stake(&self.public_key, self.epoch_number), format!( "We were not chosen for quorum committee on {:?}", self.view_number @@ -373,10 +376,10 @@ pub struct QuorumVoteTaskState, V: pub instance_state: Arc, /// Latest view number that has been voted for. - pub latest_voted_view: TYPES::Time, + pub latest_voted_view: TYPES::ViewTime, /// Table for the in-progress dependency tasks. - pub vote_dependencies: HashMap>, + pub vote_dependencies: HashMap>, /// The underlying network pub network: Arc, @@ -406,7 +409,7 @@ impl, V: Versions> QuorumVoteTaskS fn create_event_dependency( &self, dependency_type: VoteDependency, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, event_receiver: Receiver>>, ) -> EventDependency>> { EventDependency::new( @@ -450,7 +453,8 @@ impl, V: Versions> QuorumVoteTaskS #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote crete dependency task if new", level = "error")] fn create_dependency_task_if_new( &mut self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, + epoch_number: TYPES::EpochTime, event_receiver: Receiver>>, event_sender: &Sender>>, event: Option>>, @@ -493,6 +497,7 @@ impl, V: Versions> QuorumVoteTaskS quorum_membership: Arc::clone(&self.quorum_membership), storage: Arc::clone(&self.storage), view_number, + epoch_number, sender: event_sender.clone(), receiver: event_receiver.clone(), upgrade_lock: self.upgrade_lock.clone(), @@ -505,7 +510,7 @@ impl, V: Versions> QuorumVoteTaskS /// Update the latest voted view number. 
#[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote update latest voted view", level = "error")] - async fn update_latest_voted_view(&mut self, new_view: TYPES::Time) -> bool { + async fn update_latest_voted_view(&mut self, new_view: TYPES::ViewTime) -> bool { if *self.latest_voted_view < *new_view { debug!( "Updating next vote view from {} to {} in the quorum vote task", @@ -514,7 +519,8 @@ impl, V: Versions> QuorumVoteTaskS // Cancel the old dependency tasks. for view in *self.latest_voted_view..(*new_view) { - if let Some(dependency) = self.vote_dependencies.remove(&TYPES::Time::new(view)) { + if let Some(dependency) = self.vote_dependencies.remove(&TYPES::ViewTime::new(view)) + { cancel_task(dependency).await; debug!("Vote dependency removed for view {:?}", view); } @@ -535,6 +541,7 @@ impl, V: Versions> QuorumVoteTaskS event_receiver: Receiver>>, event_sender: Sender>>, ) { + let current_epoch = self.consensus.read().await.cur_epoch(); match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { trace!("Received Proposal for view {}", *proposal.view_number()); @@ -548,6 +555,7 @@ impl, V: Versions> QuorumVoteTaskS self.create_dependency_task_if_new( proposal.view_number, + current_epoch, event_receiver, &event_sender, Some(Arc::clone(&event)), @@ -560,9 +568,14 @@ impl, V: Versions> QuorumVoteTaskS return; } + let current_epoch = self.consensus.read().await.cur_epoch(); // Validate the DAC. 
if !cert - .is_valid_cert(self.da_membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.da_membership.as_ref(), + current_epoch, + &self.upgrade_lock, + ) .await { return; @@ -579,7 +592,13 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); + self.create_dependency_task_if_new( + view, + current_epoch, + event_receiver, + &event_sender, + None, + ); } HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); @@ -590,10 +609,14 @@ impl, V: Versions> QuorumVoteTaskS // Validate the VID share. let payload_commitment = disperse.data.payload_commitment; + let current_epoch = self.consensus.read().await.cur_epoch(); // Check sender of VID disperse share is signed by DA committee member let validate_sender = sender .validate(&disperse.signature, payload_commitment.as_ref()) - && self.da_membership.committee_members(view).contains(sender); + && self + .da_membership + .committee_members(view, current_epoch) + .contains(sender); // Check whether the data satisfies one of the following. // * From the right leader for this view. 
@@ -603,7 +626,7 @@ impl, V: Versions> QuorumVoteTaskS .validate(&disperse.signature, payload_commitment.as_ref()) || self .quorum_membership - .leader(view) + .leader(view, current_epoch) .validate(&disperse.signature, payload_commitment.as_ref()); if !validate_sender && !validated { warn!("Failed to validated the VID dispersal/share sig."); @@ -613,7 +636,7 @@ impl, V: Versions> QuorumVoteTaskS // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results #[allow(clippy::no_effect)] - match vid_scheme(self.quorum_membership.total_nodes()).verify_share( + match vid_scheme(self.quorum_membership.total_nodes(current_epoch)).verify_share( &disperse.data.share, &disperse.data.common, &payload_commitment, @@ -641,7 +664,13 @@ impl, V: Versions> QuorumVoteTaskS &event_sender.clone(), ) .await; - self.create_dependency_task_if_new(view, event_receiver, &event_sender, None); + self.create_dependency_task_if_new( + view, + current_epoch, + event_receiver, + &event_sender, + None, + ); } HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { debug!("All vote dependencies verified for view {:?}", view_number); diff --git a/crates/task-impls/src/request.rs b/crates/task-impls/src/request.rs index 4ea5966fec..fe0f6c652a 100644 --- a/crates/task-impls/src/request.rs +++ b/crates/task-impls/src/request.rs @@ -56,7 +56,7 @@ pub struct NetworkRequestState> { /// before sending a request pub state: OuterConsensus, /// Last seen view, we won't request for proposals before older than this view - pub view: TYPES::Time, + pub view: TYPES::ViewTime, /// Delay before requesting peers pub delay: Duration, /// DA Membership @@ -70,7 +70,7 @@ pub struct NetworkRequestState> { /// A flag indicating that `HotShotEvent::Shutdown` has been received pub shutdown_flag: Arc, /// A flag indicating that `HotShotEvent::Shutdown` has been received - pub spawned_tasks: BTreeMap>>, + pub spawned_tasks: BTreeMap>>, } impl> Drop for NetworkRequestState { 
@@ -107,7 +107,8 @@ impl> TaskState for NetworkRequest .vid_shares() .contains_key(&prop_view) { - self.spawn_requests(prop_view, sender, receiver); + let current_epoch = self.state.read().await.cur_epoch(); + self.spawn_requests(prop_view, current_epoch, sender, receiver); } Ok(()) } @@ -144,7 +145,8 @@ impl> NetworkRequestState>>, receiver: &Receiver>>, ) { @@ -158,6 +160,7 @@ impl> NetworkRequestState> NetworkRequestState, sender: Sender>>, receiver: Receiver>>, - view: TYPES::Time, + view: TYPES::ViewTime, + epoch: TYPES::EpochTime, ) { let state = OuterConsensus::new(Arc::clone(&self.state.inner_consensus)); let network = Arc::clone(&self.network); let shutdown_flag = Arc::clone(&self.shutdown_flag); let delay = self.delay; - let da_committee_for_view = self.da_membership.committee_members(view); + let da_committee_for_view = self.da_membership.committee_members(view, epoch); let public_key = self.public_key.clone(); // Get committee members for view let mut recipients: Vec = self .da_membership - .committee_members(view) + .committee_members(view, epoch) .into_iter() .collect(); // Randomize the recipients so all replicas don't overload the same 1 recipients @@ -256,7 +260,7 @@ impl> NetworkRequestState::SignatureKey>, public_key: &::SignatureKey, - view: TYPES::Time, + view: TYPES::ViewTime, ) -> bool { // First send request to a random DA member for the view broadcast_event( @@ -299,7 +303,7 @@ impl> NetworkRequestState>>, da_members_for_view: BTreeSet<::SignatureKey>, - view: TYPES::Time, + view: TYPES::ViewTime, ) -> Option>> { EventDependency::new( receiver.clone(), @@ -326,7 +330,7 @@ impl> NetworkRequestState, sender: &Sender>>, public_key: &::SignatureKey, - view: &TYPES::Time, + view: &TYPES::ViewTime, shutdown_flag: &Arc, ) -> bool { let state = state.read().await; diff --git a/crates/task-impls/src/response.rs b/crates/task-impls/src/response.rs index e8e8483b08..bf1dd30587 100644 --- a/crates/task-impls/src/response.rs +++ 
b/crates/task-impls/src/response.rs @@ -76,7 +76,7 @@ impl NetworkResponseState { match event.as_ref() { HotShotEvent::VidRequestRecv(request, sender) => { // Verify request is valid - if !self.valid_sender(sender) + if !self.valid_sender(sender, self.consensus.read().await.cur_epoch()) || !valid_signature::(request, sender) { continue; @@ -140,7 +140,7 @@ impl NetworkResponseState { #[instrument(skip_all, target = "NetworkResponseState", fields(id = self.id))] async fn get_or_calc_vid_share( &self, - view: TYPES::Time, + view: TYPES::ViewTime, key: &TYPES::SignatureKey, ) -> Option>> { let contained = self @@ -156,6 +156,7 @@ impl NetworkResponseState { view, Arc::clone(&self.quorum), &self.private_key, + self.consensus.read().await.cur_epoch(), ) .await .is_none() @@ -167,6 +168,7 @@ impl NetworkResponseState { view, Arc::clone(&self.quorum), &self.private_key, + self.consensus.read().await.cur_epoch(), ) .await?; } @@ -188,9 +190,9 @@ impl NetworkResponseState { .cloned() } - /// Makes sure the sender is allowed to send a request. - fn valid_sender(&self, sender: &TYPES::SignatureKey) -> bool { - self.quorum.has_stake(sender) + /// Makes sure the sender is allowed to send a request in the given epoch. + fn valid_sender(&self, sender: &TYPES::SignatureKey, epoch: TYPES::EpochTime) -> bool { + self.quorum.has_stake(sender, epoch) } } diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index fc10b549ee..ed64100c70 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -82,7 +82,10 @@ pub struct TransactionTaskState, V pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::ViewTime, + + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::EpochTime, /// Reference to consensus. Leader will require a read lock on this. 
pub consensus: OuterConsensus, @@ -117,7 +120,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change( &mut self, event_stream: &Sender>>, - block_view: TYPES::Time, + block_view: TYPES::ViewTime, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -141,7 +144,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change_legacy( &mut self, event_stream: &Sender>>, - block_view: TYPES::Time, + block_view: TYPES::ViewTime, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -201,10 +204,11 @@ impl, V: Versions> TransactionTask .number_of_empty_blocks_proposed .add(1); - let membership_total_nodes = self.membership.total_nodes(); - let Some(null_fee) = - null_block::builder_fee::(self.membership.total_nodes(), version) - else { + let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); + let Some(null_fee) = null_block::builder_fee::( + self.membership.total_nodes(self.cur_epoch), + version, + ) else { error!("Failed to get null fee"); return None; }; @@ -239,7 +243,7 @@ impl, V: Versions> TransactionTask /// Returns an error if the solver cannot be contacted, or if none of the builders respond. 
async fn produce_block_marketplace( &mut self, - block_view: TYPES::Time, + block_view: TYPES::ViewTime, task_start_time: Instant, ) -> Result> { ensure!( @@ -336,13 +340,14 @@ impl, V: Versions> TransactionTask /// Produce a null block pub fn null_block( &self, - block_view: TYPES::Time, + block_view: TYPES::ViewTime, version: Version, ) -> Option> { - let membership_total_nodes = self.membership.total_nodes(); - let Some(null_fee) = - null_block::builder_fee::(self.membership.total_nodes(), version) - else { + let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); + let Some(null_fee) = null_block::builder_fee::( + self.membership.total_nodes(self.cur_epoch), + version, + ) else { error!("Failed to calculate null block fee."); return None; }; @@ -367,7 +372,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change_marketplace( &mut self, event_stream: &Sender>>, - block_view: TYPES::Time, + block_view: TYPES::ViewTime, ) -> Option { let task_start_time = Instant::now(); @@ -446,12 +451,13 @@ impl, V: Versions> TransactionTask let mut make_block = false; if *view - *self.cur_view > 1 { error!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.leader(view) == self.public_key; + make_block = self.membership.leader(view, self.cur_epoch) == self.public_key; } self.cur_view = view; let next_view = self.cur_view + 1; - let next_leader = self.membership.leader(next_view) == self.public_key; + let next_leader = + self.membership.leader(next_view, self.cur_epoch) == self.public_key; if !make_block && !next_leader { debug!("Not next leader for view {:?}", self.cur_view); return None; @@ -478,9 +484,9 @@ impl, V: Versions> TransactionTask #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] async fn last_vid_commitment_retry( &self, - block_view: TYPES::Time, + block_view: TYPES::ViewTime, task_start_time: Instant, - ) -> 
Result<(TYPES::Time, VidCommitment)> { + ) -> Result<(TYPES::ViewTime, VidCommitment)> { loop { match self.last_vid_commitment(block_view).await { Ok((view, comm)) => break Ok((view, comm)), @@ -499,10 +505,10 @@ impl, V: Versions> TransactionTask #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] async fn last_vid_commitment( &self, - block_view: TYPES::Time, - ) -> Result<(TYPES::Time, VidCommitment)> { + block_view: TYPES::ViewTime, + ) -> Result<(TYPES::ViewTime, VidCommitment)> { let consensus = self.consensus.read().await; - let mut target_view = TYPES::Time::new(block_view.saturating_sub(1)); + let mut target_view = TYPES::ViewTime::new(block_view.saturating_sub(1)); loop { let view_data = consensus @@ -524,8 +530,9 @@ impl, V: Versions> TransactionTask } ViewInner::Failed => { // For failed views, backtrack - target_view = - TYPES::Time::new(target_view.checked_sub(1).context("Reached genesis")?); + target_view = TYPES::ViewTime::new( + target_view.checked_sub(1).context("Reached genesis")?, + ); continue; } } @@ -533,7 +540,7 @@ impl, V: Versions> TransactionTask } #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view), name = "wait_for_block", level = "error")] - async fn wait_for_block(&self, block_view: TYPES::Time) -> Option> { + async fn wait_for_block(&self, block_view: TYPES::ViewTime) -> Option> { let task_start_time = Instant::now(); // Find commitment to the block we want to build upon @@ -597,7 +604,7 @@ impl, V: Versions> TransactionTask async fn get_available_blocks( &self, parent_comm: VidCommitment, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Vec<(AvailableBlockInfo, usize)> { let tasks = self @@ -666,7 +673,7 @@ impl, V: Versions> TransactionTask async fn block_from_builder( &self, parent_comm: VidCommitment, - 
view_number: TYPES::Time, + view_number: TYPES::ViewTime, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> anyhow::Result> { let mut available_blocks = self diff --git a/crates/task-impls/src/upgrade.rs b/crates/task-impls/src/upgrade.rs index d050ef4e54..1c1ffd8db6 100644 --- a/crates/task-impls/src/upgrade.rs +++ b/crates/task-impls/src/upgrade.rs @@ -43,7 +43,10 @@ pub struct UpgradeTaskState, V: Ve pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::ViewTime, + + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::EpochTime, /// Membership for Quorum Certs/votes pub quorum_membership: Arc, @@ -166,7 +169,7 @@ impl, V: Versions> UpgradeTaskStat // the `UpgradeProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block - if self.cur_view != TYPES::Time::genesis() && view < self.cur_view - 1 { + if self.cur_view != TYPES::ViewTime::genesis() && view < self.cur_view - 1 { warn!("Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", view, self.cur_view @@ -175,7 +178,7 @@ impl, V: Versions> UpgradeTaskStat } // We then validate that the proposal was issued by the leader for the view. - let view_leader_key = self.quorum_membership.leader(view); + let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch); if &view_leader_key != sender { error!("Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone()); return None; @@ -219,11 +222,12 @@ impl, V: Versions> UpgradeTaskStat // Check if we are the leader. 
{ let view = vote.view_number(); - if self.quorum_membership.leader(view) != self.public_key { + if self.quorum_membership.leader(view, self.cur_epoch) != self.public_key { error!( "We are not the leader for view {} are we leader for next view? {}", *view, - self.quorum_membership.leader(view + 1) == self.public_key + self.quorum_membership.leader(view + 1, self.cur_epoch) + == self.public_key ); return None; } @@ -234,6 +238,7 @@ impl, V: Versions> UpgradeTaskStat vote, self.public_key.clone(), &self.quorum_membership, + self.cur_epoch, self.id, &event, &tx, @@ -260,23 +265,23 @@ impl, V: Versions> UpgradeTaskStat && time >= self.start_proposing_time && time < self.stop_proposing_time && !self.upgraded().await - && self - .quorum_membership - .leader(TYPES::Time::new(view + UPGRADE_PROPOSE_OFFSET)) - == self.public_key + && self.quorum_membership.leader( + TYPES::ViewTime::new(view + UPGRADE_PROPOSE_OFFSET), + self.cur_epoch, + ) == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: V::Base::VERSION, new_version: V::Upgrade::VERSION, new_version_hash: V::UPGRADE_HASH.to_vec(), - old_version_last_view: TYPES::Time::new(view + UPGRADE_BEGIN_OFFSET), - new_version_first_view: TYPES::Time::new(view + UPGRADE_FINISH_OFFSET), - decide_by: TYPES::Time::new(view + UPGRADE_DECIDE_BY_OFFSET), + old_version_last_view: TYPES::ViewTime::new(view + UPGRADE_BEGIN_OFFSET), + new_version_first_view: TYPES::ViewTime::new(view + UPGRADE_FINISH_OFFSET), + decide_by: TYPES::ViewTime::new(view + UPGRADE_DECIDE_BY_OFFSET), }; let upgrade_proposal = UpgradeProposal { upgrade_proposal: upgrade_proposal_data.clone(), - view_number: TYPES::Time::new(view + UPGRADE_PROPOSE_OFFSET), + view_number: TYPES::ViewTime::new(view + UPGRADE_PROPOSE_OFFSET), }; let signature = TYPES::SignatureKey::sign( diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 3243f356ae..09cd2386dc 100644 --- a/crates/task-impls/src/vid.rs +++ 
b/crates/task-impls/src/vid.rs @@ -30,7 +30,9 @@ use crate::{ /// Tracks state of a VID task pub struct VidTaskState> { /// View number this view is executing in. - pub cur_view: TYPES::Time, + pub cur_view: TYPES::ViewTime, + /// Epoch number this node is executing in. + pub cur_epoch: TYPES::EpochTime, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, /// The underlying network @@ -42,7 +44,7 @@ pub struct VidTaskState> { /// Our Private Key pub private_key: ::PrivateKey, /// The view and ID of the current vote collection task, if there is one. - pub vote_collector: Option<(TYPES::Time, usize, usize)>, + pub vote_collector: Option<(TYPES::ViewTime, usize, usize)>, /// This state's ID pub id: u64, } @@ -73,6 +75,7 @@ impl> VidTaskState { Arc::clone(encoded_transactions), &Arc::clone(&self.membership), *view_number, + self.cur_epoch, vid_precompute.clone(), ) .await; diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 0b8452a2f1..62715ee6d3 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -62,16 +62,18 @@ pub enum ViewSyncPhase { /// Type alias for a map from View Number to Relay to Vote Task type RelayMap = HashMap< - ::Time, + ::ViewTime, BTreeMap>, >; /// Main view sync task state pub struct ViewSyncTaskState, V: Versions> { /// View HotShot is currently in - pub current_view: TYPES::Time, + pub current_view: TYPES::ViewTime, /// View HotShot wishes to be in - pub next_view: TYPES::Time, + pub next_view: TYPES::ViewTime, + /// Epoch HotShot is currently in + pub current_epoch: TYPES::EpochTime, /// The underlying network pub network: Arc, /// Membership for the quorum @@ -87,7 +89,7 @@ pub struct ViewSyncTaskState, V: V pub num_timeouts_tracked: u64, /// Map of running replica tasks - pub replica_task_map: RwLock>>, + pub replica_task_map: RwLock>>, /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: 
RwLock< @@ -105,7 +107,7 @@ pub struct ViewSyncTaskState, V: V pub view_sync_timeout: Duration, /// Last view we garbage collected old tasks - pub last_garbage_collected_view: TYPES::Time, + pub last_garbage_collected_view: TYPES::ViewTime, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, @@ -136,9 +138,11 @@ pub struct ViewSyncReplicaTaskState, V: Versions> ViewSyncTaskSta pub async fn send_to_or_create_replica( &mut self, event: Arc>, - view: TYPES::Time, + view: TYPES::ViewTime, sender: &Sender>>, ) { // This certificate is old, we can throw it away @@ -221,6 +225,7 @@ impl, V: Versions> ViewSyncTaskSta let mut replica_state: ViewSyncReplicaTaskState = ViewSyncReplicaTaskState { current_view: view, next_view: view, + current_epoch: self.current_epoch, relay: 0, finalized: false, sent_view_change_event: false, @@ -299,7 +304,11 @@ impl, V: Versions> ViewSyncTaskSta } // We do not have a relay task already running, so start one - if self.membership.leader(vote_view + relay) != self.public_key { + if self + .membership + .leader(vote_view + relay, self.current_epoch) + != self.public_key + { debug!("View sync vote sent to wrong leader"); return; } @@ -308,6 +317,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, + epoch: self.current_epoch, id: self.id, }; let vote_collector = @@ -337,7 +347,11 @@ impl, V: Versions> ViewSyncTaskSta } // We do not have a relay task already running, so start one - if self.membership.leader(vote_view + relay) != self.public_key { + if self + .membership + .leader(vote_view + relay, self.current_epoch) + != self.public_key + { debug!("View sync vote sent to wrong leader"); return; } @@ -346,6 +360,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, + epoch: self.current_epoch, id: self.id, }; let vote_collector = @@ -375,7 +390,11 @@ impl, V: Versions> 
ViewSyncTaskSta } // We do not have a relay task already running, so start one - if self.membership.leader(vote_view + relay) != self.public_key { + if self + .membership + .leader(vote_view + relay, self.current_epoch) + != self.public_key + { debug!("View sync vote sent to wrong leader"); return; } @@ -384,6 +403,7 @@ impl, V: Versions> ViewSyncTaskSta public_key: self.public_key.clone(), membership: Arc::clone(&self.membership), view: vote_view, + epoch: self.current_epoch, id: self.id, }; let vote_collector = @@ -395,7 +415,7 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::ViewChange(new_view) => { - let new_view = TYPES::Time::new(*new_view); + let new_view = TYPES::ViewTime::new(*new_view); if self.current_view < new_view { debug!( "Change from view {} to view {} in view sync task", @@ -414,19 +434,19 @@ impl, V: Versions> ViewSyncTaskSta self.replica_task_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::ViewTime::new(i)); self.pre_commit_relay_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::ViewTime::new(i)); self.commit_relay_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::ViewTime::new(i)); self.finalize_relay_map .write() .await - .remove_entry(&TYPES::Time::new(i)); + .remove_entry(&TYPES::ViewTime::new(i)); } self.last_garbage_collected_view = self.current_view - 1; @@ -434,12 +454,12 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - if view_number <= TYPES::Time::new(*self.current_view) { + if view_number <= TYPES::ViewTime::new(*self.current_view) { return; } self.num_timeouts_tracked += 1; - let leader = self.membership.leader(view_number); + let leader = self.membership.leader(view_number, self.current_epoch); error!( %leader, leader_mnemonic = cdn_proto::util::mnemonic(&leader), @@ -465,7 +485,7 @@ impl, V: Versions> ViewSyncTaskSta // If this is the first 
timeout we've seen advance to the next view self.current_view = view_number; broadcast_event( - Arc::new(HotShotEvent::ViewChange(TYPES::Time::new( + Arc::new(HotShotEvent::ViewChange(TYPES::ViewTime::new( *self.current_view, ))), &event_stream, @@ -502,7 +522,11 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.membership.as_ref(), + self.current_epoch, + &self.upgrade_lock, + ) .await { error!("Not valid view sync cert! {:?}", certificate.date()); @@ -561,7 +585,7 @@ impl, V: Versions> broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::ViewTime::new(*next_view), relay, phase, )), @@ -584,7 +608,11 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.membership.as_ref(), + self.current_epoch, + &self.upgrade_lock, + ) .await { error!("Not valid view sync cert! {:?}", certificate.date()); @@ -655,7 +683,7 @@ impl, V: Versions> ); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::ViewTime::new(*next_view), relay, phase, )), @@ -676,7 +704,11 @@ impl, V: Versions> // If certificate is not valid, return current state if !certificate - .is_valid_cert(self.membership.as_ref(), &self.upgrade_lock) + .is_valid_cert( + self.membership.as_ref(), + self.current_epoch, + &self.upgrade_lock, + ) .await { error!("Not valid view sync cert! 
{:?}", certificate.date()); @@ -708,7 +740,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; - if self.next_view != TYPES::Time::new(*view_number) { + if self.next_view != TYPES::ViewTime::new(*view_number) { error!("Unexpected view number to triger view sync"); return None; } @@ -748,7 +780,7 @@ impl, V: Versions> info!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::ViewTime::new(*next_view), relay, ViewSyncPhase::None, )), @@ -764,7 +796,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { let round = *round; // Shouldn't ever receive a timeout for a relay higher than ours - if TYPES::Time::new(*round) == self.next_view && *relay == self.relay { + if TYPES::ViewTime::new(*round) == self.next_view && *relay == self.relay { if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } @@ -817,7 +849,7 @@ impl, V: Versions> ); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::Time::new(*next_view), + TYPES::ViewTime::new(*next_view), relay, last_cert, )), diff --git a/crates/task-impls/src/vote_collection.rs b/crates/task-impls/src/vote_collection.rs index a7ec9cd7e9..e9e7f4d924 100644 --- a/crates/task-impls/src/vote_collection.rs +++ b/crates/task-impls/src/vote_collection.rs @@ -39,7 +39,7 @@ use crate::{ /// Alias for a map of Vote Collectors pub type VoteCollectorsMap = - BTreeMap<::Time, VoteCollectionTaskState>; + BTreeMap<::ViewTime, VoteCollectionTaskState>; /// Task state for collecting votes of one type and emitting a certificate pub struct VoteCollectionTaskState< @@ -58,7 +58,10 @@ pub struct VoteCollectionTaskState< pub accumulator: Option>, /// The view which we are collecting votes for - pub view: TYPES::Time, + pub view: TYPES::ViewTime, + + /// The epoch which we are collecting votes for + pub epoch: 
TYPES::EpochTime, /// Node id pub id: u64, @@ -71,8 +74,12 @@ pub trait AggregatableVote< CERT: Certificate, > { - /// return the leader for this votes - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey; + /// return the leader for this votes in the given epoch + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey; /// return the Hotshot event for the completion of this CERT fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; @@ -93,7 +100,7 @@ impl< vote: &VOTE, event_stream: &Sender>>, ) -> Option { - if vote.leader(&self.membership) != self.public_key { + if vote.leader(&self.membership, self.epoch) != self.public_key { error!("Received vote for a view in which we were not the leader."); return None; } @@ -108,7 +115,10 @@ impl< } let accumulator = self.accumulator.as_mut()?; - match accumulator.accumulate(vote, &self.membership).await { + match accumulator + .accumulate(vote, &self.membership, self.epoch) + .await + { Either::Left(()) => None, Either::Right(cert) => { debug!("Certificate Formed! 
{:?}", cert); @@ -151,7 +161,9 @@ pub struct AccumulatorInfo { /// Membership we are accumulation votes for pub membership: Arc, /// View of the votes we are collecting - pub view: TYPES::Time, + pub view: TYPES::ViewTime, + /// Epoch of the votes we are collecting + pub epoch: TYPES::EpochTime, /// This nodes id pub id: u64, } @@ -192,6 +204,7 @@ where public_key: info.public_key.clone(), accumulator: Some(new_accumulator), view: info.view, + epoch: info.epoch, id: info.id, }; @@ -217,6 +230,7 @@ pub async fn handle_vote< vote: &VOTE, public_key: TYPES::SignatureKey, membership: &Arc, + epoch: TYPES::EpochTime, id: u64, event: &Arc>, event_stream: &Sender>>, @@ -231,6 +245,7 @@ pub async fn handle_vote< public_key, membership: Arc::clone(membership), view: vote.view_number(), + epoch, id, }; if let Some(collector) = create_vote_accumulator( @@ -292,8 +307,12 @@ type ViewSyncFinalizeVoteState = VoteCollectionTaskState< impl AggregatableVote, QuorumCertificate> for QuorumVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.view_number() + 1) + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey { + membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( certificate: QuorumCertificate, @@ -306,8 +325,12 @@ impl AggregatableVote, QuorumCertifica impl AggregatableVote, UpgradeCertificate> for UpgradeVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.view_number()) + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey { + membership.leader(self.view_number(), epoch) } fn make_cert_event( certificate: UpgradeCertificate, @@ -320,8 +343,12 @@ impl AggregatableVote, UpgradeCertifi impl AggregatableVote, DaCertificate> for DaVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - 
membership.leader(self.view_number()) + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey { + membership.leader(self.view_number(), epoch) } fn make_cert_event( certificate: DaCertificate, @@ -334,8 +361,12 @@ impl AggregatableVote, DaCertificate AggregatableVote, TimeoutCertificate> for TimeoutVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.view_number() + 1) + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey { + membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( certificate: TimeoutCertificate, @@ -349,8 +380,12 @@ impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.date().round + self.date().relay) + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( certificate: ViewSyncCommitCertificate2, @@ -364,8 +399,12 @@ impl AggregatableVote, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.date().round + self.date().relay) + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey { + membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( certificate: ViewSyncPreCommitCertificate2, @@ -379,8 +418,12 @@ impl AggregatableVote, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVote { - fn leader(&self, membership: &TYPES::Membership) -> TYPES::SignatureKey { - membership.leader(self.date().round + self.date().relay) + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::EpochTime, + ) -> TYPES::SignatureKey 
{ + membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( certificate: ViewSyncFinalizeCertificate2, diff --git a/crates/testing/src/byzantine/byzantine_behaviour.rs b/crates/testing/src/byzantine/byzantine_behaviour.rs index beab8b5ae6..83f30138bf 100644 --- a/crates/testing/src/byzantine/byzantine_behaviour.rs +++ b/crates/testing/src/byzantine/byzantine_behaviour.rs @@ -116,7 +116,7 @@ pub struct DishonestLeader { /// How far back to look for a QC pub view_look_back: usize, /// Shared state of all view numbers we send bad proposal at - pub dishonest_proposal_view_numbers: Arc>>, + pub dishonest_proposal_view_numbers: Arc>>, } /// Add method that will handle `QuorumProposalSend` events @@ -246,7 +246,7 @@ pub struct ViewDelay { /// How many views the node will be delayed pub number_of_views_to_delay: u64, /// A map that is from view number to vector of events - pub events_for_view: HashMap>>, + pub events_for_view: HashMap>>, /// Specify which view number to stop delaying pub stop_view_delay_at_view_number: u64, } @@ -271,7 +271,7 @@ impl + std::fmt::Debug, V: Version if view_diff > 0 { return match self .events_for_view - .remove(&::Time::new(view_diff)) + .remove(&::ViewTime::new(view_diff)) { Some(lookback_events) => lookback_events.clone(), // we have already return all received events for this view @@ -346,7 +346,8 @@ impl + std::fmt::Debug, V: Version ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, - view: TYPES::Time::genesis(), + view: TYPES::ViewTime::genesis(), + epoch: TYPES::EpochTime::genesis(), quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), @@ -375,7 +376,7 @@ pub struct DishonestVoter { /// Collect all votes the node sends pub votes_sent: Vec>, /// Shared state with views numbers that leaders were dishonest at - pub dishonest_proposal_view_numbers: Arc>>, + pub dishonest_proposal_view_numbers: Arc>>, } #[async_trait] diff --git 
a/crates/testing/src/consistency_task.rs b/crates/testing/src/consistency_task.rs index 3cb04f65c1..3c15a0f715 100644 --- a/crates/testing/src/consistency_task.rs +++ b/crates/testing/src/consistency_task.rs @@ -24,10 +24,10 @@ use crate::{ }; /// Map from views to leaves for a single node, allowing multiple leaves for each view (because the node may a priori send us multiple leaves for a given view). -pub type NodeMap = BTreeMap<::Time, Vec>>; +pub type NodeMap = BTreeMap<::ViewTime, Vec>>; /// A sanitized map from views to leaves for a single node, with only a single leaf per view. -pub type NodeMapSanitized = BTreeMap<::Time, Leaf>; +pub type NodeMapSanitized = BTreeMap<::ViewTime, Leaf>; /// Validate that the `NodeMap` only has a single leaf per view. fn sanitize_node_map( @@ -68,7 +68,7 @@ async fn validate_node_map( .map(|((a, b), c)| (a, b, c)); let mut decided_upgrade_certificate = None; - let mut view_decided = TYPES::Time::new(0); + let mut view_decided = TYPES::ViewTime::new(0); for (grandparent, _parent, child) in leaf_triples { if let Some(cert) = grandparent.upgrade_certificate() { @@ -144,7 +144,7 @@ fn sanitize_network_map( Ok(result) } -pub type ViewMap = BTreeMap<::Time, BTreeMap>>; +pub type ViewMap = BTreeMap<::ViewTime, BTreeMap>>; // Invert the network map by interchanging the roles of the node_id and view number. // @@ -171,7 +171,7 @@ async fn invert_network_map( } /// A view map, sanitized to have exactly one leaf per view. 
-pub type ViewMapSanitized = BTreeMap<::Time, Leaf>; +pub type ViewMapSanitized = BTreeMap<::ViewTime, Leaf>; fn sanitize_view_map( view_map: &ViewMap, diff --git a/crates/testing/src/helpers.rs b/crates/testing/src/helpers.rs index b370f8d279..e0891ace20 100644 --- a/crates/testing/src/helpers.rs +++ b/crates/testing/src/helpers.rs @@ -7,6 +7,7 @@ #![allow(clippy::panic)] use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; +use crate::test_builder::TestDescription; use async_broadcast::{Receiver, Sender}; use bitvec::bitvec; use committable::Committable; @@ -44,8 +45,6 @@ use hotshot_types::{ use jf_vid::VidScheme; use serde::Serialize; -use crate::test_builder::TestDescription; - /// create the [`SystemContextHandle`] from a node id /// # Panics /// if cannot create a [`HotShotInitializer`] @@ -122,7 +121,8 @@ pub async fn build_cert< >( data: DATAType, membership: &TYPES::Membership, - view: TYPES::Time, + view: TYPES::ViewTime, + epoch: TYPES::EpochTime, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -131,6 +131,7 @@ pub async fn build_cert< &data, membership, view, + epoch, upgrade_lock, ) .await; @@ -186,10 +187,11 @@ pub async fn build_assembled_sig< >( data: &DATAType, membership: &TYPES::Membership, - view: TYPES::Time, + view: TYPES::ViewTime, + epoch: TYPES::EpochTime, upgrade_lock: &UpgradeLock, ) -> ::QcType { - let stake_table = membership.stake_table(); + let stake_table = membership.stake_table(epoch); let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), @@ -244,18 +246,23 @@ pub fn key_pair_for_id( #[must_use] pub fn vid_scheme_from_view_number( membership: &TYPES::Membership, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, + epoch_number: TYPES::EpochTime, ) -> VidSchemeType { - let num_storage_nodes = membership.committee_members(view_number).len(); + let num_storage_nodes = membership + .committee_members(view_number, epoch_number) + .len(); 
vid_scheme(num_storage_nodes) } pub fn vid_payload_commitment( quorum_membership: &::Membership, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, + epoch_number: TYPES::EpochTime, transactions: Vec, ) -> VidCommitment { - let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = + vid_scheme_from_view_number::(quorum_membership, view_number, epoch_number); let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); @@ -265,19 +272,24 @@ pub fn vid_payload_commitment( pub fn da_payload_commitment( quorum_membership: &::Membership, transactions: Vec, + epoch_number: TYPES::EpochTime, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); - vid_commitment(&encoded_transactions, quorum_membership.total_nodes()) + vid_commitment( + &encoded_transactions, + quorum_membership.total_nodes(epoch_number), + ) } pub fn build_payload_commitment( membership: &::Membership, - view: TYPES::Time, + view: TYPES::ViewTime, + epoch: TYPES::EpochTime, ) -> ::Commit { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. 
- let mut vid = vid_scheme_from_view_number::(membership, view); + let mut vid = vid_scheme_from_view_number::(membership, view, epoch); let encoded_transactions = Vec::new(); vid.commit_only(&encoded_transactions).unwrap() } @@ -285,17 +297,20 @@ pub fn build_payload_commitment( /// TODO: pub fn build_vid_proposal( quorum_membership: &::Membership, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, + epoch_number: TYPES::EpochTime, transactions: Vec, private_key: &::PrivateKey, ) -> VidProposal { - let mut vid = vid_scheme_from_view_number::(quorum_membership, view_number); + let mut vid = + vid_scheme_from_view_number::(quorum_membership, view_number, epoch_number); let encoded_transactions = TestTransaction::encode(&transactions); let vid_disperse = VidDisperse::from_membership( view_number, vid.disperse(&encoded_transactions).unwrap(), quorum_membership, + epoch_number, ); let signature = @@ -320,10 +335,12 @@ pub fn build_vid_proposal( ) } +#[allow(clippy::too_many_arguments)] pub async fn build_da_certificate( quorum_membership: &::Membership, da_membership: &::Membership, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, + epoch_number: TYPES::EpochTime, transactions: Vec, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, @@ -331,8 +348,10 @@ pub async fn build_da_certificate( ) -> DaCertificate { let encoded_transactions = TestTransaction::encode(&transactions); - let da_payload_commitment = - vid_commitment(&encoded_transactions, quorum_membership.total_nodes()); + let da_payload_commitment = vid_commitment( + &encoded_transactions, + quorum_membership.total_nodes(epoch_number), + ); let da_data = DaData { payload_commit: da_payload_commitment, @@ -342,6 +361,7 @@ pub async fn build_da_certificate( da_data, da_membership, view_number, + epoch_number, public_key, private_key, upgrade_lock, diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 3cbe364517..950cd24cfe 100644 --- 
a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -61,12 +61,12 @@ pub enum OverallSafetyTaskErr { NotEnoughDecides { got: usize, expected: usize }, #[error("Too many view failures: {0:?}")] - TooManyFailures(HashSet), + TooManyFailures(HashSet), #[error("Inconsistent failed views: expected: {expected_failed_views:?}, actual: {actual_failed_views:?}")] InconsistentFailedViews { - expected_failed_views: Vec, - actual_failed_views: HashSet, + expected_failed_views: Vec, + actual_failed_views: HashSet, }, #[error( "Not enough round results: results_count: {results_count}, views_count: {views_count}" @@ -97,7 +97,7 @@ pub struct OverallSafetyTask, V: Versions> OverallSafetyTask { - async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::Time) { + async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::ViewTime) { let expected_views_to_fail = &mut self.properties.expected_views_to_fail; self.ctx.failed_views.insert(view_number); @@ -155,7 +155,7 @@ impl, V: Versions> TestTas block_size: maybe_block_size, } => { // Skip the genesis leaf. 
- if leaf_chain.last().unwrap().leaf.view_number() == TYPES::Time::genesis() { + if leaf_chain.last().unwrap().leaf.view_number() == TYPES::ViewTime::genesis() { return Ok(()); } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); @@ -364,18 +364,18 @@ impl Default for RoundCtx { pub struct RoundCtx { /// results from previous rounds /// view number -> round result - pub round_results: HashMap>, + pub round_results: HashMap>, /// during the run view refactor - pub failed_views: HashSet, + pub failed_views: HashSet, /// successful views - pub successful_views: HashSet, + pub successful_views: HashSet, } impl RoundCtx { /// inserts an error into the context pub fn insert_error_to_context( &mut self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, idx: usize, error: Arc>, ) { @@ -569,7 +569,7 @@ pub struct OverallSafetyPropertiesDescription { /// required to mark view as successful pub threshold_calculator: Arc usize + Send + Sync>, /// pass in the views that we expect to fail - pub expected_views_to_fail: HashMap, + pub expected_views_to_fail: HashMap, } impl std::fmt::Debug for OverallSafetyPropertiesDescription { diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index 9c0efc2a43..71df1d9ba5 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -57,9 +57,9 @@ pub struct SpinningTask< /// late start nodes pub(crate) late_start: HashMap>, /// time based changes - pub(crate) changes: BTreeMap>, + pub(crate) changes: BTreeMap>, /// most recent view seen by spinning task - pub(crate) latest_view: Option, + pub(crate) latest_view: Option, /// Last decided leaf that can be used as the anchor leaf to initialize the node. 
pub(crate) last_decided_leaf: Leaf, /// Highest qc seen in the test for restarting nodes @@ -145,8 +145,8 @@ where self.last_decided_leaf.clone(), TestInstanceState::new(self.async_delay_config.clone()), None, - TYPES::Time::genesis(), - TYPES::Time::genesis(), + TYPES::ViewTime::genesis(), + TYPES::ViewTime::genesis(), BTreeMap::new(), self.high_qc.clone(), None, diff --git a/crates/testing/src/test_builder.rs b/crates/testing/src/test_builder.rs index ea14c428be..ca78822b87 100644 --- a/crates/testing/src/test_builder.rs +++ b/crates/testing/src/test_builder.rs @@ -515,6 +515,7 @@ where stop_proposing_time: 0, start_voting_time: u64::MAX, stop_voting_time: 0, + epoch_height: 0, }; let TimingData { next_view_timeout, diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 8ef534860f..c6dd95f3d9 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -170,10 +170,10 @@ where // add spinning task // map spinning to view - let mut changes: BTreeMap> = BTreeMap::new(); + let mut changes: BTreeMap> = BTreeMap::new(); for (view, mut change) in spinning_changes { changes - .entry(TYPES::Time::new(view)) + .entry(TYPES::ViewTime::new(view)) .or_insert_with(Vec::new) .append(&mut change); } diff --git a/crates/testing/src/view_generator.rs b/crates/testing/src/view_generator.rs index 315ac2d5e8..dcaedd5dc1 100644 --- a/crates/testing/src/view_generator.rs +++ b/crates/testing/src/view_generator.rs @@ -12,6 +12,9 @@ use std::{ task::{Context, Poll}, }; +use crate::helpers::{ + build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, +}; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ @@ -19,6 +22,7 @@ use hotshot_example_types::{ node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; +use hotshot_types::data::EpochNumber; use 
hotshot_types::{ data::{ DaProposal, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewChangeEvidence, @@ -42,16 +46,13 @@ use hotshot_types::{ use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; -use crate::helpers::{ - build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, -}; - #[derive(Clone)] pub struct TestView { pub da_proposal: Proposal>, pub quorum_proposal: Proposal>, pub leaf: Leaf, pub view_number: ViewNumber, + pub epoch_number: EpochNumber, pub quorum_membership: ::Membership, pub da_membership: ::Membership, pub vid_disperse: Proposal>, @@ -75,6 +76,7 @@ impl TestView { da_membership: &::Membership, ) -> Self { let genesis_view = ViewNumber::new(1); + let genesis_epoch = EpochNumber::new(1); let upgrade_lock = UpgradeLock::new(); let transactions = Vec::new(); @@ -97,12 +99,16 @@ impl TestView { let leader_public_key = public_key; - let payload_commitment = - da_payload_commitment::(quorum_membership, transactions.clone()); + let payload_commitment = da_payload_commitment::( + quorum_membership, + transactions.clone(), + genesis_epoch, + ); let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, genesis_view, + genesis_epoch, transactions.clone(), &private_key, ); @@ -111,6 +117,7 @@ impl TestView { quorum_membership, da_membership, genesis_view, + genesis_epoch, transactions.clone(), &public_key, &private_key, @@ -180,6 +187,7 @@ impl TestView { quorum_proposal, leaf, view_number: genesis_view, + epoch_number: genesis_epoch, quorum_membership: quorum_membership.clone(), da_membership: da_membership.clone(), vid_disperse, @@ -237,12 +245,16 @@ impl TestView { &metadata, ); - let payload_commitment = - da_payload_commitment::(quorum_membership, transactions.clone()); + let payload_commitment = da_payload_commitment::( + quorum_membership, + transactions.clone(), + self.epoch_number, + ); let (vid_disperse, vid_proposal) = build_vid_proposal( quorum_membership, next_view, + 
self.epoch_number, transactions.clone(), &private_key, ); @@ -251,6 +263,7 @@ impl TestView { quorum_membership, da_membership, next_view, + self.epoch_number, transactions.clone(), &public_key, &private_key, @@ -268,6 +281,7 @@ impl TestView { quorum_data, quorum_membership, old_view, + self.epoch_number, &old_public_key, &old_private_key, &self.upgrade_lock, @@ -285,6 +299,7 @@ impl TestView { data.clone(), quorum_membership, next_view, + self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -307,6 +322,7 @@ impl TestView { data.clone(), quorum_membership, next_view, + self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -329,6 +345,7 @@ impl TestView { data.clone(), quorum_membership, next_view, + self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -406,6 +423,7 @@ impl TestView { quorum_proposal, leaf, view_number: next_view, + epoch_number: self.epoch_number, quorum_membership: quorum_membership.clone(), da_membership: self.da_membership.clone(), vid_disperse, diff --git a/crates/testing/tests/tests_1/consensus_task.rs b/crates/testing/tests/tests_1/consensus_task.rs index ad27388433..c5f208d01c 100644 --- a/crates/testing/tests/tests_1/consensus_task.rs +++ b/crates/testing/tests/tests_1/consensus_task.rs @@ -34,6 +34,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, TimeoutVote, ViewSyncFinalizeData}, @@ -67,7 +68,11 @@ async fn test_consensus_task() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. 
- let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(2)); + let mut vid = vid_scheme_from_view_number::( + &quorum_membership, + ViewNumber::new(2), + EpochNumber::new(0), + ); let encoded_transactions = Vec::new(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; @@ -111,7 +116,7 @@ async fn test_consensus_task() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, ) .unwrap()], @@ -223,7 +228,11 @@ async fn test_view_sync_finalize_propose() { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. - let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(4)); + let mut vid = vid_scheme_from_view_number::( + &quorum_membership, + ViewNumber::new(4), + EpochNumber::new(0), + ); let encoded_transactions = Vec::new(); let vid_disperse = vid.disperse(&encoded_transactions).unwrap(); let payload_commitment = vid_disperse.commit; diff --git a/crates/testing/tests/tests_1/da_task.rs b/crates/testing/tests/tests_1/da_task.rs index b39f59ff32..1f78de5467 100644 --- a/crates/testing/tests/tests_1/da_task.rs +++ b/crates/testing/tests/tests_1/da_task.rs @@ -21,6 +21,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData, @@ -50,7 +51,11 @@ async fn test_da_task() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle.hotshot.memberships.quorum_membership.total_nodes(), + handle + .hotshot + .memberships + .quorum_membership + .total_nodes(EpochNumber::new(0)), ); let mut generator = 
TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -96,7 +101,7 @@ async fn test_da_task() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], @@ -147,7 +152,11 @@ async fn test_da_task_storage_failure() { let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); let (payload_commit, precompute) = precompute_vid_commitment( &encoded_transactions, - handle.hotshot.memberships.quorum_membership.total_nodes(), + handle + .hotshot + .memberships + .quorum_membership + .total_nodes(EpochNumber::new(0)), ); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -193,7 +202,7 @@ async fn test_da_task_storage_failure() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], diff --git a/crates/testing/tests/tests_1/network_task.rs b/crates/testing/tests/tests_1/network_task.rs index 03a855a05f..571593d3c3 100644 --- a/crates/testing/tests/tests_1/network_task.rs +++ b/crates/testing/tests/tests_1/network_task.rs @@ -17,6 +17,7 @@ use hotshot_testing::{ helpers::build_system_handle, test_builder::TestDescription, test_task::add_network_message_test_task, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, message::UpgradeLock, @@ -25,6 +26,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, }, }; + // Test that the event task sends a message, and the message task receives it // and emits the proper event #[cfg(test)] @@ -62,6 +64,7 @@ async fn test_network_task() { NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), + epoch: EpochNumber::new(0), quorum_membership: membership.clone(), da_membership: membership.clone(), upgrade_lock: 
upgrade_lock.clone(), @@ -138,6 +141,7 @@ async fn test_network_storage_fail() { NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), + epoch: EpochNumber::new(0), quorum_membership: membership.clone(), da_membership: membership.clone(), upgrade_lock: upgrade_lock.clone(), diff --git a/crates/testing/tests/tests_1/transaction_task.rs b/crates/testing/tests/tests_1/transaction_task.rs index d73cd48ea7..b8c6a194b9 100644 --- a/crates/testing/tests/tests_1/transaction_task.rs +++ b/crates/testing/tests/tests_1/transaction_task.rs @@ -7,6 +7,7 @@ use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState, }; use hotshot_testing::helpers::build_system_handle; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, PackedBundle, ViewNumber}, traits::{ @@ -39,7 +40,8 @@ async fn test_transaction_task_leader_two_views_in_a_row() { input.push(HotShotEvent::Shutdown); let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let (_, precompute_data) = precompute_vid_commitment(&[], quorum_membership.total_nodes()); + let (_, precompute_data) = + precompute_vid_commitment(&[], quorum_membership.total_nodes(EpochNumber::new(0))); // current view let mut exp_packed_bundle = PackedBundle::new( @@ -50,7 +52,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { current_view, vec1::vec1![ null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap() diff --git a/crates/testing/tests/tests_1/upgrade_task_with_consensus.rs b/crates/testing/tests/tests_1/upgrade_task_with_consensus.rs index e226726246..caeb634e2a 100644 --- a/crates/testing/tests/tests_1/upgrade_task_with_consensus.rs +++ b/crates/testing/tests/tests_1/upgrade_task_with_consensus.rs @@ -29,7 +29,7 @@ use hotshot_testing::{ view_generator::TestViewGenerator, }; use hotshot_types::{ - data::{null_block, ViewNumber}, 
+ data::{null_block, EpochNumber, ViewNumber}, simple_vote::UpgradeProposalData, traits::{ election::Membership, @@ -294,7 +294,7 @@ async fn test_upgrade_task_propose() { proposals[2].data.block_header.metadata, ViewNumber::new(3), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], @@ -390,7 +390,7 @@ async fn test_upgrade_task_blank_blocks() { let new_version = Version { major: 0, minor: 2 }; let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, ) .unwrap(); @@ -620,7 +620,9 @@ async fn test_upgrade_task_blank_blocks() { exact(ViewChange(ViewNumber::new(6))), validated_state_updated(), quorum_proposal_validated(), - quorum_proposal_send_with_null_block(quorum_membership.total_nodes()), + quorum_proposal_send_with_null_block( + quorum_membership.total_nodes(EpochNumber::new(0)), + ), leaf_decided(), quorum_vote_send(), ], diff --git a/crates/testing/tests/tests_1/vid_task.rs b/crates/testing/tests/tests_1/vid_task.rs index 17922d42cb..714a12a3b5 100644 --- a/crates/testing/tests/tests_1/vid_task.rs +++ b/crates/testing/tests/tests_1/vid_task.rs @@ -20,6 +20,7 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, serial, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, DaProposal, PackedBundle, VidDisperse, ViewNumber}, traits::{ @@ -50,7 +51,11 @@ async fn test_vid_task() { // quorum membership for VID share distribution let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); - let mut vid = vid_scheme_from_view_number::(&quorum_membership, ViewNumber::new(0)); + let mut vid = vid_scheme_from_view_number::( + &quorum_membership, + ViewNumber::new(0), + EpochNumber::new(0), + ); let transactions = vec![TestTransaction::new(vec![0])]; let (payload, metadata) = >::from_transactions( @@ -85,8 +90,12 
@@ async fn test_vid_task() { _pd: PhantomData, }; - let vid_disperse = - VidDisperse::from_membership(message.data.view_number, vid_disperse, &quorum_membership); + let vid_disperse = VidDisperse::from_membership( + message.data.view_number, + vid_disperse, + &quorum_membership, + EpochNumber::new(0), + ); let vid_proposal = Proposal { data: vid_disperse.clone(), @@ -104,7 +113,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1::vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], @@ -125,7 +134,7 @@ async fn test_vid_task() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION ) .unwrap()], diff --git a/crates/testing/tests/tests_1/view_sync_task.rs b/crates/testing/tests/tests_1/view_sync_task.rs index c0c4913981..604d2bdb21 100644 --- a/crates/testing/tests/tests_1/view_sync_task.rs +++ b/crates/testing/tests/tests_1/view_sync_task.rs @@ -29,11 +29,13 @@ async fn test_view_sync_task() { let vote_data = ViewSyncPreCommitData { relay: 0, - round: ::Time::new(4), + round: ::ViewTime::new( + 4, + ), }; let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, - ::Time::new(4), + ::ViewTime::new(4), hotshot_types::traits::consensus_api::ConsensusApi::public_key(&handle), hotshot_types::traits::consensus_api::ConsensusApi::private_key(&handle), &handle.hotshot.upgrade_lock, diff --git a/crates/testing/tests/tests_3/byzantine_tests.rs b/crates/testing/tests/tests_3/byzantine_tests.rs index 4687d3ff66..e5ac199aaf 100644 --- a/crates/testing/tests/tests_3/byzantine_tests.rs +++ b/crates/testing/tests/tests_3/byzantine_tests.rs @@ -21,7 +21,7 @@ use hotshot_testing::{ test_builder::{Behaviour, TestDescription}, }; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, 
message::{GeneralConsensusMessage, MessageKind, SequencingMessage}, traits::{ election::Membership, @@ -176,7 +176,7 @@ cross_tests!( view_increment: nodes_count as u64, modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { - *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64)); + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, EpochNumber::new(0))); } else { {} } diff --git a/crates/testing/tests/tests_3/memory_network.rs b/crates/testing/tests/tests_3/memory_network.rs index 38b2da02e1..09e42953ff 100644 --- a/crates/testing/tests/tests_3/memory_network.rs +++ b/crates/testing/tests/tests_3/memory_network.rs @@ -23,6 +23,7 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::ViewNumber, message::{DataMessage, Message, MessageKind, UpgradeLock}, @@ -53,7 +54,8 @@ pub struct Test; impl NodeType for Test { type AuctionResult = TestAuctionResult; - type Time = ViewNumber; + type ViewTime = ViewNumber; + type EpochTime = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index 26d7d8ca3f..6a1325133f 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -43,7 +43,7 @@ pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` pub type VidShares = BTreeMap< - ::Time, + ::ViewTime, HashMap<::SignatureKey, Proposal>>, >; @@ -272,27 +272,30 @@ impl HotShotActionViews { #[derive(custom_debug::Debug, Clone)] pub struct Consensus { /// The validated states that are currently loaded in memory. 
- validated_state_map: BTreeMap>, + validated_state_map: BTreeMap>, /// All the VID shares we've received for current and future views. vid_shares: VidShares, /// All the DA certs we've received for current and future views. /// view -> DA cert - saved_da_certs: HashMap>, + saved_da_certs: HashMap>, /// View number that is currently on. - cur_view: TYPES::Time, + cur_view: TYPES::ViewTime, + + /// Epoch number that is currently on. + cur_epoch: TYPES::EpochTime, /// Last proposals we sent out, None if we haven't proposed yet. /// Prevents duplicate proposals, and can be served to those trying to catchup - last_proposals: BTreeMap>>, + last_proposals: BTreeMap>>, /// last view had a successful decide event - last_decided_view: TYPES::Time, + last_decided_view: TYPES::ViewTime, /// The `locked_qc` view number - locked_view: TYPES::Time, + locked_view: TYPES::ViewTime, /// Map of leaf hash -> leaf /// - contains undecided leaves @@ -302,12 +305,12 @@ pub struct Consensus { /// Bundle of views which we performed the most recent action /// visibible to the network. Actions are votes and proposals /// for DA and Quorum - last_actions: HotShotActionViews, + last_actions: HotShotActionViews, /// Saved payloads. /// /// Encoded transactions for every view if we got a payload for that view. - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, /// the highqc per spec high_qc: QuorumCertificate, @@ -387,14 +390,15 @@ impl Consensus { /// Constructor. 
#[allow(clippy::too_many_arguments)] pub fn new( - validated_state_map: BTreeMap>, - cur_view: TYPES::Time, - locked_view: TYPES::Time, - last_decided_view: TYPES::Time, - last_actioned_view: TYPES::Time, - last_proposals: BTreeMap>>, + validated_state_map: BTreeMap>, + cur_view: TYPES::ViewTime, + cur_epoch: TYPES::EpochTime, + locked_view: TYPES::ViewTime, + last_decided_view: TYPES::ViewTime, + last_actioned_view: TYPES::ViewTime, + last_proposals: BTreeMap>>, saved_leaves: CommitmentMap>, - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, high_qc: QuorumCertificate, metrics: Arc, ) -> Self { @@ -403,6 +407,7 @@ impl Consensus { vid_shares: BTreeMap::new(), saved_da_certs: HashMap::new(), cur_view, + cur_epoch, last_decided_view, last_proposals, last_actions: HotShotActionViews::from_view(last_actioned_view), @@ -415,17 +420,22 @@ impl Consensus { } /// Get the current view. - pub fn cur_view(&self) -> TYPES::Time { + pub fn cur_view(&self) -> TYPES::ViewTime { self.cur_view } + /// Get the current epoch. + pub fn cur_epoch(&self) -> TYPES::EpochTime { + self.cur_epoch + } + /// Get the last decided view. - pub fn last_decided_view(&self) -> TYPES::Time { + pub fn last_decided_view(&self) -> TYPES::ViewTime { self.last_decided_view } /// Get the locked view. - pub fn locked_view(&self) -> TYPES::Time { + pub fn locked_view(&self) -> TYPES::ViewTime { self.locked_view } @@ -435,7 +445,7 @@ impl Consensus { } /// Get the validated state map. - pub fn validated_state_map(&self) -> &BTreeMap> { + pub fn validated_state_map(&self) -> &BTreeMap> { &self.validated_state_map } @@ -445,7 +455,7 @@ impl Consensus { } /// Get the saved payloads. - pub fn saved_payloads(&self) -> &BTreeMap> { + pub fn saved_payloads(&self) -> &BTreeMap> { &self.saved_payloads } @@ -455,19 +465,21 @@ impl Consensus { } /// Get the saved DA certs. 
- pub fn saved_da_certs(&self) -> &HashMap> { + pub fn saved_da_certs(&self) -> &HashMap> { &self.saved_da_certs } /// Get the map of our recent proposals - pub fn last_proposals(&self) -> &BTreeMap>> { + pub fn last_proposals( + &self, + ) -> &BTreeMap>> { &self.last_proposals } /// Update the current view. /// # Errors /// Can return an error when the new view_number is not higher than the existing view number. - pub fn update_view(&mut self, view_number: TYPES::Time) -> Result<()> { + pub fn update_view(&mut self, view_number: TYPES::ViewTime) -> Result<()> { ensure!( view_number > self.cur_view, "New view isn't newer than the current view." @@ -476,10 +488,22 @@ impl Consensus { Ok(()) } + /// Update the current epoch. + /// # Errors + /// Can return an error when the new epoch_number is not higher than the existing epoch number. + pub fn update_epoch(&mut self, epoch_number: TYPES::EpochTime) -> Result<()> { + ensure!( + epoch_number > self.cur_epoch, + "New epoch isn't newer than the current epoch." + ); + self.cur_epoch = epoch_number; + Ok(()) + } + /// Update the last actioned view internally for votes and proposals /// /// Returns true if the action is for a newer view than the last action of that type - pub fn update_action(&mut self, action: HotShotAction, view: TYPES::Time) -> bool { + pub fn update_action(&mut self, action: HotShotAction, view: TYPES::ViewTime) -> bool { let old_view = match action { HotShotAction::Vote => &mut self.last_actions.voted, HotShotAction::Propose => &mut self.last_actions.proposed, @@ -521,7 +545,7 @@ impl Consensus { > self .last_proposals .last_key_value() - .map_or(TYPES::Time::genesis(), |(k, _)| { *k }), + .map_or(TYPES::ViewTime::genesis(), |(k, _)| { *k }), "New view isn't newer than the previously proposed view." ); self.last_proposals @@ -533,7 +557,7 @@ impl Consensus { /// /// # Errors /// Can return an error when the new view_number is not higher than the existing decided view number. 
- pub fn update_last_decided_view(&mut self, view_number: TYPES::Time) -> Result<()> { + pub fn update_last_decided_view(&mut self, view_number: TYPES::ViewTime) -> Result<()> { ensure!( view_number > self.last_decided_view, "New view isn't newer than the previously decided view." @@ -546,7 +570,7 @@ impl Consensus { /// /// # Errors /// Can return an error when the new view_number is not higher than the existing locked view number. - pub fn update_locked_view(&mut self, view_number: TYPES::Time) -> Result<()> { + pub fn update_locked_view(&mut self, view_number: TYPES::ViewTime) -> Result<()> { ensure!( view_number > self.locked_view, "New view isn't newer than the previously locked view." @@ -562,7 +586,7 @@ impl Consensus { /// with the same view number. pub fn update_validated_state_map( &mut self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, view: View, ) -> Result<()> { if let Some(existing_view) = self.validated_state_map().get(&view_number) { @@ -600,7 +624,7 @@ impl Consensus { /// Can return an error when there's an existing payload corresponding to the same view number. pub fn update_saved_payloads( &mut self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, encoded_transaction: Arc<[u8]>, ) -> Result<()> { ensure!( @@ -628,7 +652,7 @@ impl Consensus { /// Add a new entry to the vid_shares map. pub fn update_vid_shares( &mut self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, disperse: Proposal>, ) { self.vid_shares @@ -638,7 +662,11 @@ impl Consensus { } /// Add a new entry to the da_certs map. 
- pub fn update_saved_da_certs(&mut self, view_number: TYPES::Time, cert: DaCertificate) { + pub fn update_saved_da_certs( + &mut self, + view_number: TYPES::ViewTime, + cert: DaCertificate, + ) { self.saved_da_certs.insert(view_number, cert); } @@ -647,8 +675,8 @@ impl Consensus { /// If the leaf or its ancestors are not found in storage pub fn visit_leaf_ancestors( &self, - start_from: TYPES::Time, - terminator: Terminator, + start_from: TYPES::ViewTime, + terminator: Terminator, ok_when_finished: bool, mut f: F, ) -> Result<(), HotShotError> @@ -707,7 +735,11 @@ impl Consensus { /// `saved_payloads` and `validated_state_map` fields of `Consensus`. /// # Panics /// On inconsistent stored entries - pub fn collect_garbage(&mut self, old_anchor_view: TYPES::Time, new_anchor_view: TYPES::Time) { + pub fn collect_garbage( + &mut self, + old_anchor_view: TYPES::ViewTime, + new_anchor_view: TYPES::ViewTime, + ) { // state check let anchor_entry = self .validated_state_map @@ -751,7 +783,7 @@ impl Consensus { /// Gets the validated state with the given view number, if in the state map. #[must_use] - pub fn state(&self, view_number: TYPES::Time) -> Option<&Arc> { + pub fn state(&self, view_number: TYPES::ViewTime) -> Option<&Arc> { match self.validated_state_map.get(&view_number) { Some(view) => view.state(), None => None, @@ -760,7 +792,7 @@ impl Consensus { /// Gets the validated state and state delta with the given view number, if in the state map. 
#[must_use] - pub fn state_and_delta(&self, view_number: TYPES::Time) -> StateAndDelta { + pub fn state_and_delta(&self, view_number: TYPES::ViewTime) -> StateAndDelta { match self.validated_state_map.get(&view_number) { Some(view) => view.state_and_delta(), None => (None, None), @@ -788,14 +820,16 @@ impl Consensus { #[instrument(skip_all, target = "Consensus", fields(view = *view))] pub async fn calculate_and_update_vid( consensus: OuterConsensus, - view: ::Time, + view: ::ViewTime, membership: Arc, private_key: &::PrivateKey, + epoch: TYPES::EpochTime, ) -> Option<()> { let consensus = consensus.upgradable_read().await; let txns = consensus.saved_payloads().get(&view)?; let vid = - VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, None).await; + VidDisperse::calculate_vid_disperse(Arc::clone(txns), &membership, view, epoch, None) + .await; let shares = VidDisperseShare::from_vid_disperse(vid); let mut consensus = ConsensusUpgradableReadLockGuard::upgrade(consensus).await; for share in shares { @@ -820,7 +854,7 @@ pub struct CommitmentAndMetadata { /// Builder fee data pub fees: Vec1>, /// View number this block is for - pub block_view: TYPES::Time, + pub block_view: TYPES::ViewTime, /// auction result that the block was produced from, if any pub auction_result: Option, } diff --git a/crates/types/src/data.rs b/crates/types/src/data.rs index d05760bf7a..ca3b313fe2 100644 --- a/crates/types/src/data.rs +++ b/crates/types/src/data.rs @@ -9,14 +9,6 @@ //! This module provides types for representing consensus internal state, such as leaves, //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. 
-use std::{ - collections::BTreeMap, - fmt::{Debug, Display}, - hash::Hash, - marker::PhantomData, - sync::Arc, -}; - use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use async_lock::RwLock; @@ -28,6 +20,13 @@ use derivative::Derivative; use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; +use std::{ + collections::BTreeMap, + fmt::{Debug, Display}, + hash::Hash, + marker::PhantomData, + sync::Arc, +}; use thiserror::Error; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; @@ -56,6 +55,62 @@ use crate::{ vote::{Certificate, HasViewNumber}, }; +/// Implements `ConsensusTime`, `Display`, `Add`, `AddAssign`, `Deref` and `Sub` +/// for the given thing wrapper type around u64. +macro_rules! impl_u64_wrapper { + ($t:ty) => { + impl ConsensusTime for $t { + /// Create a genesis number (0) + fn genesis() -> Self { + Self(0) + } + /// Create a new number with the given value. + fn new(n: u64) -> Self { + Self(n) + } + /// Return the u64 format + fn u64(&self) -> u64 { + self.0 + } + } + + impl Display for $t { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } + } + + impl std::ops::Add for $t { + type Output = $t; + + fn add(self, rhs: u64) -> Self::Output { + Self(self.0 + rhs) + } + } + + impl std::ops::AddAssign for $t { + fn add_assign(&mut self, rhs: u64) { + self.0 += rhs; + } + } + + impl std::ops::Deref for $t { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl std::ops::Sub for $t { + type Output = $t; + fn sub(self, rhs: u64) -> Self::Output { + Self(self.0 - rhs) + } + } + }; +} + /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. 
#[derive( Copy, @@ -73,27 +128,6 @@ use crate::{ )] pub struct ViewNumber(u64); -impl ConsensusTime for ViewNumber { - /// Create a genesis view number (0) - fn genesis() -> Self { - Self(0) - } - /// Create a new `ViewNumber` with the given value. - fn new(n: u64) -> Self { - Self(n) - } - /// Returen the u64 format - fn u64(&self) -> u64 { - self.0 - } -} - -impl Display for ViewNumber { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - impl Committable for ViewNumber { fn commit(&self) -> Commitment { let builder = RawCommitmentBuilder::new("View Number Commitment"); @@ -101,34 +135,33 @@ impl Committable for ViewNumber { } } -impl std::ops::Add for ViewNumber { - type Output = ViewNumber; - - fn add(self, rhs: u64) -> Self::Output { - Self(self.0 + rhs) - } -} - -impl std::ops::AddAssign for ViewNumber { - fn add_assign(&mut self, rhs: u64) { - self.0 += rhs; - } -} +impl_u64_wrapper!(ViewNumber); -impl std::ops::Deref for ViewNumber { - type Target = u64; +/// Type-safe wrapper around `u64` so we know the thing we're talking about is a epoch number. +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + CanonicalSerialize, + CanonicalDeserialize, +)] +pub struct EpochNumber(u64); - fn deref(&self) -> &Self::Target { - &self.0 +impl Committable for EpochNumber { + fn commit(&self) -> Commitment { + let builder = RawCommitmentBuilder::new("Epoch Number Commitment"); + builder.u64(self.0).finalize() } } -impl std::ops::Sub for ViewNumber { - type Output = ViewNumber; - fn sub(self, rhs: u64) -> Self::Output { - Self(self.0 - rhs) - } -} +impl_u64_wrapper!(EpochNumber); /// A proposal to start providing data availability for a block. #[derive(custom_debug::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] @@ -139,7 +172,7 @@ pub struct DaProposal { /// Metadata of the block to be applied. 
pub metadata: >::Metadata, /// View this proposal applies to - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, } /// A proposal to upgrade the network @@ -152,7 +185,7 @@ where /// The information about which version we are upgrading to. pub upgrade_proposal: UpgradeProposalData, /// View this proposal applies to - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, } /// VID dispersal data @@ -163,7 +196,7 @@ where #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct VidDisperse { /// The view number for which this VID data is intended - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share @@ -173,16 +206,17 @@ pub struct VidDisperse { } impl VidDisperse { - /// Create VID dispersal from a specified membership + /// Create VID dispersal from a specified membership for a given epoch. /// Uses the specified function to calculate share dispersal /// Allows for more complex stake table functionality pub fn from_membership( - view_number: TYPES::Time, + view_number: TYPES::ViewTime, mut vid_disperse: JfVidDisperse, membership: &TYPES::Membership, + epoch: TYPES::EpochTime, ) -> Self { let shares = membership - .committee_members(view_number) + .committee_members(view_number, epoch) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); @@ -195,7 +229,7 @@ impl VidDisperse { } } - /// Calculate the vid disperse information from the payload given a view and membership, + /// Calculate the vid disperse information from the payload given a view, epoch and membership, /// optionally using precompute data from builder /// /// # Panics @@ -204,10 +238,11 @@ impl VidDisperse { pub async fn calculate_vid_disperse( txns: Arc<[u8]>, membership: &Arc, - view: TYPES::Time, + view: TYPES::ViewTime, + epoch: TYPES::EpochTime, precompute_data: Option, ) -> Self { - 
let num_nodes = membership.total_nodes(); + let num_nodes = membership.total_nodes(epoch); let vid_disperse = spawn_blocking(move || { precompute_data @@ -222,7 +257,7 @@ impl VidDisperse { // Unwrap here will just propagate any panic from the spawned task, it's not a new place we can panic. let vid_disperse = vid_disperse.unwrap(); - Self::from_membership(view, vid_disperse, membership.as_ref()) + Self::from_membership(view, vid_disperse, membership.as_ref(), epoch) } } @@ -239,7 +274,7 @@ pub enum ViewChangeEvidence { impl ViewChangeEvidence { /// Check that the given ViewChangeEvidence is relevant to the current view. - pub fn is_valid_for_view(&self, view: &TYPES::Time) -> bool { + pub fn is_valid_for_view(&self, view: &TYPES::ViewTime) -> bool { match self { ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.date().view == *view - 1, ViewChangeEvidence::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, @@ -251,7 +286,7 @@ impl ViewChangeEvidence { /// VID share and associated metadata for a single node pub struct VidDisperseShare { /// The view number for which this VID data is intended - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share @@ -353,7 +388,7 @@ pub struct QuorumProposal { pub block_header: TYPES::BlockHeader, /// CurView from leader when proposing leaf - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// Per spec, justification pub justify_qc: QuorumCertificate, @@ -369,31 +404,31 @@ pub struct QuorumProposal { } impl HasViewNumber for DaProposal { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { self.view_number } } impl HasViewNumber for VidDisperse { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { self.view_number } } impl HasViewNumber for VidDisperseShare { - fn view_number(&self) -> 
TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { self.view_number } } impl HasViewNumber for QuorumProposal { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { self.view_number } } impl HasViewNumber for UpgradeProposal { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { self.view_number } } @@ -430,7 +465,7 @@ pub trait TestableLeaf { #[serde(bound(deserialize = ""))] pub struct Leaf { /// CurView from leader when proposing leaf - view_number: TYPES::Time, + view_number: TYPES::ViewTime, /// Per spec, justification justify_qc: QuorumCertificate, @@ -503,7 +538,7 @@ impl QuorumCertificate { // since this is genesis, we should never have a decided upgrade certificate. let upgrade_lock = UpgradeLock::::new(); - let genesis_view = ::genesis(); + let genesis_view = ::genesis(); let data = QuorumData { leaf_commit: Leaf::genesis(validated_state, instance_state) @@ -563,13 +598,13 @@ impl Leaf { let justify_qc = QuorumCertificate::new( null_quorum_data.clone(), null_quorum_data.commit(), - ::genesis(), + ::genesis(), None, PhantomData, ); Self { - view_number: TYPES::Time::genesis(), + view_number: TYPES::ViewTime::genesis(), justify_qc, parent_commitment: null_quorum_data.leaf_commit, upgrade_certificate: None, @@ -579,7 +614,7 @@ impl Leaf { } /// Time when this leaf was created. - pub fn view_number(&self) -> TYPES::Time { + pub fn view_number(&self) -> TYPES::ViewTime { self.view_number } /// Height of this leaf in the chain. @@ -866,7 +901,7 @@ pub struct PackedBundle { pub metadata: >::Metadata, /// The view number that this block is associated with. - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// The sequencing fee for submitting bundles. 
pub sequencing_fees: Vec1>, @@ -883,7 +918,7 @@ impl PackedBundle { pub fn new( encoded_transactions: Arc<[u8]>, metadata: >::Metadata, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, sequencing_fees: Vec1>, vid_precompute: Option, auction_result: Option, diff --git a/crates/types/src/error.rs b/crates/types/src/error.rs index f76c46906f..4dfd55a6c3 100644 --- a/crates/types/src/error.rs +++ b/crates/types/src/error.rs @@ -41,7 +41,7 @@ pub enum HotShotError { #[error("View {view_number} timed out: {state:?}")] ViewTimedOut { /// The view number that timed out - view_number: TYPES::Time, + view_number: TYPES::ViewTime, /// The state that the round was in when it timed out state: RoundTimedoutState, }, diff --git a/crates/types/src/event.rs b/crates/types/src/event.rs index 84f052d651..84241b1680 100644 --- a/crates/types/src/event.rs +++ b/crates/types/src/event.rs @@ -25,7 +25,7 @@ use crate::{ #[serde(bound(deserialize = "TYPES: NodeType"))] pub struct Event { /// The view number that this event originates from - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// The underlying event pub event: EventType, } @@ -127,17 +127,17 @@ pub enum EventType { /// A replica task was canceled by a timeout interrupt ReplicaViewTimeout { /// The view that timed out - view_number: TYPES::Time, + view_number: TYPES::ViewTime, }, /// The view has finished. If values were decided on, a `Decide` event will also be emitted. 
ViewFinished { /// The view number that has just finished - view_number: TYPES::Time, + view_number: TYPES::ViewTime, }, /// The view timed out ViewTimeout { /// The view that timed out - view_number: TYPES::Time, + view_number: TYPES::ViewTime, }, /// New transactions were received from the network /// or submitted to the network by us diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 1a9ff34b6c..f7fc8a62d8 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -222,6 +222,8 @@ pub struct HotShotConfig { pub start_voting_time: u64, /// Unix time in seconds at which we stop voting on an upgrade. To prevent voting on an upgrade, set stop_voting_time <= start_voting_time. pub stop_voting_time: u64, + /// Number of blocks in an epoch, zero means there are no epochs + pub epoch_height: u64, } impl HotShotConfig { diff --git a/crates/types/src/message.rs b/crates/types/src/message.rs index 407845b001..3b0e7dcd9a 100644 --- a/crates/types/src/message.rs +++ b/crates/types/src/message.rs @@ -63,7 +63,7 @@ impl fmt::Debug for Message { impl HasViewNumber for Message { /// get the view number out of a message - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { self.kind.view_number() } } @@ -133,16 +133,16 @@ impl From> for MessageKind { } impl ViewMessage for MessageKind { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { match &self { MessageKind::Consensus(message) => message.view_number(), MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, MessageKind::Data(DataMessage::RequestData(msg)) => msg.view, MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { ResponseMessage::Found(m) => m.view_number(), - ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::Time::new(1), + ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::ViewTime::new(1), }, - MessageKind::External(_) => TYPES::Time::new(1), + MessageKind::External(_) => 
TYPES::ViewTime::new(1), } } @@ -234,7 +234,7 @@ pub enum SequencingMessage { impl SequencingMessage { /// Get the view number this message relates to - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { match &self { SequencingMessage::General(general_message) => { match general_message { @@ -328,7 +328,7 @@ pub enum DataMessage { /// Contains a transaction to be submitted /// TODO rethink this when we start to send these messages /// we only need the view number for broadcast - SubmitTransaction(TYPES::Transaction, TYPES::Time), + SubmitTransaction(TYPES::Transaction, TYPES::ViewTime), /// A request for data RequestData(DataRequest), /// A response to a data request @@ -359,10 +359,11 @@ where pub async fn validate_signature( &self, quorum_membership: &TYPES::Membership, + epoch: TYPES::EpochTime, upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = self.data.view_number(); - let view_leader_key = quorum_membership.leader(view_number); + let view_leader_key = quorum_membership.leader(view_number, epoch); let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( @@ -410,7 +411,7 @@ impl UpgradeLock { /// /// # Errors /// Returns an error if we do not support the version required by the decided upgrade certificate. - pub async fn version(&self, view: TYPES::Time) -> Result { + pub async fn version(&self, view: TYPES::ViewTime) -> Result { let upgrade_certificate = self.decided_upgrade_certificate.read().await; let version = match *upgrade_certificate { @@ -434,7 +435,7 @@ impl UpgradeLock { /// Calculate the version applied in a view, based on the provided upgrade lock. /// /// This function does not fail, since it does not check that the version is supported. 
- pub async fn version_infallible(&self, view: TYPES::Time) -> Version { + pub async fn version_infallible(&self, view: TYPES::ViewTime) -> Version { let upgrade_certificate = self.decided_upgrade_certificate.read().await; match *upgrade_certificate { diff --git a/crates/types/src/request_response.rs b/crates/types/src/request_response.rs index 8b6aab7785..43fd9c4402 100644 --- a/crates/types/src/request_response.rs +++ b/crates/types/src/request_response.rs @@ -48,7 +48,7 @@ pub type TakeReceiver = Mutex, ResponseChannel { /// The view number that we're requesting a proposal for. - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// Our public key. The ensures that the receipient can reply to /// us directly. diff --git a/crates/types/src/simple_certificate.rs b/crates/types/src/simple_certificate.rs index f948e3a20b..7ee5a15cc5 100644 --- a/crates/types/src/simple_certificate.rs +++ b/crates/types/src/simple_certificate.rs @@ -78,7 +78,7 @@ pub struct SimpleCertificate, /// Which view this QC relates to - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, /// assembled signature for certificate aggregation pub signatures: Option<::QcType>, /// phantom data for `THRESHOLD` and `TYPES` @@ -92,7 +92,7 @@ impl> pub fn new( data: VOTEABLE, vote_commitment: Commitment, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, signatures: Option<::QcType>, pd: PhantomData<(TYPES, THRESHOLD)>, ) -> Self { @@ -133,7 +133,7 @@ impl> vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, - view: TYPES::Time, + view: TYPES::ViewTime, ) -> Self { let vote_commitment_bytes: [u8; 32] = vote_commitment.into(); @@ -148,13 +148,14 @@ impl> async fn is_valid_cert, V: Versions>( &self, membership: &MEMBERSHIP, + epoch: TYPES::EpochTime, upgrade_lock: &UpgradeLock, ) -> bool { - if self.view_number == TYPES::Time::genesis() { + if self.view_number == TYPES::ViewTime::genesis() { return true; } let real_qc_pp = ::public_parameter( - 
membership.stake_table(), + membership.stake_table(epoch), U256::from(Self::threshold(membership)), ); let Ok(commit) = self.date_commitment(upgrade_lock).await else { @@ -187,7 +188,7 @@ impl> impl> HasViewNumber for SimpleCertificate { - fn view_number(&self) -> TYPES::Time { + fn view_number(&self) -> TYPES::ViewTime { self.view_number } } @@ -205,7 +206,7 @@ impl UpgradeCertificate { /// Returns an error when the certificate is no longer relevant pub async fn is_relevant( &self, - view_number: TYPES::Time, + view_number: TYPES::ViewTime, decided_upgrade_certificate: Arc>>, ) -> Result<()> { let decided_upgrade_certificate_read = decided_upgrade_certificate.read().await; @@ -226,11 +227,13 @@ impl UpgradeCertificate { pub async fn validate( upgrade_certificate: &Option, quorum_membership: &TYPES::Membership, + epoch: TYPES::EpochTime, upgrade_lock: &UpgradeLock, ) -> Result<()> { if let Some(ref cert) = upgrade_certificate { ensure!( - cert.is_valid_cert(quorum_membership, upgrade_lock).await, + cert.is_valid_cert(quorum_membership, epoch, upgrade_lock) + .await, "Invalid upgrade certificate." ); Ok(()) @@ -241,7 +244,7 @@ impl UpgradeCertificate { /// Given an upgrade certificate and a view, tests whether the view is in the period /// where we are upgrading, which requires that we propose with null blocks. - pub fn upgrading_in(&self, view: TYPES::Time) -> bool { + pub fn upgrading_in(&self, view: TYPES::ViewTime) -> bool { view > self.data.old_version_last_view && view < self.data.new_version_first_view } } diff --git a/crates/types/src/simple_vote.rs b/crates/types/src/simple_vote.rs index a86452b9a9..2c504d20b4 100644 --- a/crates/types/src/simple_vote.rs +++ b/crates/types/src/simple_vote.rs @@ -41,7 +41,7 @@ pub struct DaData { /// Data used for a timeout vote. 
pub struct TimeoutData { /// View the timeout is for - pub view: TYPES::Time, + pub view: TYPES::ViewTime, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a VID vote. @@ -55,7 +55,7 @@ pub struct ViewSyncPreCommitData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::Time, + pub round: TYPES::ViewTime, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Commit vote. @@ -63,7 +63,7 @@ pub struct ViewSyncCommitData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::Time, + pub round: TYPES::ViewTime, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Finalize vote. @@ -71,7 +71,7 @@ pub struct ViewSyncFinalizeData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::Time, + pub round: TYPES::ViewTime, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Upgrade vote. @@ -82,13 +82,13 @@ pub struct UpgradeProposalData { pub new_version: Version, /// The last view in which we are allowed to reach a decide on this upgrade. /// If it is not decided by that view, we discard it. - pub decide_by: TYPES::Time, + pub decide_by: TYPES::ViewTime, /// A unique identifier for the specific protocol being voted on. pub new_version_hash: Vec, /// The last block for which the old version will be in effect. - pub old_version_last_view: TYPES::Time, + pub old_version_last_view: TYPES::ViewTime, /// The first block for which the new version will be in effect. - pub new_version_first_view: TYPES::Time, + pub new_version_first_view: TYPES::ViewTime, } /// Marker trait for data or commitments that can be voted on. @@ -123,11 +123,11 @@ pub struct SimpleVote { /// The leaf commitment being voted on. 
pub data: DATA, /// The view this vote was cast for - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, } impl HasViewNumber for SimpleVote { - fn view_number(&self) -> ::Time { + fn view_number(&self) -> ::ViewTime { self.view_number } } @@ -158,7 +158,7 @@ impl SimpleVote { /// If we are unable to sign the data pub async fn create_signed_vote( data: DATA, - view: TYPES::Time, + view: TYPES::ViewTime, pub_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -187,7 +187,7 @@ pub struct VersionedVoteData { data: DATA, /// view number - view: TYPES::Time, + view: TYPES::ViewTime, /// version applied to the view number version: Version, @@ -204,7 +204,7 @@ impl VersionedVoteData, ) -> Result { let version = upgrade_lock.version(view).await?; @@ -222,7 +222,7 @@ impl VersionedVoteData, ) -> Self { let version = upgrade_lock.version_infallible(view).await; @@ -303,7 +303,7 @@ impl Committable for UpgradeProposalData { /// This implements commit for all the types which contain a view and relay public key. fn view_and_relay_commit( - view: TYPES::Time, + view: TYPES::ViewTime, relay: u64, tag: &str, ) -> Commitment { diff --git a/crates/types/src/traits/auction_results_provider.rs b/crates/types/src/traits/auction_results_provider.rs index 283ada9d68..0c8993ddd6 100644 --- a/crates/types/src/traits/auction_results_provider.rs +++ b/crates/types/src/traits/auction_results_provider.rs @@ -20,5 +20,8 @@ use super::node_implementation::NodeType; pub trait AuctionResultsProvider: Send + Sync + Clone { /// Fetches the auction result for a view. Does not cache the result, /// subsequent calls will invoke additional wasted calls. 
- async fn fetch_auction_result(&self, view_number: TYPES::Time) -> Result; + async fn fetch_auction_result( + &self, + view_number: TYPES::ViewTime, + ) -> Result; } diff --git a/crates/types/src/traits/election.rs b/crates/types/src/traits/election.rs index b03bf8a843..6ea0ce793a 100644 --- a/crates/types/src/traits/election.rs +++ b/crates/types/src/traits/election.rs @@ -23,33 +23,45 @@ pub trait Membership: committee_topic: Topic, ) -> Self; - /// Get all participants in the committee (including their stake) - fn stake_table(&self) -> Vec<::StakeTableEntry>; + /// Get all participants in the committee (including their stake) for a specific epoch + fn stake_table( + &self, + epoch: TYPES::EpochTime, + ) -> Vec<::StakeTableEntry>; - /// Get all participants in the committee for a specific view - fn committee_members(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get all participants in the committee for a specific view for a specific epoch + fn committee_members( + &self, + view_number: TYPES::ViewTime, + epoch: TYPES::EpochTime, + ) -> BTreeSet; - /// Get all leaders in the committee for a specific view - fn committee_leaders(&self, view_number: TYPES::Time) -> BTreeSet; + /// Get all leaders in the committee for a specific view for a specific epoch + fn committee_leaders( + &self, + view_number: TYPES::ViewTime, + epoch: TYPES::EpochTime, + ) -> BTreeSet; /// Get the stake table entry for a public key, returns `None` if the - /// key is not in the table + /// key is not in the table for a specific epoch fn stake( &self, pub_key: &TYPES::SignatureKey, + epoch: TYPES::EpochTime, ) -> Option<::StakeTableEntry>; - /// See if a node has stake in the committee - fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool; + /// See if a node has stake in the committee in a specific epoch + fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::EpochTime) -> bool; - /// The leader of the committee for view `view_number`. 
- fn leader(&self, view_number: TYPES::Time) -> TYPES::SignatureKey; + /// The leader of the committee for view `view_number` in an epoch `epoch`. + fn leader(&self, view_number: TYPES::ViewTime, epoch: TYPES::EpochTime) -> TYPES::SignatureKey; /// Get the network topic for the committee fn committee_topic(&self) -> Topic; - /// Returns the number of total nodes in the committee - fn total_nodes(&self) -> usize; + /// Returns the number of total nodes in the committee in an epoch `epoch` + fn total_nodes(&self, epoch: TYPES::EpochTime) -> usize; /// Returns the threshold for a specific `Membership` implementation fn success_threshold(&self) -> NonZeroU64; diff --git a/crates/types/src/traits/network.rs b/crates/types/src/traits/network.rs index c742eb8c21..c0d6cd8fba 100644 --- a/crates/types/src/traits/network.rs +++ b/crates/types/src/traits/network.rs @@ -143,7 +143,7 @@ pub trait Id: Eq + PartialEq + Hash {} /// a message pub trait ViewMessage { /// get the view out of the message - fn view_number(&self) -> TYPES::Time; + fn view_number(&self) -> TYPES::ViewTime; // TODO move out of this trait. /// get the purpose of the message fn purpose(&self) -> MessagePurpose; @@ -156,7 +156,7 @@ pub struct DataRequest { /// Request pub request: RequestKind, /// View this message is for - pub view: TYPES::Time, + pub view: TYPES::ViewTime, /// signature of the Sha256 hash of the data so outsiders can't use know /// public keys with stake. 
pub signature: ::PureAssembledSignatureType, @@ -166,11 +166,11 @@ pub struct DataRequest { #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] pub enum RequestKind { /// Request VID data by our key and the VID commitment - Vid(TYPES::Time, TYPES::SignatureKey), + Vid(TYPES::ViewTime, TYPES::SignatureKey), /// Request a DA proposal for a certain view - DaProposal(TYPES::Time), + DaProposal(TYPES::ViewTime), /// Request for quorum proposal for a view - Proposal(TYPES::Time), + Proposal(TYPES::ViewTime), } /// A response for a request. `SequencingMessage` is the same as other network messages @@ -310,8 +310,12 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st /// Update view can be used for any reason, but mostly it's for canceling tasks, /// and looking up the address of the leader of a future view. - async fn update_view<'a, TYPES>(&'a self, _view: u64, _membership: &TYPES::Membership) - where + async fn update_view<'a, TYPES>( + &'a self, + _view: u64, + _epoch: u64, + _membership: &TYPES::Membership, + ) where TYPES: NodeType + 'a, { } diff --git a/crates/types/src/traits/node_implementation.rs b/crates/types/src/traits/node_implementation.rs index 060df7adf3..fd481d3943 100644 --- a/crates/types/src/traits/node_implementation.rs +++ b/crates/types/src/traits/node_implementation.rs @@ -210,7 +210,9 @@ pub trait NodeType: /// The time type that this hotshot setup is using. /// /// This should be the same `Time` that `ValidatedState::Time` is using. - type Time: ConsensusTime + Display; + type ViewTime: ConsensusTime + Display; + /// Same as above but for epoch. + type EpochTime: ConsensusTime + Display; /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. type AuctionResult: Debug @@ -244,7 +246,7 @@ pub trait NodeType: type InstanceState: InstanceState; /// The validated state type that this hotshot setup is using. 
- type ValidatedState: ValidatedState; + type ValidatedState: ValidatedState; /// Membership used for this implementation type Membership: Membership; diff --git a/crates/types/src/traits/storage.rs b/crates/types/src/traits/storage.rs index c11ba771b3..990d0010b8 100644 --- a/crates/types/src/traits/storage.rs +++ b/crates/types/src/traits/storage.rs @@ -36,7 +36,7 @@ pub trait Storage: Send + Sync + Clone { proposal: &Proposal>, ) -> Result<()>; /// Record a HotShotAction taken. - async fn record_action(&self, view: TYPES::Time, action: HotShotAction) -> Result<()>; + async fn record_action(&self, view: TYPES::ViewTime, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; /// Update the currently undecided state of consensus. This includes the undecided leaf chain, @@ -44,7 +44,7 @@ pub trait Storage: Send + Sync + Clone { async fn update_undecided_state( &self, leafs: CommitmentMap>, - state: BTreeMap>, + state: BTreeMap>, ) -> Result<()>; /// Upgrade the current decided upgrade certificate in storage. async fn update_decided_upgrade_certificate( diff --git a/crates/types/src/utils.rs b/crates/types/src/utils.rs index 8795ab028a..ff264fa47d 100644 --- a/crates/types/src/utils.rs +++ b/crates/types/src/utils.rs @@ -150,7 +150,7 @@ pub struct View { #[derive(Debug, Clone)] pub struct RoundFinishedEvent { /// The round that finished - pub view_number: TYPES::Time, + pub view_number: TYPES::ViewTime, } /// Whether or not to stop inclusively or exclusively when walking diff --git a/crates/types/src/vote.rs b/crates/types/src/vote.rs index 0123ba0085..a811089d05 100644 --- a/crates/types/src/vote.rs +++ b/crates/types/src/vote.rs @@ -48,7 +48,7 @@ pub trait Vote: HasViewNumber { /// Any type that is associated with a view pub trait HasViewNumber { /// Returns the view number the type refers to. 
- fn view_number(&self) -> TYPES::Time; + fn view_number(&self) -> TYPES::ViewTime; } /** @@ -68,13 +68,14 @@ pub trait Certificate: HasViewNumber { vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, - view: TYPES::Time, + view: TYPES::ViewTime, ) -> Self; - /// Checks if the cert is valid + /// Checks if the cert is valid in the given epoch fn is_valid_cert, V: Versions>( &self, membership: &MEMBERSHIP, + epoch: TYPES::EpochTime, upgrade_lock: &UpgradeLock, ) -> impl std::future::Future; /// Returns the amount of stake needed to create this certificate @@ -130,12 +131,14 @@ impl< V: Versions, > VoteAccumulator { - /// Add a vote to the total accumulated votes. Returns the accumulator or the certificate if we + /// Add a vote to the total accumulated votes for the given epoch. + /// Returns the accumulator or the certificate if we /// have accumulated enough votes to exceed the threshold for creating a certificate. pub async fn accumulate( &mut self, vote: &VOTE, membership: &TYPES::Membership, + epoch: TYPES::EpochTime, ) -> Either<(), CERT> { let key = vote.signing_key(); @@ -158,10 +161,10 @@ impl< return Either::Left(()); } - let Some(stake_table_entry) = membership.stake(&key) else { + let Some(stake_table_entry) = membership.stake(&key, epoch) else { return Either::Left(()); }; - let stake_table = membership.stake_table(); + let stake_table = membership.stake_table(epoch); let Some(vote_node_id) = stake_table .iter() .position(|x| *x == stake_table_entry.clone()) @@ -184,7 +187,7 @@ impl< let (signers, sig_list) = self .signers .entry(vote_commitment) - .or_insert((bitvec![0; membership.total_nodes()], Vec::new())); + .or_insert((bitvec![0; membership.total_nodes(epoch)], Vec::new())); if signers.get(vote_node_id).as_deref() == Some(&true) { error!("Node id is already in signers list"); return Either::Left(()); From 55a19d4842dc21659e286de114bd0ae5b21ee216 Mon Sep 17 00:00:00 2001 From: Lukasz Rzasik Date: Thu, 10 Oct 2024 17:31:54 -0600 
Subject: [PATCH 02/16] Fixes for dependency-tasks tests --- .../tests/tests_1/quorum_proposal_task.rs | 92 ++++++++++++++----- .../tests_1/upgrade_task_with_proposal.rs | 21 ++++- .../tests/tests_1/vote_dependency_handle.rs | 2 + 3 files changed, 90 insertions(+), 25 deletions(-) diff --git a/crates/testing/tests/tests_1/quorum_proposal_task.rs b/crates/testing/tests/tests_1/quorum_proposal_task.rs index 8e5bb64259..d9ff4507d6 100644 --- a/crates/testing/tests/tests_1/quorum_proposal_task.rs +++ b/crates/testing/tests/tests_1/quorum_proposal_task.rs @@ -26,6 +26,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, @@ -58,8 +59,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = - build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment::( + &quorum_membership, + ViewNumber::new(node_id), + EpochNumber::new(1), + ); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -91,7 +95,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let genesis_cert = proposals[0].data.justify_qc.clone(); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -203,7 +207,7 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - 
quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -212,7 +216,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(1), + EpochNumber::new(1) + ), builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -231,7 +239,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(2), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -248,7 +260,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(3), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), @@ -265,7 +281,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(4), + EpochNumber::new(1) + ), builder_commitment.clone(), 
proposals[2].data.block_header.metadata, ViewNumber::new(4), @@ -282,7 +302,11 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(5), + EpochNumber::new(1) + ), builder_commitment, proposals[3].data.block_header.metadata, ViewNumber::new(5), @@ -349,8 +373,11 @@ async fn test_quorum_proposal_task_qc_timeout() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = - build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment::( + &quorum_membership, + ViewNumber::new(node_id), + EpochNumber::new(1), + ); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -395,7 +422,7 @@ async fn test_quorum_proposal_task_qc_timeout() { }, ViewNumber::new(3), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION ) .unwrap()], @@ -439,8 +466,11 @@ async fn test_quorum_proposal_task_view_sync() { let quorum_membership = handle.hotshot.memberships.quorum_membership.clone(); let da_membership = handle.hotshot.memberships.da_membership.clone(); - let payload_commitment = - build_payload_commitment::(&quorum_membership, ViewNumber::new(node_id)); + let payload_commitment = build_payload_commitment::( + &quorum_membership, + ViewNumber::new(node_id), + EpochNumber::new(1), + ); let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let mut 
generator = TestViewGenerator::generate(quorum_membership.clone(), da_membership); @@ -487,7 +517,7 @@ async fn test_quorum_proposal_task_view_sync() { }, ViewNumber::new(2), vec1![null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION ) .unwrap()], @@ -564,7 +594,7 @@ async fn test_quorum_proposal_task_liveness_check() { let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -582,7 +612,11 @@ async fn test_quorum_proposal_task_liveness_check() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(1), + EpochNumber::new(1) + ), builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -601,7 +635,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(2), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -618,7 +656,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(3), + EpochNumber::new(1) + ), builder_commitment.clone(), 
proposals[1].data.block_header.metadata, ViewNumber::new(3), @@ -635,7 +677,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[2].clone()), QcFormed(either::Left(proposals[3].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(4)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(4), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[2].data.block_header.metadata, ViewNumber::new(4), @@ -652,7 +698,11 @@ async fn test_quorum_proposal_task_liveness_check() { QuorumProposalPreliminarilyValidated(proposals[3].clone()), QcFormed(either::Left(proposals[4].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(5)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(5), + EpochNumber::new(1) + ), builder_commitment, proposals[3].data.block_header.metadata, ViewNumber::new(5), diff --git a/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs b/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs index b429fddaf4..730eeeca8a 100644 --- a/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -31,6 +31,7 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{null_block, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, @@ -147,7 +148,7 @@ async fn test_upgrade_task_with_proposal() { let genesis_leaf = Leaf::genesis(&validated_state, &*handle.hotshot.instance_state()).await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); let builder_fee = null_block::builder_fee::( - quorum_membership.total_nodes(), + quorum_membership.total_nodes(EpochNumber::new(1)), ::Base::VERSION, ) .unwrap(); @@ -173,7 +174,11 @@ async fn 
test_upgrade_task_with_proposal() { random![ QcFormed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(1)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(1), + EpochNumber::new(1) + ), builder_commitment.clone(), TestMetadata { num_transactions: 0 @@ -192,7 +197,11 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[0].clone()), QcFormed(either::Left(proposals[1].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(2)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(2), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[0].data.block_header.metadata, ViewNumber::new(2), @@ -210,7 +219,11 @@ async fn test_upgrade_task_with_proposal() { QuorumProposalPreliminarilyValidated(proposals[1].clone()), QcFormed(either::Left(proposals[2].data.justify_qc.clone())), SendPayloadCommitmentAndMetadata( - build_payload_commitment::(&quorum_membership, ViewNumber::new(3)), + build_payload_commitment::( + &quorum_membership, + ViewNumber::new(3), + EpochNumber::new(1) + ), builder_commitment.clone(), proposals[1].data.block_header.metadata, ViewNumber::new(3), diff --git a/crates/testing/tests/tests_1/vote_dependency_handle.rs b/crates/testing/tests/tests_1/vote_dependency_handle.rs index 62d1d8040b..001dd4b7e8 100644 --- a/crates/testing/tests/tests_1/vote_dependency_handle.rs +++ b/crates/testing/tests/tests_1/vote_dependency_handle.rs @@ -13,6 +13,7 @@ use hotshot_testing::{ predicates::{event::*, Predicate, PredicateResult}, view_generator::TestViewGenerator, }; +use hotshot_types::data::EpochNumber; use hotshot_types::{ consensus::OuterConsensus, data::ViewNumber, @@ -98,6 +99,7 @@ async fn test_vote_dependency_handle() { quorum_membership: handle.hotshot.memberships.quorum_membership.clone().into(), storage: 
Arc::clone(&handle.storage()), view_number, + epoch_number: EpochNumber::new(1), sender: event_sender.clone(), receiver: event_receiver.clone(), upgrade_lock: handle.hotshot.upgrade_lock.clone(), From a46979e2636ff445c4c2d436c3c1f9e412199a53 Mon Sep 17 00:00:00 2001 From: Lukasz Rzasik Date: Fri, 11 Oct 2024 10:42:56 -0600 Subject: [PATCH 03/16] Fix possible deadlock --- crates/task-impls/src/consensus/mod.rs | 1 - crates/task-impls/src/helpers.rs | 14 +++++++------- crates/task-impls/src/quorum_proposal/mod.rs | 4 ++-- crates/task-impls/src/request.rs | 2 +- crates/task-impls/src/response.rs | 5 +++-- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/task-impls/src/consensus/mod.rs b/crates/task-impls/src/consensus/mod.rs index 8f52645f57..8eca1af11a 100644 --- a/crates/task-impls/src/consensus/mod.rs +++ b/crates/task-impls/src/consensus/mod.rs @@ -636,7 +636,6 @@ impl, V: Versions> ConsensusTaskSt info!("Failed to propose off SendPayloadCommitmentAndMetadata because we had view change evidence, but it was not current."); return; } - let current_epoch = self.consensus.read().await.cur_epoch(); match cert { ViewChangeEvidence::Timeout(tc) => { if self diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index e761a41450..16114e9b3e 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -493,15 +493,15 @@ pub async fn validate_proposal_safety_and_liveness< if let Err(e) = consensus_write.update_proposed_view(proposal.clone()) { tracing::debug!("Internal proposal update failed; error = {e:#}"); }; - - // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. - broadcast_event( - Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), - &event_stream, - ) - .await; } + // Broadcast that we've updated our consensus state so that other tasks know it's safe to grab. 
+ broadcast_event( + Arc::new(HotShotEvent::ValidatedStateUpdated(view_number, view)), + &event_stream, + ) + .await; + let current_epoch = consensus.read().await.cur_epoch(); UpgradeCertificate::validate( &proposal.data.upgrade_certificate, diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index 45f761d322..95b6d99493 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -427,10 +427,11 @@ impl, V: Versions> ); } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + let epoch_number = self.consensus.read().await.cur_epoch(); if !certificate .is_valid_cert( self.quorum_membership.as_ref(), - self.consensus.read().await.cur_epoch(), + epoch_number, &self.upgrade_lock, ) .await @@ -443,7 +444,6 @@ impl, V: Versions> } let view_number = certificate.view_number; - let epoch_number = self.consensus.read().await.cur_epoch(); self.create_dependency_task_if_new( view_number, diff --git a/crates/task-impls/src/request.rs b/crates/task-impls/src/request.rs index fe0f6c652a..50d465e2e9 100644 --- a/crates/task-impls/src/request.rs +++ b/crates/task-impls/src/request.rs @@ -97,6 +97,7 @@ impl> TaskState for NetworkRequest match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _) => { let prop_view = proposal.view_number(); + let current_epoch = self.state.read().await.cur_epoch(); // If we already have the VID shares for the next view, do nothing. 
if prop_view >= self.view @@ -107,7 +108,6 @@ impl> TaskState for NetworkRequest .vid_shares() .contains_key(&prop_view) { - let current_epoch = self.state.read().await.cur_epoch(); self.spawn_requests(prop_view, current_epoch, sender, receiver); } Ok(()) diff --git a/crates/task-impls/src/response.rs b/crates/task-impls/src/response.rs index bf1dd30587..98e727033e 100644 --- a/crates/task-impls/src/response.rs +++ b/crates/task-impls/src/response.rs @@ -151,12 +151,13 @@ impl NetworkResponseState { .get(&view) .is_some_and(|m| m.contains_key(key)); if !contained { + let current_epoch = self.consensus.read().await.cur_epoch(); if Consensus::calculate_and_update_vid( OuterConsensus::new(Arc::clone(&self.consensus)), view, Arc::clone(&self.quorum), &self.private_key, - self.consensus.read().await.cur_epoch(), + current_epoch, ) .await .is_none() @@ -168,7 +169,7 @@ impl NetworkResponseState { view, Arc::clone(&self.quorum), &self.private_key, - self.consensus.read().await.cur_epoch(), + current_epoch, ) .await?; } From 84b697d5056f09762eabd22d0ddcded1683ad6ab Mon Sep 17 00:00:00 2001 From: Lukasz Rzasik Date: Fri, 11 Oct 2024 14:57:47 -0600 Subject: [PATCH 04/16] Add epoch_height to config file --- crates/orchestrator/run-config.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/orchestrator/run-config.toml b/crates/orchestrator/run-config.toml index 909a6043b0..d1fecc10c9 100644 --- a/crates/orchestrator/run-config.toml +++ b/crates/orchestrator/run-config.toml @@ -38,6 +38,7 @@ timeout_ratio = [11, 10] round_start_delay = 1 start_delay = 1 num_bootstrap = 5 +epoch_height = 0 [random_builder] txn_in_block = 100 From 9ef171a464bf3c28990be78031023d01dbc90860 Mon Sep 17 00:00:00 2001 From: Lukasz Rzasik Date: Wed, 16 Oct 2024 14:54:05 +0200 Subject: [PATCH 05/16] Rename `ViewTime` and `EpochTime` to `View` and `Epoch` --- .../src/auction_results_provider_types.rs | 2 +- crates/example-types/src/node_types.rs | 16 ++-- 
crates/example-types/src/storage_types.rs | 18 ++-- crates/examples/infra/mod.rs | 4 +- crates/hotshot/src/lib.rs | 34 +++---- crates/hotshot/src/tasks/mod.rs | 4 +- crates/hotshot/src/tasks/task_state.rs | 2 +- .../traits/election/randomized_committee.rs | 20 ++--- .../src/traits/election/static_committee.rs | 20 ++--- .../static_committee_leader_two_views.rs | 20 ++--- .../src/traits/networking/libp2p_network.rs | 4 +- crates/hotshot/src/types/handle.rs | 14 +-- crates/task-impls/src/consensus2/handlers.rs | 6 +- crates/task-impls/src/consensus2/mod.rs | 6 +- crates/task-impls/src/da.rs | 6 +- crates/task-impls/src/events.rs | 26 +++--- crates/task-impls/src/helpers.rs | 18 ++-- crates/task-impls/src/network.rs | 8 +- .../src/quorum_proposal/handlers.rs | 4 +- crates/task-impls/src/quorum_proposal/mod.rs | 16 ++-- .../src/quorum_proposal_recv/mod.rs | 8 +- crates/task-impls/src/quorum_vote/mod.rs | 18 ++-- crates/task-impls/src/request.rs | 18 ++-- crates/task-impls/src/response.rs | 4 +- crates/task-impls/src/transactions.rs | 32 +++---- crates/task-impls/src/upgrade.rs | 18 ++-- crates/task-impls/src/vid.rs | 6 +- crates/task-impls/src/view_sync.rs | 46 +++++----- crates/task-impls/src/vote_collection.rs | 28 +++--- .../src/byzantine/byzantine_behaviour.rs | 12 +-- crates/testing/src/consistency_task.rs | 10 +-- crates/testing/src/helpers.rs | 30 +++---- crates/testing/src/overall_safety_task.rs | 20 ++--- crates/testing/src/spinning_task.rs | 8 +- crates/testing/src/test_runner.rs | 4 +- .../testing/tests/tests_1/view_sync_task.rs | 4 +- .../testing/tests/tests_3/memory_network.rs | 4 +- crates/types/src/consensus.rs | 90 +++++++++---------- crates/types/src/data.rs | 44 ++++----- crates/types/src/error.rs | 2 +- crates/types/src/event.rs | 8 +- crates/types/src/message.rs | 18 ++-- crates/types/src/request_response.rs | 2 +- crates/types/src/simple_certificate.rs | 18 ++-- crates/types/src/simple_vote.rs | 28 +++--- .../src/traits/auction_results_provider.rs 
| 2 +- crates/types/src/traits/election.rs | 18 ++-- crates/types/src/traits/network.rs | 10 +-- .../types/src/traits/node_implementation.rs | 6 +- crates/types/src/traits/storage.rs | 4 +- crates/types/src/utils.rs | 2 +- crates/types/src/vote.rs | 8 +- 52 files changed, 389 insertions(+), 389 deletions(-) diff --git a/crates/example-types/src/auction_results_provider_types.rs b/crates/example-types/src/auction_results_provider_types.rs index d6b6490189..22d375c2bb 100644 --- a/crates/example-types/src/auction_results_provider_types.rs +++ b/crates/example-types/src/auction_results_provider_types.rs @@ -50,7 +50,7 @@ impl AuctionResultsProvider for TestAuctionResultsProvid /// in the solver. async fn fetch_auction_result( &self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, ) -> Result { if let Some(url) = &self.broadcast_url { let resp = diff --git a/crates/example-types/src/node_types.rs b/crates/example-types/src/node_types.rs index d1dff916ca..1ab2446c12 100644 --- a/crates/example-types/src/node_types.rs +++ b/crates/example-types/src/node_types.rs @@ -45,8 +45,8 @@ use vbs::version::StaticVersion; pub struct TestTypes; impl NodeType for TestTypes { type AuctionResult = TestAuctionResult; - type ViewTime = ViewNumber; - type EpochTime = EpochNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -75,8 +75,8 @@ impl NodeType for TestTypes { pub struct TestTypesRandomizedLeader; impl NodeType for TestTypesRandomizedLeader { type AuctionResult = TestAuctionResult; - type ViewTime = ViewNumber; - type EpochTime = EpochNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -105,8 +105,8 @@ impl NodeType for TestTypesRandomizedLeader { pub struct TestConsecutiveLeaderTypes; impl NodeType for TestConsecutiveLeaderTypes { type 
AuctionResult = TestAuctionResult; - type ViewTime = ViewNumber; - type EpochTime = EpochNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; @@ -238,8 +238,8 @@ mod tests { let data = TestData { data: 10 }; - let view_0 = ::ViewTime::new(0); - let view_1 = ::ViewTime::new(1); + let view_0 = ::View::new(0); + let view_1 = ::View::new(1); let versioned_data_0 = VersionedVoteData::::new( diff --git a/crates/example-types/src/storage_types.rs b/crates/example-types/src/storage_types.rs index 6749326746..2a093b1f02 100644 --- a/crates/example-types/src/storage_types.rs +++ b/crates/example-types/src/storage_types.rs @@ -29,17 +29,17 @@ use hotshot_types::{ use crate::testable_delay::{DelayConfig, SupportedTraitTypesForAsyncDelay, TestableDelay}; type VidShares = HashMap< - ::ViewTime, + ::View, HashMap<::SignatureKey, Proposal>>, >; #[derive(Clone, Debug)] pub struct TestStorageState { vids: VidShares, - das: HashMap>>, - proposals: BTreeMap>>, + das: HashMap>>, + proposals: BTreeMap>>, high_qc: Option>, - action: TYPES::ViewTime, + action: TYPES::View, } impl Default for TestStorageState { @@ -49,7 +49,7 @@ impl Default for TestStorageState { das: HashMap::new(), proposals: BTreeMap::new(), high_qc: None, - action: TYPES::ViewTime::genesis(), + action: TYPES::View::genesis(), } } } @@ -87,7 +87,7 @@ impl TestableDelay for TestStorage { impl TestStorage { pub async fn proposals_cloned( &self, - ) -> BTreeMap>> { + ) -> BTreeMap>> { self.inner.read().await.proposals.clone() } pub async fn high_qc_cloned(&self) -> Option> { @@ -96,7 +96,7 @@ impl TestStorage { pub async fn decided_upgrade_certificate(&self) -> Option> { self.decided_upgrade_certificate.read().await.clone() } - pub async fn last_actioned_view(&self) -> TYPES::ViewTime { + pub async fn last_actioned_view(&self) -> TYPES::View { self.inner.read().await.action } } @@ -145,7 +145,7 @@ impl Storage 
for TestStorage { async fn record_action( &self, - view: ::ViewTime, + view: ::View, action: hotshot_types::event::HotShotAction, ) -> Result<()> { if self.should_return_err { @@ -180,7 +180,7 @@ impl Storage for TestStorage { async fn update_undecided_state( &self, _leafs: CommitmentMap>, - _state: BTreeMap>, + _state: BTreeMap>, ) -> Result<()> { if self.should_return_err { bail!("Failed to update high qc to storage"); diff --git a/crates/examples/infra/mod.rs b/crates/examples/infra/mod.rs index 17e1361da7..0c7e8db673 100755 --- a/crates/examples/infra/mod.rs +++ b/crates/examples/infra/mod.rs @@ -463,7 +463,7 @@ pub trait RunDa< let start = Instant::now(); let mut event_stream = context.event_stream(); - let mut anchor_view: TYPES::ViewTime = ::genesis(); + let mut anchor_view: TYPES::View = ::genesis(); let mut num_successful_commits = 0; context.hotshot.start_consensus().await; @@ -563,7 +563,7 @@ pub trait RunDa< .hotshot .memberships .quorum_membership - .committee_leaders(TYPES::ViewTime::genesis(), TYPES::EpochTime::genesis()) + .committee_leaders(TYPES::View::genesis(), TYPES::Epoch::genesis()) .len(); let total_num_views = usize::try_from(consensus.locked_view().u64()).unwrap(); // `failed_num_views` could include uncommitted views diff --git a/crates/hotshot/src/lib.rs b/crates/hotshot/src/lib.rs index c23c0e7812..fe3e572595 100644 --- a/crates/hotshot/src/lib.rs +++ b/crates/hotshot/src/lib.rs @@ -123,7 +123,7 @@ pub struct SystemContext, V: Versi instance_state: Arc, /// The view to enter when first starting consensus - start_view: TYPES::ViewTime, + start_view: TYPES::View, /// Access to the output event stream. 
output_event_stream: (Sender>, InactiveReceiver>), @@ -303,11 +303,11 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext Option> { + pub async fn state(&self, view: TYPES::View) -> Option> { self.consensus.read().await.state(view).cloned() } @@ -974,10 +974,10 @@ pub struct HotShotInitializer { state_delta: Option>::Delta>>, /// Starting view number that should be equivelant to the view the node shut down with last. - start_view: TYPES::ViewTime, + start_view: TYPES::View, /// The view we last performed an action in. An action is Proposing or voting for /// Either the quorum or DA. - actioned_view: TYPES::ViewTime, + actioned_view: TYPES::View, /// Highest QC that was seen, for genesis it's the genesis QC. It should be for a view greater /// than `inner`s view number for the non genesis case because we must have seen higher QCs /// to decide on the leaf. @@ -988,9 +988,9 @@ pub struct HotShotInitializer { /// to vote and propose right away if they didn't miss anything while down. 
undecided_leafs: Vec>, /// Not yet decided state - undecided_state: BTreeMap>, + undecided_state: BTreeMap>, /// Proposals we have sent out to provide to others for catchup - saved_proposals: BTreeMap>>, + saved_proposals: BTreeMap>>, } impl HotShotInitializer { @@ -1007,8 +1007,8 @@ impl HotShotInitializer { inner: Leaf::genesis(&validated_state, &instance_state).await, validated_state: Some(Arc::new(validated_state)), state_delta: Some(Arc::new(state_delta)), - start_view: TYPES::ViewTime::new(0), - actioned_view: TYPES::ViewTime::new(0), + start_view: TYPES::View::new(0), + actioned_view: TYPES::View::new(0), saved_proposals: BTreeMap::new(), high_qc, decided_upgrade_certificate: None, @@ -1030,13 +1030,13 @@ impl HotShotInitializer { anchor_leaf: Leaf, instance_state: TYPES::InstanceState, validated_state: Option>, - start_view: TYPES::ViewTime, - actioned_view: TYPES::ViewTime, - saved_proposals: BTreeMap>>, + start_view: TYPES::View, + actioned_view: TYPES::View, + saved_proposals: BTreeMap>>, high_qc: QuorumCertificate, decided_upgrade_certificate: Option>, undecided_leafs: Vec>, - undecided_state: BTreeMap>, + undecided_state: BTreeMap>, ) -> Self { Self { inner: anchor_leaf, diff --git a/crates/hotshot/src/tasks/mod.rs b/crates/hotshot/src/tasks/mod.rs index 6a3f3ed419..22f2826ff0 100644 --- a/crates/hotshot/src/tasks/mod.rs +++ b/crates/hotshot/src/tasks/mod.rs @@ -169,8 +169,8 @@ pub fn add_network_event_task< ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, - view: TYPES::ViewTime::genesis(), - epoch: TYPES::EpochTime::genesis(), + view: TYPES::View::genesis(), + epoch: TYPES::Epoch::genesis(), quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), diff --git a/crates/hotshot/src/tasks/task_state.rs b/crates/hotshot/src/tasks/task_state.rs index 5cb60d7274..ecc58fcab1 100644 --- a/crates/hotshot/src/tasks/task_state.rs +++ b/crates/hotshot/src/tasks/task_state.rs @@ -176,7 +176,7 @@ impl, V: 
Versions> CreateTaskState finalize_relay_map: HashMap::default().into(), view_sync_timeout: handle.hotshot.config.view_sync_timeout, id: handle.hotshot.id, - last_garbage_collected_view: TYPES::ViewTime::new(0), + last_garbage_collected_view: TYPES::View::new(0), upgrade_lock: handle.hotshot.upgrade_lock.clone(), } } diff --git a/crates/hotshot/src/traits/election/randomized_committee.rs b/crates/hotshot/src/traits/election/randomized_committee.rs index 073fe63f5b..4fed098e9c 100644 --- a/crates/hotshot/src/traits/election/randomized_committee.rs +++ b/crates/hotshot/src/traits/election/randomized_committee.rs @@ -82,7 +82,7 @@ impl Membership for RandomizedCommittee { /// Get the stake table for the current view fn stake_table( &self, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -90,8 +90,8 @@ impl Membership for RandomizedCommittee { /// Get all members of the committee for the current view fn committee_members( &self, - _view_number: ::ViewTime, - _epoch: ::EpochTime, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -102,8 +102,8 @@ impl Membership for RandomizedCommittee { /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, - _view_number: ::ViewTime, - _epoch: ::EpochTime, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -115,7 +115,7 @@ impl Membership for RandomizedCommittee { fn stake( &self, pub_key: &::SignatureKey, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() @@ -125,7 +125,7 @@ impl Membership for RandomizedCommittee { fn has_stake( &self, pub_key: &::SignatureKey, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> bool { self.indexed_stake_table .get(pub_key) 
@@ -140,8 +140,8 @@ impl Membership for RandomizedCommittee { /// Index the vector of public keys with the current view number fn leader( &self, - view_number: TYPES::ViewTime, - _epoch: ::EpochTime, + view_number: TYPES::View, + _epoch: ::Epoch, ) -> TYPES::SignatureKey { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); @@ -155,7 +155,7 @@ impl Membership for RandomizedCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self, _epoch: ::EpochTime) -> usize { + fn total_nodes(&self, _epoch: ::Epoch) -> usize { self.stake_table.len() } diff --git a/crates/hotshot/src/traits/election/static_committee.rs b/crates/hotshot/src/traits/election/static_committee.rs index 054fc0aae9..2ef52a66e2 100644 --- a/crates/hotshot/src/traits/election/static_committee.rs +++ b/crates/hotshot/src/traits/election/static_committee.rs @@ -80,7 +80,7 @@ impl Membership for StaticCommittee { /// Get the stake table for the current view fn stake_table( &self, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -88,8 +88,8 @@ impl Membership for StaticCommittee { /// Get all members of the committee for the current view fn committee_members( &self, - _view_number: ::ViewTime, - _epoch: ::EpochTime, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -100,8 +100,8 @@ impl Membership for StaticCommittee { /// Get all eligible leaders of the committee for the current view fn committee_leaders( &self, - _view_number: ::ViewTime, - _epoch: ::EpochTime, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -113,7 +113,7 @@ impl Membership for StaticCommittee { fn stake( &self, pub_key: &::SignatureKey, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> Option<::StakeTableEntry> { // Only return the stake if it is above zero 
self.indexed_stake_table.get(pub_key).cloned() @@ -123,7 +123,7 @@ impl Membership for StaticCommittee { fn has_stake( &self, pub_key: &::SignatureKey, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> bool { self.indexed_stake_table .get(pub_key) @@ -138,8 +138,8 @@ impl Membership for StaticCommittee { /// Index the vector of public keys with the current view number fn leader( &self, - view_number: TYPES::ViewTime, - _epoch: ::EpochTime, + view_number: TYPES::View, + _epoch: ::Epoch, ) -> TYPES::SignatureKey { #[allow(clippy::cast_possible_truncation)] let index = *view_number as usize % self.eligible_leaders.len(); @@ -148,7 +148,7 @@ impl Membership for StaticCommittee { } /// Get the total number of nodes in the committee - fn total_nodes(&self, _epoch: ::EpochTime) -> usize { + fn total_nodes(&self, _epoch: ::Epoch) -> usize { self.stake_table.len() } diff --git a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs index 9104bce6cb..db41aad2ab 100644 --- a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -80,7 +80,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::EpochTime, + _epoch: ::Epoch, ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { self.stake_table.clone() } @@ -88,8 +88,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::ViewTime, - _epoch: ::EpochTime, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.stake_table .iter() @@ -100,8 +100,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::ViewTime, - _epoch: ::EpochTime, + _view_number: ::View, + _epoch: ::Epoch, ) -> std::collections::BTreeSet<::SignatureKey> { self.eligible_leaders .iter() @@ -113,7 +113,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> 
Option<::StakeTableEntry> { // Only return the stake if it is above zero self.indexed_stake_table.get(pub_key).cloned() @@ -123,7 +123,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::SignatureKey, - _epoch: ::EpochTime, + _epoch: ::Epoch, ) -> bool { self.indexed_stake_table .get(pub_key) @@ -138,8 +138,8 @@ impl Membership for StaticCommitteeLeaderForTwoViews::EpochTime, + view_number: TYPES::View, + _epoch: ::Epoch, ) -> TYPES::SignatureKey { let index = usize::try_from((*view_number / 2) % self.eligible_leaders.len() as u64).unwrap(); @@ -148,7 +148,7 @@ impl Membership for StaticCommitteeLeaderForTwoViews::EpochTime) -> usize { + fn total_nodes(&self, _epoch: ::Epoch) -> usize { self.stake_table.len() } diff --git a/crates/hotshot/src/traits/networking/libp2p_network.rs b/crates/hotshot/src/traits/networking/libp2p_network.rs index 4d094856dc..952d627684 100644 --- a/crates/hotshot/src/traits/networking/libp2p_network.rs +++ b/crates/hotshot/src/traits/networking/libp2p_network.rs @@ -1023,8 +1023,8 @@ impl ConnectedNetwork for Libp2pNetwork { where TYPES: NodeType + 'a, { - let future_view = ::ViewTime::new(view) + LOOK_AHEAD; - let epoch = ::EpochTime::new(epoch); + let future_view = ::View::new(view) + LOOK_AHEAD; + let epoch = ::Epoch::new(epoch); let future_leader = membership.leader(future_view, epoch); let _ = self diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index df74c3f818..8c200c8d1a 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -96,8 +96,8 @@ impl + 'static, V: Versions> /// Errors if signing the request for proposal fails pub fn request_proposal( &self, - view: TYPES::ViewTime, - epoch: TYPES::EpochTime, + view: TYPES::View, + epoch: TYPES::Epoch, leaf_commitment: Commitment>, ) -> Result>>>> { // We need to be able to sign this request before submitting it to the network. 
Compute the @@ -203,7 +203,7 @@ impl + 'static, V: Versions> /// return [`None`] if the requested view has already been decided (but see /// [`decided_state`](Self::decided_state)) or if there is no path for the requested /// view to ever be decided. - pub async fn state(&self, view: TYPES::ViewTime) -> Option> { + pub async fn state(&self, view: TYPES::View) -> Option> { self.hotshot.state(view).await } @@ -278,8 +278,8 @@ impl + 'static, V: Versions> #[allow(clippy::unused_async)] // async for API compatibility reasons pub async fn leader( &self, - view_number: TYPES::ViewTime, - epoch_number: TYPES::EpochTime, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, ) -> TYPES::SignatureKey { self.hotshot .memberships @@ -311,13 +311,13 @@ impl + 'static, V: Versions> /// Wrapper to get the view number this node is on. #[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] - pub async fn cur_view(&self) -> TYPES::ViewTime { + pub async fn cur_view(&self) -> TYPES::View { self.hotshot.consensus.read().await.cur_view() } /// Wrapper to get the epoch number this node is on. 
#[instrument(skip_all, target = "SystemContextHandle", fields(id = self.hotshot.id))] - pub async fn cur_epoch(&self) -> TYPES::EpochTime { + pub async fn cur_epoch(&self) -> TYPES::Epoch { self.hotshot.consensus.read().await.cur_epoch() } diff --git a/crates/task-impls/src/consensus2/handlers.rs b/crates/task-impls/src/consensus2/handlers.rs index ae0ac3d67f..6f90e89452 100644 --- a/crates/task-impls/src/consensus2/handlers.rs +++ b/crates/task-impls/src/consensus2/handlers.rs @@ -114,7 +114,7 @@ pub(crate) async fn handle_view_change< I: NodeImplementation, V: Versions, >( - new_view_number: TYPES::ViewTime, + new_view_number: TYPES::View, sender: &Sender>>, task_state: &mut Consensus2TaskState, ) -> Result<()> { @@ -155,7 +155,7 @@ pub(crate) async fn handle_view_change< async move { async_sleep(Duration::from_millis(timeout)).await; broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::ViewTime::new(*view_number))), + Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), &stream, ) .await; @@ -215,7 +215,7 @@ pub(crate) async fn handle_view_change< /// Handle a `Timeout` event. #[instrument(skip_all)] pub(crate) async fn handle_timeout, V: Versions>( - view_number: TYPES::ViewTime, + view_number: TYPES::View, sender: &Sender>>, task_state: &mut Consensus2TaskState, ) -> Result<()> { diff --git a/crates/task-impls/src/consensus2/mod.rs b/crates/task-impls/src/consensus2/mod.rs index ee69f17734..2fb6e30c02 100644 --- a/crates/task-impls/src/consensus2/mod.rs +++ b/crates/task-impls/src/consensus2/mod.rs @@ -70,13 +70,13 @@ pub struct Consensus2TaskState, V: pub storage: Arc>, /// The view number that this node is currently executing in. - pub cur_view: TYPES::ViewTime, + pub cur_view: TYPES::View, /// Timestamp this view starts at. pub cur_view_time: i64, /// The epoch number that this node is currently executing in. 
- pub cur_epoch: TYPES::EpochTime, + pub cur_epoch: TYPES::Epoch, /// Output events to application pub output_event_stream: async_broadcast::Sender>, @@ -91,7 +91,7 @@ pub struct Consensus2TaskState, V: pub consensus: OuterConsensus, /// The last decided view - pub last_decided_view: TYPES::ViewTime, + pub last_decided_view: TYPES::View, /// The node's id pub id: u64, diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 9d2cc9fd07..2e7c1357ff 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -49,10 +49,10 @@ pub struct DaTaskState, V: Version pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::ViewTime, + pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::EpochTime, + pub cur_epoch: TYPES::Epoch, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, @@ -111,7 +111,7 @@ impl, V: Versions> DaTaskState TaskEvent for HotShotEvent { #[derive(Debug, Clone)] pub struct ProposalMissing { /// View of missing proposal - pub view: TYPES::ViewTime, + pub view: TYPES::View, /// Channel to send the response back to pub response_chan: Sender>>>, } @@ -93,7 +93,7 @@ pub enum HotShotEvent { /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal QuorumVoteSend(QuorumVote), /// All dependencies for the quorum vote are validated. - QuorumVoteDependenciesValidated(TYPES::ViewTime), + QuorumVoteDependenciesValidated(TYPES::View), /// A quorum proposal with the given parent leaf is validated. /// The full validation checks include: /// 1. 
The proposal is not for an old view @@ -124,9 +124,9 @@ pub enum HotShotEvent { /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task DacSend(DaCertificate, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks - ViewChange(TYPES::ViewTime), + ViewChange(TYPES::View), /// Timeout for the view sync protocol; emitted by a replica in the view sync task - ViewSyncTimeout(TYPES::ViewTime, u64, ViewSyncPhase), + ViewSyncTimeout(TYPES::View, u64, ViewSyncPhase), /// Receive a `ViewSyncPreCommitVote` from the network; received by a relay in the view sync task ViewSyncPreCommitVoteRecv(ViewSyncPreCommitVote), @@ -157,9 +157,9 @@ pub enum HotShotEvent { ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2, TYPES::SignatureKey), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only - ViewSyncTrigger(TYPES::ViewTime), + ViewSyncTrigger(TYPES::View), /// A consensus view has timed out; emitted by a replica in the consensus task; received by the view sync task; internal event only - Timeout(TYPES::ViewTime), + Timeout(TYPES::View), /// Receive transactions from the network TransactionsRecv(Vec), /// Send transactions to the network @@ -169,14 +169,14 @@ pub enum HotShotEvent { VidCommitment, BuilderCommitment, >::Metadata, - TYPES::ViewTime, + TYPES::View, Vec1>, Option, ), /// Event when the transactions task has sequenced transactions. 
Contains the encoded transactions, the metadata, and the view number BlockRecv(PackedBundle), /// Event when the transactions task has a block formed - BlockReady(VidDisperse, TYPES::ViewTime), + BlockReady(VidDisperse, TYPES::View), /// Event when consensus decided on a leaf LeafDecided(Vec>), /// Send VID shares to VID storage nodes; emitted by the DA leader @@ -205,13 +205,13 @@ pub enum HotShotEvent { /* Consensus State Update Events */ /// A undecided view has been created and added to the validated state storage. - ValidatedStateUpdated(TYPES::ViewTime, View), + ValidatedStateUpdated(TYPES::View, View), /// A new locked view has been created (2-chain) - LockedViewUpdated(TYPES::ViewTime), + LockedViewUpdated(TYPES::View), /// A new anchor view has been successfully reached by this node (3-chain). - LastDecidedViewUpdated(TYPES::ViewTime), + LastDecidedViewUpdated(TYPES::View), /// A new high_qc has been reached by this node. UpdateHighQc(QuorumCertificate), @@ -260,7 +260,7 @@ pub enum HotShotEvent { impl HotShotEvent { #[allow(clippy::too_many_lines)] /// Return the view number for a hotshot event if present - pub fn view_number(&self) -> Option { + pub fn view_number(&self) -> Option { match self { HotShotEvent::QuorumVoteRecv(v) => Some(v.view_number()), HotShotEvent::TimeoutVoteRecv(v) | HotShotEvent::TimeoutVoteSend(v) => { @@ -512,7 +512,7 @@ impl Display for HotShotEvent { write!(f, "BlockReady(view_number={view_number:?})") } HotShotEvent::LeafDecided(leaves) => { - let view_numbers: Vec<::ViewTime> = + let view_numbers: Vec<::View> = leaves.iter().map(Leaf::view_number).collect(); write!(f, "LeafDecided({view_numbers:?})") } diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index 0b1435cca8..c4c67639e9 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -47,7 +47,7 @@ use crate::{events::HotShotEvent, request::REQUEST_TIMEOUT}; #[instrument(skip_all)] 
#[allow(clippy::too_many_arguments)] pub(crate) async fn fetch_proposal( - view_number: TYPES::ViewTime, + view_number: TYPES::View, event_sender: Sender>>, event_receiver: Receiver>>, quorum_membership: Arc, @@ -165,10 +165,10 @@ pub(crate) async fn fetch_proposal( #[derive(Debug)] pub struct LeafChainTraversalOutcome { /// The new locked view obtained from a 2 chain starting from the proposal's parent. - pub new_locked_view_number: Option, + pub new_locked_view_number: Option, /// The new decided view obtained from a 3 chain starting from the proposal's parent. - pub new_decided_view_number: Option, + pub new_decided_view_number: Option, /// The qc for the decided chain. pub new_decide_qc: Option>, @@ -353,7 +353,7 @@ pub async fn decide_from_proposal( #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( - next_proposal_view_number: TYPES::ViewTime, + next_proposal_view_number: TYPES::View, event_sender: &Sender>>, event_receiver: &Receiver>>, quorum_membership: Arc, @@ -593,8 +593,8 @@ pub async fn validate_proposal_safety_and_liveness< /// If any validation or view number check fails. pub async fn validate_proposal_view_and_certs( proposal: &Proposal>, - cur_view: TYPES::ViewTime, - cur_epoch: TYPES::EpochTime, + cur_view: TYPES::View, + cur_epoch: TYPES::Epoch, quorum_membership: &Arc, timeout_membership: &Arc, upgrade_lock: &UpgradeLock, @@ -674,11 +674,11 @@ pub async fn validate_proposal_view_and_certs( /// TODO: Remove args when we merge dependency tasks. 
#[allow(clippy::too_many_arguments)] pub(crate) async fn update_view( - new_view: TYPES::ViewTime, + new_view: TYPES::View, event_stream: &Sender>>, timeout: u64, consensus: OuterConsensus, - cur_view: &mut TYPES::ViewTime, + cur_view: &mut TYPES::View, cur_view_time: &mut i64, timeout_task: &mut JoinHandle<()>, output_event_stream: &Sender>, @@ -725,7 +725,7 @@ pub(crate) async fn update_view( async move { async_sleep(timeout).await; broadcast_event( - Arc::new(HotShotEvent::Timeout(TYPES::ViewTime::new(*view_number))), + Arc::new(HotShotEvent::Timeout(TYPES::View::new(*view_number))), &stream, ) .await; diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 2034e0aa64..9cb1f7abd0 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -163,7 +163,7 @@ impl NetworkMessageTaskState { // Send the external message to the external event stream so it can be processed broadcast_event( Event { - view_number: TYPES::ViewTime::new(1), + view_number: TYPES::View::new(1), event: EventType::ExternalMessageReceived(data), }, &self.external_event_stream, @@ -184,9 +184,9 @@ pub struct NetworkEventTaskState< /// comm network pub network: Arc, /// view number - pub view: TYPES::ViewTime, + pub view: TYPES::View, /// epoch number - pub epoch: TYPES::EpochTime, + pub epoch: TYPES::Epoch, /// quorum for the network pub quorum_membership: TYPES::Membership, /// da for the network @@ -301,7 +301,7 @@ impl< maybe_action: Option, storage: Arc>, state: Arc>>, - view: ::ViewTime, + view: ::View, ) -> Result<(), ()> { if let Some(action) = maybe_action { if !state.write().await.update_action(action, view) { diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs index 862481434b..bcdc568e18 100644 --- a/crates/task-impls/src/quorum_proposal/handlers.rs +++ b/crates/task-impls/src/quorum_proposal/handlers.rs @@ -60,10 +60,10 @@ pub(crate) enum ProposalDependency { /// 
Handler for the proposal dependency pub struct ProposalDependencyHandle { /// Latest view number that has been proposed for (proxy for cur_view). - pub latest_proposed_view: TYPES::ViewTime, + pub latest_proposed_view: TYPES::View, /// The view number to propose for. - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// The event sender. pub sender: Sender>>, diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index 95b6d99493..ef5def44fc 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -46,10 +46,10 @@ mod handlers; /// The state for the quorum proposal task. pub struct QuorumProposalTaskState, V: Versions> { /// Latest view number that has been proposed for. - pub latest_proposed_view: TYPES::ViewTime, + pub latest_proposed_view: TYPES::View, /// Table for the in-progress proposal dependency tasks. - pub proposal_dependencies: HashMap>, + pub proposal_dependencies: HashMap>, /// The underlying network pub network: Arc, @@ -107,7 +107,7 @@ impl, V: Versions> fn create_event_dependency( &self, dependency_type: ProposalDependency, - view_number: TYPES::ViewTime, + view_number: TYPES::View, event_receiver: Receiver>>, ) -> EventDependency>> { EventDependency::new( @@ -181,7 +181,7 @@ impl, V: Versions> /// Creates the requisite dependencies for the Quorum Proposal task. It also handles any event forwarding. 
fn create_and_complete_dependencies( &self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, event_receiver: &Receiver>>, event: Arc>, ) -> AndDependency>>>> { @@ -283,8 +283,8 @@ impl, V: Versions> #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Create dependency task", level = "error")] fn create_dependency_task_if_new( &mut self, - view_number: TYPES::ViewTime, - epoch_number: TYPES::EpochTime, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, event_receiver: Receiver>>, event_sender: Sender>>, event: Arc>, @@ -334,7 +334,7 @@ impl, V: Versions> /// Update the latest proposed view number. #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Update latest proposed view", level = "error")] - async fn update_latest_proposed_view(&mut self, new_view: TYPES::ViewTime) -> bool { + async fn update_latest_proposed_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_proposed_view < *new_view { debug!( "Updating latest proposed view from {} to {}", @@ -345,7 +345,7 @@ impl, V: Versions> for view in (*self.latest_proposed_view + 1)..=(*new_view) { if let Some(dependency) = self .proposal_dependencies - .remove(&TYPES::ViewTime::new(view)) + .remove(&TYPES::View::new(view)) { cancel_task(dependency).await; } diff --git a/crates/task-impls/src/quorum_proposal_recv/mod.rs b/crates/task-impls/src/quorum_proposal_recv/mod.rs index aa9ea0bae1..281e472fd0 100644 --- a/crates/task-impls/src/quorum_proposal_recv/mod.rs +++ b/crates/task-impls/src/quorum_proposal_recv/mod.rs @@ -55,13 +55,13 @@ pub struct QuorumProposalRecvTaskState, /// View number this view is executing in. - pub cur_view: TYPES::ViewTime, + pub cur_view: TYPES::View, /// Timestamp this view starts at. pub cur_view_time: i64, /// Epoch number this node is executing in. 
- pub cur_epoch: TYPES::EpochTime, + pub cur_epoch: TYPES::Epoch, /// The underlying network pub network: Arc, @@ -92,7 +92,7 @@ pub struct QuorumProposalRecvTaskState>>, + pub spawned_tasks: BTreeMap>>, /// Immutable instance state pub instance_state: Arc, @@ -108,7 +108,7 @@ impl, V: Versions> QuorumProposalRecvTaskState { /// Cancel all tasks the consensus tasks has spawned before the given view - pub async fn cancel_tasks(&mut self, view: TYPES::ViewTime) { + pub async fn cancel_tasks(&mut self, view: TYPES::View) { let keep = self.spawned_tasks.split_off(&view); let mut cancel = Vec::new(); while let Some((_, tasks)) = self.spawned_tasks.pop_first() { diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index a9c00536f5..70effb8f97 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -75,9 +75,9 @@ pub struct VoteDependencyHandle, V /// Reference to the storage. pub storage: Arc>, /// View number to vote on. - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// Epoch number to vote on. - pub epoch_number: TYPES::EpochTime, + pub epoch_number: TYPES::Epoch, /// Event sender. pub sender: Sender>>, /// Event receiver. @@ -376,10 +376,10 @@ pub struct QuorumVoteTaskState, V: pub instance_state: Arc, /// Latest view number that has been voted for. - pub latest_voted_view: TYPES::ViewTime, + pub latest_voted_view: TYPES::View, /// Table for the in-progress dependency tasks. 
- pub vote_dependencies: HashMap>, + pub vote_dependencies: HashMap>, /// The underlying network pub network: Arc, @@ -409,7 +409,7 @@ impl, V: Versions> QuorumVoteTaskS fn create_event_dependency( &self, dependency_type: VoteDependency, - view_number: TYPES::ViewTime, + view_number: TYPES::View, event_receiver: Receiver>>, ) -> EventDependency>> { EventDependency::new( @@ -453,8 +453,8 @@ impl, V: Versions> QuorumVoteTaskS #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote crete dependency task if new", level = "error")] fn create_dependency_task_if_new( &mut self, - view_number: TYPES::ViewTime, - epoch_number: TYPES::EpochTime, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, event_receiver: Receiver>>, event_sender: &Sender>>, event: Option>>, @@ -510,7 +510,7 @@ impl, V: Versions> QuorumVoteTaskS /// Update the latest voted view number. #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote update latest voted view", level = "error")] - async fn update_latest_voted_view(&mut self, new_view: TYPES::ViewTime) -> bool { + async fn update_latest_voted_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_voted_view < *new_view { debug!( "Updating next vote view from {} to {} in the quorum vote task", @@ -519,7 +519,7 @@ impl, V: Versions> QuorumVoteTaskS // Cancel the old dependency tasks. 
for view in *self.latest_voted_view..(*new_view) { - if let Some(dependency) = self.vote_dependencies.remove(&TYPES::ViewTime::new(view)) + if let Some(dependency) = self.vote_dependencies.remove(&TYPES::View::new(view)) { cancel_task(dependency).await; debug!("Vote dependency removed for view {:?}", view); diff --git a/crates/task-impls/src/request.rs b/crates/task-impls/src/request.rs index 50d465e2e9..8cd336e7b1 100644 --- a/crates/task-impls/src/request.rs +++ b/crates/task-impls/src/request.rs @@ -56,7 +56,7 @@ pub struct NetworkRequestState> { /// before sending a request pub state: OuterConsensus, /// Last seen view, we won't request for proposals before older than this view - pub view: TYPES::ViewTime, + pub view: TYPES::View, /// Delay before requesting peers pub delay: Duration, /// DA Membership @@ -70,7 +70,7 @@ pub struct NetworkRequestState> { /// A flag indicating that `HotShotEvent::Shutdown` has been received pub shutdown_flag: Arc, /// A flag indicating that `HotShotEvent::Shutdown` has been received - pub spawned_tasks: BTreeMap>>, + pub spawned_tasks: BTreeMap>>, } impl> Drop for NetworkRequestState { @@ -145,8 +145,8 @@ impl> NetworkRequestState>>, receiver: &Receiver>>, ) { @@ -173,8 +173,8 @@ impl> NetworkRequestState, sender: Sender>>, receiver: Receiver>>, - view: TYPES::ViewTime, - epoch: TYPES::EpochTime, + view: TYPES::View, + epoch: TYPES::Epoch, ) { let state = OuterConsensus::new(Arc::clone(&self.state.inner_consensus)); let network = Arc::clone(&self.network); @@ -260,7 +260,7 @@ impl> NetworkRequestState::SignatureKey>, public_key: &::SignatureKey, - view: TYPES::ViewTime, + view: TYPES::View, ) -> bool { // First send request to a random DA member for the view broadcast_event( @@ -303,7 +303,7 @@ impl> NetworkRequestState>>, da_members_for_view: BTreeSet<::SignatureKey>, - view: TYPES::ViewTime, + view: TYPES::View, ) -> Option>> { EventDependency::new( receiver.clone(), @@ -330,7 +330,7 @@ impl> NetworkRequestState, sender: 
&Sender>>, public_key: &::SignatureKey, - view: &TYPES::ViewTime, + view: &TYPES::View, shutdown_flag: &Arc, ) -> bool { let state = state.read().await; diff --git a/crates/task-impls/src/response.rs b/crates/task-impls/src/response.rs index 98e727033e..475a843896 100644 --- a/crates/task-impls/src/response.rs +++ b/crates/task-impls/src/response.rs @@ -140,7 +140,7 @@ impl NetworkResponseState { #[instrument(skip_all, target = "NetworkResponseState", fields(id = self.id))] async fn get_or_calc_vid_share( &self, - view: TYPES::ViewTime, + view: TYPES::View, key: &TYPES::SignatureKey, ) -> Option>> { let contained = self @@ -192,7 +192,7 @@ impl NetworkResponseState { } /// Makes sure the sender is allowed to send a request in the given epoch. - fn valid_sender(&self, sender: &TYPES::SignatureKey, epoch: TYPES::EpochTime) -> bool { + fn valid_sender(&self, sender: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool { self.quorum.has_stake(sender, epoch) } } diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 43f93bd083..da0429608a 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -82,10 +82,10 @@ pub struct TransactionTaskState, V pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::ViewTime, + pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::EpochTime, + pub cur_epoch: TYPES::Epoch, /// Reference to consensus. Leader will require a read lock on this. 
pub consensus: OuterConsensus, @@ -120,7 +120,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change( &mut self, event_stream: &Sender>>, - block_view: TYPES::ViewTime, + block_view: TYPES::View, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -144,7 +144,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change_legacy( &mut self, event_stream: &Sender>>, - block_view: TYPES::ViewTime, + block_view: TYPES::View, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -243,7 +243,7 @@ impl, V: Versions> TransactionTask /// Returns an error if the solver cannot be contacted, or if none of the builders respond. async fn produce_block_marketplace( &mut self, - block_view: TYPES::ViewTime, + block_view: TYPES::View, task_start_time: Instant, ) -> Result> { ensure!( @@ -340,7 +340,7 @@ impl, V: Versions> TransactionTask /// Produce a null block pub fn null_block( &self, - block_view: TYPES::ViewTime, + block_view: TYPES::View, version: Version, ) -> Option> { let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); @@ -372,7 +372,7 @@ impl, V: Versions> TransactionTask pub async fn handle_view_change_marketplace( &mut self, event_stream: &Sender>>, - block_view: TYPES::ViewTime, + block_view: TYPES::View, ) -> Option { let task_start_time = Instant::now(); @@ -484,9 +484,9 @@ impl, V: Versions> TransactionTask #[instrument(skip_all, target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] async fn last_vid_commitment_retry( &self, - block_view: TYPES::ViewTime, + block_view: TYPES::View, task_start_time: Instant, - ) -> Result<(TYPES::ViewTime, VidCommitment)> { + ) -> Result<(TYPES::View, VidCommitment)> { loop { match self.last_vid_commitment(block_view).await { Ok((view, comm)) => break Ok((view, comm)), @@ -505,10 +505,10 @@ impl, V: Versions> TransactionTask #[instrument(skip_all, 
target = "TransactionTaskState", fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view))] async fn last_vid_commitment( &self, - block_view: TYPES::ViewTime, - ) -> Result<(TYPES::ViewTime, VidCommitment)> { + block_view: TYPES::View, + ) -> Result<(TYPES::View, VidCommitment)> { let consensus = self.consensus.read().await; - let mut target_view = TYPES::ViewTime::new(block_view.saturating_sub(1)); + let mut target_view = TYPES::View::new(block_view.saturating_sub(1)); loop { let view_data = consensus @@ -530,7 +530,7 @@ impl, V: Versions> TransactionTask } ViewInner::Failed => { // For failed views, backtrack - target_view = TYPES::ViewTime::new( + target_view = TYPES::View::new( target_view.checked_sub(1).context("Reached genesis")?, ); continue; @@ -540,7 +540,7 @@ impl, V: Versions> TransactionTask } #[instrument(skip_all, fields(id = self.id, cur_view = *self.cur_view, block_view = *block_view), name = "wait_for_block", level = "error")] - async fn wait_for_block(&self, block_view: TYPES::ViewTime) -> Option> { + async fn wait_for_block(&self, block_view: TYPES::View) -> Option> { let task_start_time = Instant::now(); // Find commitment to the block we want to build upon @@ -604,7 +604,7 @@ impl, V: Versions> TransactionTask async fn get_available_blocks( &self, parent_comm: VidCommitment, - view_number: TYPES::ViewTime, + view_number: TYPES::View, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> Vec<(AvailableBlockInfo, usize)> { let tasks = self @@ -673,7 +673,7 @@ impl, V: Versions> TransactionTask async fn block_from_builder( &self, parent_comm: VidCommitment, - view_number: TYPES::ViewTime, + view_number: TYPES::View, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, ) -> anyhow::Result> { let mut available_blocks = self diff --git a/crates/task-impls/src/upgrade.rs b/crates/task-impls/src/upgrade.rs index 1c1ffd8db6..f31debd9a9 100644 --- 
a/crates/task-impls/src/upgrade.rs +++ b/crates/task-impls/src/upgrade.rs @@ -43,10 +43,10 @@ pub struct UpgradeTaskState, V: Ve pub output_event_stream: async_broadcast::Sender>, /// View number this view is executing in. - pub cur_view: TYPES::ViewTime, + pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::EpochTime, + pub cur_epoch: TYPES::Epoch, /// Membership for Quorum Certs/votes pub quorum_membership: Arc, @@ -169,7 +169,7 @@ impl, V: Versions> UpgradeTaskStat // the `UpgradeProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block - if self.cur_view != TYPES::ViewTime::genesis() && view < self.cur_view - 1 { + if self.cur_view != TYPES::View::genesis() && view < self.cur_view - 1 { warn!("Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", view, self.cur_view @@ -266,22 +266,22 @@ impl, V: Versions> UpgradeTaskStat && time < self.stop_proposing_time && !self.upgraded().await && self.quorum_membership.leader( - TYPES::ViewTime::new(view + UPGRADE_PROPOSE_OFFSET), - self.cur_epoch, + TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), + self.cur_epoch, ) == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: V::Base::VERSION, new_version: V::Upgrade::VERSION, new_version_hash: V::UPGRADE_HASH.to_vec(), - old_version_last_view: TYPES::ViewTime::new(view + UPGRADE_BEGIN_OFFSET), - new_version_first_view: TYPES::ViewTime::new(view + UPGRADE_FINISH_OFFSET), - decide_by: TYPES::ViewTime::new(view + UPGRADE_DECIDE_BY_OFFSET), + old_version_last_view: TYPES::View::new(view + UPGRADE_BEGIN_OFFSET), + new_version_first_view: TYPES::View::new(view + UPGRADE_FINISH_OFFSET), + decide_by: TYPES::View::new(view + 
UPGRADE_DECIDE_BY_OFFSET), }; let upgrade_proposal = UpgradeProposal { upgrade_proposal: upgrade_proposal_data.clone(), - view_number: TYPES::ViewTime::new(view + UPGRADE_PROPOSE_OFFSET), + view_number: TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), }; let signature = TYPES::SignatureKey::sign( diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 433f5ad775..106203bd07 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -30,9 +30,9 @@ use crate::{ /// Tracks state of a VID task pub struct VidTaskState> { /// View number this view is executing in. - pub cur_view: TYPES::ViewTime, + pub cur_view: TYPES::View, /// Epoch number this node is executing in. - pub cur_epoch: TYPES::EpochTime, + pub cur_epoch: TYPES::Epoch, /// Reference to consensus. Leader will require a read lock on this. pub consensus: OuterConsensus, /// The underlying network @@ -44,7 +44,7 @@ pub struct VidTaskState> { /// Our Private Key pub private_key: ::PrivateKey, /// The view and ID of the current vote collection task, if there is one. 
- pub vote_collector: Option<(TYPES::ViewTime, usize, usize)>, + pub vote_collector: Option<(TYPES::View, usize, usize)>, /// This state's ID pub id: u64, } diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 49055f288a..6bdcf6e614 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -62,18 +62,18 @@ pub enum ViewSyncPhase { /// Type alias for a map from View Number to Relay to Vote Task type RelayMap = HashMap< - ::ViewTime, + ::View, BTreeMap>, >; /// Main view sync task state pub struct ViewSyncTaskState, V: Versions> { /// View HotShot is currently in - pub current_view: TYPES::ViewTime, + pub current_view: TYPES::View, /// View HotShot wishes to be in - pub next_view: TYPES::ViewTime, + pub next_view: TYPES::View, /// Epoch HotShot is currently in - pub current_epoch: TYPES::EpochTime, + pub current_epoch: TYPES::Epoch, /// The underlying network pub network: Arc, /// Membership for the quorum @@ -89,7 +89,7 @@ pub struct ViewSyncTaskState, V: V pub num_timeouts_tracked: u64, /// Map of running replica tasks - pub replica_task_map: RwLock>>, + pub replica_task_map: RwLock>>, /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: RwLock< @@ -107,7 +107,7 @@ pub struct ViewSyncTaskState, V: V pub view_sync_timeout: Duration, /// Last view we garbage collected old tasks - pub last_garbage_collected_view: TYPES::ViewTime, + pub last_garbage_collected_view: TYPES::View, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, @@ -138,11 +138,11 @@ pub struct ViewSyncReplicaTaskState, V: Versions> ViewSyncTaskSta pub async fn send_to_or_create_replica( &mut self, event: Arc>, - view: TYPES::ViewTime, + view: TYPES::View, sender: &Sender>>, ) { // This certificate is old, we can throw it away @@ -415,7 +415,7 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::ViewChange(new_view) => { - let new_view = TYPES::ViewTime::new(*new_view); + let new_view = 
TYPES::View::new(*new_view); if self.current_view < new_view { debug!( "Change from view {} to view {} in view sync task", @@ -434,19 +434,19 @@ impl, V: Versions> ViewSyncTaskSta self.replica_task_map .write() .await - .remove_entry(&TYPES::ViewTime::new(i)); + .remove_entry(&TYPES::View::new(i)); self.pre_commit_relay_map .write() .await - .remove_entry(&TYPES::ViewTime::new(i)); + .remove_entry(&TYPES::View::new(i)); self.commit_relay_map .write() .await - .remove_entry(&TYPES::ViewTime::new(i)); + .remove_entry(&TYPES::View::new(i)); self.finalize_relay_map .write() .await - .remove_entry(&TYPES::ViewTime::new(i)); + .remove_entry(&TYPES::View::new(i)); } self.last_garbage_collected_view = self.current_view - 1; @@ -454,7 +454,7 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - if view_number <= TYPES::ViewTime::new(*self.current_view) { + if view_number <= TYPES::View::new(*self.current_view) { return; } @@ -485,7 +485,7 @@ impl, V: Versions> ViewSyncTaskSta // If this is the first timeout we've seen advance to the next view self.current_view = view_number; broadcast_event( - Arc::new(HotShotEvent::ViewChange(TYPES::ViewTime::new( + Arc::new(HotShotEvent::ViewChange(TYPES::View::new( *self.current_view, ))), &event_stream, @@ -585,7 +585,7 @@ impl, V: Versions> broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::ViewTime::new(*next_view), + TYPES::View::new(*next_view), relay, phase, )), @@ -683,7 +683,7 @@ impl, V: Versions> ); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::ViewTime::new(*next_view), + TYPES::View::new(*next_view), relay, phase, )), @@ -740,7 +740,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; - if self.next_view != TYPES::ViewTime::new(*view_number) { + if self.next_view != TYPES::View::new(*view_number) { error!("Unexpected view number to triger view sync"); return None; } 
@@ -780,7 +780,7 @@ impl, V: Versions> warn!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::ViewTime::new(*next_view), + TYPES::View::new(*next_view), relay, ViewSyncPhase::None, )), @@ -796,7 +796,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { let round = *round; // Shouldn't ever receive a timeout for a relay higher than ours - if TYPES::ViewTime::new(*round) == self.next_view && *relay == self.relay { + if TYPES::View::new(*round) == self.next_view && *relay == self.relay { if let Some(timeout_task) = self.timeout_task.take() { cancel_task(timeout_task).await; } @@ -849,7 +849,7 @@ impl, V: Versions> ); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( - TYPES::ViewTime::new(*next_view), + TYPES::View::new(*next_view), relay, last_cert, )), diff --git a/crates/task-impls/src/vote_collection.rs b/crates/task-impls/src/vote_collection.rs index e9e7f4d924..58f6b46fc8 100644 --- a/crates/task-impls/src/vote_collection.rs +++ b/crates/task-impls/src/vote_collection.rs @@ -39,7 +39,7 @@ use crate::{ /// Alias for a map of Vote Collectors pub type VoteCollectorsMap = - BTreeMap<::ViewTime, VoteCollectionTaskState>; + BTreeMap<::View, VoteCollectionTaskState>; /// Task state for collecting votes of one type and emitting a certificate pub struct VoteCollectionTaskState< @@ -58,10 +58,10 @@ pub struct VoteCollectionTaskState< pub accumulator: Option>, /// The view which we are collecting votes for - pub view: TYPES::ViewTime, + pub view: TYPES::View, /// The epoch which we are collecting votes for - pub epoch: TYPES::EpochTime, + pub epoch: TYPES::Epoch, /// Node id pub id: u64, @@ -78,7 +78,7 @@ pub trait AggregatableVote< fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> TYPES::SignatureKey; /// return the Hotshot event for the completion of this CERT @@ -161,9 +161,9 @@ pub struct AccumulatorInfo 
{ /// Membership we are accumulation votes for pub membership: Arc, /// View of the votes we are collecting - pub view: TYPES::ViewTime, + pub view: TYPES::View, /// Epoch of the votes we are collecting - pub epoch: TYPES::EpochTime, + pub epoch: TYPES::Epoch, /// This nodes id pub id: u64, } @@ -230,7 +230,7 @@ pub async fn handle_vote< vote: &VOTE, public_key: TYPES::SignatureKey, membership: &Arc, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, id: u64, event: &Arc>, event_stream: &Sender>>, @@ -310,7 +310,7 @@ impl AggregatableVote, QuorumCertifica fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> TYPES::SignatureKey { membership.leader(self.view_number() + 1, epoch) } @@ -328,7 +328,7 @@ impl AggregatableVote, UpgradeCertifi fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> TYPES::SignatureKey { membership.leader(self.view_number(), epoch) } @@ -346,7 +346,7 @@ impl AggregatableVote, DaCertificate TYPES::SignatureKey { membership.leader(self.view_number(), epoch) } @@ -364,7 +364,7 @@ impl AggregatableVote, TimeoutCertifi fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> TYPES::SignatureKey { membership.leader(self.view_number() + 1, epoch) } @@ -383,7 +383,7 @@ impl fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> TYPES::SignatureKey { membership.leader(self.date().round + self.date().relay, epoch) } @@ -402,7 +402,7 @@ impl fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> TYPES::SignatureKey { membership.leader(self.date().round + self.date().relay, epoch) } @@ -421,7 +421,7 @@ impl fn leader( &self, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> TYPES::SignatureKey { membership.leader(self.date().round + self.date().relay, epoch) } diff --git 
a/crates/testing/src/byzantine/byzantine_behaviour.rs b/crates/testing/src/byzantine/byzantine_behaviour.rs index 83f30138bf..825108e48b 100644 --- a/crates/testing/src/byzantine/byzantine_behaviour.rs +++ b/crates/testing/src/byzantine/byzantine_behaviour.rs @@ -116,7 +116,7 @@ pub struct DishonestLeader { /// How far back to look for a QC pub view_look_back: usize, /// Shared state of all view numbers we send bad proposal at - pub dishonest_proposal_view_numbers: Arc>>, + pub dishonest_proposal_view_numbers: Arc>>, } /// Add method that will handle `QuorumProposalSend` events @@ -246,7 +246,7 @@ pub struct ViewDelay { /// How many views the node will be delayed pub number_of_views_to_delay: u64, /// A map that is from view number to vector of events - pub events_for_view: HashMap>>, + pub events_for_view: HashMap>>, /// Specify which view number to stop delaying pub stop_view_delay_at_view_number: u64, } @@ -271,7 +271,7 @@ impl + std::fmt::Debug, V: Version if view_diff > 0 { return match self .events_for_view - .remove(&::ViewTime::new(view_diff)) + .remove(&::View::new(view_diff)) { Some(lookback_events) => lookback_events.clone(), // we have already return all received events for this view @@ -346,8 +346,8 @@ impl + std::fmt::Debug, V: Version ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, - view: TYPES::ViewTime::genesis(), - epoch: TYPES::EpochTime::genesis(), + view: TYPES::View::genesis(), + epoch: TYPES::Epoch::genesis(), quorum_membership, da_membership, storage: Arc::clone(&handle.storage()), @@ -376,7 +376,7 @@ pub struct DishonestVoter { /// Collect all votes the node sends pub votes_sent: Vec>, /// Shared state with views numbers that leaders were dishonest at - pub dishonest_proposal_view_numbers: Arc>>, + pub dishonest_proposal_view_numbers: Arc>>, } #[async_trait] diff --git a/crates/testing/src/consistency_task.rs b/crates/testing/src/consistency_task.rs index 3c15a0f715..0d78c4d12c 100644 --- 
a/crates/testing/src/consistency_task.rs +++ b/crates/testing/src/consistency_task.rs @@ -24,10 +24,10 @@ use crate::{ }; /// Map from views to leaves for a single node, allowing multiple leaves for each view (because the node may a priori send us multiple leaves for a given view). -pub type NodeMap = BTreeMap<::ViewTime, Vec>>; +pub type NodeMap = BTreeMap<::View, Vec>>; /// A sanitized map from views to leaves for a single node, with only a single leaf per view. -pub type NodeMapSanitized = BTreeMap<::ViewTime, Leaf>; +pub type NodeMapSanitized = BTreeMap<::View, Leaf>; /// Validate that the `NodeMap` only has a single leaf per view. fn sanitize_node_map( @@ -68,7 +68,7 @@ async fn validate_node_map( .map(|((a, b), c)| (a, b, c)); let mut decided_upgrade_certificate = None; - let mut view_decided = TYPES::ViewTime::new(0); + let mut view_decided = TYPES::View::new(0); for (grandparent, _parent, child) in leaf_triples { if let Some(cert) = grandparent.upgrade_certificate() { @@ -144,7 +144,7 @@ fn sanitize_network_map( Ok(result) } -pub type ViewMap = BTreeMap<::ViewTime, BTreeMap>>; +pub type ViewMap = BTreeMap<::View, BTreeMap>>; // Invert the network map by interchanging the roles of the node_id and view number. // @@ -171,7 +171,7 @@ async fn invert_network_map( } /// A view map, sanitized to have exactly one leaf per view. 
-pub type ViewMapSanitized = BTreeMap<::ViewTime, Leaf>; +pub type ViewMapSanitized = BTreeMap<::View, Leaf>; fn sanitize_view_map( view_map: &ViewMap, diff --git a/crates/testing/src/helpers.rs b/crates/testing/src/helpers.rs index e0891ace20..c9113e711d 100644 --- a/crates/testing/src/helpers.rs +++ b/crates/testing/src/helpers.rs @@ -121,8 +121,8 @@ pub async fn build_cert< >( data: DATAType, membership: &TYPES::Membership, - view: TYPES::ViewTime, - epoch: TYPES::EpochTime, + view: TYPES::View, + epoch: TYPES::Epoch, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -187,8 +187,8 @@ pub async fn build_assembled_sig< >( data: &DATAType, membership: &TYPES::Membership, - view: TYPES::ViewTime, - epoch: TYPES::EpochTime, + view: TYPES::View, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> ::QcType { let stake_table = membership.stake_table(epoch); @@ -246,8 +246,8 @@ pub fn key_pair_for_id( #[must_use] pub fn vid_scheme_from_view_number( membership: &TYPES::Membership, - view_number: TYPES::ViewTime, - epoch_number: TYPES::EpochTime, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, ) -> VidSchemeType { let num_storage_nodes = membership .committee_members(view_number, epoch_number) @@ -257,8 +257,8 @@ pub fn vid_scheme_from_view_number( pub fn vid_payload_commitment( quorum_membership: &::Membership, - view_number: TYPES::ViewTime, - epoch_number: TYPES::EpochTime, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, transactions: Vec, ) -> VidCommitment { let mut vid = @@ -272,7 +272,7 @@ pub fn vid_payload_commitment( pub fn da_payload_commitment( quorum_membership: &::Membership, transactions: Vec, - epoch_number: TYPES::EpochTime, + epoch_number: TYPES::Epoch, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); @@ -284,8 +284,8 @@ pub fn da_payload_commitment( pub fn build_payload_commitment( membership: &::Membership, - view: TYPES::ViewTime, - epoch: 
TYPES::EpochTime, + view: TYPES::View, + epoch: TYPES::Epoch, ) -> ::Commit { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. @@ -297,8 +297,8 @@ pub fn build_payload_commitment( /// TODO: pub fn build_vid_proposal( quorum_membership: &::Membership, - view_number: TYPES::ViewTime, - epoch_number: TYPES::EpochTime, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, transactions: Vec, private_key: &::PrivateKey, ) -> VidProposal { @@ -339,8 +339,8 @@ pub fn build_vid_proposal( pub async fn build_da_certificate( quorum_membership: &::Membership, da_membership: &::Membership, - view_number: TYPES::ViewTime, - epoch_number: TYPES::EpochTime, + view_number: TYPES::View, + epoch_number: TYPES::Epoch, transactions: Vec, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, diff --git a/crates/testing/src/overall_safety_task.rs b/crates/testing/src/overall_safety_task.rs index 950cd24cfe..979c2c2a04 100644 --- a/crates/testing/src/overall_safety_task.rs +++ b/crates/testing/src/overall_safety_task.rs @@ -61,12 +61,12 @@ pub enum OverallSafetyTaskErr { NotEnoughDecides { got: usize, expected: usize }, #[error("Too many view failures: {0:?}")] - TooManyFailures(HashSet), + TooManyFailures(HashSet), #[error("Inconsistent failed views: expected: {expected_failed_views:?}, actual: {actual_failed_views:?}")] InconsistentFailedViews { - expected_failed_views: Vec, - actual_failed_views: HashSet, + expected_failed_views: Vec, + actual_failed_views: HashSet, }, #[error( "Not enough round results: results_count: {results_count}, views_count: {views_count}" @@ -97,7 +97,7 @@ pub struct OverallSafetyTask, V: Versions> OverallSafetyTask { - async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::ViewTime) { + async fn handle_view_failure(&mut self, num_failed_views: usize, view_number: TYPES::View) { let expected_views_to_fail 
= &mut self.properties.expected_views_to_fail; self.ctx.failed_views.insert(view_number); @@ -155,7 +155,7 @@ impl, V: Versions> TestTas block_size: maybe_block_size, } => { // Skip the genesis leaf. - if leaf_chain.last().unwrap().leaf.view_number() == TYPES::ViewTime::genesis() { + if leaf_chain.last().unwrap().leaf.view_number() == TYPES::View::genesis() { return Ok(()); } let paired_up = (leaf_chain.to_vec(), (*qc).clone()); @@ -364,18 +364,18 @@ impl Default for RoundCtx { pub struct RoundCtx { /// results from previous rounds /// view number -> round result - pub round_results: HashMap>, + pub round_results: HashMap>, /// during the run view refactor - pub failed_views: HashSet, + pub failed_views: HashSet, /// successful views - pub successful_views: HashSet, + pub successful_views: HashSet, } impl RoundCtx { /// inserts an error into the context pub fn insert_error_to_context( &mut self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, idx: usize, error: Arc>, ) { @@ -569,7 +569,7 @@ pub struct OverallSafetyPropertiesDescription { /// required to mark view as successful pub threshold_calculator: Arc usize + Send + Sync>, /// pass in the views that we expect to fail - pub expected_views_to_fail: HashMap, + pub expected_views_to_fail: HashMap, } impl std::fmt::Debug for OverallSafetyPropertiesDescription { diff --git a/crates/testing/src/spinning_task.rs b/crates/testing/src/spinning_task.rs index fcef6806e3..5593a4336e 100644 --- a/crates/testing/src/spinning_task.rs +++ b/crates/testing/src/spinning_task.rs @@ -58,9 +58,9 @@ pub struct SpinningTask< /// late start nodes pub(crate) late_start: HashMap>, /// time based changes - pub(crate) changes: BTreeMap>, + pub(crate) changes: BTreeMap>, /// most recent view seen by spinning task - pub(crate) latest_view: Option, + pub(crate) latest_view: Option, /// Last decided leaf that can be used as the anchor leaf to initialize the node. 
pub(crate) last_decided_leaf: Leaf, /// Highest qc seen in the test for restarting nodes @@ -155,8 +155,8 @@ where self.last_decided_leaf.clone(), TestInstanceState::new(self.async_delay_config.clone()), None, - TYPES::ViewTime::genesis(), - TYPES::ViewTime::genesis(), + TYPES::View::genesis(), + TYPES::View::genesis(), BTreeMap::new(), self.high_qc.clone(), None, diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index da3c88d180..9291bf76b0 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -170,10 +170,10 @@ where // add spinning task // map spinning to view - let mut changes: BTreeMap> = BTreeMap::new(); + let mut changes: BTreeMap> = BTreeMap::new(); for (view, mut change) in spinning_changes { changes - .entry(TYPES::ViewTime::new(view)) + .entry(TYPES::View::new(view)) .or_insert_with(Vec::new) .append(&mut change); } diff --git a/crates/testing/tests/tests_1/view_sync_task.rs b/crates/testing/tests/tests_1/view_sync_task.rs index 604d2bdb21..aaacc26477 100644 --- a/crates/testing/tests/tests_1/view_sync_task.rs +++ b/crates/testing/tests/tests_1/view_sync_task.rs @@ -29,13 +29,13 @@ async fn test_view_sync_task() { let vote_data = ViewSyncPreCommitData { relay: 0, - round: ::ViewTime::new( + round: ::View::new( 4, ), }; let vote = hotshot_types::simple_vote::ViewSyncPreCommitVote::::create_signed_vote( vote_data, - ::ViewTime::new(4), + ::View::new(4), hotshot_types::traits::consensus_api::ConsensusApi::public_key(&handle), hotshot_types::traits::consensus_api::ConsensusApi::private_key(&handle), &handle.hotshot.upgrade_lock, diff --git a/crates/testing/tests/tests_3/memory_network.rs b/crates/testing/tests/tests_3/memory_network.rs index 09e42953ff..3050cd7d32 100644 --- a/crates/testing/tests/tests_3/memory_network.rs +++ b/crates/testing/tests/tests_3/memory_network.rs @@ -54,8 +54,8 @@ pub struct Test; impl NodeType for Test { type AuctionResult = TestAuctionResult; - type ViewTime = 
ViewNumber; - type EpochTime = EpochNumber; + type View = ViewNumber; + type Epoch = EpochNumber; type BlockHeader = TestBlockHeader; type BlockPayload = TestBlockPayload; type SignatureKey = BLSPubKey; diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index 311aecccf5..c311e76c26 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -43,7 +43,7 @@ pub type CommitmentMap = HashMap, T>; /// A type alias for `BTreeMap>>>` pub type VidShares = BTreeMap< - ::ViewTime, + ::View, HashMap<::SignatureKey, Proposal>>, >; @@ -272,30 +272,30 @@ impl HotShotActionViews { #[derive(custom_debug::Debug, Clone)] pub struct Consensus { /// The validated states that are currently loaded in memory. - validated_state_map: BTreeMap>, + validated_state_map: BTreeMap>, /// All the VID shares we've received for current and future views. vid_shares: VidShares, /// All the DA certs we've received for current and future views. /// view -> DA cert - saved_da_certs: HashMap>, + saved_da_certs: HashMap>, /// View number that is currently on. - cur_view: TYPES::ViewTime, + cur_view: TYPES::View, /// Epoch number that is currently on. - cur_epoch: TYPES::EpochTime, + cur_epoch: TYPES::Epoch, /// Last proposals we sent out, None if we haven't proposed yet. /// Prevents duplicate proposals, and can be served to those trying to catchup - last_proposals: BTreeMap>>, + last_proposals: BTreeMap>>, /// last view had a successful decide event - last_decided_view: TYPES::ViewTime, + last_decided_view: TYPES::View, /// The `locked_qc` view number - locked_view: TYPES::ViewTime, + locked_view: TYPES::View, /// Map of leaf hash -> leaf /// - contains undecided leaves @@ -305,12 +305,12 @@ pub struct Consensus { /// Bundle of views which we performed the most recent action /// visibible to the network. Actions are votes and proposals /// for DA and Quorum - last_actions: HotShotActionViews, + last_actions: HotShotActionViews, /// Saved payloads. 
/// /// Encoded transactions for every view if we got a payload for that view. - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, /// the highqc per spec high_qc: QuorumCertificate, @@ -390,15 +390,15 @@ impl Consensus { /// Constructor. #[allow(clippy::too_many_arguments)] pub fn new( - validated_state_map: BTreeMap>, - cur_view: TYPES::ViewTime, - cur_epoch: TYPES::EpochTime, - locked_view: TYPES::ViewTime, - last_decided_view: TYPES::ViewTime, - last_actioned_view: TYPES::ViewTime, - last_proposals: BTreeMap>>, + validated_state_map: BTreeMap>, + cur_view: TYPES::View, + cur_epoch: TYPES::Epoch, + locked_view: TYPES::View, + last_decided_view: TYPES::View, + last_actioned_view: TYPES::View, + last_proposals: BTreeMap>>, saved_leaves: CommitmentMap>, - saved_payloads: BTreeMap>, + saved_payloads: BTreeMap>, high_qc: QuorumCertificate, metrics: Arc, ) -> Self { @@ -420,22 +420,22 @@ impl Consensus { } /// Get the current view. - pub fn cur_view(&self) -> TYPES::ViewTime { + pub fn cur_view(&self) -> TYPES::View { self.cur_view } /// Get the current epoch. - pub fn cur_epoch(&self) -> TYPES::EpochTime { + pub fn cur_epoch(&self) -> TYPES::Epoch { self.cur_epoch } /// Get the last decided view. - pub fn last_decided_view(&self) -> TYPES::ViewTime { + pub fn last_decided_view(&self) -> TYPES::View { self.last_decided_view } /// Get the locked view. - pub fn locked_view(&self) -> TYPES::ViewTime { + pub fn locked_view(&self) -> TYPES::View { self.locked_view } @@ -445,7 +445,7 @@ impl Consensus { } /// Get the validated state map. - pub fn validated_state_map(&self) -> &BTreeMap> { + pub fn validated_state_map(&self) -> &BTreeMap> { &self.validated_state_map } @@ -455,7 +455,7 @@ impl Consensus { } /// Get the saved payloads. - pub fn saved_payloads(&self) -> &BTreeMap> { + pub fn saved_payloads(&self) -> &BTreeMap> { &self.saved_payloads } @@ -465,21 +465,21 @@ impl Consensus { } /// Get the saved DA certs. 
- pub fn saved_da_certs(&self) -> &HashMap> { + pub fn saved_da_certs(&self) -> &HashMap> { &self.saved_da_certs } /// Get the map of our recent proposals pub fn last_proposals( &self, - ) -> &BTreeMap>> { + ) -> &BTreeMap>> { &self.last_proposals } /// Update the current view. /// # Errors /// Can return an error when the new view_number is not higher than the existing view number. - pub fn update_view(&mut self, view_number: TYPES::ViewTime) -> Result<()> { + pub fn update_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.cur_view, "New view isn't newer than the current view." @@ -491,7 +491,7 @@ impl Consensus { /// Update the current epoch. /// # Errors /// Can return an error when the new epoch_number is not higher than the existing epoch number. - pub fn update_epoch(&mut self, epoch_number: TYPES::EpochTime) -> Result<()> { + pub fn update_epoch(&mut self, epoch_number: TYPES::Epoch) -> Result<()> { ensure!( epoch_number > self.cur_epoch, "New epoch isn't newer than the current epoch." @@ -503,7 +503,7 @@ impl Consensus { /// Update the last actioned view internally for votes and proposals /// /// Returns true if the action is for a newer view than the last action of that type - pub fn update_action(&mut self, action: HotShotAction, view: TYPES::ViewTime) -> bool { + pub fn update_action(&mut self, action: HotShotAction, view: TYPES::View) -> bool { let old_view = match action { HotShotAction::Vote => &mut self.last_actions.voted, HotShotAction::Propose => &mut self.last_actions.proposed, @@ -545,7 +545,7 @@ impl Consensus { > self .last_proposals .last_key_value() - .map_or(TYPES::ViewTime::genesis(), |(k, _)| { *k }), + .map_or(TYPES::View::genesis(), |(k, _)| { *k }), "New view isn't newer than the previously proposed view." ); self.last_proposals @@ -557,7 +557,7 @@ impl Consensus { /// /// # Errors /// Can return an error when the new view_number is not higher than the existing decided view number. 
- pub fn update_last_decided_view(&mut self, view_number: TYPES::ViewTime) -> Result<()> { + pub fn update_last_decided_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.last_decided_view, "New view isn't newer than the previously decided view." @@ -570,7 +570,7 @@ impl Consensus { /// /// # Errors /// Can return an error when the new view_number is not higher than the existing locked view number. - pub fn update_locked_view(&mut self, view_number: TYPES::ViewTime) -> Result<()> { + pub fn update_locked_view(&mut self, view_number: TYPES::View) -> Result<()> { ensure!( view_number > self.locked_view, "New view isn't newer than the previously locked view." @@ -586,7 +586,7 @@ impl Consensus { /// with the same view number. pub fn update_validated_state_map( &mut self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, new_view: View, ) -> Result<()> { if let Some(existing_view) = self.validated_state_map().get(&view_number) { @@ -631,7 +631,7 @@ impl Consensus { /// Can return an error when there's an existing payload corresponding to the same view number. pub fn update_saved_payloads( &mut self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, encoded_transaction: Arc<[u8]>, ) -> Result<()> { ensure!( @@ -659,7 +659,7 @@ impl Consensus { /// Add a new entry to the vid_shares map. pub fn update_vid_shares( &mut self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, disperse: Proposal>, ) { self.vid_shares @@ -671,7 +671,7 @@ impl Consensus { /// Add a new entry to the da_certs map. 
pub fn update_saved_da_certs( &mut self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, cert: DaCertificate, ) { self.saved_da_certs.insert(view_number, cert); @@ -682,8 +682,8 @@ impl Consensus { /// If the leaf or its ancestors are not found in storage pub fn visit_leaf_ancestors( &self, - start_from: TYPES::ViewTime, - terminator: Terminator, + start_from: TYPES::View, + terminator: Terminator, ok_when_finished: bool, mut f: F, ) -> Result<(), HotShotError> @@ -744,8 +744,8 @@ impl Consensus { /// On inconsistent stored entries pub fn collect_garbage( &mut self, - old_anchor_view: TYPES::ViewTime, - new_anchor_view: TYPES::ViewTime, + old_anchor_view: TYPES::View, + new_anchor_view: TYPES::View, ) { // state check let anchor_entry = self @@ -790,7 +790,7 @@ impl Consensus { /// Gets the validated state with the given view number, if in the state map. #[must_use] - pub fn state(&self, view_number: TYPES::ViewTime) -> Option<&Arc> { + pub fn state(&self, view_number: TYPES::View) -> Option<&Arc> { match self.validated_state_map.get(&view_number) { Some(view) => view.state(), None => None, @@ -799,7 +799,7 @@ impl Consensus { /// Gets the validated state and state delta with the given view number, if in the state map. 
#[must_use] - pub fn state_and_delta(&self, view_number: TYPES::ViewTime) -> StateAndDelta { + pub fn state_and_delta(&self, view_number: TYPES::View) -> StateAndDelta { match self.validated_state_map.get(&view_number) { Some(view) => view.state_and_delta(), None => (None, None), @@ -827,10 +827,10 @@ impl Consensus { #[instrument(skip_all, target = "Consensus", fields(view = *view))] pub async fn calculate_and_update_vid( consensus: OuterConsensus, - view: ::ViewTime, + view: ::View, membership: Arc, private_key: &::PrivateKey, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> Option<()> { let consensus = consensus.upgradable_read().await; let txns = consensus.saved_payloads().get(&view)?; @@ -861,7 +861,7 @@ pub struct CommitmentAndMetadata { /// Builder fee data pub fees: Vec1>, /// View number this block is for - pub block_view: TYPES::ViewTime, + pub block_view: TYPES::View, /// auction result that the block was produced from, if any pub auction_result: Option, } diff --git a/crates/types/src/data.rs b/crates/types/src/data.rs index ca3b313fe2..61d4d5a7bf 100644 --- a/crates/types/src/data.rs +++ b/crates/types/src/data.rs @@ -172,7 +172,7 @@ pub struct DaProposal { /// Metadata of the block to be applied. pub metadata: >::Metadata, /// View this proposal applies to - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, } /// A proposal to upgrade the network @@ -185,7 +185,7 @@ where /// The information about which version we are upgrading to. 
pub upgrade_proposal: UpgradeProposalData, /// View this proposal applies to - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, } /// VID dispersal data @@ -196,7 +196,7 @@ where #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub struct VidDisperse { /// The view number for which this VID data is intended - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share @@ -210,10 +210,10 @@ impl VidDisperse { /// Uses the specified function to calculate share dispersal /// Allows for more complex stake table functionality pub fn from_membership( - view_number: TYPES::ViewTime, + view_number: TYPES::View, mut vid_disperse: JfVidDisperse, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> Self { let shares = membership .committee_members(view_number, epoch) @@ -238,8 +238,8 @@ impl VidDisperse { pub async fn calculate_vid_disperse( txns: Arc<[u8]>, membership: &Arc, - view: TYPES::ViewTime, - epoch: TYPES::EpochTime, + view: TYPES::View, + epoch: TYPES::Epoch, precompute_data: Option, ) -> Self { let num_nodes = membership.total_nodes(epoch); @@ -274,7 +274,7 @@ pub enum ViewChangeEvidence { impl ViewChangeEvidence { /// Check that the given ViewChangeEvidence is relevant to the current view. 
- pub fn is_valid_for_view(&self, view: &TYPES::ViewTime) -> bool { + pub fn is_valid_for_view(&self, view: &TYPES::View) -> bool { match self { ViewChangeEvidence::Timeout(timeout_cert) => timeout_cert.date().view == *view - 1, ViewChangeEvidence::ViewSync(view_sync_cert) => view_sync_cert.view_number == *view, @@ -286,7 +286,7 @@ impl ViewChangeEvidence { /// VID share and associated metadata for a single node pub struct VidDisperseShare { /// The view number for which this VID data is intended - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// Block payload commitment pub payload_commitment: VidCommitment, /// A storage node's key and its corresponding VID share @@ -388,7 +388,7 @@ pub struct QuorumProposal { pub block_header: TYPES::BlockHeader, /// CurView from leader when proposing leaf - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// Per spec, justification pub justify_qc: QuorumCertificate, @@ -404,31 +404,31 @@ pub struct QuorumProposal { } impl HasViewNumber for DaProposal { - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for VidDisperse { - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for VidDisperseShare { - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for QuorumProposal { - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { self.view_number } } impl HasViewNumber for UpgradeProposal { - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { self.view_number } } @@ -465,7 +465,7 @@ pub trait TestableLeaf { #[serde(bound(deserialize = ""))] pub struct Leaf { /// CurView from leader when proposing leaf - view_number: TYPES::ViewTime, + view_number: TYPES::View, /// Per spec, justification justify_qc: QuorumCertificate, 
@@ -538,7 +538,7 @@ impl QuorumCertificate { // since this is genesis, we should never have a decided upgrade certificate. let upgrade_lock = UpgradeLock::::new(); - let genesis_view = ::genesis(); + let genesis_view = ::genesis(); let data = QuorumData { leaf_commit: Leaf::genesis(validated_state, instance_state) @@ -598,13 +598,13 @@ impl Leaf { let justify_qc = QuorumCertificate::new( null_quorum_data.clone(), null_quorum_data.commit(), - ::genesis(), + ::genesis(), None, PhantomData, ); Self { - view_number: TYPES::ViewTime::genesis(), + view_number: TYPES::View::genesis(), justify_qc, parent_commitment: null_quorum_data.leaf_commit, upgrade_certificate: None, @@ -614,7 +614,7 @@ impl Leaf { } /// Time when this leaf was created. - pub fn view_number(&self) -> TYPES::ViewTime { + pub fn view_number(&self) -> TYPES::View { self.view_number } /// Height of this leaf in the chain. @@ -901,7 +901,7 @@ pub struct PackedBundle { pub metadata: >::Metadata, /// The view number that this block is associated with. - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// The sequencing fee for submitting bundles. 
pub sequencing_fees: Vec1>, @@ -918,7 +918,7 @@ impl PackedBundle { pub fn new( encoded_transactions: Arc<[u8]>, metadata: >::Metadata, - view_number: TYPES::ViewTime, + view_number: TYPES::View, sequencing_fees: Vec1>, vid_precompute: Option, auction_result: Option, diff --git a/crates/types/src/error.rs b/crates/types/src/error.rs index 4dfd55a6c3..80c1baae8d 100644 --- a/crates/types/src/error.rs +++ b/crates/types/src/error.rs @@ -41,7 +41,7 @@ pub enum HotShotError { #[error("View {view_number} timed out: {state:?}")] ViewTimedOut { /// The view number that timed out - view_number: TYPES::ViewTime, + view_number: TYPES::View, /// The state that the round was in when it timed out state: RoundTimedoutState, }, diff --git a/crates/types/src/event.rs b/crates/types/src/event.rs index 84241b1680..7c53bce02f 100644 --- a/crates/types/src/event.rs +++ b/crates/types/src/event.rs @@ -25,7 +25,7 @@ use crate::{ #[serde(bound(deserialize = "TYPES: NodeType"))] pub struct Event { /// The view number that this event originates from - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// The underlying event pub event: EventType, } @@ -127,17 +127,17 @@ pub enum EventType { /// A replica task was canceled by a timeout interrupt ReplicaViewTimeout { /// The view that timed out - view_number: TYPES::ViewTime, + view_number: TYPES::View, }, /// The view has finished. If values were decided on, a `Decide` event will also be emitted. 
ViewFinished { /// The view number that has just finished - view_number: TYPES::ViewTime, + view_number: TYPES::View, }, /// The view timed out ViewTimeout { /// The view that timed out - view_number: TYPES::ViewTime, + view_number: TYPES::View, }, /// New transactions were received from the network /// or submitted to the network by us diff --git a/crates/types/src/message.rs b/crates/types/src/message.rs index 3b0e7dcd9a..82a561cd4b 100644 --- a/crates/types/src/message.rs +++ b/crates/types/src/message.rs @@ -63,7 +63,7 @@ impl fmt::Debug for Message { impl HasViewNumber for Message { /// get the view number out of a message - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { self.kind.view_number() } } @@ -133,16 +133,16 @@ impl From> for MessageKind { } impl ViewMessage for MessageKind { - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { match &self { MessageKind::Consensus(message) => message.view_number(), MessageKind::Data(DataMessage::SubmitTransaction(_, v)) => *v, MessageKind::Data(DataMessage::RequestData(msg)) => msg.view, MessageKind::Data(DataMessage::DataResponse(msg)) => match msg { ResponseMessage::Found(m) => m.view_number(), - ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::ViewTime::new(1), + ResponseMessage::NotFound | ResponseMessage::Denied => TYPES::View::new(1), }, - MessageKind::External(_) => TYPES::ViewTime::new(1), + MessageKind::External(_) => TYPES::View::new(1), } } @@ -234,7 +234,7 @@ pub enum SequencingMessage { impl SequencingMessage { /// Get the view number this message relates to - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { match &self { SequencingMessage::General(general_message) => { match general_message { @@ -328,7 +328,7 @@ pub enum DataMessage { /// Contains a transaction to be submitted /// TODO rethink this when we start to send these messages /// we only need the view number for broadcast - 
SubmitTransaction(TYPES::Transaction, TYPES::ViewTime), + SubmitTransaction(TYPES::Transaction, TYPES::View), /// A request for data RequestData(DataRequest), /// A response to a data request @@ -359,7 +359,7 @@ where pub async fn validate_signature( &self, quorum_membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = self.data.view_number(); @@ -411,7 +411,7 @@ impl UpgradeLock { /// /// # Errors /// Returns an error if we do not support the version required by the decided upgrade certificate. - pub async fn version(&self, view: TYPES::ViewTime) -> Result { + pub async fn version(&self, view: TYPES::View) -> Result { let upgrade_certificate = self.decided_upgrade_certificate.read().await; let version = match *upgrade_certificate { @@ -435,7 +435,7 @@ impl UpgradeLock { /// Calculate the version applied in a view, based on the provided upgrade lock. /// /// This function does not fail, since it does not check that the version is supported. - pub async fn version_infallible(&self, view: TYPES::ViewTime) -> Version { + pub async fn version_infallible(&self, view: TYPES::View) -> Version { let upgrade_certificate = self.decided_upgrade_certificate.read().await; match *upgrade_certificate { diff --git a/crates/types/src/request_response.rs b/crates/types/src/request_response.rs index dbbb6f061e..6829d19743 100644 --- a/crates/types/src/request_response.rs +++ b/crates/types/src/request_response.rs @@ -16,7 +16,7 @@ use crate::traits::{node_implementation::NodeType, signature_key::SignatureKey}; /// A signed request for a proposal. pub struct ProposalRequestPayload { /// The view number that we're requesting a proposal for. - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// Our public key. The ensures that the receipient can reply to /// us directly. 
diff --git a/crates/types/src/simple_certificate.rs b/crates/types/src/simple_certificate.rs index 7ee5a15cc5..88774d2cb9 100644 --- a/crates/types/src/simple_certificate.rs +++ b/crates/types/src/simple_certificate.rs @@ -78,7 +78,7 @@ pub struct SimpleCertificate, /// Which view this QC relates to - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, /// assembled signature for certificate aggregation pub signatures: Option<::QcType>, /// phantom data for `THRESHOLD` and `TYPES` @@ -92,7 +92,7 @@ impl> pub fn new( data: VOTEABLE, vote_commitment: Commitment, - view_number: TYPES::ViewTime, + view_number: TYPES::View, signatures: Option<::QcType>, pd: PhantomData<(TYPES, THRESHOLD)>, ) -> Self { @@ -133,7 +133,7 @@ impl> vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, - view: TYPES::ViewTime, + view: TYPES::View, ) -> Self { let vote_commitment_bytes: [u8; 32] = vote_commitment.into(); @@ -148,10 +148,10 @@ impl> async fn is_valid_cert, V: Versions>( &self, membership: &MEMBERSHIP, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> bool { - if self.view_number == TYPES::ViewTime::genesis() { + if self.view_number == TYPES::View::genesis() { return true; } let real_qc_pp = ::public_parameter( @@ -188,7 +188,7 @@ impl> impl> HasViewNumber for SimpleCertificate { - fn view_number(&self) -> TYPES::ViewTime { + fn view_number(&self) -> TYPES::View { self.view_number } } @@ -206,7 +206,7 @@ impl UpgradeCertificate { /// Returns an error when the certificate is no longer relevant pub async fn is_relevant( &self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, decided_upgrade_certificate: Arc>>, ) -> Result<()> { let decided_upgrade_certificate_read = decided_upgrade_certificate.read().await; @@ -227,7 +227,7 @@ impl UpgradeCertificate { pub async fn validate( upgrade_certificate: &Option, quorum_membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, upgrade_lock: 
&UpgradeLock, ) -> Result<()> { if let Some(ref cert) = upgrade_certificate { @@ -244,7 +244,7 @@ impl UpgradeCertificate { /// Given an upgrade certificate and a view, tests whether the view is in the period /// where we are upgrading, which requires that we propose with null blocks. - pub fn upgrading_in(&self, view: TYPES::ViewTime) -> bool { + pub fn upgrading_in(&self, view: TYPES::View) -> bool { view > self.data.old_version_last_view && view < self.data.new_version_first_view } } diff --git a/crates/types/src/simple_vote.rs b/crates/types/src/simple_vote.rs index 2c504d20b4..928143dbf9 100644 --- a/crates/types/src/simple_vote.rs +++ b/crates/types/src/simple_vote.rs @@ -41,7 +41,7 @@ pub struct DaData { /// Data used for a timeout vote. pub struct TimeoutData { /// View the timeout is for - pub view: TYPES::ViewTime, + pub view: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a VID vote. @@ -55,7 +55,7 @@ pub struct ViewSyncPreCommitData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::ViewTime, + pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Commit vote. @@ -63,7 +63,7 @@ pub struct ViewSyncCommitData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::ViewTime, + pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Finalize vote. @@ -71,7 +71,7 @@ pub struct ViewSyncFinalizeData { /// The relay this vote is intended for pub relay: u64, /// The view number we are trying to sync on - pub round: TYPES::ViewTime, + pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Upgrade vote. 
@@ -82,13 +82,13 @@ pub struct UpgradeProposalData { pub new_version: Version, /// The last view in which we are allowed to reach a decide on this upgrade. /// If it is not decided by that view, we discard it. - pub decide_by: TYPES::ViewTime, + pub decide_by: TYPES::View, /// A unique identifier for the specific protocol being voted on. pub new_version_hash: Vec, /// The last block for which the old version will be in effect. - pub old_version_last_view: TYPES::ViewTime, + pub old_version_last_view: TYPES::View, /// The first block for which the new version will be in effect. - pub new_version_first_view: TYPES::ViewTime, + pub new_version_first_view: TYPES::View, } /// Marker trait for data or commitments that can be voted on. @@ -123,11 +123,11 @@ pub struct SimpleVote { /// The leaf commitment being voted on. pub data: DATA, /// The view this vote was cast for - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, } impl HasViewNumber for SimpleVote { - fn view_number(&self) -> ::ViewTime { + fn view_number(&self) -> ::View { self.view_number } } @@ -158,7 +158,7 @@ impl SimpleVote { /// If we are unable to sign the data pub async fn create_signed_vote( data: DATA, - view: TYPES::ViewTime, + view: TYPES::View, pub_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, @@ -187,7 +187,7 @@ pub struct VersionedVoteData { data: DATA, /// view number - view: TYPES::ViewTime, + view: TYPES::View, /// version applied to the view number version: Version, @@ -204,7 +204,7 @@ impl VersionedVoteData, ) -> Result { let version = upgrade_lock.version(view).await?; @@ -222,7 +222,7 @@ impl VersionedVoteData, ) -> Self { let version = upgrade_lock.version_infallible(view).await; @@ -303,7 +303,7 @@ impl Committable for UpgradeProposalData { /// This implements commit for all the types which contain a view and relay public key. 
fn view_and_relay_commit( - view: TYPES::ViewTime, + view: TYPES::View, relay: u64, tag: &str, ) -> Commitment { diff --git a/crates/types/src/traits/auction_results_provider.rs b/crates/types/src/traits/auction_results_provider.rs index 0c8993ddd6..245f8a8c34 100644 --- a/crates/types/src/traits/auction_results_provider.rs +++ b/crates/types/src/traits/auction_results_provider.rs @@ -22,6 +22,6 @@ pub trait AuctionResultsProvider: Send + Sync + Clone { /// subsequent calls will invoke additional wasted calls. async fn fetch_auction_result( &self, - view_number: TYPES::ViewTime, + view_number: TYPES::View, ) -> Result; } diff --git a/crates/types/src/traits/election.rs b/crates/types/src/traits/election.rs index 6ea0ce793a..43d0ebf12f 100644 --- a/crates/types/src/traits/election.rs +++ b/crates/types/src/traits/election.rs @@ -26,21 +26,21 @@ pub trait Membership: /// Get all participants in the committee (including their stake) for a specific epoch fn stake_table( &self, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> Vec<::StakeTableEntry>; /// Get all participants in the committee for a specific view for a specific epoch fn committee_members( &self, - view_number: TYPES::ViewTime, - epoch: TYPES::EpochTime, + view_number: TYPES::View, + epoch: TYPES::Epoch, ) -> BTreeSet; /// Get all leaders in the committee for a specific view for a specific epoch fn committee_leaders( &self, - view_number: TYPES::ViewTime, - epoch: TYPES::EpochTime, + view_number: TYPES::View, + epoch: TYPES::Epoch, ) -> BTreeSet; /// Get the stake table entry for a public key, returns `None` if the @@ -48,20 +48,20 @@ pub trait Membership: fn stake( &self, pub_key: &TYPES::SignatureKey, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> Option<::StakeTableEntry>; /// See if a node has stake in the committee in a specific epoch - fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::EpochTime) -> bool; + fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: 
TYPES::Epoch) -> bool; /// The leader of the committee for view `view_number` in an epoch `epoch`. - fn leader(&self, view_number: TYPES::ViewTime, epoch: TYPES::EpochTime) -> TYPES::SignatureKey; + fn leader(&self, view_number: TYPES::View, epoch: TYPES::Epoch) -> TYPES::SignatureKey; /// Get the network topic for the committee fn committee_topic(&self) -> Topic; /// Returns the number of total nodes in the committee in an epoch `epoch` - fn total_nodes(&self, epoch: TYPES::EpochTime) -> usize; + fn total_nodes(&self, epoch: TYPES::Epoch) -> usize; /// Returns the threshold for a specific `Membership` implementation fn success_threshold(&self) -> NonZeroU64; diff --git a/crates/types/src/traits/network.rs b/crates/types/src/traits/network.rs index f9b5c942ba..90fedaba27 100644 --- a/crates/types/src/traits/network.rs +++ b/crates/types/src/traits/network.rs @@ -139,7 +139,7 @@ pub trait Id: Eq + PartialEq + Hash {} /// a message pub trait ViewMessage { /// get the view out of the message - fn view_number(&self) -> TYPES::ViewTime; + fn view_number(&self) -> TYPES::View; // TODO move out of this trait. /// get the purpose of the message fn purpose(&self) -> MessagePurpose; @@ -152,7 +152,7 @@ pub struct DataRequest { /// Request pub request: RequestKind, /// View this message is for - pub view: TYPES::ViewTime, + pub view: TYPES::View, /// signature of the Sha256 hash of the data so outsiders can't use know /// public keys with stake. 
pub signature: ::PureAssembledSignatureType, @@ -162,11 +162,11 @@ pub struct DataRequest { #[derive(Serialize, Deserialize, Derivative, Clone, Debug, PartialEq, Eq, Hash)] pub enum RequestKind { /// Request VID data by our key and the VID commitment - Vid(TYPES::ViewTime, TYPES::SignatureKey), + Vid(TYPES::View, TYPES::SignatureKey), /// Request a DA proposal for a certain view - DaProposal(TYPES::ViewTime), + DaProposal(TYPES::View), /// Request for quorum proposal for a view - Proposal(TYPES::ViewTime), + Proposal(TYPES::View), } /// A response for a request. `SequencingMessage` is the same as other network messages diff --git a/crates/types/src/traits/node_implementation.rs b/crates/types/src/traits/node_implementation.rs index fd481d3943..c84031218c 100644 --- a/crates/types/src/traits/node_implementation.rs +++ b/crates/types/src/traits/node_implementation.rs @@ -210,9 +210,9 @@ pub trait NodeType: /// The time type that this hotshot setup is using. /// /// This should be the same `Time` that `ValidatedState::Time` is using. - type ViewTime: ConsensusTime + Display; + type View: ConsensusTime + Display; /// Same as above but for epoch. - type EpochTime: ConsensusTime + Display; + type Epoch: ConsensusTime + Display; /// The AuctionSolverResult is a type that holds the data associated with a particular solver /// run, for a particular view. type AuctionResult: Debug @@ -246,7 +246,7 @@ pub trait NodeType: type InstanceState: InstanceState; /// The validated state type that this hotshot setup is using. 
- type ValidatedState: ValidatedState; + type ValidatedState: ValidatedState; /// Membership used for this implementation type Membership: Membership; diff --git a/crates/types/src/traits/storage.rs b/crates/types/src/traits/storage.rs index 990d0010b8..d400ce455b 100644 --- a/crates/types/src/traits/storage.rs +++ b/crates/types/src/traits/storage.rs @@ -36,7 +36,7 @@ pub trait Storage: Send + Sync + Clone { proposal: &Proposal>, ) -> Result<()>; /// Record a HotShotAction taken. - async fn record_action(&self, view: TYPES::ViewTime, action: HotShotAction) -> Result<()>; + async fn record_action(&self, view: TYPES::View, action: HotShotAction) -> Result<()>; /// Update the current high QC in storage. async fn update_high_qc(&self, high_qc: QuorumCertificate) -> Result<()>; /// Update the currently undecided state of consensus. This includes the undecided leaf chain, @@ -44,7 +44,7 @@ pub trait Storage: Send + Sync + Clone { async fn update_undecided_state( &self, leafs: CommitmentMap>, - state: BTreeMap>, + state: BTreeMap>, ) -> Result<()>; /// Upgrade the current decided upgrade certificate in storage. async fn update_decided_upgrade_certificate( diff --git a/crates/types/src/utils.rs b/crates/types/src/utils.rs index ff264fa47d..e3d19a8286 100644 --- a/crates/types/src/utils.rs +++ b/crates/types/src/utils.rs @@ -150,7 +150,7 @@ pub struct View { #[derive(Debug, Clone)] pub struct RoundFinishedEvent { /// The round that finished - pub view_number: TYPES::ViewTime, + pub view_number: TYPES::View, } /// Whether or not to stop inclusively or exclusively when walking diff --git a/crates/types/src/vote.rs b/crates/types/src/vote.rs index a811089d05..73b112b85e 100644 --- a/crates/types/src/vote.rs +++ b/crates/types/src/vote.rs @@ -48,7 +48,7 @@ pub trait Vote: HasViewNumber { /// Any type that is associated with a view pub trait HasViewNumber { /// Returns the view number the type refers to. 
- fn view_number(&self) -> TYPES::ViewTime; + fn view_number(&self) -> TYPES::View; } /** @@ -68,14 +68,14 @@ pub trait Certificate: HasViewNumber { vote_commitment: Commitment>, data: Self::Voteable, sig: ::QcType, - view: TYPES::ViewTime, + view: TYPES::View, ) -> Self; /// Checks if the cert is valid in the given epoch fn is_valid_cert, V: Versions>( &self, membership: &MEMBERSHIP, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, upgrade_lock: &UpgradeLock, ) -> impl std::future::Future; /// Returns the amount of stake needed to create this certificate @@ -138,7 +138,7 @@ impl< &mut self, vote: &VOTE, membership: &TYPES::Membership, - epoch: TYPES::EpochTime, + epoch: TYPES::Epoch, ) -> Either<(), CERT> { let key = vote.signing_key(); From 59c7624114a881ae98b4ff9059782ddc71ad3af5 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 16 Oct 2024 14:14:05 -0400 Subject: [PATCH 06/16] make `leader` return a `Result` --- crates/example-types/src/node_types.rs | 16 +- .../traits/election/randomized_committee.rs | 5 +- .../src/traits/election/static_committee.rs | 5 +- .../static_committee_leader_two_views.rs | 6 +- .../src/traits/networking/libp2p_network.rs | 10 +- crates/hotshot/src/types/handle.rs | 5 +- crates/task-impls/src/consensus/handlers.rs | 12 +- crates/task-impls/src/consensus/mod.rs | 8 +- crates/task-impls/src/consensus2/handlers.rs | 280 ++++++++++++++++++ crates/task-impls/src/da.rs | 130 ++++---- crates/task-impls/src/helpers.rs | 4 +- crates/task-impls/src/network.rs | 195 ++++++++---- crates/task-impls/src/quorum_proposal/mod.rs | 98 +++--- crates/task-impls/src/quorum_vote/mod.rs | 89 +++--- crates/task-impls/src/transactions.rs | 32 +- crates/task-impls/src/upgrade.rs | 106 ++++--- crates/task-impls/src/view_sync.rs | 120 ++++---- crates/task-impls/src/vote_collection.rs | 178 +++++++---- crates/testing/src/helpers.rs | 3 +- crates/testing/src/view_generator.rs | 12 +- crates/testing/tests/tests_1/da_task.rs 
| 3 +- crates/testing/tests/tests_1/network_task.rs | 3 +- .../tests/tests_1/quorum_proposal_task.rs | 3 +- .../testing/tests/tests_1/transaction_task.rs | 3 +- .../tests_1/upgrade_task_with_proposal.rs | 3 +- crates/testing/tests/tests_1/vid_task.rs | 3 +- .../tests/tests_1/vote_dependency_handle.rs | 3 +- .../testing/tests/tests_3/byzantine_tests.rs | 2 +- .../testing/tests/tests_3/memory_network.rs | 3 +- crates/types/src/data.rs | 15 +- crates/types/src/message.rs | 2 +- crates/types/src/traits/election.rs | 9 +- 32 files changed, 881 insertions(+), 485 deletions(-) create mode 100644 crates/task-impls/src/consensus2/handlers.rs diff --git a/crates/example-types/src/node_types.rs b/crates/example-types/src/node_types.rs index 1ab2446c12..8884d3e7ce 100644 --- a/crates/example-types/src/node_types.rs +++ b/crates/example-types/src/node_types.rs @@ -4,12 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . -use crate::{ - auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, - block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, - state_types::{TestInstanceState, TestValidatedState}, - storage_types::TestStorage, -}; use hotshot::traits::{ election::{ randomized_committee::RandomizedCommittee, static_committee::StaticCommittee, @@ -18,15 +12,21 @@ use hotshot::traits::{ implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, NodeImplementation, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, signature_key::{BLSPubKey, BuilderKey}, traits::node_implementation::{NodeType, Versions}, }; use serde::{Deserialize, Serialize}; use vbs::version::StaticVersion; +use crate::{ + auction_results_provider_types::{TestAuctionResult, TestAuctionResultsProvider}, + block_types::{TestBlockHeader, TestBlockPayload, TestTransaction}, + state_types::{TestInstanceState, 
TestValidatedState}, + storage_types::TestStorage, +}; + #[derive( Copy, Clone, diff --git a/crates/hotshot/src/traits/election/randomized_committee.rs b/crates/hotshot/src/traits/election/randomized_committee.rs index 4fed098e9c..8214c0da40 100644 --- a/crates/hotshot/src/traits/election/randomized_committee.rs +++ b/crates/hotshot/src/traits/election/randomized_committee.rs @@ -6,6 +6,7 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; +use anyhow::Result; use ethereum_types::U256; use hotshot_types::{ traits::{ @@ -142,7 +143,7 @@ impl Membership for RandomizedCommittee { &self, view_number: TYPES::View, _epoch: ::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); @@ -151,7 +152,7 @@ impl Membership for RandomizedCommittee { let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) + Ok(TYPES::SignatureKey::public_key(&res)) } /// Get the total number of nodes in the committee diff --git a/crates/hotshot/src/traits/election/static_committee.rs b/crates/hotshot/src/traits/election/static_committee.rs index 2ef52a66e2..31f40f002a 100644 --- a/crates/hotshot/src/traits/election/static_committee.rs +++ b/crates/hotshot/src/traits/election/static_committee.rs @@ -6,6 +6,7 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; +use anyhow::Result; use ethereum_types::U256; use hotshot_types::{ traits::{ @@ -140,11 +141,11 @@ impl Membership for StaticCommittee { &self, view_number: TYPES::View, _epoch: ::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { #[allow(clippy::cast_possible_truncation)] let index = *view_number as usize % self.eligible_leaders.len(); let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) + Ok(TYPES::SignatureKey::public_key(&res)) } /// Get the total number of nodes in the committee diff --git 
a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs index db41aad2ab..889d05a687 100644 --- a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -6,6 +6,7 @@ use std::{collections::BTreeMap, num::NonZeroU64}; +use anyhow::Result; use ethereum_types::U256; use hotshot_types::{ traits::{ @@ -140,11 +141,12 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { let index = usize::try_from((*view_number / 2) % self.eligible_leaders.len() as u64).unwrap(); let res = self.eligible_leaders[index].clone(); - TYPES::SignatureKey::public_key(&res) + + Ok(TYPES::SignatureKey::public_key(&res)) } /// Get the total number of nodes in the committee diff --git a/crates/hotshot/src/traits/networking/libp2p_network.rs b/crates/hotshot/src/traits/networking/libp2p_network.rs index 952d627684..8eb61440bb 100644 --- a/crates/hotshot/src/traits/networking/libp2p_network.rs +++ b/crates/hotshot/src/traits/networking/libp2p_network.rs @@ -1025,7 +1025,15 @@ impl ConnectedNetwork for Libp2pNetwork { { let future_view = ::View::new(view) + LOOK_AHEAD; let epoch = ::Epoch::new(epoch); - let future_leader = membership.leader(future_view, epoch); + let future_leader = match membership.leader(future_view, epoch) { + Ok(l) => l, + Err(e) => { + return tracing::info!( + "Failed to calculate leader for view {:?}: {e}", + future_view + ); + } + }; let _ = self .queue_node_lookup(ViewNumber::new(*future_view), future_leader) diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 8c200c8d1a..d561cd3920 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -275,12 +275,15 @@ impl + 'static, V: Versions> } /// Wrapper for `HotShotConsensusApi`'s `leader` function + /// + /// # 
Errors + /// Returns an error if the leader cannot be calculated #[allow(clippy::unused_async)] // async for API compatibility reasons pub async fn leader( &self, view_number: TYPES::View, epoch_number: TYPES::Epoch, - ) -> TYPES::SignatureKey { + ) -> Result { self.hotshot .memberships .quorum_membership diff --git a/crates/task-impls/src/consensus/handlers.rs b/crates/task-impls/src/consensus/handlers.rs index fec58d8409..db2165dc61 100644 --- a/crates/task-impls/src/consensus/handlers.rs +++ b/crates/task-impls/src/consensus/handlers.rs @@ -44,7 +44,7 @@ pub(crate) async fn handle_quorum_vote_recv< ensure!( task_state .quorum_membership - .leader(vote.view_number() + 1, task_state.cur_epoch) + .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, format!( "We are not the leader for view {:?}", @@ -63,7 +63,7 @@ pub(crate) async fn handle_quorum_vote_recv< sender, &task_state.upgrade_lock, ) - .await; + .await?; Ok(()) } @@ -83,7 +83,7 @@ pub(crate) async fn handle_timeout_vote_recv< ensure!( task_state .timeout_membership - .leader(vote.view_number() + 1, task_state.cur_epoch) + .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, format!( "We are not the leader for view {:?}", @@ -102,7 +102,7 @@ pub(crate) async fn handle_timeout_vote_recv< sender, &task_state.upgrade_lock, ) - .await; + .await?; Ok(()) } @@ -177,7 +177,7 @@ pub(crate) async fn handle_view_change< let cur_view_time = Utc::now().timestamp(); if task_state .quorum_membership - .leader(old_view_number, task_state.cur_epoch) + .leader(old_view_number, task_state.cur_epoch)? == task_state.public_key { #[allow(clippy::cast_precision_loss)] @@ -274,7 +274,7 @@ pub(crate) async fn handle_timeout .add(1); if task_state .quorum_membership - .leader(view_number, task_state.cur_epoch) + .leader(view_number, task_state.cur_epoch)? 
== task_state.public_key { task_state diff --git a/crates/task-impls/src/consensus/mod.rs b/crates/task-impls/src/consensus/mod.rs index fb1ec86fca..e8a281173a 100644 --- a/crates/task-impls/src/consensus/mod.rs +++ b/crates/task-impls/src/consensus/mod.rs @@ -106,7 +106,7 @@ impl, V: Versions> ConsensusTaskSt &mut self, event: Arc>, sender: Sender>>, - ) { + ) -> Result<()> { match event.as_ref() { HotShotEvent::QuorumVoteRecv(ref vote) => { if let Err(e) = @@ -149,6 +149,8 @@ impl, V: Versions> ConsensusTaskSt } _ => {} } + + Ok(()) } } @@ -164,9 +166,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; - - Ok(()) + self.handle(event, sender.clone()).await } /// Joins all subtasks. diff --git a/crates/task-impls/src/consensus2/handlers.rs b/crates/task-impls/src/consensus2/handlers.rs new file mode 100644 index 0000000000..d63510d219 --- /dev/null +++ b/crates/task-impls/src/consensus2/handlers.rs @@ -0,0 +1,280 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{sync::Arc, time::Duration}; + +use anyhow::{ensure, Context, Result}; +use async_broadcast::Sender; +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use chrono::Utc; +use hotshot_types::{ + event::{Event, EventType}, + simple_vote::{QuorumVote, TimeoutData, TimeoutVote}, + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeImplementation, NodeType}, + }, + vote::HasViewNumber, +}; +use tracing::{debug, error, instrument}; + +use super::Consensus2TaskState; +use crate::{ + consensus2::Versions, + events::HotShotEvent, + helpers::{broadcast_event, cancel_task}, + vote_collection::handle_vote, +}; + +/// Handle a `QuorumVoteRecv` event. 
+pub(crate) async fn handle_quorum_vote_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + vote: &QuorumVote, + event: Arc>, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + // Are we the leader for this view? + ensure!( + task_state + .quorum_membership + .leader(vote.view_number() + 1)? + == task_state.public_key, + format!( + "We are not the leader for view {:?}", + vote.view_number() + 1 + ) + ); + + handle_vote( + &mut task_state.vote_collectors, + vote, + task_state.public_key.clone(), + &task_state.quorum_membership, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + ) + .await?; + + Ok(()) +} + +/// Handle a `TimeoutVoteRecv` event. +pub(crate) async fn handle_timeout_vote_recv< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + vote: &TimeoutVote, + event: Arc>, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + // Are we the leader for this view? + ensure!( + task_state + .timeout_membership + .leader(vote.view_number() + 1)? + == task_state.public_key, + format!( + "We are not the leader for view {:?}", + vote.view_number() + 1 + ) + ); + + handle_vote( + &mut task_state.timeout_vote_collectors, + vote, + task_state.public_key.clone(), + &task_state.quorum_membership, + task_state.id, + &event, + sender, + &task_state.upgrade_lock, + ) + .await?; + + Ok(()) +} + +/// Handle a `ViewChange` event. 
+#[instrument(skip_all)] +pub(crate) async fn handle_view_change< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + new_view_number: TYPES::Time, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + ensure!( + new_view_number > task_state.cur_view, + "New view is not larger than the current view" + ); + + let old_view_number = task_state.cur_view; + debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); + + // Move this node to the next view + task_state.cur_view = new_view_number; + + // If we have a decided upgrade certificate, the protocol version may also have been upgraded. + let decided_upgrade_certificate_read = task_state + .upgrade_lock + .decided_upgrade_certificate + .read() + .await + .clone(); + if let Some(cert) = decided_upgrade_certificate_read { + if new_view_number == cert.data.new_version_first_view { + error!( + "Version upgraded based on a decided upgrade cert: {:?}", + cert + ); + } + } + + // Spawn a timeout task if we did actually update view + let timeout = task_state.timeout; + let new_timeout_task = async_spawn({ + let stream = sender.clone(); + // Nuance: We timeout on the view + 1 here because that means that we have + // not seen evidence to transition to this new view + let view_number = new_view_number + 1; + async move { + async_sleep(Duration::from_millis(timeout)).await; + broadcast_event( + Arc::new(HotShotEvent::Timeout(TYPES::Time::new(*view_number))), + &stream, + ) + .await; + } + }); + + // Cancel the old timeout task + cancel_task(std::mem::replace( + &mut task_state.timeout_task, + new_timeout_task, + )) + .await; + + let consensus = task_state.consensus.read().await; + consensus + .metrics + .current_view + .set(usize::try_from(task_state.cur_view.u64()).unwrap()); + let cur_view_time = Utc::now().timestamp(); + if task_state.quorum_membership.leader(old_view_number)? 
== task_state.public_key { + #[allow(clippy::cast_precision_loss)] + consensus + .metrics + .view_duration_as_leader + .add_point((cur_view_time - task_state.cur_view_time) as f64); + } + task_state.cur_view_time = cur_view_time; + + // Do the comparison before the subtraction to avoid potential overflow, since + // `last_decided_view` may be greater than `cur_view` if the node is catching up. + if usize::try_from(task_state.cur_view.u64()).unwrap() + > usize::try_from(task_state.last_decided_view.u64()).unwrap() + { + consensus.metrics.number_of_views_since_last_decide.set( + usize::try_from(task_state.cur_view.u64()).unwrap() + - usize::try_from(task_state.last_decided_view.u64()).unwrap(), + ); + } + + broadcast_event( + Event { + view_number: old_view_number, + event: EventType::ViewFinished { + view_number: old_view_number, + }, + }, + &task_state.output_event_stream, + ) + .await; + Ok(()) +} + +/// Handle a `Timeout` event. +#[instrument(skip_all)] +pub(crate) async fn handle_timeout, V: Versions>( + view_number: TYPES::Time, + sender: &Sender>>, + task_state: &mut Consensus2TaskState, +) -> Result<()> { + ensure!( + task_state.cur_view < view_number, + "Timeout event is for an old view" + ); + + ensure!( + task_state + .timeout_membership + .has_stake(&task_state.public_key), + format!("We were not chosen for the consensus committee for view {view_number:?}") + ); + + let vote = TimeoutVote::create_signed_vote( + TimeoutData:: { view: view_number }, + view_number, + &task_state.public_key, + &task_state.private_key, + &task_state.upgrade_lock, + ) + .await + .context("Failed to sign TimeoutData")?; + + broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), sender).await; + broadcast_event( + Event { + view_number, + event: EventType::ViewTimeout { view_number }, + }, + &task_state.output_event_stream, + ) + .await; + + debug!( + "We did not receive evidence for view {} in time, sending timeout vote for that view!", + *view_number + ); + + 
broadcast_event( + Event { + view_number, + event: EventType::ReplicaViewTimeout { view_number }, + }, + &task_state.output_event_stream, + ) + .await; + + task_state + .consensus + .read() + .await + .metrics + .number_of_timeouts + .add(1); + if task_state.quorum_membership.leader(view_number)? == task_state.public_key { + task_state + .consensus + .read() + .await + .metrics + .number_of_timeouts_as_leader + .add(1); + } + + Ok(()) +} diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 2e7c1357ff..9d4f36b543 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -6,7 +6,7 @@ use std::{marker::PhantomData, sync::Arc}; -use anyhow::Result; +use anyhow::{ensure, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -35,10 +35,10 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; -use tracing::{debug, error, info, instrument, warn}; +use tracing::{debug, info, instrument}; use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, + events::HotShotEvent, helpers::broadcast_event, vote_collection::{handle_vote, VoteCollectorsMap}, }; @@ -94,7 +94,7 @@ impl, V: Versions> DaTaskState>, event_stream: Sender>>, - ) -> Option { + ) -> Result<()> { match event.as_ref() { HotShotEvent::DaProposalRecv(proposal, sender) => { let sender = sender.clone(); @@ -111,35 +111,31 @@ impl, V: Versions> DaTaskState= self.cur_view - 1, + "Throwing away DA proposal that is more than one view older" + ); - if self + ensure!(self .consensus .read() .await .saved_payloads() - .contains_key(&view) - { - warn!("Received DA proposal for view {:?} but we already have a payload for that view. Throwing it away", view); - return None; - } + .contains_key(&view), + format!("Received DA proposal for view {:?} but we already have a payload for that view. 
Throwing it away", view)); let encoded_transactions_hash = Sha256::digest(&proposal.data.encoded_transactions); // ED Is this the right leader? - let view_leader_key = self.da_membership.leader(view, self.cur_epoch); - if view_leader_key != sender { - error!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()); - return None; - } + let view_leader_key = self.da_membership.leader(view, self.cur_epoch)?; + ensure!(view_leader_key == sender, + format!("DA proposal doesn't have expected leader key for view {} \n DA proposal is: {:?}", *view, proposal.data.clone()) + ); - if !view_leader_key.validate(&proposal.signature, &encoded_transactions_hash) { - error!("Could not verify proposal."); - return None; - } + ensure!( + view_leader_key.validate(&proposal.signature, &encoded_transactions_hash), + "Could not verify proposal." + ); broadcast_event( Arc::new(HotShotEvent::DaProposalValidated(proposal.clone(), sender)), @@ -149,10 +145,9 @@ impl, V: Versions> DaTaskState { let curr_view = self.consensus.read().await.cur_view(); - if curr_view > proposal.data.view_number() + 1 { - tracing::debug!("Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", curr_view, proposal.data.view_number()); - return None; - } + ensure!(curr_view <= proposal.data.view_number() + 1, + format!("Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", curr_view, proposal.data.view_number())); + // Proposal is fresh and valid, notify the application layer broadcast_event( Event { @@ -166,23 +161,17 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState { let view = *view; - if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { - return None; - } + + ensure!( + *self.cur_view < *view, + "Received a view change to an older view." 
+ ); if *view - *self.cur_view > 1 { info!("View changed by more than 1 going to view {:?}", view); @@ -298,12 +286,13 @@ impl, V: Versions> DaTaskState { let PackedBundle:: { @@ -318,12 +307,8 @@ impl, V: Versions> DaTaskState = DaProposal { encoded_transactions: Arc::clone(encoded_transactions), @@ -347,14 +332,9 @@ impl, V: Versions> DaTaskState { - error!("Shutting down because of shutdown signal!"); - return Some(HotShotTaskCompleted); - } _ => {} } - None + Ok(()) } } @@ -371,9 +351,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; - - Ok(()) + self.handle(event, sender.clone()).await } async fn cancel_subtasks(&mut self) {} diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index cd4bf5b3cd..387a363a8c 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -367,7 +367,7 @@ pub(crate) async fn parent_leaf_and_state( ) -> Result<(Leaf, Arc<::ValidatedState>)> { let current_epoch = consensus.read().await.cur_epoch(); ensure!( - quorum_membership.leader(next_proposal_view_number, current_epoch) == public_key, + quorum_membership.leader(next_proposal_view_number, current_epoch)? == public_key, "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", ); let parent_view_number = consensus.read().await.high_qc().view_number(); @@ -697,7 +697,7 @@ pub(crate) async fn update_view, V let is_old_view_leader = task_state .quorum_membership - .leader(task_state.cur_view, task_state.cur_epoch) + .leader(task_state.cur_view, task_state.cur_epoch)? 
== task_state.public_key; let old_view = task_state.cur_view; diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 9cb1f7abd0..8668a4cf48 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -349,24 +349,49 @@ impl< // ED Each network task is subscribed to all these message types. Need filters per network task HotShotEvent::QuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); + let view_number = vote.view_number() + 1; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + 1, self.epoch), - ), + TransmitType::Direct(leader), + )) + } + HotShotEvent::QuorumProposalRequestSend(req, signature) => { + let view_number = req.view_number; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. 
Error: {:?}", + view_number, + e + ); + return None; + } + }; + + Some(( + req.key.clone(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ProposalRequested(req.clone(), signature), + )), + TransmitType::DaCommitteeAndLeaderBroadcast(leader), )) } - HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( - req.key.clone(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ProposalRequested(req.clone(), signature), - )), - TransmitType::Broadcast, - )), HotShotEvent::QuorumProposalResponseSend(sender_key, proposal) => Some(( sender_key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( @@ -390,15 +415,25 @@ impl< } HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); + let view_number = vote.view_number(); + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. 
Error: {:?}", + view_number, + e + ); + return None; + } + }; + Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::Da( DaConsensusMessage::DaVote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number(), self.epoch), - ), + TransmitType::Direct(leader), )) } HotShotEvent::DacSend(certificate, sender) => { @@ -411,36 +446,72 @@ impl< TransmitType::Broadcast, )) } - HotShotEvent::ViewSyncPreCommitVoteSend(vote) => Some(( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), - )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + vote.date().relay, self.epoch), - ), - )), - HotShotEvent::ViewSyncCommitVoteSend(vote) => Some(( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), - )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + vote.date().relay, self.epoch), - ), - )), - HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(( - vote.signing_key(), - MessageKind::::from_consensus_message(SequencingMessage::General( - GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), - )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + vote.date().relay, self.epoch), - ), - )), + HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { + let view_number = vote.view_number() + vote.date().relay; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. 
Error: {:?}", + view_number, + e + ); + return None; + } + }; + + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncPreCommitVote(vote.clone()), + )), + TransmitType::Direct(leader), + )) + } + HotShotEvent::ViewSyncCommitVoteSend(vote) => { + let view_number = vote.view_number() + vote.date().relay; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncCommitVote(vote.clone()), + )), + TransmitType::Direct(leader), + )) + } + HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { + let view_number = vote.view_number() + vote.date().relay; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; + + Some(( + vote.signing_key(), + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::ViewSyncFinalizeVote(vote.clone()), + )), + TransmitType::Direct(leader), + )) + } HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( @@ -464,15 +535,24 @@ impl< )), HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); + let view_number = vote.view_number() + 1; + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. 
Error: {:?}", + view_number, + e + ); + return None; + } + }; Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::TimeoutVote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number() + 1, self.epoch), - ), + TransmitType::Direct(leader), )) } HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( @@ -484,15 +564,24 @@ impl< )), HotShotEvent::UpgradeVoteSend(vote) => { error!("Sending upgrade vote!"); + let view_number = vote.view_number(); + let leader = match self.quorum_membership.leader(view_number, self.epoch) { + Ok(l) => l, + Err(e) => { + tracing::warn!( + "Failed to calculate leader for view number {:?}. Error: {:?}", + view_number, + e + ); + return None; + } + }; Some(( vote.signing_key(), MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::UpgradeVote(vote.clone()), )), - TransmitType::Direct( - self.quorum_membership - .leader(vote.view_number(), self.epoch), - ), + TransmitType::Direct(leader), )) } HotShotEvent::ViewChange(view) => { diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index 7390427f09..65a3685e71 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -6,7 +6,7 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::Result; +use anyhow::{ensure, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -288,24 +288,25 @@ impl, V: Versions> event_receiver: Receiver>>, event_sender: Sender>>, event: Arc>, - ) { + ) -> Result<()> { // Don't even bother making the task if we are not entitled to propose anyway. 
- if self.quorum_membership.leader(view_number, epoch_number) != self.public_key { - tracing::trace!("We are not the leader of the next view"); - return; - } + ensure!( + self.quorum_membership.leader(view_number, epoch_number)? == self.public_key, + "We are not the leader of the next view" + ); // Don't try to propose twice for the same view. - if view_number <= self.latest_proposed_view { - tracing::trace!("We have already proposed for this view"); - return; - } + ensure!( + view_number > self.latest_proposed_view, + "We have already proposed for this view" + ); debug!("Attempting to make dependency task for view {view_number:?} and event {event:?}"); - if self.proposal_dependencies.contains_key(&view_number) { - debug!("Task already exists"); - return; - } + + ensure!( + !self.proposal_dependencies.contains_key(&view_number), + "Task already exists" + ); let dependency_chain = self.create_and_complete_dependencies(view_number, &event_receiver, event); @@ -330,6 +331,8 @@ impl, V: Versions> ); self.proposal_dependencies .insert(view_number, dependency_task.run()); + + Ok(()) } /// Update the latest proposed view number. 
@@ -363,7 +366,7 @@ impl, V: Versions> event: Arc>, event_receiver: Receiver>>, event_sender: Sender>>, - ) { + ) -> Result<()> { match event.as_ref() { HotShotEvent::UpgradeCertificateFormed(cert) => { debug!( @@ -389,7 +392,7 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } either::Left(qc) => { // Only update if the qc is from a newer view @@ -422,24 +425,24 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { let epoch_number = self.consensus.read().await.cur_epoch(); - if !certificate - .is_valid_cert( - self.quorum_membership.as_ref(), - epoch_number, - &self.upgrade_lock, - ) - .await - { - warn!( + + ensure!( + certificate + .is_valid_cert( + self.quorum_membership.as_ref(), + epoch_number, + &self.upgrade_lock + ) + .await, + format!( "View Sync Finalize certificate {:?} was invalid", certificate.data() - ); - return; - } + ) + ); let view_number = certificate.view_number; @@ -449,7 +452,7 @@ impl, V: Versions> event_receiver, event_sender, event, - ); + )?; } HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); @@ -466,14 +469,15 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } HotShotEvent::QuorumProposalSend(proposal, _) => { let view = proposal.data.view_number(); - if !self.update_latest_proposed_view(view).await { - tracing::trace!("Failed to update latest proposed view"); - return; - } + + ensure!( + self.update_latest_proposed_view(view).await, + "Failed to update latest proposed view" + ); } HotShotEvent::VidDisperseSend(vid_share, _) => { let view_number = vid_share.data.view_number(); @@ -485,17 +489,18 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } HotShotEvent::UpdateHighQc(qc) => { - // First, update the high QC. 
- if let Err(e) = self.consensus.write().await.update_high_qc(qc.clone()) { - tracing::trace!("Failed to update high qc; error = {e}"); - } + // First update the high QC internally + self.consensus.write().await.update_high_qc(qc.clone())?; - if let Err(e) = self.storage.write().await.update_high_qc(qc.clone()).await { - warn!("Failed to store High QC of QC we formed; error = {:?}", e); - } + // Then update the high QC in storage + self.storage + .write() + .await + .update_high_qc(qc.clone()) + .await?; broadcast_event( HotShotEvent::HighQcUpdated(qc.clone()).into(), @@ -512,10 +517,11 @@ impl, V: Versions> event_receiver, event_sender, Arc::clone(&event), - ); + )?; } _ => {} } + Ok(()) } } @@ -531,9 +537,7 @@ impl, V: Versions> TaskState sender: &Sender>, receiver: &Receiver>, ) -> Result<()> { - self.handle(event, receiver.clone(), sender.clone()).await; - - Ok(()) + self.handle(event, receiver.clone(), sender.clone()).await } async fn cancel_subtasks(&mut self) { diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index 909fcf2db2..eaed877eff 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -539,8 +539,9 @@ impl, V: Versions> QuorumVoteTaskS event: Arc>, event_receiver: Receiver>>, event_sender: Sender>>, - ) { + ) -> Result<()> { let current_epoch = self.consensus.read().await.cur_epoch(); + match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { trace!("Received Proposal for view {}", *proposal.view_number()); @@ -562,23 +563,25 @@ impl, V: Versions> QuorumVoteTaskS } HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; + trace!("Received DAC for view {}", *view); - if view <= self.latest_voted_view { - return; - } + // Do nothing if the DAC is old + ensure!( + view > self.latest_voted_view, + "Received DAC for an older view." + ); let current_epoch = self.consensus.read().await.cur_epoch(); // Validate the DAC. 
- if !cert - .is_valid_cert( + ensure!( + cert.is_valid_cert( self.da_membership.as_ref(), current_epoch, - &self.upgrade_lock, + &self.upgrade_lock ) - .await - { - return; - } + .await, + "Invalid DAC" + ); // Add to the storage. self.consensus @@ -601,51 +604,43 @@ impl, V: Versions> QuorumVoteTaskS } HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); + // Do nothing if the VID share is old trace!("Received VID share for view {}", *view); - if view <= self.latest_voted_view { - return; - } + ensure!( + view > self.latest_voted_view, + "Received VID share for an older view." + ); // Validate the VID share. let payload_commitment = disperse.data.payload_commitment; let current_epoch = self.consensus.read().await.cur_epoch(); - // Check sender of VID disperse share is signed by DA committee member - let validate_sender = sender - .validate(&disperse.signature, payload_commitment.as_ref()) - && self - .da_membership + + // Check that the signature is valid + ensure!( + sender.validate(&disperse.signature, payload_commitment.as_ref()), + "VID share signature is invalid" + ); + + // ensure that the VID share was sent by a DA member OR the view leader + ensure!( + self.da_membership .committee_members(view, current_epoch) - .contains(sender); - - // Check whether the data satisfies one of the following. - // * From the right leader for this view. - // * Calculated and signed by the current node. - let validated = self - .public_key - .validate(&disperse.signature, payload_commitment.as_ref()) - || self - .quorum_membership - .leader(view, current_epoch) - .validate(&disperse.signature, payload_commitment.as_ref()); - if !validate_sender && !validated { - warn!("Failed to validated the VID dispersal/share sig."); - return; - } + .contains(sender) + || *sender == self.quorum_membership.leader(view, current_epoch)?, + "VID share was not sent by a DA member or the view leader." 
+ ); // NOTE: `verify_share` returns a nested `Result`, so we must check both the inner // and outer results - #[allow(clippy::no_effect)] match vid_scheme(self.quorum_membership.total_nodes(current_epoch)).verify_share( &disperse.data.share, &disperse.data.common, &payload_commitment, ) { Ok(Err(())) | Err(_) => { - return; - } - Ok(Ok(())) => { - (); + bail!("Failed to verify VID share"); } + Ok(Ok(())) => {} } self.consensus @@ -653,10 +648,10 @@ impl, V: Versions> QuorumVoteTaskS .await .update_vid_shares(view, disperse.clone()); - if disperse.data.recipient_key != self.public_key { - debug!("Got a Valid VID share but it's not for our key"); - return; - } + ensure!( + disperse.data.recipient_key == self.public_key, + "Got a Valid VID share but it's not for our key" + ); broadcast_event( Arc::new(HotShotEvent::VidShareValidated(disperse.clone())), @@ -675,11 +670,11 @@ impl, V: Versions> QuorumVoteTaskS debug!("All vote dependencies verified for view {:?}", view_number); if !self.update_latest_voted_view(*view_number).await { debug!("view not updated"); - return; } } _ => {} } + Ok(()) } } @@ -695,9 +690,7 @@ impl, V: Versions> TaskState sender: &Sender>, receiver: &Receiver>, ) -> Result<()> { - self.handle(event, receiver.clone(), sender.clone()).await; - - Ok(()) + self.handle(event, receiver.clone(), sender.clone()).await } async fn cancel_subtasks(&mut self) { diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 354d9d1fd6..cf120fde07 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -425,7 +425,7 @@ impl, V: Versions> TransactionTask &mut self, event: Arc>, event_stream: Sender>>, - ) -> Option { + ) -> Result<()> { match event.as_ref() { HotShotEvent::TransactionsRecv(transactions) => { broadcast_event( @@ -438,30 +438,31 @@ impl, V: Versions> TransactionTask &self.output_event_stream, ) .await; - - return None; } HotShotEvent::ViewChange(view) => { let view = 
*view; + debug!("view change in transactions to view {:?}", view); - if (*view != 0 || *self.cur_view > 0) && *self.cur_view >= *view { - return None; - } + ensure!(*view > *self.cur_view, format!("Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view )); let mut make_block = false; if *view - *self.cur_view > 1 { info!("View changed by more than 1 going to view {:?}", view); - make_block = self.membership.leader(view, self.cur_epoch) == self.public_key; + make_block = self.membership.leader(view, self.cur_epoch)? == self.public_key; } self.cur_view = view; let next_view = self.cur_view + 1; let next_leader = - self.membership.leader(next_view, self.cur_epoch) == self.public_key; - if !make_block && !next_leader { - debug!("Not next leader for view {:?}", self.cur_view); - return None; - } + self.membership.leader(next_view, self.cur_epoch)? == self.public_key; + + ensure!( + make_block || next_leader, + format!( + "Not making the block because we are not leader for view {:?}", + self.cur_view + ) + ); if make_block { self.handle_view_change(&event_stream, self.cur_view).await; @@ -471,12 +472,9 @@ impl, V: Versions> TransactionTask self.handle_view_change(&event_stream, next_view).await; } } - HotShotEvent::Shutdown => { - return Some(HotShotTaskCompleted); - } _ => {} } - None + Ok(()) } /// Get VID commitment for the last successful view before `block_view`. 
@@ -792,7 +790,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; + self.handle(event, sender.clone()).await?; Ok(()) } diff --git a/crates/task-impls/src/upgrade.rs b/crates/task-impls/src/upgrade.rs index 4992b06887..19eba0107b 100644 --- a/crates/task-impls/src/upgrade.rs +++ b/crates/task-impls/src/upgrade.rs @@ -6,7 +6,7 @@ use std::{marker::PhantomData, sync::Arc, time::SystemTime}; -use anyhow::Result; +use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use committable::Committable; @@ -32,7 +32,7 @@ use tracing::{debug, error, info, instrument, warn}; use vbs::version::StaticVersionType; use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, + events::HotShotEvent, helpers::broadcast_event, vote_collection::{handle_vote, VoteCollectorsMap}, }; @@ -109,7 +109,7 @@ impl, V: Versions> UpgradeTaskStat &mut self, event: Arc>, tx: Sender>>, - ) -> Option { + ) -> Result<()> { match event.as_ref() { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { info!("Received upgrade proposal: {:?}", proposal); @@ -117,34 +117,33 @@ impl, V: Versions> UpgradeTaskStat let view = *proposal.data.view_number(); // Skip voting if the version has already been upgraded. - if self.upgraded().await { - info!( - "Already upgraded to {:?}, skip voting.", - V::Upgrade::VERSION - ); - return None; - } + ensure!( + !self.upgraded().await, + format!("Already upgraded to {:?}; not voting.", V::Upgrade::VERSION) + ); let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) - .ok()? + .context("Failed to calculate duration")? .as_secs(); - if time < self.start_voting_time || time >= self.stop_voting_time { - return None; - } + ensure!( + time >= self.start_voting_time && time < self.stop_voting_time, + "Refusing to vote because we are no longer in the configured vote time window." 
+ ); - if view < self.start_voting_view || view >= self.stop_voting_view { - return None; - } + ensure!( + view >= self.start_voting_view && view < self.stop_voting_view, + "Refusing to vote because we are no longer in the configured vote view window." + ); // If the proposal does not match our upgrade target, we immediately exit. - if proposal.data.upgrade_proposal.new_version_hash != V::UPGRADE_HASH - || proposal.data.upgrade_proposal.old_version != V::Base::VERSION - || proposal.data.upgrade_proposal.new_version != V::Upgrade::VERSION - { - return None; - } + ensure!( + proposal.data.upgrade_proposal.new_version_hash == V::UPGRADE_HASH + && proposal.data.upgrade_proposal.old_version == V::Base::VERSION + && proposal.data.upgrade_proposal.new_version == V::Upgrade::VERSION, + "Proposal does not match our upgrade target" + ); // If we have an upgrade target, we validate that the proposal is relevant for the current view. info!( @@ -169,20 +168,23 @@ impl, V: Versions> UpgradeTaskStat // the `UpgradeProposalRecv` event. Otherwise, the view number subtraction below will // cause an overflow error. // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block - if self.cur_view != TYPES::View::genesis() && view < self.cur_view - 1 { - warn!("Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", + ensure!( + self.cur_view != TYPES::View::genesis() && *view >= self.cur_view.saturating_sub(1), + format!( + "Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", view, self.cur_view - ); - return None; - } + ) + ); // We then validate that the proposal was issued by the leader for the view. 
- let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch); - if &view_leader_key != sender { - error!("Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone()); - return None; - } + let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch)?; + ensure!( + view_leader_key == *sender, + format!( + "Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone() + ) + ); // At this point, we've checked that: // * the proposal was expected, @@ -201,18 +203,15 @@ impl, V: Versions> UpgradeTaskStat .await; // If everything is fine up to here, we generate and send a vote on the proposal. - let Ok(vote) = UpgradeVote::create_signed_vote( + let vote = UpgradeVote::create_signed_vote( proposal.data.upgrade_proposal.clone(), view, &self.public_key, &self.private_key, &self.upgrade_lock, ) - .await - else { - error!("Failed to sign UpgradeVote!"); - return None; - }; + .await?; + debug!("Sending upgrade vote {:?}", vote.view_number()); broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; } @@ -222,15 +221,15 @@ impl, V: Versions> UpgradeTaskStat // Check if we are the leader. { let view = vote.view_number(); - if self.quorum_membership.leader(view, self.cur_epoch) != self.public_key { - error!( + ensure!( + self.quorum_membership.leader(view, self.cur_epoch)? == self.public_key, + format!( "We are not the leader for view {} are we leader for next view? {}", *view, - self.quorum_membership.leader(view + 1, self.cur_epoch) + self.quorum_membership.leader(view + 1, self.cur_epoch)? 
== self.public_key - ); - return None; - } + ) + ); } handle_vote( @@ -244,19 +243,17 @@ impl, V: Versions> UpgradeTaskStat &tx, &self.upgrade_lock, ) - .await; + .await?; } HotShotEvent::ViewChange(new_view) => { - if self.cur_view >= *new_view { - return None; - } + ensure!(self.cur_view < *new_view); self.cur_view = *new_view; let view: u64 = *self.cur_view; let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) - .ok()? + .context("Failed to calculate duration")? .as_secs(); // We try to form a certificate 5 views before we're leader. @@ -268,7 +265,7 @@ impl, V: Versions> UpgradeTaskStat && self.quorum_membership.leader( TYPES::View::new(view + UPGRADE_PROPOSE_OFFSET), self.cur_epoch, - ) == self.public_key + )? == self.public_key { let upgrade_proposal_data = UpgradeProposalData { old_version: V::Base::VERSION, @@ -307,16 +304,13 @@ impl, V: Versions> UpgradeTaskStat ) .await; } - - return None; } HotShotEvent::Shutdown => { error!("Shutting down because of shutdown signal!"); - return Some(HotShotTaskCompleted); } _ => {} } - None + Ok(()) } } @@ -333,7 +327,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; + self.handle(event, sender.clone()).await?; Ok(()) } diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index fb00054f88..a05f50d64b 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -12,7 +12,7 @@ use std::{ time::Duration, }; -use anyhow::Result; +use anyhow::{ensure, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -125,9 +125,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await; - - Ok(()) + self.handle(event, sender.clone()).await } async fn cancel_subtasks(&mut self) {} @@ -258,7 +256,7 @@ impl, V: 
Versions> ViewSyncTaskSta &mut self, event: Arc>, event_stream: Sender>>, - ) { + ) -> Result<()> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { debug!("Received view sync cert for phase {:?}", certificate); @@ -292,26 +290,26 @@ impl, V: Versions> ViewSyncTaskSta let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task - .handle_vote_event(Arc::clone(&event), &event_stream) - .await; - if result == Some(HotShotTaskCompleted) { - // The protocol has finished + // Handle the vote and check if the accumulator has returned successfully + if relay_task + .handle_vote_event(Arc::clone(&event), &event_stream) + .await? + .is_some() + { map.remove(&vote_view); } - return; + + return Ok(()); } // We do not have a relay task already running, so start one - if self - .membership - .leader(vote_view + relay, self.current_epoch) - != self.public_key - { - debug!("View sync vote sent to wrong leader"); - return; - } + ensure!( + self.membership + .leader(vote_view + relay, self.current_epoch)? 
+ == self.public_key, + "View sync vote sent to wrong leader" + ); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -322,10 +320,9 @@ impl, V: Versions> ViewSyncTaskSta }; let vote_collector = create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await; - if let Some(vote_task) = vote_collector { - relay_map.insert(relay, vote_task); - } + .await?; + + relay_map.insert(relay, vote_collector); } HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { @@ -335,26 +332,26 @@ impl, V: Versions> ViewSyncTaskSta let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task - .handle_vote_event(Arc::clone(&event), &event_stream) - .await; - if result == Some(HotShotTaskCompleted) { - // The protocol has finished + // Handle the vote and check if the accumulator has returned successfully + if relay_task + .handle_vote_event(Arc::clone(&event), &event_stream) + .await? + .is_some() + { map.remove(&vote_view); } - return; + + return Ok(()); } // We do not have a relay task already running, so start one - if self - .membership - .leader(vote_view + relay, self.current_epoch) - != self.public_key - { - debug!("View sync vote sent to wrong leader"); - return; - } + ensure!( + self.membership + .leader(vote_view + relay, self.current_epoch)? 
+ == self.public_key, + "View sync vote sent to wrong leader" + ); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -363,12 +360,11 @@ impl, V: Versions> ViewSyncTaskSta epoch: self.current_epoch, id: self.id, }; + let vote_collector = create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) - .await; - if let Some(vote_task) = vote_collector { - relay_map.insert(relay, vote_task); - } + .await?; + relay_map.insert(relay, vote_collector); } HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { @@ -378,26 +374,26 @@ impl, V: Versions> ViewSyncTaskSta let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { debug!("Forwarding message"); - let result = relay_task - .handle_vote_event(Arc::clone(&event), &event_stream) - .await; - if result == Some(HotShotTaskCompleted) { - // The protocol has finished + // Handle the vote and check if the accumulator has returned successfully + if relay_task + .handle_vote_event(Arc::clone(&event), &event_stream) + .await? + .is_some() + { map.remove(&vote_view); } - return; + + return Ok(()); } // We do not have a relay task already running, so start one - if self - .membership - .leader(vote_view + relay, self.current_epoch) - != self.public_key - { - debug!("View sync vote sent to wrong leader"); - return; - } + ensure!( + self.membership + .leader(vote_view + relay, self.current_epoch)? 
+ == self.public_key, + "View sync vote sent to wrong leader" + ); let info = AccumulatorInfo { public_key: self.public_key.clone(), @@ -409,7 +405,7 @@ impl, V: Versions> ViewSyncTaskSta let vote_collector = create_vote_accumulator(&info, event, &event_stream, self.upgrade_lock.clone()) .await; - if let Some(vote_task) = vote_collector { + if let Ok(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } } @@ -454,13 +450,14 @@ impl, V: Versions> ViewSyncTaskSta } &HotShotEvent::Timeout(view_number) => { // This is an old timeout and we can ignore it - if view_number <= TYPES::View::new(*self.current_view) { - return; - } + ensure!( + view_number > self.current_view, + "Discarding old timeout vote." + ); self.num_timeouts_tracked += 1; - let leader = self.membership.leader(view_number, self.current_epoch); - warn!( + let leader = self.membership.leader(view_number, self.current_epoch)?; + error!( %leader, leader_mnemonic = cdn_proto::util::mnemonic(&leader), view_number = *view_number, @@ -496,6 +493,7 @@ impl, V: Versions> ViewSyncTaskSta _ => {} } + Ok(()) } } diff --git a/crates/task-impls/src/vote_collection.rs b/crates/task-impls/src/vote_collection.rs index 533a96b719..62d2630be1 100644 --- a/crates/task-impls/src/vote_collection.rs +++ b/crates/task-impls/src/vote_collection.rs @@ -11,6 +11,7 @@ use std::{ sync::Arc, }; +use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_trait::async_trait; use either::Either::{self, Left, Right}; @@ -30,12 +31,9 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; -use tracing::{debug, error}; +use tracing::debug; -use crate::{ - events::{HotShotEvent, HotShotTaskCompleted}, - helpers::broadcast_event, -}; +use crate::{events::HotShotEvent, helpers::broadcast_event}; /// Alias for a map of Vote Collectors pub type VoteCollectorsMap = @@ -74,8 +72,15 @@ pub trait AggregatableVote< CERT: Certificate, > { - /// return the leader for this votes in the 
given epoch - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey; + /// return the leader for this votes + /// + /// # Errors + /// if the leader cannot be calculated + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result; /// return the Hotshot event for the completion of this CERT fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; @@ -84,48 +89,55 @@ pub trait AggregatableVote< impl< TYPES: NodeType, VOTE: Vote + AggregatableVote, - CERT: Certificate + Debug, + CERT: Certificate + Clone + Debug, V: Versions, > VoteCollectionTaskState { /// Take one vote and accumulate it. Returns either the cert or the updated state /// after the vote is accumulated + /// + /// # Errors + /// If are unable to accumulate the vote #[allow(clippy::question_mark)] pub async fn accumulate_vote( &mut self, vote: &VOTE, event_stream: &Sender>>, - ) -> Option { - if vote.leader(&self.membership, self.epoch) != self.public_key { - error!("Received vote for a view in which we were not the leader."); - return None; - } - - if vote.view_number() != self.view { - error!( + ) -> Result> { + ensure!( + vote.leader(&self.membership, self.epoch)? == self.public_key, + "Received vote for a view in which we were not the leader." + ); + ensure!( + vote.view_number() == self.view, + format!( "Vote view does not match! vote view is {} current view is {}", *vote.view_number(), *self.view - ); - return None; - } + ) + ); + + let accumulator = self + .accumulator + .as_mut() + .context("No accumulator to handle vote with.")?; - let accumulator = self.accumulator.as_mut()?; match accumulator .accumulate(vote, &self.membership, self.epoch) .await { - Either::Left(()) => None, + Either::Left(()) => Ok(None), Either::Right(cert) => { debug!("Certificate Formed! 
{:?}", cert); broadcast_event( - Arc::new(VOTE::make_cert_event(cert, &self.public_key)), + Arc::new(VOTE::make_cert_event(cert.clone(), &self.public_key)), event_stream, ) .await; self.accumulator = None; - Some(HotShotTaskCompleted) + + Ok(Some(cert)) } } } @@ -140,11 +152,14 @@ where CERT: Certificate + Debug, { /// Handle a vote event + /// + /// # Errors + /// Returns an error if we fail to handle the vote async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Option; + ) -> Result>; /// Event filter to use for this event fn filter(event: Arc>) -> bool; @@ -165,6 +180,10 @@ pub struct AccumulatorInfo { } /// Generic function for spawning a vote task. Returns the event stream id of the spawned task if created +/// +/// # Errors +/// If we faile to create the accumulator +/// /// # Panics /// Calls unwrap but should never panic. pub async fn create_vote_accumulator( @@ -172,7 +191,7 @@ pub async fn create_vote_accumulator( event: Arc>, sender: &Sender>>, upgrade_lock: UpgradeLock, -) -> Option> +) -> Result> where TYPES: NodeType, VOTE: Vote @@ -204,17 +223,15 @@ where id: info.id, }; - let result = state.handle_vote_event(Arc::clone(&event), sender).await; + state.handle_vote_event(Arc::clone(&event), sender).await?; - if result == Some(HotShotTaskCompleted) { - // The protocol has finished - return None; - } - - Some(state) + Ok(state) } /// A helper function that handles a vote regardless whether it's the first vote in the view or not. 
+/// +/// # Errors +/// If we fail to handle the vote #[allow(clippy::too_many_arguments)] pub async fn handle_vote< TYPES: NodeType, @@ -231,7 +248,8 @@ pub async fn handle_vote< event: &Arc>, event_stream: &Sender>>, upgrade_lock: &UpgradeLock, -) where +) -> Result<()> +where VoteCollectionTaskState: HandleVoteEvent, { match collectors.entry(vote.view_number()) { @@ -244,29 +262,31 @@ pub async fn handle_vote< epoch, id, }; - if let Some(collector) = create_vote_accumulator( + let collector = create_vote_accumulator( &info, Arc::clone(event), event_stream, upgrade_lock.clone(), ) - .await - { - entry.insert(collector); - }; + .await?; + + entry.insert(collector); + + Ok(()) } Entry::Occupied(mut entry) => { - let result = entry + // handle the vote, and garbage collect if the vote collector is finished + if entry .get_mut() .handle_vote_event(Arc::clone(event), event_stream) - .await; - - if result == Some(HotShotTaskCompleted) { - // garbage collect vote collectors for old views (including the one just finished) + .await? 
+ .is_some() + { entry.remove(); *collectors = collectors.split_off(&vote.view_number()); - // The protocol has finished } + + Ok(()) } } } @@ -303,7 +323,11 @@ type ViewSyncFinalizeVoteState = VoteCollectionTaskState< impl AggregatableVote, QuorumCertificate> for QuorumVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( @@ -317,7 +341,11 @@ impl AggregatableVote, QuorumCertifica impl AggregatableVote, UpgradeCertificate> for UpgradeVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number(), epoch) } fn make_cert_event( @@ -331,7 +359,11 @@ impl AggregatableVote, UpgradeCertifi impl AggregatableVote, DaCertificate> for DaVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number(), epoch) } fn make_cert_event( @@ -345,7 +377,11 @@ impl AggregatableVote, DaCertificate AggregatableVote, TimeoutCertificate> for TimeoutVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.view_number() + 1, epoch) } fn make_cert_event( @@ -360,7 +396,11 @@ impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.date().round + 
self.date().relay, epoch) } fn make_cert_event( @@ -375,7 +415,11 @@ impl AggregatableVote, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( @@ -390,7 +434,11 @@ impl AggregatableVote, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVote { - fn leader(&self, membership: &TYPES::Membership, epoch: TYPES::Epoch) -> TYPES::SignatureKey { + fn leader( + &self, + membership: &TYPES::Membership, + epoch: TYPES::Epoch, + ) -> Result { membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( @@ -411,10 +459,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -432,10 +480,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -451,10 +499,10 @@ impl HandleVoteEvent, DaCerti &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -471,10 +519,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -491,12 +539,12 @@ impl &mut self, event: Arc>, sender: &Sender>>, - 
) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await } - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -513,10 +561,10 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { @@ -533,12 +581,12 @@ impl &mut self, event: Arc>, sender: &Sender>>, - ) -> Option { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await } - _ => None, + _ => Ok(None), } } fn filter(event: Arc>) -> bool { diff --git a/crates/testing/src/helpers.rs b/crates/testing/src/helpers.rs index c9113e711d..5dbc6e4c43 100644 --- a/crates/testing/src/helpers.rs +++ b/crates/testing/src/helpers.rs @@ -7,7 +7,6 @@ #![allow(clippy::panic)] use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; -use crate::test_builder::TestDescription; use async_broadcast::{Receiver, Sender}; use bitvec::bitvec; use committable::Committable; @@ -45,6 +44,8 @@ use hotshot_types::{ use jf_vid::VidScheme; use serde::Serialize; +use crate::test_builder::TestDescription; + /// create the [`SystemContextHandle`] from a node id /// # Panics /// if cannot create a [`HotShotInitializer`] diff --git a/crates/testing/src/view_generator.rs b/crates/testing/src/view_generator.rs index dcaedd5dc1..87a688b74b 100644 --- a/crates/testing/src/view_generator.rs +++ b/crates/testing/src/view_generator.rs @@ -12,9 +12,6 @@ use std::{ task::{Context, Poll}, }; -use crate::helpers::{ - build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, -}; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; use hotshot_example_types::{ @@ -22,11 +19,10 @@ use 
hotshot_example_types::{ node_types::{MemoryImpl, TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ data::{ - DaProposal, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, ViewChangeEvidence, - ViewNumber, + DaProposal, EpochNumber, Leaf, QuorumProposal, VidDisperse, VidDisperseShare, + ViewChangeEvidence, ViewNumber, }, message::{Proposal, UpgradeLock}, simple_certificate::{ @@ -46,6 +42,10 @@ use hotshot_types::{ use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; +use crate::helpers::{ + build_cert, build_da_certificate, build_vid_proposal, da_payload_commitment, key_pair_for_id, +}; + #[derive(Clone)] pub struct TestView { pub da_proposal: Proposal>, diff --git a/crates/testing/tests/tests_1/da_task.rs b/crates/testing/tests/tests_1/da_task.rs index 1f78de5467..ad417a0431 100644 --- a/crates/testing/tests/tests_1/da_task.rs +++ b/crates/testing/tests/tests_1/da_task.rs @@ -21,9 +21,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, PackedBundle, ViewNumber}, + data::{null_block, EpochNumber, PackedBundle, ViewNumber}, simple_vote::DaData, traits::{ block_contents::precompute_vid_commitment, diff --git a/crates/testing/tests/tests_1/network_task.rs b/crates/testing/tests/tests_1/network_task.rs index 571593d3c3..a9a6db1edb 100644 --- a/crates/testing/tests/tests_1/network_task.rs +++ b/crates/testing/tests/tests_1/network_task.rs @@ -17,9 +17,8 @@ use hotshot_testing::{ helpers::build_system_handle, test_builder::TestDescription, test_task::add_network_message_test_task, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, message::UpgradeLock, traits::{ election::Membership, diff --git a/crates/testing/tests/tests_1/quorum_proposal_task.rs 
b/crates/testing/tests/tests_1/quorum_proposal_task.rs index 5be8b11ba1..ae5fa54fdc 100644 --- a/crates/testing/tests/tests_1/quorum_proposal_task.rs +++ b/crates/testing/tests/tests_1/quorum_proposal_task.rs @@ -24,9 +24,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, Leaf, ViewChangeEvidence, ViewNumber}, + data::{null_block, EpochNumber, Leaf, ViewChangeEvidence, ViewNumber}, simple_vote::{TimeoutData, ViewSyncFinalizeData}, traits::{ election::Membership, diff --git a/crates/testing/tests/tests_1/transaction_task.rs b/crates/testing/tests/tests_1/transaction_task.rs index b8c6a194b9..04cd0d528e 100644 --- a/crates/testing/tests/tests_1/transaction_task.rs +++ b/crates/testing/tests/tests_1/transaction_task.rs @@ -7,9 +7,8 @@ use hotshot_task_impls::{ events::HotShotEvent, harness::run_harness, transactions::TransactionTaskState, }; use hotshot_testing::helpers::build_system_handle; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, PackedBundle, ViewNumber}, + data::{null_block, EpochNumber, PackedBundle, ViewNumber}, traits::{ block_contents::precompute_vid_commitment, election::Membership, diff --git a/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs b/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs index f0493fb5e6..b82804f4f8 100644 --- a/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/crates/testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -29,9 +29,8 @@ use hotshot_testing::{ serial, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, Leaf, ViewNumber}, + data::{null_block, EpochNumber, Leaf, ViewNumber}, simple_vote::UpgradeProposalData, traits::{ election::Membership, diff --git a/crates/testing/tests/tests_1/vid_task.rs b/crates/testing/tests/tests_1/vid_task.rs index 714a12a3b5..c2ca9aec09 100644 --- 
a/crates/testing/tests/tests_1/vid_task.rs +++ b/crates/testing/tests/tests_1/vid_task.rs @@ -20,9 +20,8 @@ use hotshot_testing::{ script::{Expectations, InputOrder, TaskScript}, serial, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::{null_block, DaProposal, PackedBundle, VidDisperse, ViewNumber}, + data::{null_block, DaProposal, EpochNumber, PackedBundle, VidDisperse, ViewNumber}, traits::{ consensus_api::ConsensusApi, election::Membership, diff --git a/crates/testing/tests/tests_1/vote_dependency_handle.rs b/crates/testing/tests/tests_1/vote_dependency_handle.rs index 6fe2979420..7f26189669 100644 --- a/crates/testing/tests/tests_1/vote_dependency_handle.rs +++ b/crates/testing/tests/tests_1/vote_dependency_handle.rs @@ -11,10 +11,9 @@ use hotshot_testing::{ predicates::{event::*, Predicate, PredicateResult}, view_generator::TestViewGenerator, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ consensus::OuterConsensus, - data::ViewNumber, + data::{EpochNumber, ViewNumber}, traits::{consensus_api::ConsensusApi, node_implementation::ConsensusTime}, vote::HasViewNumber, }; diff --git a/crates/testing/tests/tests_3/byzantine_tests.rs b/crates/testing/tests/tests_3/byzantine_tests.rs index e5ac199aaf..00ea1ff4d7 100644 --- a/crates/testing/tests/tests_3/byzantine_tests.rs +++ b/crates/testing/tests/tests_3/byzantine_tests.rs @@ -176,7 +176,7 @@ cross_tests!( view_increment: nodes_count as u64, modifier: Arc::new(move |_pk, message_kind, transmit_type: &mut TransmitType, membership: &::Membership| { if let MessageKind::Consensus(SequencingMessage::General(GeneralConsensusMessage::Vote(vote))) = message_kind { - *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, EpochNumber::new(0))); + *transmit_type = TransmitType::Direct(membership.leader(vote.view_number() + 1 - nodes_count as u64, EpochNumber::new(0)).unwrap()); } else { {} } diff --git 
a/crates/testing/tests/tests_3/memory_network.rs b/crates/testing/tests/tests_3/memory_network.rs index 3050cd7d32..3e56630313 100644 --- a/crates/testing/tests/tests_3/memory_network.rs +++ b/crates/testing/tests/tests_3/memory_network.rs @@ -23,9 +23,8 @@ use hotshot_example_types::{ state_types::{TestInstanceState, TestValidatedState}, storage_types::TestStorage, }; -use hotshot_types::data::EpochNumber; use hotshot_types::{ - data::ViewNumber, + data::{EpochNumber, ViewNumber}, message::{DataMessage, Message, MessageKind, UpgradeLock}, signature_key::{BLSPubKey, BuilderKey}, traits::{ diff --git a/crates/types/src/data.rs b/crates/types/src/data.rs index 3518f2e8bd..94d15cf29f 100644 --- a/crates/types/src/data.rs +++ b/crates/types/src/data.rs @@ -9,6 +9,14 @@ //! This module provides types for representing consensus internal state, such as leaves, //! `HotShot`'s version of a block, and proposals, messages upon which to reach the consensus. +use std::{ + collections::BTreeMap, + fmt::{Debug, Display}, + hash::Hash, + marker::PhantomData, + sync::Arc, +}; + use anyhow::{ensure, Result}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use async_lock::RwLock; @@ -20,13 +28,6 @@ use derivative::Derivative; use jf_vid::{precomputable::Precomputable, VidDisperse as JfVidDisperse, VidScheme}; use rand::Rng; use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - fmt::{Debug, Display}, - hash::Hash, - marker::PhantomData, - sync::Arc, -}; use thiserror::Error; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; diff --git a/crates/types/src/message.rs b/crates/types/src/message.rs index 82a561cd4b..aa5d8198bb 100644 --- a/crates/types/src/message.rs +++ b/crates/types/src/message.rs @@ -363,7 +363,7 @@ where upgrade_lock: &UpgradeLock, ) -> Result<()> { let view_number = self.data.view_number(); - let view_leader_key = quorum_membership.leader(view_number, epoch); + let view_leader_key = 
quorum_membership.leader(view_number, epoch)?; let proposed_leaf = Leaf::from_quorum_proposal(&self.data); ensure!( diff --git a/crates/types/src/traits/election.rs b/crates/types/src/traits/election.rs index 43d0ebf12f..9c8e950bfc 100644 --- a/crates/types/src/traits/election.rs +++ b/crates/types/src/traits/election.rs @@ -7,6 +7,8 @@ //! The election trait, used to decide which node is the leader and determine if a vote is valid. use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; +use anyhow::Result; + use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; @@ -54,8 +56,11 @@ pub trait Membership: /// See if a node has stake in the committee in a specific epoch fn has_stake(&self, pub_key: &TYPES::SignatureKey, epoch: TYPES::Epoch) -> bool; - /// The leader of the committee for view `view_number` in an epoch `epoch`. - fn leader(&self, view_number: TYPES::View, epoch: TYPES::Epoch) -> TYPES::SignatureKey; + /// The leader of the committee for view `view_number` in `epoch`. 
+ /// + /// # Errors + /// Returns an error if the leader cannot be calculated + fn leader(&self, view: TYPES::View, epoch: TYPES::Epoch) -> Result; /// Get the network topic for the committee fn committee_topic(&self) -> Topic; From 15289ff5615a5a7acfc226953b074c2161997c86 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 7 Oct 2024 18:42:30 -0400 Subject: [PATCH 07/16] fix --- crates/task-impls/src/da.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 9d4f36b543..9f00ee0b0d 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -116,7 +116,7 @@ impl, V: Versions> DaTaskState Date: Tue, 8 Oct 2024 12:52:00 -0400 Subject: [PATCH 08/16] fix --- crates/task-impls/src/transactions.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index cf120fde07..02aa9136c5 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -443,7 +443,7 @@ impl, V: Versions> TransactionTask let view = *view; debug!("view change in transactions to view {:?}", view); - ensure!(*view > *self.cur_view, format!("Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view )); + ensure!(*view > *self.cur_view || *self.cur_view == 0, format!("Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view )); let mut make_block = false; if *view - *self.cur_view > 1 { @@ -790,9 +790,7 @@ impl, V: Versions> TaskState sender: &Sender>, _receiver: &Receiver>, ) -> Result<()> { - self.handle(event, sender.clone()).await?; - - Ok(()) + self.handle(event, sender.clone()).await } async fn cancel_subtasks(&mut self) {} From b5a6d3095ecef26429d3787a1ec7ff0295db5df4 Mon Sep 17 00:00:00 2001 From: ss-es 
<155648797+ss-es@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:08:28 -0400 Subject: [PATCH 09/16] fix --- crates/task-impls/src/upgrade.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/task-impls/src/upgrade.rs b/crates/task-impls/src/upgrade.rs index 19eba0107b..570a5d4212 100644 --- a/crates/task-impls/src/upgrade.rs +++ b/crates/task-impls/src/upgrade.rs @@ -246,7 +246,7 @@ impl, V: Versions> UpgradeTaskStat .await?; } HotShotEvent::ViewChange(new_view) => { - ensure!(self.cur_view < *new_view); + ensure!(self.cur_view < *new_view || *self.cur_view == 0); self.cur_view = *new_view; From f6c3d19eefee706a7609221b7cd96f707005122e Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:21:19 -0400 Subject: [PATCH 10/16] fix --- crates/types/src/consensus.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index a351a09b96..c24a25cc4d 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -645,7 +645,7 @@ impl Consensus { /// Can return an error when the provided high_qc is not newer than the existing entry. pub fn update_high_qc(&mut self, high_qc: QuorumCertificate) -> Result<()> { ensure!( - high_qc.view_number > self.high_qc.view_number, + high_qc.view_number > self.high_qc.view_number || high_qc == self.high_qc, "High QC with an equal or higher view exists." 
); debug!("Updating high QC"); From da977a9026a94eaa3a4e569341850ca127337c1d Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 16 Oct 2024 14:36:51 -0400 Subject: [PATCH 11/16] add `anyhow` replacement --- Cargo.lock | 8 + Cargo.toml | 1 + crates/task-impls/Cargo.toml | 1 + crates/utils/Cargo.toml | 11 ++ crates/utils/src/lib.rs | 4 + crates/utils/src/result12345.rs | 185 ++++++++++++++++++++ crates/utils/src/result12345/macros.rs | 227 +++++++++++++++++++++++++ 7 files changed, 437 insertions(+) create mode 100644 crates/utils/Cargo.toml create mode 100644 crates/utils/src/lib.rs create mode 100644 crates/utils/src/result12345.rs create mode 100644 crates/utils/src/result12345/macros.rs diff --git a/Cargo.lock b/Cargo.lock index 21c52b43de..231d39f84b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3363,6 +3363,7 @@ dependencies = [ "tokio", "tracing", "url", + "utils", "vbs", "vec1", ] @@ -8381,6 +8382,13 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "utils" +version = "0.5.77" +dependencies = [ + "tracing", +] + [[package]] name = "uuid" version = "1.8.0" diff --git a/Cargo.toml b/Cargo.toml index 02a9c4207e..0dafb4e7cf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ members = [ "crates/types", "crates/builder-api", "crates/fakeapi", + "crates/utils", ] resolver = "2" diff --git a/crates/task-impls/Cargo.toml b/crates/task-impls/Cargo.toml index 2492438ea4..c9a67c0c6a 100644 --- a/crates/task-impls/Cargo.toml +++ b/crates/task-impls/Cargo.toml @@ -38,6 +38,7 @@ tagged-base64 = { workspace = true } time = { workspace = true } tracing = { workspace = true } url = { workspace = true } +utils = { path = "../utils" } vbs = { workspace = true } vec1 = { workspace = true } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml new file mode 100644 index 0000000000..fba1648dd7 
--- /dev/null +++ b/crates/utils/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "utils" +version = { workspace = true } +edition = { workspace = true } +description = "Utils" + +[dependencies] +tracing = { workspace = true } + +[lints] +workspace = true diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs new file mode 100644 index 0000000000..266fa1e6d2 --- /dev/null +++ b/crates/utils/src/lib.rs @@ -0,0 +1,4 @@ +//! General (not HotShot-specific) utilities + +/// Error utilities, intended to function as a replacement to `anyhow`. +pub mod result12345; diff --git a/crates/utils/src/result12345.rs b/crates/utils/src/result12345.rs new file mode 100644 index 0000000000..42edbed2ff --- /dev/null +++ b/crates/utils/src/result12345.rs @@ -0,0 +1,185 @@ +use std::{cmp::max, fmt::Display}; + +/// Macros +mod macros; +#[allow(unused_imports)] +pub use macros::*; + +/// Default log level for the crate +pub const DEFAULT_LOG_LEVEL: Level = Level::Info; + +/// Trait for logging errors +pub trait Log { + /// Log an error via `tracing` utilities, printing it. 
+ fn log(&self); +} + +impl Log for Error { + fn log(&self) { + let mut error_level = self.level; + if error_level == Level::Unspecified { + error_level = DEFAULT_LOG_LEVEL; + } + + match error_level { + Level::Trace => { + tracing::trace!("{}", self.message); + } + Level::Debug => { + tracing::debug!("{}", self.message); + } + Level::Info => { + tracing::info!("{}", self.message); + } + Level::Warn => { + tracing::warn!("{}", self.message); + } + Level::Error => { + tracing::error!("{}", self.message); + } + // impossible + Level::Unspecified => {} + } + } +} + +impl Log for Result { + fn log(&self) { + let error = match self { + Ok(_) => { + return; + } + Err(e) => e, + }; + + let mut error_level = error.level; + if error_level == Level::Unspecified { + error_level = DEFAULT_LOG_LEVEL; + } + + match error_level { + Level::Trace => { + tracing::trace!("{}", error.message); + } + Level::Debug => { + tracing::debug!("{}", error.message); + } + Level::Info => { + tracing::info!("{}", error.message); + } + Level::Warn => { + tracing::warn!("{}", error.message); + } + Level::Error => { + tracing::error!("{}", error.message); + } + // impossible + Level::Unspecified => {} + } + } +} + +#[derive(Debug, Clone)] +/// main error type +pub struct Error { + /// level + level: Level, + /// message + message: String, +} + +/// Trait for a `std::result::Result` that can be wrapped into a `Result` +pub trait Wrap { + /// Wrap the value into a `Result` + fn wrap(self) -> Result; +} + +impl Wrap for std::result::Result +where + E: Display, +{ + fn wrap(self) -> Result { + match self { + Ok(t) => Ok(t), + Err(e) => Err(Error { + level: Level::Unspecified, + message: format!("{e}"), + }), + } + } +} + +impl Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.message) + } +} + +/// Alias for the main `Result` type used by the crate. 
+pub type Result = std::result::Result; + +#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] +/// Possible log levels +pub enum Level { + /// Unspecified log level + Unspecified, + /// TRACE + Trace, + /// DEBUG + Debug, + /// INFO + Info, + /// WARN + Warn, + /// ERROR + Error, +} + +/// Prepend an error to its cause +fn concatenate(error: &String, cause: &String) -> String { + format!("{error}\ncaused by: {cause}") +} + +/// Trait for converting error types to a `Result`. +pub trait Context { + /// Attach context to the given error. + /// + /// # Errors + /// Propagates errors from `self` + fn context(self, error: Error) -> Result; +} + +impl Context for Result { + fn context(self, error: Error) -> Result { + match self { + Ok(t) => Ok(t), + Err(cause) => Err(Error { + level: max(error.level, cause.level), + message: concatenate(&error.message, &format!("{cause}")), + }), + } + } +} + +impl Context for Option { + fn context(self, error: Error) -> Result { + match self { + Some(t) => Ok(t), + None => Err(error), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn ordering() { + assert!(Level::Trace < Level::Debug); + assert!(Level::Debug < Level::Info); + assert!(Level::Info < Level::Warn); + assert!(Level::Warn < Level::Error); + assert!(max(Level::Trace, Level::Error) == Level::Error); + } +} diff --git a/crates/utils/src/result12345/macros.rs b/crates/utils/src/result12345/macros.rs new file mode 100644 index 0000000000..f2f260071a --- /dev/null +++ b/crates/utils/src/result12345/macros.rs @@ -0,0 +1,227 @@ +#[macro_export] +/// Print the file and line number of the location this macro is invoked +macro_rules! line_info { + () => { + format!("{}:{}", file!(), line!()) + }; +} +pub use line_info; + +#[macro_export] +/// Create an error at the trace level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! 
trace { + ($error:expr) => { + Error { + level: Level::Trace, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Trace, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Trace, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use trace; + +#[macro_export] +/// Create an error at the debug level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! debug { + ($error:expr) => { + Error { + level: Level::Debug, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Debug, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Debug, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use debug; + +#[macro_export] +/// Create an error at the info level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! info { + ($error:expr) => { + Error { + level: Level::Info, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Info, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Info, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use info; + +#[macro_export] +/// Create an error at the warn level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! 
warn { + ($error:expr) => { + Error { + level: Level::Warn, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Warn, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Warn, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use crate::warn; + +#[macro_export] +/// Create an error at the error level. +/// +/// The argument can be either: +/// - an expression implementing `Display` +/// - a string literal +/// - a format string, similar to the `format!()` macro +macro_rules! error { + ($error:expr) => { + Error { + level: Level::Error, + message: format!("{}: {}", line_info!(), $error) + } + }; + ($message:literal) => { + Error { + level: Level::Error, + message: format!("{}: {}", line_info!(), $message) + } + }; + ($fmt:expr, $($arg:tt)*) => { + Error { + level: Level::Error, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + } + }; +} +pub use error; + +#[macro_export] +/// Check that the given condition holds, otherwise return an error. +/// +/// The argument can be either: +/// - a condition, in which case a generic error is logged at the `Unspecified` level. +/// - a condition and a string literal, in which case the provided literal is logged at the `Unspecified` level. +/// - a condition and a format expression, in which case the message is formatted and logged at the `Unspecified` level. +/// - a condition and an `Error`, in which case the given error is logged unchanged. +macro_rules! 
ensure { + ($condition:expr) => { + if !$condition { + return Err(Error { + level: Level::Unspecified, + message: format!("{}: condition '{}' failed.", line_info!(), stringify!($condition)) + }); + } + }; + ($condition:expr, $message:literal) => { + if !$condition { + return Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), $message) + }); + } + }; + ($condition:expr, $fmt:expr, $($arg:tt)*) => { + if !$condition { + return Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + }); + } + }; + ($condition:expr, $error:expr) => { + if !$condition { + return Err($error); + } + }; +} +pub use ensure; + +#[macro_export] +/// Return an error. +/// +/// The argument can be either: +/// - nothing, in which case a generic message is logged at the `Unspecified` level. +/// - a string literal, in which case the provided literal is logged at the `Unspecified` level. +/// - a format expression, in which case the message is formatted and logged at the `Unspecified` level. +/// - an `Error`, in which case the given error is logged unchanged. +macro_rules! 
bail { + () => { + return Err(Error { + level: Level::Unspecified, + message: format!("{}: bailed.", line_info!()), + }); + }; + ($message:literal) => { + return Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), $message) + }); + }; + ($fmt:expr, $($arg:tt)*) => { + return Err(Error { + level: Level::Unspecified, + message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) + }); + }; + ($error:expr) => { + return Err($error); + }; +} +pub use bail; From 6ecbbfe296c3f9a91cc2adcca35e53bdd501156f Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Wed, 16 Oct 2024 14:39:34 -0400 Subject: [PATCH 12/16] clippy --- crates/utils/src/result12345.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/utils/src/result12345.rs b/crates/utils/src/result12345.rs index 42edbed2ff..1136b6e6de 100644 --- a/crates/utils/src/result12345.rs +++ b/crates/utils/src/result12345.rs @@ -91,6 +91,9 @@ pub struct Error { /// Trait for a `std::result::Result` that can be wrapped into a `Result` pub trait Wrap { /// Wrap the value into a `Result` + /// + /// # Errors + /// Propagates errors from `self` fn wrap(self) -> Result; } From 85d428cc363d51c74aaefccf816929d709d8807e Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 18 Oct 2024 14:52:46 -0400 Subject: [PATCH 13/16] convert to new `Result` type --- crates/hotshot/Cargo.toml | 1 + .../traits/election/randomized_committee.rs | 2 +- .../src/traits/election/static_committee.rs | 2 +- .../static_committee_leader_two_views.rs | 2 +- crates/hotshot/src/types/handle.rs | 3 +- crates/task-impls/src/consensus/handlers.rs | 19 ++--- crates/task-impls/src/consensus/mod.rs | 2 +- crates/task-impls/src/consensus2/handlers.rs | 2 +- crates/task-impls/src/da.rs | 74 +++++++++++++------ crates/task-impls/src/helpers.rs | 62 ++++++++-------- crates/task-impls/src/network.rs | 24 +++--- .../src/quorum_proposal/handlers.rs | 21 +++--- 
crates/task-impls/src/quorum_proposal/mod.rs | 36 ++++++--- .../src/quorum_proposal_recv/handlers.rs | 14 ++-- .../src/quorum_proposal_recv/mod.rs | 2 +- crates/task-impls/src/quorum_vote/handlers.rs | 8 +- crates/task-impls/src/quorum_vote/mod.rs | 64 +++++++++------- crates/task-impls/src/request.rs | 2 +- crates/task-impls/src/rewind.rs | 2 +- crates/task-impls/src/transactions.rs | 58 +++++++++------ crates/task-impls/src/upgrade.rs | 35 +++++---- crates/task-impls/src/vid.rs | 2 +- crates/task-impls/src/view_sync.rs | 66 ++++++++--------- crates/task-impls/src/vote_collection.rs | 18 ++--- crates/task/Cargo.toml | 1 + crates/task/src/task.rs | 2 +- crates/types/Cargo.toml | 1 + crates/types/src/consensus.rs | 44 +++++------ crates/types/src/data.rs | 35 +-------- crates/types/src/message.rs | 12 ++- crates/types/src/simple_certificate.rs | 2 +- crates/types/src/simple_vote.rs | 6 +- crates/types/src/traits/election.rs | 2 +- crates/types/src/vote.rs | 2 +- crates/utils/src/result12345.rs | 7 +- 35 files changed, 341 insertions(+), 294 deletions(-) diff --git a/crates/hotshot/Cargo.toml b/crates/hotshot/Cargo.toml index 874e94939d..c3e345f43a 100644 --- a/crates/hotshot/Cargo.toml +++ b/crates/hotshot/Cargo.toml @@ -58,6 +58,7 @@ sha2 = { workspace = true } url = { workspace = true } num_enum = "0.7" parking_lot = "0.12" +utils = { path = "../utils" } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true } diff --git a/crates/hotshot/src/traits/election/randomized_committee.rs b/crates/hotshot/src/traits/election/randomized_committee.rs index 8214c0da40..c359b2f1c3 100644 --- a/crates/hotshot/src/traits/election/randomized_committee.rs +++ b/crates/hotshot/src/traits/election/randomized_committee.rs @@ -6,7 +6,6 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; -use anyhow::Result; use ethereum_types::U256; use hotshot_types::{ traits::{ @@ -18,6 +17,7 @@ use hotshot_types::{ PeerConfig, }; use 
rand::{rngs::StdRng, Rng}; +use utils::result12345::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/crates/hotshot/src/traits/election/static_committee.rs b/crates/hotshot/src/traits/election/static_committee.rs index 31f40f002a..c799269fb8 100644 --- a/crates/hotshot/src/traits/election/static_committee.rs +++ b/crates/hotshot/src/traits/election/static_committee.rs @@ -6,7 +6,6 @@ use std::{cmp::max, collections::BTreeMap, num::NonZeroU64}; -use anyhow::Result; use ethereum_types::U256; use hotshot_types::{ traits::{ @@ -17,6 +16,7 @@ use hotshot_types::{ }, PeerConfig, }; +use utils::result12345::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs index 889d05a687..7fdb812535 100644 --- a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -6,7 +6,6 @@ use std::{collections::BTreeMap, num::NonZeroU64}; -use anyhow::Result; use ethereum_types::U256; use hotshot_types::{ traits::{ @@ -17,6 +16,7 @@ use hotshot_types::{ }, PeerConfig, }; +use utils::result12345::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index d561cd3920..caa276927f 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -8,7 +8,7 @@ use std::sync::Arc; -use anyhow::{anyhow, Ok, Result}; +use anyhow::{anyhow, Context, Ok, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use committable::{Commitment, Committable}; @@ -288,6 +288,7 @@ impl + 'static, V: Versions> .memberships .quorum_membership .leader(view_number, epoch_number) + .context("Failed to lookup leader") } // Below is for testing only: diff --git a/crates/task-impls/src/consensus/handlers.rs 
b/crates/task-impls/src/consensus/handlers.rs index db2165dc61..f276eb0b8e 100644 --- a/crates/task-impls/src/consensus/handlers.rs +++ b/crates/task-impls/src/consensus/handlers.rs @@ -6,7 +6,6 @@ use std::{sync::Arc, time::Duration}; -use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use chrono::Utc; @@ -19,7 +18,8 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, error, instrument}; +use tracing::instrument; +use utils::result12345::*; use super::ConsensusTaskState; use crate::{ @@ -46,7 +46,7 @@ pub(crate) async fn handle_quorum_vote_recv< .quorum_membership .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, - format!( + info!( "We are not the leader for view {:?}", vote.view_number() + 1 ) @@ -85,7 +85,7 @@ pub(crate) async fn handle_timeout_vote_recv< .timeout_membership .leader(vote.view_number() + 1, task_state.cur_epoch)? == task_state.public_key, - format!( + info!( "We are not the leader for view {:?}", vote.view_number() + 1 ) @@ -124,7 +124,7 @@ pub(crate) async fn handle_view_change< ); let old_view_number = task_state.cur_view; - debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); + tracing::debug!("Updating view from {old_view_number:?} to {new_view_number:?}"); // Move this node to the next view task_state.cur_view = new_view_number; @@ -138,7 +138,7 @@ pub(crate) async fn handle_view_change< .clone(); if let Some(cert) = decided_upgrade_certificate_read { if new_view_number == cert.data.new_version_first_view { - error!( + tracing::error!( "Version upgraded based on a decided upgrade cert: {:?}", cert ); @@ -228,7 +228,7 @@ pub(crate) async fn handle_timeout task_state .timeout_membership .has_stake(&task_state.public_key, task_state.cur_epoch), - format!("We were not chosen for the consensus committee for view {view_number:?}") + debug!("We were not chosen for the consensus committee for view 
{view_number:?}") ); let vote = TimeoutVote::create_signed_vote( @@ -239,7 +239,8 @@ pub(crate) async fn handle_timeout &task_state.upgrade_lock, ) .await - .context("Failed to sign TimeoutData")?; + .wrap() + .context(error!("Failed to sign TimeoutData"))?; broadcast_event(Arc::new(HotShotEvent::TimeoutVoteSend(vote)), sender).await; broadcast_event( @@ -251,7 +252,7 @@ pub(crate) async fn handle_timeout ) .await; - debug!( + tracing::debug!( "We did not receive evidence for view {} in time, sending timeout vote for that view!", *view_number ); diff --git a/crates/task-impls/src/consensus/mod.rs b/crates/task-impls/src/consensus/mod.rs index e8a281173a..b01ea85caf 100644 --- a/crates/task-impls/src/consensus/mod.rs +++ b/crates/task-impls/src/consensus/mod.rs @@ -6,7 +6,6 @@ use std::sync::Arc; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -27,6 +26,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; +use utils::result12345::Result; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, diff --git a/crates/task-impls/src/consensus2/handlers.rs b/crates/task-impls/src/consensus2/handlers.rs index d63510d219..1885d0710a 100644 --- a/crates/task-impls/src/consensus2/handlers.rs +++ b/crates/task-impls/src/consensus2/handlers.rs @@ -6,7 +6,7 @@ use std::{sync::Arc, time::Duration}; -use anyhow::{ensure, Context, Result}; +use utils::result12345::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use chrono::Utc; diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 9f00ee0b0d..4aee6f5409 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -6,7 +6,6 @@ use std::{marker::PhantomData, sync::Arc}; -use anyhow::{ensure, Result}; use 
async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -35,7 +34,8 @@ use hotshot_types::{ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; -use tracing::{debug, info, instrument}; +use tracing::instrument; +use utils::result12345::*; use crate::{ events::HotShotEvent, @@ -98,7 +98,7 @@ impl, V: Versions> DaTaskState { let sender = sender.clone(); - debug!( + tracing::debug!( "DA proposal received for view: {:?}", proposal.data.view_number() ); @@ -116,20 +116,29 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState { let curr_view = self.consensus.read().await.cur_view(); - ensure!(curr_view <= proposal.data.view_number() + 1, - format!("Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", curr_view, proposal.data.view_number())); + ensure!( + curr_view <= proposal.data.view_number() + 1, + info!( + "Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", + curr_view, + proposal.data.view_number() + ) + ); // Proposal is fresh and valid, notify the application layer broadcast_event( @@ -164,13 +179,19 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState { - debug!("DA vote recv, Main Task {:?}", vote.view_number()); + tracing::debug!("DA vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader and the vote is from the sender. let view = vote.view_number(); ensure!( self.da_membership.leader(view, self.cur_epoch)? == self.public_key, - format!("We are not the DA committee leader for view {} are we leader for next view? {}", *view, self.da_membership.leader(view + 1, self.cur_epoch)? == self.public_key) + debug!( + "We are not the DA committee leader for view {} are we leader for next view? {}", + *view, + self.da_membership.leader(view + 1, self.cur_epoch)? 
== self.public_key + ) ); handle_vote( @@ -277,11 +302,11 @@ impl, V: Versions> DaTaskState 1 { - info!("View changed by more than 1 going to view {:?}", view); + tracing::info!("View changed by more than 1 going to view {:?}", view); } self.cur_view = view; @@ -292,7 +317,7 @@ impl, V: Versions> DaTaskState { let PackedBundle:: { @@ -308,7 +333,8 @@ impl, V: Versions> DaTaskState = DaProposal { encoded_transactions: Arc::clone(encoded_transactions), diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index 387a363a8c..7c1109d936 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -10,7 +10,6 @@ use std::{ sync::Arc, }; -use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, SendError, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; use async_lock::RwLock; @@ -39,7 +38,8 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, info, instrument, warn}; +use tracing::instrument; +use utils::result12345::*; use crate::{ events::HotShotEvent, quorum_proposal_recv::QuorumProposalRecvTaskState, @@ -70,7 +70,9 @@ pub(crate) async fn fetch_proposal( let signature = TYPES::SignatureKey::sign( &sender_private_key, signed_proposal_request.commit().as_ref(), - )?; + ) + .wrap() + .context(error!("Failed to sign proposal. This should never happen."))?; // First, broadcast that we need a proposal to the current leader broadcast_event( @@ -298,9 +300,11 @@ pub async fn decide_from_proposal( if let Some(cert) = leaf.upgrade_certificate() { if leaf.upgrade_certificate() != *existing_upgrade_cert_reader { if cert.data.decide_by < view_number { - warn!("Failed to decide an upgrade certificate in time. Ignoring."); + tracing::warn!( + "Failed to decide an upgrade certificate in time. Ignoring." 
+ ); } else { - info!("Reached decide on upgrade certificate: {:?}", cert); + tracing::info!("Reached decide on upgrade certificate: {:?}", cert); res.decided_upgrade_cert = Some(cert.clone()); } } @@ -346,13 +350,13 @@ pub async fn decide_from_proposal( true }, ) { - debug!("Leaf ascension failed; error={e}"); + tracing::debug!("Leaf ascension failed; error={e}"); } res } -/// Gets the parent leaf and state from the parent of a proposal, returning an [`anyhow::Error`] if not. +/// Gets the parent leaf and state from the parent of a proposal, returning an [`utils::result12345::Error`] if not. #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( @@ -368,7 +372,10 @@ pub(crate) async fn parent_leaf_and_state( let current_epoch = consensus.read().await.cur_epoch(); ensure!( quorum_membership.leader(next_proposal_view_number, current_epoch)? == public_key, - "Somehow we formed a QC but are not the leader for the next view {next_proposal_view_number:?}", + info!( + "Somehow we formed a QC but are not the leader for the next view {:?}", + next_proposal_view_number + ) ); let parent_view_number = consensus.read().await.high_qc().view_number(); if !consensus @@ -388,22 +395,21 @@ pub(crate) async fn parent_leaf_and_state( upgrade_lock, ) .await - .context("Failed to fetch proposal")?; + .context(info!("Failed to fetch proposal"))?; } let consensus_reader = consensus.read().await; let parent_view_number = consensus_reader.high_qc().view_number(); let parent_view = consensus_reader.validated_state_map().get(&parent_view_number).context( - format!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) + debug!("Couldn't find parent view in state map, waiting for replica to see proposal; parent_view_number: {}", *parent_view_number) )?; - // Leaf hash in view inner does not match high qc hash - Why? 
let (leaf_commitment, state) = parent_view.leaf_and_state().context( - format!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") + info!("Parent of high QC points to a view without a proposal; parent_view_number: {parent_view_number:?}, parent_view {parent_view:?}") )?; if leaf_commitment != consensus_reader.high_qc().data().leaf_commit { // NOTE: This happens on the genesis block - debug!( + tracing::debug!( "They don't equal: {:?} {:?}", leaf_commitment, consensus_reader.high_qc().data().leaf_commit @@ -413,7 +419,7 @@ pub(crate) async fn parent_leaf_and_state( let leaf = consensus_reader .saved_leaves() .get(&leaf_commitment) - .context("Failed to find high QC of parent")?; + .context(info!("Failed to find high QC of parent"))?; let reached_decided = leaf.view_number() == consensus_reader.last_decided_view(); let parent_leaf = leaf.clone(); @@ -422,7 +428,7 @@ pub(crate) async fn parent_leaf_and_state( // Walk back until we find a decide if !reached_decided { - debug!("We have not reached decide"); + tracing::debug!("We have not reached decide"); while let Some(next_parent_leaf) = consensus_reader.saved_leaves().get(&next_parent_hash) { if next_parent_leaf.view_number() <= consensus_reader.last_decided_view() { break; @@ -548,7 +554,7 @@ pub async fn validate_proposal_safety_and_liveness< .await; } - format!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) + error!("Failed safety and liveness check \n High QC is {:?} Proposal QC is {:?} Locked view is {:?}", read_consensus.high_qc(), proposal.data.clone(), read_consensus.locked_view()) }); } @@ -559,7 +565,9 @@ pub async fn validate_proposal_safety_and_liveness< .write() .await .append_proposal(&proposal) - .await?; + .await + .wrap() + .context(error!("Failed to append proposal in storage!"))?; // We accept 
the proposal, notify the application layer broadcast_event( @@ -620,7 +628,7 @@ pub async fn validate_proposal_view_and_certs< // Verify a timeout certificate OR a view sync certificate exists and is valid. if proposal.data.justify_qc.view_number() != view - 1 { let received_proposal_cert = - proposal.data.proposal_certificate.clone().context(format!( + proposal.data.proposal_certificate.clone().context(debug!( "Quorum proposal for view {} needed a timeout or view sync certificate, but did not have one", *view ))?; @@ -684,7 +692,7 @@ pub async fn validate_proposal_view_and_certs< /// `timeout_task` which are updated during the operation of the function. /// /// # Errors -/// Returns an [`anyhow::Error`] when the new view is not greater than the current view. +/// Returns an [`utils::result12345::Error`] when the new view is not greater than the current view. pub(crate) async fn update_view, V: Versions>( new_view: TYPES::View, event_stream: &Sender>>, @@ -701,10 +709,10 @@ pub(crate) async fn update_view, V == task_state.public_key; let old_view = task_state.cur_view; - debug!("Updating view from {} to {}", *old_view, *new_view); + tracing::debug!("Updating view from {} to {}", *old_view, *new_view); if *old_view / 100 != *new_view / 100 { - info!("Progress: entered view {:>6}", *new_view); + tracing::info!("Progress: entered view {:>6}", *new_view); } task_state.cur_view = new_view; @@ -809,15 +817,3 @@ pub async fn broadcast_event(event: E, sender: &Send } } } - -/// Utilities to print anyhow logs. 
-pub trait AnyhowTracing { - /// Print logs as debug - fn err_as_debug(self); -} - -impl AnyhowTracing for anyhow::Result { - fn err_as_debug(self) { - let _ = self.inspect_err(|e| tracing::debug!("{}", format!("{:?}", e))); - } -} diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 8668a4cf48..5bde39b80e 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -6,7 +6,6 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::async_spawn; use async_lock::RwLock; @@ -31,7 +30,8 @@ use hotshot_types::{ }, vote::{HasViewNumber, Vote}, }; -use tracing::{error, instrument, warn}; +use tracing::instrument; +use utils::result12345::*; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -101,7 +101,7 @@ impl NetworkMessageTaskState { HotShotEvent::UpgradeProposalRecv(message, sender) } GeneralConsensusMessage::UpgradeVote(message) => { - error!("Received upgrade vote!"); + tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) } }, @@ -264,7 +264,7 @@ impl< let serialized_message = match self.upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, Err(e) => { - error!("Failed to serialize message: {}", e); + tracing::error!("Failed to serialize message: {}", e); continue; } }; @@ -289,7 +289,7 @@ impl< } match net.vid_broadcast_message(messages).await { Ok(()) => {} - Err(e) => warn!("Failed to send message from network task: {:?}", e), + Err(e) => tracing::warn!("Failed to send message from network task: {:?}", e), } }); @@ -302,16 +302,16 @@ impl< storage: Arc>, state: Arc>>, view: ::View, - ) -> Result<(), ()> { + ) -> std::result::Result<(), ()> { if let Some(action) = maybe_action { if !state.write().await.update_action(action, view) { - warn!("Already actioned {:?} in view {:?}", action, view); + tracing::warn!("Already actioned {:?} in view {:?}", action, view); 
return Err(()); } match storage.write().await.record_action(view, action).await { Ok(()) => Ok(()), Err(e) => { - warn!("Not Sending {:?} because of storage error: {:?}", action, e); + tracing::warn!("Not Sending {:?} because of storage error: {:?}", action, e); Err(()) } } @@ -563,7 +563,7 @@ impl< TransmitType::Broadcast, )), HotShotEvent::UpgradeVoteSend(vote) => { - error!("Sending upgrade vote!"); + tracing::error!("Sending upgrade vote!"); let view_number = vote.view_number(); let leader = match self.quorum_membership.leader(view_number, self.epoch) { Ok(l) => l, @@ -664,7 +664,7 @@ impl< let serialized_message = match upgrade_lock.serialize(&message).await { Ok(serialized) => serialized, Err(e) => { - error!("Failed to serialize message: {}", e); + tracing::error!("Failed to serialize message: {}", e); return; } }; @@ -686,7 +686,7 @@ impl< .direct_message(serialized_message.clone(), recipient) .await { - warn!("Failed to send message: {e:?}"); + tracing::warn!("Failed to send message: {e:?}"); } // Otherwise, send the next message. 
@@ -697,7 +697,7 @@ impl< match transmit_result { Ok(()) => {} - Err(e) => warn!("Failed to send message task: {:?}", e), + Err(e) => tracing::warn!("Failed to send message task: {:?}", e), } }); } diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs index bcdc568e18..16030837d6 100644 --- a/crates/task-impls/src/quorum_proposal/handlers.rs +++ b/crates/task-impls/src/quorum_proposal/handlers.rs @@ -9,7 +9,6 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; -use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -26,7 +25,8 @@ use hotshot_types::{ block_contents::BlockHeader, node_implementation::NodeType, signature_key::SignatureKey, }, }; -use tracing::{debug, error, instrument}; +use tracing::instrument; +use utils::result12345::*; use vbs::version::StaticVersionType; use crate::{ @@ -182,7 +182,8 @@ impl ProposalDependencyHandle { version, ) .await - .context("Failed to construct legacy block header")? + .wrap() + .context(warn!("Failed to construct legacy block header"))? } else { TYPES::BlockHeader::new_marketplace( state.as_ref(), @@ -197,7 +198,8 @@ impl ProposalDependencyHandle { version, ) .await - .context("Failed to construct marketplace block header")? + .wrap() + .context(warn!("Failed to construct marketplace block header"))? 
}; let proposal = QuorumProposal { @@ -218,14 +220,15 @@ impl ProposalDependencyHandle { &self.private_key, proposed_leaf.commit(&self.upgrade_lock).await.as_ref(), ) - .context("Failed to compute proposed_leaf.commit()")?; + .wrap() + .context(error!("Failed to compute proposed_leaf.commit()"))?; let message = Proposal { data: proposal, signature, _pd: PhantomData, }; - debug!( + tracing::debug!( "Sending proposal for view {:?}", proposed_leaf.view_number(), ); @@ -335,14 +338,14 @@ impl HandleDepOutput for ProposalDependencyHandle< } if commit_and_metadata.is_none() { - error!( + tracing::error!( "Somehow completed the proposal dependency task without a commitment and metadata" ); return; } if vid_share.is_none() { - error!("Somehow completed the proposal dependency task without a VID share"); + tracing::error!("Somehow completed the proposal dependency task without a VID share"); return; } @@ -362,7 +365,7 @@ impl HandleDepOutput for ProposalDependencyHandle< ) .await { - error!("Failed to publish proposal; error = {e:#}"); + tracing::error!("Failed to publish proposal; error = {e:#}"); } } } diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index 65a3685e71..2e2fa7a243 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -6,7 +6,6 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::{ensure, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -33,7 +32,8 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, instrument, warn}; +use tracing::instrument; +use utils::result12345::*; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; use crate::{ @@ -171,7 +171,9 @@ impl, V: Versions> }; let valid = event_view == view_number; if valid { - debug!("Dependency {dependency_type:?} is complete for view 
{event_view:?}!",); + tracing::debug!( + "Dependency {dependency_type:?} is complete for view {event_view:?}!", + ); } valid }), @@ -301,7 +303,9 @@ impl, V: Versions> "We have already proposed for this view" ); - debug!("Attempting to make dependency task for view {view_number:?} and event {event:?}"); + tracing::debug!( + "Attempting to make dependency task for view {view_number:?} and event {event:?}" + ); ensure!( !self.proposal_dependencies.contains_key(&view_number), @@ -339,9 +343,10 @@ impl, V: Versions> #[instrument(skip_all, fields(id = self.id, latest_proposed_view = *self.latest_proposed_view), name = "Update latest proposed view", level = "error")] async fn update_latest_proposed_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_proposed_view < *new_view { - debug!( + tracing::debug!( "Updating latest proposed view from {} to {}", - *self.latest_proposed_view, *new_view + *self.latest_proposed_view, + *new_view ); // Cancel the old dependency tasks. @@ -369,14 +374,14 @@ impl, V: Versions> ) -> Result<()> { match event.as_ref() { HotShotEvent::UpgradeCertificateFormed(cert) => { - debug!( + tracing::debug!( "Upgrade certificate received for view {}!", *cert.view_number ); // Update our current upgrade_cert as long as we still have a chance of reaching a decide on it in time. 
if cert.data.decide_by >= self.latest_proposed_view + 3 { - debug!("Updating current formed_upgrade_certificate"); + tracing::debug!("Updating current formed_upgrade_certificate"); self.formed_upgrade_certificate = Some(cert.clone()); } @@ -438,7 +443,7 @@ impl, V: Versions> &self.upgrade_lock ) .await, - format!( + error!( "View Sync Finalize certificate {:?} was invalid", certificate.data() ) @@ -493,14 +498,23 @@ impl, V: Versions> } HotShotEvent::UpdateHighQc(qc) => { // First update the high QC internally - self.consensus.write().await.update_high_qc(qc.clone())?; + self.consensus + .write() + .await + .update_high_qc(qc.clone()) + .wrap() + .context(error!( + "Failed to update high QC in internal consensus state!" + ))?; // Then update the high QC in storage self.storage .write() .await .update_high_qc(qc.clone()) - .await?; + .await + .wrap() + .context(error!("Failed to update high QC in storage!"))?; broadcast_event( HotShotEvent::HighQcUpdated(qc.clone()).into(), diff --git a/crates/task-impls/src/quorum_proposal_recv/handlers.rs b/crates/task-impls/src/quorum_proposal_recv/handlers.rs index 2193bf3dfd..cb7f7619f3 100644 --- a/crates/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/crates/task-impls/src/quorum_proposal_recv/handlers.rs @@ -8,7 +8,6 @@ use std::sync::Arc; -use anyhow::{bail, Context, Result}; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLockUpgradableReadGuard; use committable::Committable; @@ -26,7 +25,8 @@ use hotshot_types::{ utils::{View, ViewInner}, vote::{Certificate, HasViewNumber}, }; -use tracing::{debug, error, instrument, warn}; +use tracing::instrument; +use utils::result12345::*; use super::QuorumProposalRecvTaskState; use crate::{ @@ -78,7 +78,7 @@ async fn validate_proposal_liveness(view_number, event_sender, task_state).await { - debug!("Liveness Branch - Failed to update view; error = {e:#}"); + tracing::debug!("Liveness Branch - Failed to update view; error = {e:#}"); } if !liveness_check { 
@@ -128,7 +128,7 @@ pub(crate) async fn handle_quorum_proposal_recv< validate_proposal_view_and_certs(proposal, task_state) .await - .context("Failed to validate proposal view or attached certs")?; + .context(warn!("Failed to validate proposal view or attached certs"))?; let view_number = proposal.data.view_number(); let justify_qc = proposal.data.justify_qc.clone(); @@ -220,7 +220,7 @@ pub(crate) async fn handle_quorum_proposal_recv< .await; let Some((parent_leaf, _parent_state)) = parent else { - warn!( + tracing::warn!( "Proposal's parent missing from storage with commitment: {:?}", justify_qc.data.leaf_commit ); @@ -239,7 +239,7 @@ pub(crate) async fn handle_quorum_proposal_recv< // NOTE: We could update our view with a valid TC but invalid QC, but that is not what we do here if let Err(e) = update_view::(view_number, event_sender, task_state).await { - debug!("Full Branch - Failed to update view; error = {e:#}"); + tracing::debug!("Full Branch - Failed to update view; error = {e:#}"); } Ok(()) diff --git a/crates/task-impls/src/quorum_proposal_recv/mod.rs b/crates/task-impls/src/quorum_proposal_recv/mod.rs index 281e472fd0..22ef5bf2a3 100644 --- a/crates/task-impls/src/quorum_proposal_recv/mod.rs +++ b/crates/task-impls/src/quorum_proposal_recv/mod.rs @@ -8,7 +8,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use anyhow::{bail, Result}; use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -31,6 +30,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; +use utils::result12345::{bail, Result}; use vbs::version::Version; use self::handlers::handle_quorum_proposal_recv; diff --git a/crates/task-impls/src/quorum_vote/handlers.rs b/crates/task-impls/src/quorum_vote/handlers.rs index d414573e19..8873953a7b 100644 --- a/crates/task-impls/src/quorum_vote/handlers.rs +++ 
b/crates/task-impls/src/quorum_vote/handlers.rs @@ -6,7 +6,6 @@ use std::sync::Arc; -use anyhow::Result; use async_broadcast::Sender; use chrono::Utc; use hotshot_types::{ @@ -19,7 +18,8 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, instrument}; +use tracing::instrument; +use utils::result12345::*; use super::QuorumVoteTaskState; use crate::{ @@ -115,7 +115,7 @@ pub(crate) async fn handle_quorum_proposal_validated< .number_of_views_per_decide_event .add_point(cur_number_of_views_per_decide_event as f64); - debug!( + tracing::debug!( "Sending Decide for view {:?}", consensus_writer.last_decided_view() ); @@ -139,7 +139,7 @@ pub(crate) async fn handle_quorum_proposal_validated< .await; broadcast_event(Arc::new(HotShotEvent::LeafDecided(leaves_decided)), sender).await; - debug!("Successfully sent decide event"); + tracing::debug!("Successfully sent decide event"); } Ok(()) diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index eaed877eff..0b8468bfba 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -6,7 +6,6 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] @@ -38,7 +37,8 @@ use hotshot_types::{ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, instrument, trace, warn}; +use tracing::instrument; +use utils::result12345::*; use crate::{ events::HotShotEvent, @@ -123,7 +123,7 @@ impl + 'static, V: Versions> .await .ok(), }; - let parent = maybe_parent.context(format!( + let parent = maybe_parent.context(info!( "Proposal's parent missing from storage with commitment: {:?}, proposal view {:?}", justify_qc.data().leaf_commit, proposed_leaf.view_number(), @@ -147,7 +147,8 @@ impl + 'static, V: Versions> version, ) .await - 
.context("Block header doesn't extend the proposal!")?; + .wrap() + .context(warn!("Block header doesn't extend the proposal!"))?; let state = Arc::new(validated_state); let delta = Arc::new(state_delta); @@ -189,7 +190,9 @@ impl + 'static, V: Versions> .write() .await .update_undecided_state(new_leaves, new_state) - .await?; + .await + .wrap() + .context(error!("Failed to update undecided state"))?; Ok(()) } @@ -204,10 +207,10 @@ impl + 'static, V: Versions> ensure!( self.quorum_membership .has_stake(&self.public_key, self.epoch_number), - format!( + info!( "We were not chosen for quorum committee on {:?}", self.view_number - ), + ) ); // Create and send the vote. @@ -221,8 +224,9 @@ impl + 'static, V: Versions> &self.upgrade_lock, ) .await - .context("Failed to sign vote")?; - debug!( + .wrap() + .context(error!("Failed to sign vote. This should never happen."))?; + tracing::debug!( "sending vote to next quorum leader {:?}", vote.view_number() + 1 ); @@ -232,7 +236,8 @@ impl + 'static, V: Versions> .await .append_vid(&vid_share) .await - .context("Failed to store VID share")?; + .wrap() + .context(error!("Failed to store VID share"))?; broadcast_event(Arc::new(HotShotEvent::QuorumVoteSend(vote)), &self.sender).await; Ok(()) @@ -283,7 +288,7 @@ impl + 'static, V: Versions> Handl let proposal_payload_comm = proposal.block_header.payload_commitment(); if let Some(comm) = payload_commitment { if proposal_payload_comm != comm { - error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); + tracing::error!("Quorum proposal has inconsistent payload commitment with DAC or VID."); return; } } else { @@ -292,7 +297,7 @@ impl + 'static, V: Versions> Handl let parent_commitment = parent_leaf.commit(&self.upgrade_lock).await; let proposed_leaf = Leaf::from_quorum_proposal(proposal); if proposed_leaf.parent_commitment() != parent_commitment { - warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. 
Aborting vote."); + tracing::warn!("Proposed leaf parent commitment does not match parent leaf payload commitment. Aborting vote."); return; } leaf = Some(proposed_leaf); @@ -301,7 +306,7 @@ impl + 'static, V: Versions> Handl let cert_payload_comm = cert.data().payload_commit; if let Some(comm) = payload_commitment { if cert_payload_comm != comm { - error!("DAC has inconsistent payload commitment with quorum proposal or VID."); + tracing::error!("DAC has inconsistent payload commitment with quorum proposal or VID."); return; } } else { @@ -313,7 +318,7 @@ impl + 'static, V: Versions> Handl vid_share = Some(share.clone()); if let Some(comm) = payload_commitment { if vid_payload_commitment != comm { - error!("VID has inconsistent payload commitment with quorum proposal or DAC."); + tracing::error!("VID has inconsistent payload commitment with quorum proposal or DAC."); return; } } else { @@ -332,7 +337,7 @@ impl + 'static, V: Versions> Handl .await; let Some(vid_share) = vid_share else { - error!( + tracing::error!( "We don't have the VID share for this view {:?}, but we should, because the vote dependencies have completed.", self.view_number ); @@ -340,7 +345,7 @@ impl + 'static, V: Versions> Handl }; let Some(leaf) = leaf else { - error!( + tracing::error!( "We don't have the leaf for this view {:?}, but we should, because the vote dependencies have completed.", self.view_number ); @@ -349,12 +354,12 @@ impl + 'static, V: Versions> Handl // Update internal state if let Err(e) = self.update_shared_state(&leaf, &vid_share).await { - error!("Failed to update shared consensus state; error = {e:#}"); + tracing::error!("Failed to update shared consensus state; error = {e:#}"); return; } if let Err(e) = self.submit_vote(leaf, vid_share).await { - debug!("Failed to vote; error = {e:#}"); + tracing::debug!("Failed to vote; error = {e:#}"); } } } @@ -440,7 +445,7 @@ impl, V: Versions> QuorumVoteTaskS } }; if event_view == view_number { - trace!("Vote dependency {:?} 
completed", dependency_type); + tracing::trace!("Vote dependency {:?} completed", dependency_type); return true; } false @@ -512,16 +517,17 @@ impl, V: Versions> QuorumVoteTaskS #[instrument(skip_all, fields(id = self.id, latest_voted_view = *self.latest_voted_view), name = "Quorum vote update latest voted view", level = "error")] async fn update_latest_voted_view(&mut self, new_view: TYPES::View) -> bool { if *self.latest_voted_view < *new_view { - debug!( + tracing::debug!( "Updating next vote view from {} to {} in the quorum vote task", - *self.latest_voted_view, *new_view + *self.latest_voted_view, + *new_view ); // Cancel the old dependency tasks. for view in *self.latest_voted_view..(*new_view) { if let Some(dependency) = self.vote_dependencies.remove(&TYPES::View::new(view)) { cancel_task(dependency).await; - debug!("Vote dependency removed for view {:?}", view); + tracing::debug!("Vote dependency removed for view {:?}", view); } } @@ -544,13 +550,15 @@ impl, V: Versions> QuorumVoteTaskS match event.as_ref() { HotShotEvent::QuorumProposalValidated(proposal, _leaf) => { - trace!("Received Proposal for view {}", *proposal.view_number()); + tracing::trace!("Received Proposal for view {}", *proposal.view_number()); // Handle the event before creating the dependency task. 
if let Err(e) = handle_quorum_proposal_validated(proposal, &event_sender, self).await { - debug!("Failed to handle QuorumProposalValidated event; error = {e:#}"); + tracing::debug!( + "Failed to handle QuorumProposalValidated event; error = {e:#}" + ); } self.create_dependency_task_if_new( @@ -564,7 +572,7 @@ impl, V: Versions> QuorumVoteTaskS HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; - trace!("Received DAC for view {}", *view); + tracing::trace!("Received DAC for view {}", *view); // Do nothing if the DAC is old ensure!( view > self.latest_voted_view, @@ -605,7 +613,7 @@ impl, V: Versions> QuorumVoteTaskS HotShotEvent::VidShareRecv(sender, disperse) => { let view = disperse.data.view_number(); // Do nothing if the VID share is old - trace!("Received VID share for view {}", *view); + tracing::trace!("Received VID share for view {}", *view); ensure!( view > self.latest_voted_view, "Received VID share for an older view." @@ -667,9 +675,9 @@ impl, V: Versions> QuorumVoteTaskS ); } HotShotEvent::QuorumVoteDependenciesValidated(view_number) => { - debug!("All vote dependencies verified for view {:?}", view_number); + tracing::debug!("All vote dependencies verified for view {:?}", view_number); if !self.update_latest_voted_view(*view_number).await { - debug!("view not updated"); + tracing::debug!("view not updated"); } } _ => {} diff --git a/crates/task-impls/src/request.rs b/crates/task-impls/src/request.rs index 8cd336e7b1..4f4eca3875 100644 --- a/crates/task-impls/src/request.rs +++ b/crates/task-impls/src/request.rs @@ -13,7 +13,6 @@ use std::{ time::Duration, }; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn, async_timeout}; #[cfg(async_executor_impl = "async-std")] @@ -38,6 +37,7 @@ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; +use utils::result12345::Result; use 
crate::{events::HotShotEvent, helpers::broadcast_event}; diff --git a/crates/task-impls/src/rewind.rs b/crates/task-impls/src/rewind.rs index 669b410b52..bddce9e2e5 100644 --- a/crates/task-impls/src/rewind.rs +++ b/crates/task-impls/src/rewind.rs @@ -6,11 +6,11 @@ use std::{fs::OpenOptions, io::Write, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::traits::node_implementation::NodeType; +use utils::result12345::Result; use crate::events::HotShotEvent; diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 02aa9136c5..027724ef93 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -9,7 +9,6 @@ use std::{ time::{Duration, Instant}, }; -use anyhow::{bail, ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_timeout}; use async_trait::async_trait; @@ -32,8 +31,9 @@ use hotshot_types::{ utils::ViewInner, vid::{VidCommitment, VidPrecomputeData}, }; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; use url::Url; +use utils::result12345::*; use vbs::version::{StaticVersionType, Version}; use vec1::Vec1; @@ -149,7 +149,7 @@ impl, V: Versions> TransactionTask let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, Err(err) => { - error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; } }; @@ -191,7 +191,7 @@ impl, V: Versions> TransactionTask .await; } else { // If we couldn't get a block, send an empty block - info!( + tracing::info!( "Failed to get a block for view {:?}, proposing empty block", block_view ); @@ -209,7 +209,7 @@ impl, V: Versions> TransactionTask 
self.membership.total_nodes(self.cur_epoch), version, ) else { - error!("Failed to get null fee"); + tracing::error!("Failed to get null fee"); return None; }; @@ -254,13 +254,14 @@ impl, V: Versions> TransactionTask .await .as_ref() .is_some_and(|cert| cert.upgrading_in(block_view)), - "Not requesting block because we are upgrading", + info!("Not requesting block because we are upgrading") ); let (parent_view, parent_hash) = self .last_vid_commitment_retry(block_view, task_start_time) .await - .context("Failed to find parent hash in time")?; + .wrap() + .context(warn!("Failed to find parent hash in time"))?; let start = Instant::now(); @@ -270,10 +271,11 @@ impl, V: Versions> TransactionTask .fetch_auction_result(block_view), ) .await - .context("Timeout while getting auction result")?; + .wrap() + .context(warn!("Timeout while getting auction result"))?; let auction_result = maybe_auction_result - .map_err(|e| warn!("Failed to get auction results: {e:#}")) + .map_err(|e| tracing::warn!("Failed to get auction results: {e:#}")) .unwrap_or_default(); // We continue here, as we still have fallback builder URL let mut futures = Vec::new(); @@ -319,13 +321,16 @@ impl, V: Versions> TransactionTask let validated_state = self.consensus.read().await.decided_state(); let sequencing_fees = Vec1::try_from_vec(sequencing_fees) - .context("Failed to receive a bundle from any builder.")?; + .wrap() + .context(warn!("Failed to receive a bundle from any builder."))?; let (block_payload, metadata) = TYPES::BlockPayload::from_transactions( transactions, &validated_state, &Arc::clone(&self.instance_state), ) - .await?; + .await + .wrap() + .context(error!("Failed to construct block payload"))?; Ok(PackedBundle::new( block_payload.encode(), @@ -348,7 +353,7 @@ impl, V: Versions> TransactionTask self.membership.total_nodes(self.cur_epoch), version, ) else { - error!("Failed to calculate null block fee."); + tracing::error!("Failed to calculate null block fee."); return None; }; @@ 
-379,7 +384,7 @@ impl, V: Versions> TransactionTask let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, Err(err) => { - error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); + tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; } }; @@ -442,12 +447,17 @@ impl, V: Versions> TransactionTask HotShotEvent::ViewChange(view) => { let view = *view; - debug!("view change in transactions to view {:?}", view); - ensure!(*view > *self.cur_view || *self.cur_view == 0, format!("Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view )); + tracing::debug!("view change in transactions to view {:?}", view); + ensure!( + *view > *self.cur_view || *self.cur_view == 0, + debug!( + "Received a view change to an older view: tried to change view to {:?} though we are at view {:?}", view, self.cur_view + ) + ); let mut make_block = false; if *view - *self.cur_view > 1 { - info!("View changed by more than 1 going to view {:?}", view); + tracing::info!("View changed by more than 1 going to view {:?}", view); make_block = self.membership.leader(view, self.cur_epoch)? == self.public_key; } self.cur_view = view; @@ -458,7 +468,7 @@ impl, V: Versions> TransactionTask ensure!( make_block || next_leader, - format!( + debug!( "Not making the block because we are not leader for view {:?}", self.cur_view ) @@ -512,7 +522,9 @@ impl, V: Versions> TransactionTask let view_data = consensus .validated_state_map() .get(&target_view) - .context("Missing record for view {?target_view} in validated state")?; + .context(info!( + "Missing record for view {?target_view} in validated state" + ))?; match view_data.view_inner { ViewInner::Da { payload_commitment } => { @@ -523,13 +535,13 @@ impl, V: Versions> TransactionTask .. 
} => { let leaf = consensus.saved_leaves().get(&leaf_commitment).context - ("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves")?; + (info!("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves"))?; return Ok((target_view, leaf.payload_commitment())); } ViewInner::Failed => { // For failed views, backtrack target_view = - TYPES::View::new(target_view.checked_sub(1).context("Reached genesis")?); + TYPES::View::new(target_view.checked_sub(1).context(warn!("Reached genesis. Something is wrong -- have we not decided any blocks since genesis?"))?); continue; } } @@ -558,7 +570,7 @@ impl, V: Versions> TransactionTask ) { Ok(sig) => sig, Err(err) => { - error!(%err, "Failed to sign block hash"); + tracing::error!(%err, "Failed to sign block hash"); return None; } }; @@ -586,7 +598,7 @@ impl, V: Versions> TransactionTask // We timed out while getting available blocks Err(err) => { - info!(%err, "Timeout while getting available blocks"); + tracing::info!(%err, "Timeout while getting available blocks"); return None; } } @@ -672,7 +684,7 @@ impl, V: Versions> TransactionTask parent_comm: VidCommitment, view_number: TYPES::View, parent_comm_sig: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, - ) -> anyhow::Result> { + ) -> Result> { let mut available_blocks = self .get_available_blocks(parent_comm, view_number, parent_comm_sig) .await; diff --git a/crates/task-impls/src/upgrade.rs b/crates/task-impls/src/upgrade.rs index 570a5d4212..410274b1e5 100644 --- a/crates/task-impls/src/upgrade.rs +++ b/crates/task-impls/src/upgrade.rs @@ -6,7 +6,6 @@ use std::{marker::PhantomData, sync::Arc, time::SystemTime}; -use anyhow::{ensure, Context, Result}; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use committable::Committable; @@ -28,7 +27,8 @@ use hotshot_types::{ }, vote::HasViewNumber, }; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; +use 
utils::result12345::*; use vbs::version::StaticVersionType; use crate::{ @@ -112,19 +112,22 @@ impl, V: Versions> UpgradeTaskStat ) -> Result<()> { match event.as_ref() { HotShotEvent::UpgradeProposalRecv(proposal, sender) => { - info!("Received upgrade proposal: {:?}", proposal); + tracing::info!("Received upgrade proposal: {:?}", proposal); let view = *proposal.data.view_number(); // Skip voting if the version has already been upgraded. ensure!( !self.upgraded().await, - format!("Already upgraded to {:?}; not voting.", V::Upgrade::VERSION) + info!("Already upgraded to {:?}; not voting.", V::Upgrade::VERSION) ); let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) - .context("Failed to calculate duration")? + .wrap() + .context(error!( + "Failed to calculate duration. This should never happen." + ))? .as_secs(); ensure!( @@ -146,7 +149,7 @@ impl, V: Versions> UpgradeTaskStat ); // If we have an upgrade target, we validate that the proposal is relevant for the current view. 
- info!( + tracing::info!( "Upgrade proposal received for view: {:?}", proposal.data.view_number() ); @@ -170,7 +173,7 @@ impl, V: Versions> UpgradeTaskStat // TODO Come back to this - we probably don't need this, but we should also never receive a UpgradeCertificate where this fails, investigate block ready so it doesn't make one for the genesis block ensure!( self.cur_view != TYPES::View::genesis() && *view >= self.cur_view.saturating_sub(1), - format!( + warn!( "Discarding old upgrade proposal; the proposal is for view {:?}, but the current view is {:?}.", view, self.cur_view @@ -181,7 +184,7 @@ impl, V: Versions> UpgradeTaskStat let view_leader_key = self.quorum_membership.leader(view, self.cur_epoch)?; ensure!( view_leader_key == *sender, - format!( + info!( "Upgrade proposal doesn't have expected leader key for view {} \n Upgrade proposal is: {:?}", *view, proposal.data.clone() ) ); @@ -212,18 +215,18 @@ impl, V: Versions> UpgradeTaskStat ) .await?; - debug!("Sending upgrade vote {:?}", vote.view_number()); + tracing::debug!("Sending upgrade vote {:?}", vote.view_number()); broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; } HotShotEvent::UpgradeVoteRecv(ref vote) => { - debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); + tracing::debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader. { let view = vote.view_number(); ensure!( self.quorum_membership.leader(view, self.cur_epoch)? == self.public_key, - format!( + debug!( "We are not the leader for view {} are we leader for next view? {}", *view, self.quorum_membership.leader(view + 1, self.cur_epoch)? @@ -253,7 +256,10 @@ impl, V: Versions> UpgradeTaskStat let view: u64 = *self.cur_view; let time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) - .context("Failed to calculate duration")? + .wrap() + .context(error!( + "Failed to calculate duration. This should never happen." + ))? 
.as_secs(); // We try to form a certificate 5 views before we're leader. @@ -287,7 +293,7 @@ impl, V: Versions> UpgradeTaskStat ) .expect("Failed to sign upgrade proposal commitment!"); - warn!("Sending upgrade proposal:\n\n {:?}", upgrade_proposal); + tracing::warn!("Sending upgrade proposal:\n\n {:?}", upgrade_proposal); let message = Proposal { data: upgrade_proposal, @@ -305,9 +311,6 @@ impl, V: Versions> UpgradeTaskStat .await; } } - HotShotEvent::Shutdown => { - error!("Shutting down because of shutdown signal!"); - } _ => {} } Ok(()) diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index 106203bd07..b81ec7760c 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -6,7 +6,6 @@ use std::{marker::PhantomData, sync::Arc}; -use anyhow::Result; use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; @@ -21,6 +20,7 @@ use hotshot_types::{ }, }; use tracing::{debug, error, info, instrument}; +use utils::result12345::Result; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index a05f50d64b..1db1032a5e 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -4,7 +4,6 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . 
-#![allow(clippy::module_name_repetitions)] use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, @@ -12,7 +11,6 @@ use std::{ time::Duration, }; -use anyhow::{ensure, Result}; use async_broadcast::{Receiver, Sender}; use async_compatibility_layer::art::{async_sleep, async_spawn}; use async_lock::RwLock; @@ -38,7 +36,8 @@ use hotshot_types::{ }; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn}; +use tracing::instrument; +use utils::result12345::*; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -197,7 +196,7 @@ impl, V: Versions> ViewSyncTaskSta // This certificate is old, we can throw it away // If next view = cert round, then that means we should already have a task running for it if self.current_view > view { - debug!("Already in a higher view than the view sync message"); + tracing::debug!("Already in a higher view than the view sync message"); return; } @@ -205,7 +204,7 @@ impl, V: Versions> ViewSyncTaskSta if let Some(replica_task) = task_map.get_mut(&view) { // Forward event then return - debug!("Forwarding message"); + tracing::debug!("Forwarding message"); let result = replica_task .handle(Arc::clone(&event), sender.clone()) .await; @@ -259,25 +258,25 @@ impl, V: Versions> ViewSyncTaskSta ) -> Result<()> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { - debug!("Received view sync cert for phase {:?}", certificate); + tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { - debug!("Received view sync cert for phase {:?}", certificate); + tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } 
HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { - debug!("Received view sync cert for phase {:?}", certificate); + tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } HotShotEvent::ViewSyncTimeout(view, _, _) => { - debug!("view sync timeout in main task {:?}", view); + tracing::debug!("view sync timeout in main task {:?}", view); let view = *view; self.send_to_or_create_replica(event, view, &event_stream) .await; @@ -289,7 +288,7 @@ impl, V: Versions> ViewSyncTaskSta let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { - debug!("Forwarding message"); + tracing::debug!("Forwarding message"); // Handle the vote and check if the accumulator has returned successfully if relay_task @@ -331,7 +330,7 @@ impl, V: Versions> ViewSyncTaskSta let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { - debug!("Forwarding message"); + tracing::debug!("Forwarding message"); // Handle the vote and check if the accumulator has returned successfully if relay_task @@ -373,7 +372,7 @@ impl, V: Versions> ViewSyncTaskSta let relay = vote.date().relay; let relay_map = map.entry(vote_view).or_insert(BTreeMap::new()); if let Some(relay_task) = relay_map.get_mut(&relay) { - debug!("Forwarding message"); + tracing::debug!("Forwarding message"); // Handle the vote and check if the accumulator has returned successfully if relay_task @@ -413,9 +412,10 @@ impl, V: Versions> ViewSyncTaskSta &HotShotEvent::ViewChange(new_view) => { let new_view = TYPES::View::new(*new_view); if self.current_view < new_view { - debug!( + tracing::debug!( "Change from view {} to view {} in view sync task", - *self.current_view, *new_view + *self.current_view, + *new_view ); 
self.current_view = new_view; @@ -457,7 +457,7 @@ impl, V: Versions> ViewSyncTaskSta self.num_timeouts_tracked += 1; let leader = self.membership.leader(view_number, self.current_epoch)?; - error!( + tracing::error!( %leader, leader_mnemonic = cdn_proto::util::mnemonic(&leader), view_number = *view_number, @@ -466,11 +466,11 @@ impl, V: Versions> ViewSyncTaskSta ); if self.num_timeouts_tracked >= 3 { - error!("Too many consecutive timeouts! This shouldn't happen"); + tracing::error!("Too many consecutive timeouts! This shouldn't happen"); } if self.num_timeouts_tracked >= 2 { - error!("Starting view sync protocol for view {}", *view_number + 1); + tracing::error!("Starting view sync protocol for view {}", *view_number + 1); self.send_to_or_create_replica( Arc::new(HotShotEvent::ViewSyncTrigger(view_number + 1)), @@ -513,7 +513,7 @@ impl, V: Versions> // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { - warn!("We're already in a higher round"); + tracing::warn!("We're already in a higher round"); return None; } @@ -527,7 +527,7 @@ impl, V: Versions> ) .await { - error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!("Not valid view sync cert! 
{:?}", certificate.data()); return None; } @@ -554,7 +554,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign ViewSyncCommitData!"); + tracing::error!("Failed to sign ViewSyncCommitData!"); return None; }; let message = GeneralConsensusMessage::::ViewSyncCommitVote(vote); @@ -579,7 +579,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - warn!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); + tracing::warn!("Vote sending timed out in ViewSyncPreCommitCertificateRecv, Relay = {}", relay); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( @@ -599,7 +599,7 @@ impl, V: Versions> // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { - warn!("We're already in a higher round"); + tracing::warn!("We're already in a higher round"); return None; } @@ -613,7 +613,7 @@ impl, V: Versions> ) .await { - error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!("Not valid view sync cert! 
{:?}", certificate.data()); return None; } @@ -640,7 +640,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign view sync finalized vote!"); + tracing::error!("Failed to sign view sync finalized vote!"); return None; }; let message = GeneralConsensusMessage::::ViewSyncFinalizeVote(vote); @@ -653,7 +653,7 @@ impl, V: Versions> .await; } - info!( + tracing::info!( "View sync protocol has received view sync evidence to update the view to {}", *self.next_view ); @@ -675,7 +675,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - warn!( + tracing::warn!( "Vote sending timed out in ViewSyncCommitCertificateRecv, relay = {}", relay ); @@ -695,7 +695,7 @@ impl, V: Versions> HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { - warn!("We're already in a higher round"); + tracing::warn!("We're already in a higher round"); return None; } @@ -709,7 +709,7 @@ impl, V: Versions> ) .await { - error!("Not valid view sync cert! {:?}", certificate.data()); + tracing::error!("Not valid view sync cert! 
{:?}", certificate.data()); return None; } @@ -739,7 +739,7 @@ impl, V: Versions> HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; if self.next_view != TYPES::View::new(*view_number) { - error!("Unexpected view number to triger view sync"); + tracing::error!("Unexpected view number to triger view sync"); return None; } @@ -755,7 +755,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign pre commit vote!"); + tracing::error!("Failed to sign pre commit vote!"); return None; }; let message = GeneralConsensusMessage::::ViewSyncPreCommitVote(vote); @@ -775,7 +775,7 @@ impl, V: Versions> let timeout = self.view_sync_timeout; async move { async_sleep(timeout).await; - warn!("Vote sending timed out in ViewSyncTrigger"); + tracing::warn!("Vote sending timed out in ViewSyncTrigger"); broadcast_event( Arc::new(HotShotEvent::ViewSyncTimeout( TYPES::View::new(*next_view), @@ -813,7 +813,7 @@ impl, V: Versions> ) .await else { - error!("Failed to sign ViewSyncPreCommitData!"); + tracing::error!("Failed to sign ViewSyncPreCommitData!"); return None; }; let message = @@ -841,7 +841,7 @@ impl, V: Versions> let last_cert = last_seen_certificate.clone(); async move { async_sleep(timeout).await; - warn!( + tracing::warn!( "Vote sending timed out in ViewSyncTimeout relay = {}", relay ); diff --git a/crates/task-impls/src/vote_collection.rs b/crates/task-impls/src/vote_collection.rs index 62d2630be1..7fb897b82a 100644 --- a/crates/task-impls/src/vote_collection.rs +++ b/crates/task-impls/src/vote_collection.rs @@ -11,7 +11,6 @@ use std::{ sync::Arc, }; -use anyhow::{ensure, Context, Result}; use async_broadcast::Sender; use async_trait::async_trait; use either::Either::{self, Left, Right}; @@ -31,7 +30,7 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; -use tracing::debug; +use utils::result12345::*; use crate::{events::HotShotEvent, helpers::broadcast_event}; @@ -110,17 +109,16 @@ impl< ); ensure!( 
vote.view_number() == self.view, - format!( - "Vote view does not match! vote view is {} current view is {}", + error!( + "Vote view does not match! vote view is {} current view is {}. This vote should not have been passed to this accumulator.", *vote.view_number(), *self.view ) ); - let accumulator = self - .accumulator - .as_mut() - .context("No accumulator to handle vote with.")?; + let accumulator = self.accumulator.as_mut().context(warn!( + "No accumulator to handle vote with. This shouldn't happen." + ))?; match accumulator .accumulate(vote, &self.membership, self.epoch) @@ -128,7 +126,7 @@ impl< { Either::Left(()) => Ok(None), Either::Right(cert) => { - debug!("Certificate Formed! {:?}", cert); + tracing::debug!("Certificate Formed! {:?}", cert); broadcast_event( Arc::new(VOTE::make_cert_event(cert.clone(), &self.public_key)), @@ -254,7 +252,7 @@ where { match collectors.entry(vote.view_number()) { Entry::Vacant(entry) => { - debug!("Starting vote handle for view {:?}", vote.view_number()); + tracing::debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key, membership: Arc::clone(membership), diff --git a/crates/task/Cargo.toml b/crates/task/Cargo.toml index 3983158a31..47261bfbd5 100644 --- a/crates/task/Cargo.toml +++ b/crates/task/Cargo.toml @@ -14,6 +14,7 @@ tracing = { workspace = true } async-compatibility-layer = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } +utils = { path = "../utils" } [target.'cfg(all(async_executor_impl = "tokio"))'.dependencies] tokio = { workspace = true, features = [ diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index c623c5e43a..474bbd76b7 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -6,7 +6,6 @@ use std::sync::Arc; -use anyhow::Result; use async_broadcast::{Receiver, RecvError, Sender}; #[cfg(async_executor_impl = "async-std")] use async_std::task::{spawn, JoinHandle}; @@ -17,6 +16,7 @@ use 
futures::future::join_all; use futures::future::try_join_all; #[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; +use utils::result12345::Result; /// Trait for events that long-running tasks handle pub trait TaskEvent: PartialEq { diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index ca2541d709..e9ad6368b5 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -50,6 +50,7 @@ vbs = { workspace = true } displaydoc = { version = "0.2.5", default-features = false } dyn-clone = { git = "https://github.com/dtolnay/dyn-clone", tag = "1.0.17" } url = { workspace = true } +utils = { path = "../utils" } vec1 = { workspace = true } libp2p = { workspace = true } serde_json = { workspace = true } diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index c24a25cc4d..644d962b94 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -13,10 +13,10 @@ use std::{ sync::Arc, }; -use anyhow::{bail, ensure, Result}; use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; use committable::Commitment; -use tracing::{debug, error, instrument, trace}; +use tracing::instrument; +use utils::result12345::*; use vec1::Vec1; pub use crate::utils::{View, ViewInner}; @@ -68,31 +68,31 @@ impl OuterConsensus { /// Locks inner consensus for reading and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn read(&self) -> ConsensusReadLockGuard<'_, TYPES> { - trace!("Trying to acquire read lock on consensus"); + tracing::trace!("Trying to acquire read lock on consensus"); let ret = self.inner_consensus.read().await; - trace!("Acquired read lock on consensus"); + tracing::trace!("Acquired read lock on consensus"); ConsensusReadLockGuard::new(ret) } /// Locks inner consensus for writing and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn write(&self) -> ConsensusWriteLockGuard<'_, TYPES> { - 
trace!("Trying to acquire write lock on consensus"); + tracing::trace!("Trying to acquire write lock on consensus"); let ret = self.inner_consensus.write().await; - trace!("Acquired write lock on consensus"); + tracing::trace!("Acquired write lock on consensus"); ConsensusWriteLockGuard::new(ret) } /// Tries to acquire write lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub fn try_write(&self) -> Option> { - trace!("Trying to acquire write lock on consensus"); + tracing::trace!("Trying to acquire write lock on consensus"); let ret = self.inner_consensus.try_write(); if let Some(guard) = ret { - trace!("Acquired write lock on consensus"); + tracing::trace!("Acquired write lock on consensus"); Some(ConsensusWriteLockGuard::new(guard)) } else { - trace!("Failed to acquire write lock"); + tracing::trace!("Failed to acquire write lock"); None } } @@ -100,22 +100,22 @@ impl OuterConsensus { /// Acquires upgradable read lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub async fn upgradable_read(&self) -> ConsensusUpgradableReadLockGuard<'_, TYPES> { - trace!("Trying to acquire upgradable read lock on consensus"); + tracing::trace!("Trying to acquire upgradable read lock on consensus"); let ret = self.inner_consensus.upgradable_read().await; - trace!("Acquired upgradable read lock on consensus"); + tracing::trace!("Acquired upgradable read lock on consensus"); ConsensusUpgradableReadLockGuard::new(ret) } /// Tries to acquire read lock on inner consensus and leaves debug traces #[instrument(skip_all, target = "OuterConsensus")] pub fn try_read(&self) -> Option> { - trace!("Trying to acquire read lock on consensus"); + tracing::trace!("Trying to acquire read lock on consensus"); let ret = self.inner_consensus.try_read(); if let Some(guard) = ret { - trace!("Acquired read lock on consensus"); + tracing::trace!("Acquired read lock on consensus"); 
Some(ConsensusReadLockGuard::new(guard)) } else { - trace!("Failed to acquire read lock"); + tracing::trace!("Failed to acquire read lock"); None } } @@ -145,7 +145,7 @@ impl<'a, TYPES: NodeType> Deref for ConsensusReadLockGuard<'a, TYPES> { impl<'a, TYPES: NodeType> Drop for ConsensusReadLockGuard<'a, TYPES> { #[instrument(skip_all, target = "ConsensusReadLockGuard")] fn drop(&mut self) { - trace!("Read lock on consensus dropped"); + tracing::trace!("Read lock on consensus dropped"); } } @@ -179,7 +179,7 @@ impl<'a, TYPES: NodeType> DerefMut for ConsensusWriteLockGuard<'a, TYPES> { impl<'a, TYPES: NodeType> Drop for ConsensusWriteLockGuard<'a, TYPES> { #[instrument(skip_all, target = "ConsensusWriteLockGuard")] fn drop(&mut self) { - debug!("Write lock on consensus dropped"); + tracing::debug!("Write lock on consensus dropped"); } } @@ -206,9 +206,9 @@ impl<'a, TYPES: NodeType> ConsensusUpgradableReadLockGuard<'a, TYPES> { pub async fn upgrade(mut guard: Self) -> ConsensusWriteLockGuard<'a, TYPES> { let inner_guard = unsafe { ManuallyDrop::take(&mut guard.lock_guard) }; guard.taken = true; - debug!("Trying to upgrade upgradable read lock on consensus"); + tracing::debug!("Trying to upgrade upgradable read lock on consensus"); let ret = RwLockUpgradableReadGuard::upgrade(inner_guard).await; - debug!("Upgraded upgradable read lock on consensus"); + tracing::debug!("Upgraded upgradable read lock on consensus"); ConsensusWriteLockGuard::new(ret) } } @@ -226,7 +226,7 @@ impl<'a, TYPES: NodeType> Drop for ConsensusUpgradableReadLockGuard<'a, TYPES> { fn drop(&mut self) { if !self.taken { unsafe { ManuallyDrop::drop(&mut self.lock_guard) } - debug!("Upgradable read lock on consensus dropped"); + tracing::debug!("Upgradable read lock on consensus dropped"); } } } @@ -648,7 +648,7 @@ impl Consensus { high_qc.view_number > self.high_qc.view_number || high_qc == self.high_qc, "High QC with an equal or higher view exists." 
); - debug!("Updating high QC"); + tracing::debug!("Updating high QC"); self.high_qc = high_qc; Ok(()) @@ -680,7 +680,7 @@ impl Consensus { terminator: Terminator, ok_when_finished: bool, mut f: F, - ) -> Result<(), HotShotError> + ) -> std::result::Result<(), HotShotError> where F: FnMut( &Leaf, @@ -744,7 +744,7 @@ impl Consensus { .next() .expect("INCONSISTENT STATE: anchor leaf not in state map!"); if *anchor_entry.0 != old_anchor_view { - error!( + tracing::error!( "Something about GC has failed. Older leaf exists than the previous anchor leaf." ); } diff --git a/crates/types/src/data.rs b/crates/types/src/data.rs index 94d15cf29f..923ab4058f 100644 --- a/crates/types/src/data.rs +++ b/crates/types/src/data.rs @@ -17,8 +17,6 @@ use std::{ sync::Arc, }; -use anyhow::{ensure, Result}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use async_lock::RwLock; #[cfg(async_executor_impl = "async-std")] use async_std::task::spawn_blocking; @@ -32,6 +30,7 @@ use thiserror::Error; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; +use utils::result12345::*; use vec1::Vec1; use crate::{ @@ -113,20 +112,7 @@ macro_rules! impl_u64_wrapper { } /// Type-safe wrapper around `u64` so we know the thing we're talking about is a view number. -#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - CanonicalSerialize, - CanonicalDeserialize, -)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct ViewNumber(u64); impl Committable for ViewNumber { @@ -139,20 +125,7 @@ impl Committable for ViewNumber { impl_u64_wrapper!(ViewNumber); /// Type-safe wrapper around `u64` so we know the thing we're talking about is a epoch number. 
-#[derive( - Copy, - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - CanonicalSerialize, - CanonicalDeserialize, -)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct EpochNumber(u64); impl Committable for EpochNumber { @@ -655,7 +628,7 @@ impl Leaf { &mut self, block_payload: TYPES::BlockPayload, num_storage_nodes: usize, - ) -> Result<(), BlockError> { + ) -> std::result::Result<(), BlockError> { let encoded_txns = block_payload.encode(); let commitment = vid_commitment(&encoded_txns, num_storage_nodes); if commitment != self.block_header.payload_commitment() { diff --git a/crates/types/src/message.rs b/crates/types/src/message.rs index aa5d8198bb..38c467445b 100644 --- a/crates/types/src/message.rs +++ b/crates/types/src/message.rs @@ -11,11 +11,11 @@ use std::{fmt, fmt::Debug, marker::PhantomData, sync::Arc}; -use anyhow::{bail, ensure, Context, Result}; use async_lock::RwLock; use cdn_proto::util::mnemonic; use derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use utils::result12345::*; use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, @@ -472,7 +472,9 @@ impl UpgradeLock { } }; - serialized_message.context("Failed to serialize message!") + serialized_message + .wrap() + .context(info!("Failed to serialize message!")) } /// Deserialize a message with a version number, using `message.view_number()` to determine the message's version. This function will fail on improperly versioned messages. @@ -485,7 +487,8 @@ impl UpgradeLock { message: &[u8], ) -> Result { let actual_version = Version::deserialize(message) - .context("Failed to read message version!")? + .wrap() + .context(info!("Failed to read message version!"))? 
.0; let deserialized_message: M = match actual_version { @@ -495,7 +498,8 @@ impl UpgradeLock { bail!("Cannot deserialize message with stated version {}", v); } } - .context("Failed to deserialize message!")?; + .wrap() + .context(info!("Failed to deserialize message!"))?; let view = deserialized_message.view_number(); diff --git a/crates/types/src/simple_certificate.rs b/crates/types/src/simple_certificate.rs index bbdc88eb05..269f2caa9a 100644 --- a/crates/types/src/simple_certificate.rs +++ b/crates/types/src/simple_certificate.rs @@ -13,11 +13,11 @@ use std::{ sync::Arc, }; -use anyhow::{ensure, Result}; use async_lock::RwLock; use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; +use utils::result12345::*; use crate::{ data::serialize_signature2, diff --git a/crates/types/src/simple_vote.rs b/crates/types/src/simple_vote.rs index 1c1fcba31b..8c08faab13 100644 --- a/crates/types/src/simple_vote.rs +++ b/crates/types/src/simple_vote.rs @@ -8,9 +8,9 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData}; -use anyhow::Result; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use utils::result12345::*; use vbs::version::{StaticVersionType, Version}; use crate::{ @@ -169,7 +169,9 @@ impl SimpleVote { let signature = ( pub_key.clone(), - TYPES::SignatureKey::sign(private_key, commit.as_ref())?, + TYPES::SignatureKey::sign(private_key, commit.as_ref()) + .wrap() + .context(warn!("Failed to sign vote"))?, ); Ok(Self { diff --git a/crates/types/src/traits/election.rs b/crates/types/src/traits/election.rs index 9c8e950bfc..872417754e 100644 --- a/crates/types/src/traits/election.rs +++ b/crates/types/src/traits/election.rs @@ -7,7 +7,7 @@ //! The election trait, used to decide which node is the leader and determine if a vote is valid. 
use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; -use anyhow::Result; +use utils::result12345::Result; use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; diff --git a/crates/types/src/vote.rs b/crates/types/src/vote.rs index 882512eae9..ccd4bdd006 100644 --- a/crates/types/src/vote.rs +++ b/crates/types/src/vote.rs @@ -11,12 +11,12 @@ use std::{ marker::PhantomData, }; -use anyhow::Result; use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; use either::Either; use ethereum_types::U256; use tracing::error; +use utils::result12345::Result; use crate::{ message::UpgradeLock, diff --git a/crates/utils/src/result12345.rs b/crates/utils/src/result12345.rs index 1136b6e6de..62628207d1 100644 --- a/crates/utils/src/result12345.rs +++ b/crates/utils/src/result12345.rs @@ -80,14 +80,17 @@ impl Log for Result { } #[derive(Debug, Clone)] +#[must_use] /// main error type pub struct Error { /// level - level: Level, + pub level: Level, /// message - message: String, + pub message: String, } +impl std::error::Error for Error {} + /// Trait for a `std::result::Result` that can be wrapped into a `Result` pub trait Wrap { /// Wrap the value into a `Result` From b0b72cb04ac270dee11bdb220cb5d1b844fefe87 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 18 Oct 2024 15:38:37 -0400 Subject: [PATCH 14/16] update --- Cargo.lock | 3 + clippy.toml | 1 + crates/task-impls/src/quorum_vote/mod.rs | 2 +- crates/types/src/consensus.rs | 24 ++++--- crates/utils/src/result12345/macros.rs | 82 +++++++++++++++++++++--- 5 files changed, 90 insertions(+), 22 deletions(-) create mode 100644 clippy.toml diff --git a/Cargo.lock b/Cargo.lock index 231d39f84b..6b1fc87f3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3137,6 +3137,7 @@ dependencies = [ "toml", "tracing", "url", + "utils", "vbs", ] @@ -3329,6 +3330,7 @@ dependencies = [ "futures", 
"tokio", "tracing", + "utils", ] [[package]] @@ -3467,6 +3469,7 @@ dependencies = [ "tracing", "typenum", "url", + "utils", "vbs", "vec1", ] diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000000..edd8ddcbdf --- /dev/null +++ b/clippy.toml @@ -0,0 +1 @@ +allowed-wildcard-imports = [ "utils", "hotshot_task_impls", "hotshot_types" ] diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index 0b8468bfba..ad156edb1a 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -131,7 +131,7 @@ impl + 'static, V: Versions> let consensus_reader = self.consensus.read().await; let (Some(parent_state), _) = consensus_reader.state_and_delta(parent.view_number()) else { - bail!("Parent state not found! Consensus internally inconsistent") + bail!("Parent state not found! Consensus internally inconsistent"); }; drop(consensus_reader); diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index 644d962b94..d914d977fa 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -593,19 +593,17 @@ impl Consensus { .. } = existing_view.view_inner { - match new_view.view_inner { - ViewInner::Leaf { - delta: ref new_delta, - .. - } => { - ensure!( - new_delta.is_some() || existing_delta.is_none(), - "Skipping the state update to not override a `Leaf` view with `Some` state delta." - ); - } - _ => { - bail!("Skipping the state update to not override a `Leaf` view with a non-`Leaf` view."); - } + if let ViewInner::Leaf { + delta: ref new_delta, + .. + } = new_view.view_inner + { + ensure!( + new_delta.is_some() || existing_delta.is_none(), + "Skipping the state update to not override a `Leaf` view with `Some` state delta." 
+ ); + } else { + bail!("Skipping the state update to not override a `Leaf` view with a non-`Leaf` view."); } } } diff --git a/crates/utils/src/result12345/macros.rs b/crates/utils/src/result12345/macros.rs index f2f260071a..97894aba1a 100644 --- a/crates/utils/src/result12345/macros.rs +++ b/crates/utils/src/result12345/macros.rs @@ -152,6 +152,40 @@ macro_rules! error { } pub use error; +#[macro_export] +/// Log a `result12345::Error` at the corresponding level. +macro_rules! log { + ($result:expr) => { + if let Err(ref error) = $result { + let mut error_level = error.level; + if error_level == Level::Unspecified { + error_level = DEFAULT_LOG_LEVEL; + } + + match error_level { + Level::Trace => { + tracing::trace!("{}", error.message); + } + Level::Debug => { + tracing::debug!("{}", error.message); + } + Level::Info => { + tracing::info!("{}", error.message); + } + Level::Warn => { + tracing::warn!("{}", error.message); + } + Level::Error => { + tracing::error!("{}", error.message); + } + // impossible + Level::Unspecified => {} + } + } + }; +} +pub use log; + #[macro_export] /// Check that the given condition holds, otherwise return an error. /// @@ -163,31 +197,47 @@ pub use error; macro_rules! 
ensure { ($condition:expr) => { if !$condition { - return Err(Error { + let result = Err(Error { level: Level::Unspecified, message: format!("{}: condition '{}' failed.", line_info!(), stringify!($condition)) }); + + log!(result); + + return result; } }; ($condition:expr, $message:literal) => { if !$condition { - return Err(Error { + let result = Err(Error { level: Level::Unspecified, message: format!("{}: {}", line_info!(), $message) }); + + log!(result); + + return result; } }; ($condition:expr, $fmt:expr, $($arg:tt)*) => { if !$condition { - return Err(Error { + let result = Err(Error { level: Level::Unspecified, message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) }); + + log!(result); + + return result; } }; ($condition:expr, $error:expr) => { if !$condition { - return Err($error); + let result = Err($error); + + log!(result); + + return result; } }; } @@ -203,25 +253,41 @@ pub use ensure; /// - an `Error`, in which case the given error is logged unchanged. macro_rules! 
bail { () => { - return Err(Error { + let result = Err(Error { level: Level::Unspecified, message: format!("{}: bailed.", line_info!()), }); + + log!(result); + + return result; }; ($message:literal) => { - return Err(Error { + let result = Err(Error { level: Level::Unspecified, message: format!("{}: {}", line_info!(), $message) }); + + log!(result); + + return result; }; ($fmt:expr, $($arg:tt)*) => { - return Err(Error { + let result = Err(Error { level: Level::Unspecified, message: format!("{}: {}", line_info!(), format!($fmt, $($arg)*)) }); + + log!(result); + + return result; }; ($error:expr) => { - return Err($error); + let result = Err($error); + + log!(result); + + return result; }; } pub use bail; From 7d37f5884630395c97d4befba05de4b83dd60b93 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Fri, 18 Oct 2024 18:28:24 -0400 Subject: [PATCH 15/16] update --- crates/task-impls/src/da.rs | 4 ++-- crates/types/src/simple_vote.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 4aee6f5409..b8b76e696e 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -143,7 +143,7 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState SimpleVote { pub_key.clone(), TYPES::SignatureKey::sign(private_key, commit.as_ref()) .wrap() - .context(warn!("Failed to sign vote"))?, + .context(error!("Failed to sign vote"))?, ); Ok(Self { From 9a9ef16c3f59ae73aaaa192e69378e5b86988e34 Mon Sep 17 00:00:00 2001 From: ss-es <155648797+ss-es@users.noreply.github.com> Date: Mon, 21 Oct 2024 11:58:11 -0400 Subject: [PATCH 16/16] rename --- crates/hotshot/src/traits/election/randomized_committee.rs | 2 +- crates/hotshot/src/traits/election/static_committee.rs | 2 +- .../traits/election/static_committee_leader_two_views.rs | 2 +- crates/task-impls/src/consensus/handlers.rs | 2 +- crates/task-impls/src/consensus/mod.rs | 2 +- 
crates/task-impls/src/consensus2/handlers.rs | 2 +- crates/task-impls/src/da.rs | 2 +- crates/task-impls/src/helpers.rs | 6 +++--- crates/task-impls/src/network.rs | 2 +- crates/task-impls/src/quorum_proposal/handlers.rs | 2 +- crates/task-impls/src/quorum_proposal/mod.rs | 2 +- crates/task-impls/src/quorum_proposal_recv/handlers.rs | 2 +- crates/task-impls/src/quorum_proposal_recv/mod.rs | 2 +- crates/task-impls/src/quorum_vote/handlers.rs | 2 +- crates/task-impls/src/quorum_vote/mod.rs | 2 +- crates/task-impls/src/request.rs | 2 +- crates/task-impls/src/rewind.rs | 2 +- crates/task-impls/src/transactions.rs | 2 +- crates/task-impls/src/upgrade.rs | 2 +- crates/task-impls/src/vid.rs | 2 +- crates/task-impls/src/view_sync.rs | 2 +- crates/task-impls/src/vote_collection.rs | 2 +- crates/task/src/task.rs | 2 +- crates/types/src/consensus.rs | 2 +- crates/types/src/data.rs | 2 +- crates/types/src/message.rs | 2 +- crates/types/src/simple_certificate.rs | 2 +- crates/types/src/simple_vote.rs | 2 +- crates/types/src/traits/election.rs | 2 +- crates/types/src/vote.rs | 2 +- crates/utils/src/{result12345.rs => anytrace.rs} | 0 crates/utils/src/{result12345 => anytrace}/macros.rs | 2 +- crates/utils/src/lib.rs | 2 +- 33 files changed, 34 insertions(+), 34 deletions(-) rename crates/utils/src/{result12345.rs => anytrace.rs} (100%) rename crates/utils/src/{result12345 => anytrace}/macros.rs (99%) diff --git a/crates/hotshot/src/traits/election/randomized_committee.rs b/crates/hotshot/src/traits/election/randomized_committee.rs index c359b2f1c3..d664e2a6e8 100644 --- a/crates/hotshot/src/traits/election/randomized_committee.rs +++ b/crates/hotshot/src/traits/election/randomized_committee.rs @@ -17,7 +17,7 @@ use hotshot_types::{ PeerConfig, }; use rand::{rngs::StdRng, Rng}; -use utils::result12345::Result; +use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/crates/hotshot/src/traits/election/static_committee.rs 
b/crates/hotshot/src/traits/election/static_committee.rs index c799269fb8..acacc51cb6 100644 --- a/crates/hotshot/src/traits/election/static_committee.rs +++ b/crates/hotshot/src/traits/election/static_committee.rs @@ -16,7 +16,7 @@ use hotshot_types::{ }, PeerConfig, }; -use utils::result12345::Result; +use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs index 7fdb812535..bb9574e37e 100644 --- a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -16,7 +16,7 @@ use hotshot_types::{ }, PeerConfig, }; -use utils::result12345::Result; +use utils::anytrace::Result; #[derive(Clone, Debug, Eq, PartialEq, Hash)] diff --git a/crates/task-impls/src/consensus/handlers.rs b/crates/task-impls/src/consensus/handlers.rs index f276eb0b8e..b6f2b843d3 100644 --- a/crates/task-impls/src/consensus/handlers.rs +++ b/crates/task-impls/src/consensus/handlers.rs @@ -19,7 +19,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use super::ConsensusTaskState; use crate::{ diff --git a/crates/task-impls/src/consensus/mod.rs b/crates/task-impls/src/consensus/mod.rs index b01ea85caf..edff8f6078 100644 --- a/crates/task-impls/src/consensus/mod.rs +++ b/crates/task-impls/src/consensus/mod.rs @@ -26,7 +26,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; -use utils::result12345::Result; +use utils::anytrace::Result; use self::handlers::{ handle_quorum_vote_recv, handle_timeout, handle_timeout_vote_recv, handle_view_change, diff --git a/crates/task-impls/src/consensus2/handlers.rs b/crates/task-impls/src/consensus2/handlers.rs index 1885d0710a..ec87f1b159 100644 --- 
a/crates/task-impls/src/consensus2/handlers.rs +++ b/crates/task-impls/src/consensus2/handlers.rs @@ -6,7 +6,7 @@ use std::{sync::Arc, time::Duration}; -use utils::result12345::{ensure, Context, Result}; +use utils::anytrace::{ensure, Context, Result}; use async_broadcast::Sender; use async_compatibility_layer::art::{async_sleep, async_spawn}; use chrono::Utc; diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index b8b76e696e..5ccb50e091 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -35,7 +35,7 @@ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use crate::{ events::HotShotEvent, diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index 7c1109d936..a0b10feada 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -39,7 +39,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use crate::{ events::HotShotEvent, quorum_proposal_recv::QuorumProposalRecvTaskState, @@ -356,7 +356,7 @@ pub async fn decide_from_proposal( res } -/// Gets the parent leaf and state from the parent of a proposal, returning an [`utils::result12345::Error`] if not. +/// Gets the parent leaf and state from the parent of a proposal, returning an [`utils::anytrace::Error`] if not. #[instrument(skip_all)] #[allow(clippy::too_many_arguments)] pub(crate) async fn parent_leaf_and_state( @@ -692,7 +692,7 @@ pub async fn validate_proposal_view_and_certs< /// `timeout_task` which are updated during the operation of the function. /// /// # Errors -/// Returns an [`utils::result12345::Error`] when the new view is not greater than the current view. +/// Returns an [`utils::anytrace::Error`] when the new view is not greater than the current view. 
pub(crate) async fn update_view, V: Versions>( new_view: TYPES::View, event_stream: &Sender>>, diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index df6f53a294..d8ef530b54 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -31,7 +31,7 @@ use hotshot_types::{ vote::{HasViewNumber, Vote}, }; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs index 16030837d6..6ee2c2579e 100644 --- a/crates/task-impls/src/quorum_proposal/handlers.rs +++ b/crates/task-impls/src/quorum_proposal/handlers.rs @@ -26,7 +26,7 @@ use hotshot_types::{ }, }; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use vbs::version::StaticVersionType; use crate::{ diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index b3b7701142..022b608698 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -33,7 +33,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use self::handlers::{ProposalDependency, ProposalDependencyHandle}; use crate::{ diff --git a/crates/task-impls/src/quorum_proposal_recv/handlers.rs b/crates/task-impls/src/quorum_proposal_recv/handlers.rs index cb7f7619f3..3d1957c84a 100644 --- a/crates/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/crates/task-impls/src/quorum_proposal_recv/handlers.rs @@ -26,7 +26,7 @@ use hotshot_types::{ vote::{Certificate, HasViewNumber}, }; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use super::QuorumProposalRecvTaskState; use crate::{ diff --git a/crates/task-impls/src/quorum_proposal_recv/mod.rs 
b/crates/task-impls/src/quorum_proposal_recv/mod.rs index 22ef5bf2a3..7c3ab2ed24 100644 --- a/crates/task-impls/src/quorum_proposal_recv/mod.rs +++ b/crates/task-impls/src/quorum_proposal_recv/mod.rs @@ -30,7 +30,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::{debug, error, info, instrument, warn}; -use utils::result12345::{bail, Result}; +use utils::anytrace::{bail, Result}; use vbs::version::Version; use self::handlers::handle_quorum_proposal_recv; diff --git a/crates/task-impls/src/quorum_vote/handlers.rs b/crates/task-impls/src/quorum_vote/handlers.rs index 8873953a7b..656737524f 100644 --- a/crates/task-impls/src/quorum_vote/handlers.rs +++ b/crates/task-impls/src/quorum_vote/handlers.rs @@ -19,7 +19,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use super::QuorumVoteTaskState; use crate::{ diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index 281b601ede..f96331012c 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -38,7 +38,7 @@ use jf_vid::VidScheme; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use crate::{ events::HotShotEvent, diff --git a/crates/task-impls/src/request.rs b/crates/task-impls/src/request.rs index 4f4eca3875..c7bc3c8957 100644 --- a/crates/task-impls/src/request.rs +++ b/crates/task-impls/src/request.rs @@ -37,7 +37,7 @@ use sha2::{Digest, Sha256}; #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; -use utils::result12345::Result; +use utils::anytrace::Result; use crate::{events::HotShotEvent, helpers::broadcast_event}; diff --git a/crates/task-impls/src/rewind.rs b/crates/task-impls/src/rewind.rs index bddce9e2e5..9ae424b62b 100644 --- 
a/crates/task-impls/src/rewind.rs +++ b/crates/task-impls/src/rewind.rs @@ -10,7 +10,7 @@ use async_broadcast::{Receiver, Sender}; use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::traits::node_implementation::NodeType; -use utils::result12345::Result; +use utils::anytrace::Result; use crate::events::HotShotEvent; diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index 027724ef93..72bcd8f78b 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -33,7 +33,7 @@ use hotshot_types::{ }; use tracing::instrument; use url::Url; -use utils::result12345::*; +use utils::anytrace::*; use vbs::version::{StaticVersionType, Version}; use vec1::Vec1; diff --git a/crates/task-impls/src/upgrade.rs b/crates/task-impls/src/upgrade.rs index 410274b1e5..fb4f4de7f4 100644 --- a/crates/task-impls/src/upgrade.rs +++ b/crates/task-impls/src/upgrade.rs @@ -28,7 +28,7 @@ use hotshot_types::{ vote::HasViewNumber, }; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use vbs::version::StaticVersionType; use crate::{ diff --git a/crates/task-impls/src/vid.rs b/crates/task-impls/src/vid.rs index b81ec7760c..30d31dff7d 100644 --- a/crates/task-impls/src/vid.rs +++ b/crates/task-impls/src/vid.rs @@ -20,7 +20,7 @@ use hotshot_types::{ }, }; use tracing::{debug, error, info, instrument}; -use utils::result12345::Result; +use utils::anytrace::Result; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index 97ac2f7c82..46e4b11c12 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -37,7 +37,7 @@ use hotshot_types::{ #[cfg(async_executor_impl = "tokio")] use tokio::task::JoinHandle; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, diff --git 
a/crates/task-impls/src/vote_collection.rs b/crates/task-impls/src/vote_collection.rs index 1b8b05651d..4c685ca978 100644 --- a/crates/task-impls/src/vote_collection.rs +++ b/crates/task-impls/src/vote_collection.rs @@ -30,7 +30,7 @@ use hotshot_types::{ }, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; -use utils::result12345::*; +use utils::anytrace::*; use crate::{events::HotShotEvent, helpers::broadcast_event}; diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index 474bbd76b7..fc60db3064 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -16,7 +16,7 @@ use futures::future::join_all; use futures::future::try_join_all; #[cfg(async_executor_impl = "tokio")] use tokio::task::{spawn, JoinHandle}; -use utils::result12345::Result; +use utils::anytrace::Result; /// Trait for events that long-running tasks handle pub trait TaskEvent: PartialEq { diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index d914d977fa..930043e748 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -16,7 +16,7 @@ use std::{ use async_lock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard}; use committable::Commitment; use tracing::instrument; -use utils::result12345::*; +use utils::anytrace::*; use vec1::Vec1; pub use crate::utils::{View, ViewInner}; diff --git a/crates/types/src/data.rs b/crates/types/src/data.rs index 923ab4058f..3ea8566184 100644 --- a/crates/types/src/data.rs +++ b/crates/types/src/data.rs @@ -30,7 +30,7 @@ use thiserror::Error; #[cfg(async_executor_impl = "tokio")] use tokio::task::spawn_blocking; use tracing::error; -use utils::result12345::*; +use utils::anytrace::*; use vec1::Vec1; use crate::{ diff --git a/crates/types/src/message.rs b/crates/types/src/message.rs index 2f233f0af9..217b5d7578 100644 --- a/crates/types/src/message.rs +++ b/crates/types/src/message.rs @@ -19,7 +19,7 @@ use async_lock::RwLock; use cdn_proto::util::mnemonic; use 
derivative::Derivative; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use utils::result12345::*; +use utils::anytrace::*; use vbs::{ version::{StaticVersionType, Version}, BinarySerializer, Serializer, diff --git a/crates/types/src/simple_certificate.rs b/crates/types/src/simple_certificate.rs index 269f2caa9a..f7f58fd4e1 100644 --- a/crates/types/src/simple_certificate.rs +++ b/crates/types/src/simple_certificate.rs @@ -17,7 +17,7 @@ use async_lock::RwLock; use committable::{Commitment, Committable}; use ethereum_types::U256; use serde::{Deserialize, Serialize}; -use utils::result12345::*; +use utils::anytrace::*; use crate::{ data::serialize_signature2, diff --git a/crates/types/src/simple_vote.rs b/crates/types/src/simple_vote.rs index 43b0ddc524..56c98a4e63 100644 --- a/crates/types/src/simple_vote.rs +++ b/crates/types/src/simple_vote.rs @@ -10,7 +10,7 @@ use std::{fmt::Debug, hash::Hash, marker::PhantomData}; use committable::{Commitment, Committable}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use utils::result12345::*; +use utils::anytrace::*; use vbs::version::{StaticVersionType, Version}; use crate::{ diff --git a/crates/types/src/traits/election.rs b/crates/types/src/traits/election.rs index 872417754e..d22f226875 100644 --- a/crates/types/src/traits/election.rs +++ b/crates/types/src/traits/election.rs @@ -7,7 +7,7 @@ //! The election trait, used to decide which node is the leader and determine if a vote is valid. 
use std::{collections::BTreeSet, fmt::Debug, hash::Hash, num::NonZeroU64}; -use utils::result12345::Result; +use utils::anytrace::Result; use super::{network::Topic, node_implementation::NodeType}; use crate::{traits::signature_key::SignatureKey, PeerConfig}; diff --git a/crates/types/src/vote.rs b/crates/types/src/vote.rs index ccd4bdd006..1275e172e0 100644 --- a/crates/types/src/vote.rs +++ b/crates/types/src/vote.rs @@ -16,7 +16,7 @@ use committable::{Commitment, Committable}; use either::Either; use ethereum_types::U256; use tracing::error; -use utils::result12345::Result; +use utils::anytrace::Result; use crate::{ message::UpgradeLock, diff --git a/crates/utils/src/result12345.rs b/crates/utils/src/anytrace.rs similarity index 100% rename from crates/utils/src/result12345.rs rename to crates/utils/src/anytrace.rs diff --git a/crates/utils/src/result12345/macros.rs b/crates/utils/src/anytrace/macros.rs similarity index 99% rename from crates/utils/src/result12345/macros.rs rename to crates/utils/src/anytrace/macros.rs index 97894aba1a..71036d21fb 100644 --- a/crates/utils/src/result12345/macros.rs +++ b/crates/utils/src/anytrace/macros.rs @@ -153,7 +153,7 @@ macro_rules! error { pub use error; #[macro_export] -/// Log a `result12345::Error` at the corresponding level. +/// Log an `anytrace::Error` at the corresponding level. macro_rules! log { ($result:expr) => { if let Err(ref error) = $result { diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 266fa1e6d2..ba89a2efbb 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -1,4 +1,4 @@ //! General (not HotShot-specific) utilities /// Error utilities, intended to function as a replacement to `anyhow`. -pub mod result12345; +pub mod anytrace;