diff --git a/applications/minotari_node/src/commands/command/check_db.rs b/applications/minotari_node/src/commands/command/check_db.rs index 115d009f5f..8858bb39bd 100644 --- a/applications/minotari_node/src/commands/command/check_db.rs +++ b/applications/minotari_node/src/commands/command/check_db.rs @@ -48,7 +48,7 @@ impl CommandContext { let mut missing_headers = Vec::new(); print!("Searching for height: "); // We need to check every header, but not every block. - let horizon_height = meta.horizon_block_height(height); + let horizon_height = meta.pruned_height_at_given_chain_tip(height); while height > 0 { print!("{}", height); io::stdout().flush().await?; diff --git a/base_layer/common_types/src/chain_metadata.rs b/base_layer/common_types/src/chain_metadata.rs index 3d4b41335c..0ab3d7285b 100644 --- a/base_layer/common_types/src/chain_metadata.rs +++ b/base_layer/common_types/src/chain_metadata.rs @@ -25,11 +25,11 @@ use std::fmt::{Display, Error, Formatter}; use primitive_types::U256; use serde::{Deserialize, Serialize}; -use crate::types::{BlockHash, FixedHash}; +use crate::types::BlockHash; #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] pub struct ChainMetadata { - /// The current chain height, or the block number of the longest valid chain, or `None` if there is no chain + /// The current chain height, or the block number of the longest valid chain best_block_height: u64, /// The block hash of the current tip of the longest valid chain best_block_hash: BlockHash, @@ -40,7 +40,7 @@ pub struct ChainMetadata { pruning_horizon: u64, /// The height of the pruning horizon. This indicates from what height a full block can be provided /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be - /// provided. Archival nodes wil always have an `pruned_height` of zero. + /// provided. Archival nodes will always have a `pruned_height` of zero. 
pruned_height: u64, /// The total accumulated proof of work of the longest chain accumulated_difficulty: U256, @@ -67,37 +67,16 @@ impl ChainMetadata { } } - pub fn empty() -> ChainMetadata { - ChainMetadata { - best_block_height: 0, - best_block_hash: FixedHash::zero(), - pruning_horizon: 0, - pruned_height: 0, - accumulated_difficulty: 0.into(), - timestamp: 0, - } - } - /// The block height at the pruning horizon, given the chain height of the network. Typically database backends /// cannot provide any block data earlier than this point. /// Zero is returned if the blockchain still hasn't reached the pruning horizon. - pub fn horizon_block_height(&self, chain_tip: u64) -> u64 { + pub fn pruned_height_at_given_chain_tip(&self, chain_tip: u64) -> u64 { match self.pruning_horizon { 0 => 0, - horizon => chain_tip.saturating_sub(horizon), + pruning_horizon => chain_tip.saturating_sub(pruning_horizon), } } - /// Set the pruning horizon to indicate that the chain is in archival mode (i.e. a pruning horizon of zero) - pub fn archival_mode(&mut self) { - self.pruning_horizon = 0; - } - - /// Set the pruning horizon - pub fn set_pruning_horizon(&mut self, pruning_horizon: u64) { - self.pruning_horizon = pruning_horizon; - } - /// The configured number of blocks back from the tip that this database tracks. A value of 0 indicates that /// pruning mode is disabled and the node will keep full blocks from the time it was set. If pruning horizon /// was previously enabled, previously pruned blocks will remain pruned. If set from initial sync, full blocks /// The height of the pruning horizon. This indicates from what height a full block can be provided /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be - /// provided. Archival nodes wil always have an `pruned_height` of zero. + /// provided. Archival nodes will always have a `pruned_height` of zero. 
pub fn pruned_height(&self) -> u64 { self.pruned_height } @@ -143,14 +122,11 @@ impl ChainMetadata { impl Display for ChainMetadata { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - let height = self.best_block_height; - let best_block = self.best_block_hash; - let accumulated_difficulty = self.accumulated_difficulty; - writeln!(f, "Height of longest chain: {}", height)?; - writeln!(f, "Total accumulated difficulty: {}", accumulated_difficulty)?; - writeln!(f, "Best block: {}", best_block)?; + writeln!(f, "Best block height: {}", self.best_block_height)?; + writeln!(f, "Total accumulated difficulty: {}", self.accumulated_difficulty)?; + writeln!(f, "Best block hash: {}", self.best_block_hash)?; writeln!(f, "Pruning horizon: {}", self.pruning_horizon)?; - writeln!(f, "Effective pruned height: {}", self.pruned_height)?; + writeln!(f, "Pruned height: {}", self.pruned_height)?; Ok(()) } } @@ -161,33 +137,53 @@ mod test { #[test] fn horizon_block_on_default() { - let metadata = ChainMetadata::empty(); - assert_eq!(metadata.horizon_block_height(0), 0); + let metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); } #[test] fn pruned_mode() { - let mut metadata = ChainMetadata::empty(); + let mut metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; assert!(!metadata.is_pruned_node()); assert!(metadata.is_archival_node()); - metadata.set_pruning_horizon(2880); + metadata.pruning_horizon = 2880; assert!(metadata.is_pruned_node()); assert!(!metadata.is_archival_node()); - assert_eq!(metadata.horizon_block_height(0), 0); - assert_eq!(metadata.horizon_block_height(100), 0); - 
assert_eq!(metadata.horizon_block_height(2880), 0); - assert_eq!(metadata.horizon_block_height(2881), 1); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(100), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2880), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2881), 1); } #[test] fn archival_node() { - let mut metadata = ChainMetadata::empty(); - metadata.archival_mode(); + let metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; // Chain is still empty - assert_eq!(metadata.horizon_block_height(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); // When pruning horizon is zero, the horizon block is always 0, the genesis block - assert_eq!(metadata.horizon_block_height(0), 0); - assert_eq!(metadata.horizon_block_height(100), 0); - assert_eq!(metadata.horizon_block_height(2881), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(100), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2881), 0); } } diff --git a/base_layer/core/src/base_node/proto/rpc.proto b/base_layer/core/src/base_node/proto/rpc.proto index bbc6aea8b3..00532116b0 100644 --- a/base_layer/core/src/base_node/proto/rpc.proto +++ b/base_layer/core/src/base_node/proto/rpc.proto @@ -57,16 +57,20 @@ message SyncKernelsRequest { } message SyncUtxosRequest { + // Start header hash to sync UTXOs from bytes start_header_hash = 1; + // End header hash to sync UTXOs to bytes end_header_hash = 2; } -message SyncUtxosResponse { - tari.types.TransactionOutput output = 1; - bytes mined_header = 2; -} -message PrunedOutput { - bytes hash = 1; +message SyncUtxosResponse { + oneof txo { + // The unspent transaction output + tari.types.TransactionOutput output = 1; + // If the TXO is 
spent, the commitment bytes are returned + bytes commitment = 2; + } + bytes mined_header = 3; } message SyncUtxosByBlockRequest { diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs index fd0b80007a..0620e84bc3 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs @@ -57,26 +57,33 @@ impl HorizonStateSync { Err(err) => return err.into(), }; + let sync_peers = &mut self.sync_peers; + // Order sync peers according to accumulated difficulty + sync_peers.sort_by(|a, b| { + b.claimed_chain_metadata() + .accumulated_difficulty() + .cmp(&a.claimed_chain_metadata().accumulated_difficulty()) + }); + + // Target horizon sync height based on the last header we have synced let last_header = match shared.db.fetch_last_header().await { Ok(h) => h, Err(err) => return err.into(), }; + let target_horizon_sync_height = local_metadata.pruned_height_at_given_chain_tip(last_header.height); - let horizon_sync_height = local_metadata.horizon_block_height(last_header.height); - if local_metadata.pruned_height() >= horizon_sync_height { - info!(target: LOG_TARGET, "Horizon state was already synchronized."); + // Determine if we need to sync horizon state + if local_metadata.pruned_height() >= target_horizon_sync_height { + info!(target: LOG_TARGET, "Horizon state is already synchronized."); return StateEvent::HorizonStateSynchronized; } - - // We're already synced because we have full blocks higher than our target pruned height - if local_metadata.best_block_height() >= horizon_sync_height { + if local_metadata.best_block_height() >= target_horizon_sync_height { info!( target: LOG_TARGET, - "Tip height is higher than our pruned height. Horizon state is already synchronized." + "Our tip height is higher than our target pruned height. 
Horizon state is already synchronized." ); return StateEvent::HorizonStateSynchronized; } - let sync_peers = &mut self.sync_peers; let db = shared.db.clone(); let config = shared.config.blockchain_sync_config.clone(); @@ -90,7 +97,7 @@ impl HorizonStateSync { connectivity, rules, sync_peers, - horizon_sync_height, + target_horizon_sync_height, prover, validator, ); diff --git a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs index 6d01bf2d4d..e9bfbbde52 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs @@ -62,63 +62,71 @@ impl DecideNextSync { ); if local_metadata.pruning_horizon() > 0 { - let last_header = match shared.db.fetch_last_header().await { - Ok(h) => h, - Err(err) => return err.into(), - }; - - let horizon_sync_height = local_metadata.horizon_block_height(last_header.height); // Filter sync peers that claim to be able to provide blocks up until our pruned height - let sync_peers = self - .sync_peers + debug!(target: LOG_TARGET, "Local metadata: {}", local_metadata); + let mut sync_peers = self.sync_peers.clone(); + let sync_peers = sync_peers .drain(..) .filter(|sync_peer| { let remote_metadata = sync_peer.claimed_chain_metadata(); - remote_metadata.best_block_height() >= horizon_sync_height + debug!(target: LOG_TARGET, "Peer metadata: {}", remote_metadata); + let remote_is_archival_node = remote_metadata.pruned_height() == 0; + let general_sync_conditions = + // Must be able to provide the correct amount of full blocks past the pruned height (i.e. 
the + // pruning horizon), otherwise our horizon spec will not be met + remote_metadata.best_block_height().saturating_sub(remote_metadata.pruned_height()) >= + local_metadata.pruning_horizon() && + // Must have a better blockchain tip than us + remote_metadata.best_block_height() > local_metadata.best_block_height() && + // Must be able to provide full blocks from the height we need detailed information + remote_metadata.pruned_height() <= local_metadata.best_block_height(); + let sync_from_prune_node = !remote_is_archival_node && + // Must have done initial sync (to detect genesis TXO spends) + local_metadata.best_block_height() > 0; + general_sync_conditions && (remote_is_archival_node || sync_from_prune_node) }) .collect::>(); if sync_peers.is_empty() { warn!( target: LOG_TARGET, - "Unable to find any appropriate sync peers for horizon sync" + "Unable to find any appropriate sync peers for horizon sync, trying for block sync" ); - return Continue; - } - - debug!( - target: LOG_TARGET, - "Proceeding to horizon sync with {} sync peer(s) with a best latency of {:.2?}", - sync_peers.len(), - sync_peers.first().map(|p| p.latency()).unwrap_or_default() - ); - ProceedToHorizonSync(sync_peers) - } else { - // Filter sync peers that are able to provide full blocks from our current tip - let sync_peers = self - .sync_peers - .drain(..) 
- .filter(|sync_peer| { - sync_peer.claimed_chain_metadata().pruned_height() <= local_metadata.best_block_height() - }) - .collect::>(); - - if sync_peers.is_empty() { - warn!( + } else { + debug!( target: LOG_TARGET, - "Unable to find any appropriate sync peers for block sync" + "Proceeding to horizon sync with {} sync peer(s) with a best latency of {:.2?}", + sync_peers.len(), + sync_peers.first().map(|p| p.latency()).unwrap_or_default() ); - return Continue; + return ProceedToHorizonSync(sync_peers); } + } + + // This is not a pruned node or horizon sync is not possible, try for block sync + + // Filter sync peers that are able to provide full blocks from our current tip + let sync_peers = self + .sync_peers + .drain(..) + .filter(|sync_peer| { + let remote_metadata = sync_peer.claimed_chain_metadata(); + remote_metadata.pruned_height() <= local_metadata.best_block_height() + }) + .collect::>(); - debug!( - target: LOG_TARGET, - "Proceeding to block sync with {} sync peer(s) with a best latency of {:.2?}", - sync_peers.len(), - sync_peers.first().map(|p| p.latency()).unwrap_or_default() - ); - ProceedToBlockSync(sync_peers) + if sync_peers.is_empty() { + warn!(target: LOG_TARGET, "Unable to find any appropriate sync peers for block sync"); + return Continue; } + + debug!( + target: LOG_TARGET, + "Proceeding to block sync with {} sync peer(s) with a best latency of {:.2?}", + sync_peers.len(), + sync_peers.first().map(|p| p.latency()).unwrap_or_default() + ); + ProceedToBlockSync(sync_peers) } } diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs index 4f1a40ff89..6aff7e4510 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs @@ -30,6 +30,7 @@ use tari_comms::{ }; use tari_crypto::errors::RangeProofError; use tari_mmr::{error::MerkleMountainRangeError, sparse_merkle_tree::SMTError}; 
+use tari_utilities::ByteArrayError; use thiserror::Error; use tokio::task; @@ -97,6 +98,14 @@ pub enum HorizonSyncError { PeerNotFound, #[error("Sparse Merkle Tree error: {0}")] SMTError(#[from] SMTError), + #[error("ByteArrayError error: {0}")] + ByteArrayError(String), +} + +impl From for HorizonSyncError { + fn from(e: ByteArrayError) -> Self { + HorizonSyncError::ByteArrayError(e.to_string()) + } } impl From for HorizonSyncError { @@ -142,7 +151,8 @@ impl HorizonSyncError { err @ HorizonSyncError::ConversionError(_) | err @ HorizonSyncError::MerkleMountainRangeError(_) | err @ HorizonSyncError::FixedHashSizeError(_) | - err @ HorizonSyncError::TransactionError(_) => Some(BanReason { + err @ HorizonSyncError::TransactionError(_) | + err @ HorizonSyncError::ByteArrayError(_) => Some(BanReason { reason: format!("{}", err), ban_duration: BanPeriod::Long, }), diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs index ae5bb28bec..63181336d6 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs @@ -43,6 +43,7 @@ use crate::{ hooks::Hooks, horizon_state_sync::{HorizonSyncInfo, HorizonSyncStatus}, rpc, + rpc::BaseNodeSyncRpcClient, BlockchainSyncConfig, SyncPeer, }, @@ -50,13 +51,15 @@ use crate::{ chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree}, common::{rolling_avg::RollingAverageTime, BanPeriod}, consensus::ConsensusManager, - proto::base_node::{SyncKernelsRequest, SyncUtxosRequest, SyncUtxosResponse}, + proto::base_node::{sync_utxos_response::Txo, SyncKernelsRequest, SyncUtxosRequest, SyncUtxosResponse}, transactions::transaction_components::{ transaction_output::batch_verify_range_proofs, + OutputType, TransactionKernel, TransactionOutput, }, validation::{helpers, FinalHorizonStateValidation}, + OutputSmt, 
PrunedKernelMmr, }; @@ -129,7 +132,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { target: LOG_TARGET, "Preparing database for horizon sync to height #{}", self.horizon_sync_height ); - let header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { + let to_header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { ChainStorageError::ValueNotFound { entity: "Header", field: "height", @@ -139,7 +142,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let mut latency_increases_counter = 0; loop { - match self.sync(&header).await { + match self.sync(&to_header).await { Ok(()) => return Ok(()), Err(err @ HorizonSyncError::AllSyncPeersExceedLatency) => { // If we don't have many sync peers to select from, return the listening state and see if we can get @@ -167,7 +170,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } } - async fn sync(&mut self, header: &BlockHeader) -> Result<(), HorizonSyncError> { + async fn sync(&mut self, to_header: &BlockHeader) -> Result<(), HorizonSyncError> { let sync_peer_node_ids = self.sync_peers.iter().map(|p| p.node_id()).cloned().collect::>(); info!( target: LOG_TARGET, @@ -176,7 +179,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ); let mut latency_counter = 0usize; for node_id in sync_peer_node_ids { - match self.connect_and_attempt_sync(&node_id, header).await { + match self.connect_and_attempt_sync(&node_id, to_header).await { Ok(_) => return Ok(()), // Try another peer Err(err) => { @@ -213,8 +216,27 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { async fn connect_and_attempt_sync( &mut self, node_id: &NodeId, - header: &BlockHeader, + to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { + // Connect + let (mut client, sync_peer) = self.connect_sync_peer(node_id).await?; + + // Perform horizon sync + debug!(target: 
LOG_TARGET, "Check if pruning is needed"); + self.prune_if_needed().await?; + self.sync_kernels_and_outputs(sync_peer.clone(), &mut client, to_header) + .await?; + + // Validate and finalize horizon sync + self.finalize_horizon_sync(&sync_peer).await?; + + Ok(()) + } + + async fn connect_sync_peer( + &mut self, + node_id: &NodeId, + ) -> Result<(BaseNodeSyncRpcClient, SyncPeer), HorizonSyncError> { let peer_index = self .get_sync_peer_index(node_id) .ok_or(HorizonSyncError::PeerNotFound)?; @@ -246,14 +268,9 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { max_latency: self.max_latency, }); } - debug!(target: LOG_TARGET, "Sync peer latency is {:.2?}", latency); - let sync_peer = self.sync_peers[peer_index].clone(); - - self.begin_sync(sync_peer.clone(), &mut client, header).await?; - self.finalize_horizon_sync(&sync_peer).await?; - Ok(()) + Ok((client, self.sync_peers[peer_index].clone())) } async fn dial_sync_peer(&self, node_id: &NodeId) -> Result { @@ -269,30 +286,100 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(conn) } - async fn begin_sync( + async fn sync_kernels_and_outputs( &mut self, sync_peer: SyncPeer, client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { - debug!(target: LOG_TARGET, "Initializing"); - self.initialize().await?; - + // Note: We do not need to rewind kernels if the sync fails due to it being validated when inserted into + // the database. Furthermore, these kernels will also be successfully removed when we need to rewind + // the blockchain for whatever reason. 
debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(sync_peer.clone(), client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); - self.synchronize_outputs(sync_peer, client, to_header).await?; - Ok(()) + match self.synchronize_outputs(sync_peer, client, to_header).await { + Ok(_) => Ok(()), + Err(err) => { + // We need to clean up the outputs + let _ = self.clean_up_failed_output_sync(to_header).await; + Err(err) + }, + } } - async fn initialize(&mut self) -> Result<(), HorizonSyncError> { - let db = self.db(); - let local_metadata = db.get_chain_metadata().await?; + /// We clean up a failed output sync attempt and ignore any errors that occur during the clean up process. + async fn clean_up_failed_output_sync(&mut self, to_header: &BlockHeader) { + let tip_header = if let Ok(header) = self.db.fetch_tip_header().await { + header + } else { + return; + }; + let db = self.db().clone(); + let mut txn = db.write_transaction(); + let mut current_header = to_header.clone(); + loop { + if let Ok(outputs) = self.db.fetch_outputs_in_block(current_header.hash()).await { + for (count, output) in (1..=outputs.len()).zip(outputs.iter()) { + // Note: We do not need to clean up the SMT as it was not saved in the database yet, however, we + // need to clean up the outputs + txn.prune_output_from_all_dbs( + output.hash(), + output.commitment.clone(), + output.features.output_type, + ); + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, + "Clean up failed sync - prune output from all dbs for header '{}': {}", + current_header.hash(), e + ); + } + if count % 100 == 0 || count == outputs.len() { + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, + "Clean up failed sync - commit prune outputs for header '{}': {}", + current_header.hash(), e + ); + } + } + } + } + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, "Clean up failed output sync - commit delete kernels for header '{}': 
{}", + current_header.hash(), e + ); + } + if let Ok(header) = db.fetch_header_by_block_hash(current_header.prev_hash).await { + if let Some(previous_header) = header { + current_header = previous_header; + } else { + warn!(target: LOG_TARGET, "Could not clean up failed output sync, previous_header link missing frm db"); + break; + } + } else { + warn!( + target: LOG_TARGET, + "Could not clean up failed output sync, header '{}' not in db", + current_header.prev_hash.to_hex() + ); + break; + } + if ¤t_header.hash() == tip_header.hash() { + debug!(target: LOG_TARGET, "Finished cleaning up failed output sync"); + break; + } + } + } + async fn prune_if_needed(&mut self) -> Result<(), HorizonSyncError> { + let local_metadata = self.db.get_chain_metadata().await?; let new_prune_height = cmp::min(local_metadata.best_block_height(), self.horizon_sync_height); if local_metadata.pruned_height() < new_prune_height { debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); - db.prune_to_height(new_prune_height).await?; + self.db.prune_to_height(new_prune_height).await?; } Ok(()) @@ -328,7 +415,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { "Requesting kernels from {} to {} ({} remaining)", local_num_kernels, remote_num_kernels, - remote_num_kernels - local_num_kernels, + remote_num_kernels.saturating_sub(local_num_kernels), ); let latency = client.get_last_request_latency(); @@ -374,7 +461,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } txn.insert_kernel_via_horizon_sync(kernel, *current_header.hash(), mmr_position); - if mmr_position == current_header.header().kernel_mmr_size - 1 { + if mmr_position == current_header.header().kernel_mmr_size.saturating_sub(1) { let num_kernels = kernel_hashes.len(); debug!( target: LOG_TARGET, @@ -425,9 +512,9 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { num_kernels, mmr_position + 1, end, - end - (mmr_position 
+ 1) + end.saturating_sub(mmr_position + 1) ); - if mmr_position < end - 1 { + if mmr_position < end.saturating_sub(1) { current_header = db.fetch_chain_header(current_header.height() + 1).await?; } } @@ -471,6 +558,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + // Synchronize outputs, returning true if any keys were deleted from the output SMT. #[allow(clippy::too_many_lines)] async fn synchronize_outputs( &mut self, @@ -479,9 +567,26 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { info!(target: LOG_TARGET, "Starting output sync from peer {}", sync_peer); + let db = self.db().clone(); + let tip_header = db.fetch_tip_header().await?; - let remote_num_outputs = to_header.output_smt_size; - self.num_outputs = remote_num_outputs; + // Estimate the number of outputs to be downloaded; this cannot be known exactly until the sync is complete. + let mut current_header = to_header.clone(); + self.num_outputs = 0; + loop { + current_header = + if let Some(previous_header) = db.fetch_header_by_block_hash(current_header.prev_hash).await? 
{ + self.num_outputs += current_header + .output_smt_size + .saturating_sub(previous_header.output_smt_size); + previous_header + } else { + break; + }; + if ¤t_header.hash() == tip_header.hash() { + break; + } + } let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { current: 0, @@ -490,86 +595,126 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }); self.hooks.call_on_progress_horizon_hooks(info); - debug!( - target: LOG_TARGET, - "Requesting outputs from {}", - remote_num_outputs, - ); - let db = self.db().clone(); - - let end = remote_num_outputs; - let end_hash = to_header.hash(); - let start_hash = db.fetch_chain_header(1).await?; - let gen_block = db.fetch_chain_header(0).await?; - let latency = client.get_last_request_latency(); debug!( target: LOG_TARGET, - "Initiating output sync with peer `{}` (latency = {}ms)", + "Initiating output sync with peer `{}`, requesting ~{} outputs, tip_header height `{}`, \ + last_chain_header height `{}` (latency = {}ms)", sync_peer.node_id(), - latency.unwrap_or_default().as_millis() + self.num_outputs, + tip_header.height(), + db.fetch_last_chain_header().await?.height(), + latency.unwrap_or_default().as_millis(), ); + let start_chain_header = db.fetch_chain_header(tip_header.height() + 1).await?; let req = SyncUtxosRequest { - start_header_hash: start_hash.hash().to_vec(), - end_header_hash: end_hash.to_vec(), + start_header_hash: start_chain_header.hash().to_vec(), + end_header_hash: to_header.hash().to_vec(), }; - let mut output_stream = client.sync_utxos(req).await?; let mut txn = db.write_transaction(); - let mut utxo_counter = gen_block.header().output_smt_size; + let mut utxo_counter = 0u64; + let mut stxo_counter = 0u64; let timer = Instant::now(); let mut output_smt = db.fetch_tip_smt().await?; let mut last_sync_timer = Instant::now(); let mut avg_latency = RollingAverageTime::new(20); + let mut prev_header: Option = None; + let mut 
inputs_to_delete = Vec::new(); while let Some(response) = output_stream.next().await { let latency = last_sync_timer.elapsed(); avg_latency.add_sample(latency); let res: SyncUtxosResponse = response?; - utxo_counter += 1; - if utxo_counter > end { - return Err(HorizonSyncError::IncorrectResponse( - "Peer sent too many outputs".to_string(), - )); - } - let output = res - .output - .ok_or_else(|| HorizonSyncError::IncorrectResponse("Peer sent no transaction output data".into()))?; - let output_header = FixedHash::try_from(res.mined_header) + let output_header_hash = FixedHash::try_from(res.mined_header) .map_err(|_| HorizonSyncError::IncorrectResponse("Peer sent no mined header".into()))?; + // We only verify the SMT per header for consecutive syncs + if tip_header.height() > 0 { + if let Some(header) = prev_header.clone() { + if header.hash() != output_header_hash { + // Verify the SMT for the previous header + HorizonStateSynchronization::::check_output_smt_root_hash(&mut output_smt, &header)?; + } + } + } let current_header = self .db() - .fetch_header_by_block_hash(output_header) + .fetch_header_by_block_hash(output_header_hash) .await? .ok_or_else(|| { HorizonSyncError::IncorrectResponse("Peer sent mined header we do not know of".into()) })?; + prev_header = Some(current_header.clone()); + + let proto_output = res + .txo + .ok_or_else(|| HorizonSyncError::IncorrectResponse("Peer sent no transaction output data".into()))?; + match proto_output { + Txo::Output(output) => { + utxo_counter += 1; + // Increase the estimate number of outputs to be downloaded (for display purposes only). 
+ if utxo_counter >= self.num_outputs { + self.num_outputs = utxo_counter + u64::from(current_header.hash() != to_header.hash()); + } - let constants = self.rules.consensus_constants(current_header.height).clone(); - let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; - trace!( + let constants = self.rules.consensus_constants(current_header.height).clone(); + let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; + debug!( target: LOG_TARGET, - "UTXO {} received from sync peer", + "UTXO `{}` received from sync peer ({} of {})", output.hash(), - ); - helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; - - batch_verify_range_proofs(&self.prover, &[&output])?; - let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; - let smt_node = ValueHash::try_from(output.smt_hash(current_header.height).as_slice())?; - output_smt.insert(smt_key, smt_node)?; - txn.insert_output_via_horizon_sync( - output, - current_header.hash(), - current_header.height, - current_header.timestamp.as_u64(), - ); + utxo_counter, + self.num_outputs, + ); + helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; + + batch_verify_range_proofs(&self.prover, &[&output])?; + let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.smt_hash(current_header.height).as_slice())?; + output_smt.insert(smt_key, smt_node)?; + txn.insert_output_via_horizon_sync( + output, + current_header.hash(), + current_header.height, + current_header.timestamp.as_u64(), + ); - // we have checked the range proof, and we have checked that the linked to header exists. - txn.commit().await?; + // We have checked the range proof, and we have checked that the linked to header exists. 
+ txn.commit().await?; + }, + Txo::Commitment(commitment_bytes) => { + stxo_counter += 1; + + let commitment = Commitment::from_canonical_bytes(commitment_bytes.as_slice())?; + match self + .db() + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .await? + { + Some(output_hash) => { + debug!( + target: LOG_TARGET, + "STXO hash `{}` received from sync peer ({})", + output_hash, + stxo_counter, + ); + let smt_key = NodeKey::try_from(commitment_bytes.as_slice())?; + output_smt.delete(&smt_key)?; + // This will only be committed once the SMT has been verified due to rewind difficulties if + // we need to abort the sync + inputs_to_delete.push((output_hash, commitment)); + }, + None => { + return Err(HorizonSyncError::IncorrectResponse( + "Peer sent unknown commitment hash".into(), + )) + }, + } + }, + } if utxo_counter % 100 == 0 { let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { @@ -583,33 +728,45 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { sync_peer.add_sample(last_sync_timer.elapsed()); last_sync_timer = Instant::now(); } - if utxo_counter != end { - return Err(HorizonSyncError::IncorrectResponse( - "Peer did not send enough outputs".to_string(), - )); + HorizonStateSynchronization::::check_output_smt_root_hash(&mut output_smt, to_header)?; + + // Commit in chunks to avoid locking the database for too long + let inputs_to_delete_len = inputs_to_delete.len(); + for (count, (output_hash, commitment)) in (1..=inputs_to_delete_len).zip(inputs_to_delete.into_iter()) { + txn.prune_output_from_all_dbs(output_hash, commitment, OutputType::default()); + if count % 100 == 0 || count == inputs_to_delete_len { + txn.commit().await?; + } } + // This has a very low probability of failure + db.set_tip_smt(output_smt).await?; debug!( target: LOG_TARGET, - "finished syncing UTXOs: {} downloaded in {:.2?}", - end, + "Finished syncing TXOs: {} unspent and {} spent downloaded in {:.2?}", + 
utxo_counter, + stxo_counter, timer.elapsed() ); + Ok(()) + } + + // Helper function to check the output SMT root hash against the expected root hash. + fn check_output_smt_root_hash(output_smt: &mut OutputSmt, header: &BlockHeader) -> Result<(), HorizonSyncError> { let root = FixedHash::try_from(output_smt.hash().as_slice())?; - if root != to_header.output_mr { + if root != header.output_mr { warn!( target: LOG_TARGET, - "Final target root(#{}) did not match expected (#{})", - to_header.output_mr.to_hex(), + "Target root(#{}) did not match expected (#{})", + header.output_mr.to_hex(), root.to_hex(), ); return Err(HorizonSyncError::InvalidMrRoot { mr_tree: "UTXO SMT".to_string(), - at_height: to_header.height, - expected_hex: to_header.output_mr.to_hex(), + at_height: header.height, + expected_hex: header.output_mr.to_hex(), actual_hex: root.to_hex(), }); } - db.set_tip_smt(output_smt).await?; Ok(()) } @@ -693,7 +850,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { curr_header.height(), curr_header.header().kernel_mmr_size, prev_kernel_mmr, - curr_header.header().kernel_mmr_size - 1 + curr_header.header().kernel_mmr_size.saturating_sub(1) ); trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index 8b03e476a4..f6c992f0a9 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -20,7 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{convert::TryInto, sync::Arc, time::Instant}; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, + time::Instant, +}; use log::*; use tari_comms::{ @@ -28,7 +32,7 @@ use tari_comms::{ protocol::rpc::{Request, RpcStatus, RpcStatusResultExt}, utils, }; -use tari_utilities::hex::Hex; +use tari_utilities::{hex::Hex, ByteArray}; use tokio::{sync::mpsc, task}; #[cfg(feature = "metrics")] @@ -36,7 +40,8 @@ use crate::base_node::metrics; use crate::{ blocks::BlockHeader, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, - proto::base_node::{SyncUtxosRequest, SyncUtxosResponse}, + proto, + proto::base_node::{sync_utxos_response::Txo, SyncUtxosRequest, SyncUtxosResponse}, }; const LOG_TARGET: &str = "c::base_node::sync_rpc::sync_utxo_task"; @@ -70,7 +75,7 @@ where B: BlockchainBackend + 'static .fetch_header_by_block_hash(start_hash) .await .rpc_status_internal_error(LOG_TARGET)? - .ok_or_else(|| RpcStatus::not_found("Start header hash is was not found"))?; + .ok_or_else(|| RpcStatus::not_found("Start header hash was not found"))?; let end_hash = msg .end_header_hash @@ -83,7 +88,7 @@ where B: BlockchainBackend + 'static .fetch_header_by_block_hash(end_hash) .await .rpc_status_internal_error(LOG_TARGET)? 
- .ok_or_else(|| RpcStatus::not_found("End header hash is was not found"))?; + .ok_or_else(|| RpcStatus::not_found("End header hash was not found"))?; if start_header.height > end_header.height { return Err(RpcStatus::bad_request(&format!( "Start header height({}) cannot be greater than the end header height({})", @@ -123,78 +128,138 @@ where B: BlockchainBackend + 'static ) -> Result<(), RpcStatus> { debug!( target: LOG_TARGET, - "Starting stream task with current_header: {}, end_header: {},", + "Starting stream task with current_header: {}, end_header: {}", current_header.hash().to_hex(), end_header.hash().to_hex(), ); + let start_header = current_header.clone(); loop { let timer = Instant::now(); let current_header_hash = current_header.hash(); - debug!( target: LOG_TARGET, - "current header = {} ({})", + "Streaming TXO(s) for block #{} ({})", current_header.height, current_header_hash.to_hex() ); - if tx.is_closed() { - debug!( - target: LOG_TARGET, - "Peer '{}' exited UTXO sync session early", self.peer_node_id - ); + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); break; } let outputs_with_statuses = self .db - .fetch_outputs_in_block_with_spend_state(current_header.hash(), Some(end_header.hash())) + .fetch_outputs_in_block_with_spend_state(current_header_hash, Some(end_header.hash())) .await .rpc_status_internal_error(LOG_TARGET)?; + if tx.is_closed() { + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); + break; + } + + let mut outputs = Vec::with_capacity(outputs_with_statuses.len()); + for (output, spent) in outputs_with_statuses { + if !spent { + match proto::types::TransactionOutput::try_from(output.clone()) { + Ok(tx_ouput) => { + trace!( + target: LOG_TARGET, + "Unspent TXO (commitment '{}') to peer", + output.commitment.to_hex() + ); + outputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Output(tx_ouput)), + mined_header: current_header_hash.to_vec(), + })); + }, + 
Err(e) => { + return Err(RpcStatus::general(&format!( + "Output '{}' RPC conversion error ({})", + output.hash().to_hex(), + e + ))) + }, + } + } + } debug!( target: LOG_TARGET, - "Streaming UTXO(s) for block #{}.", + "Adding {} outputs in response for block #{} '{}'", outputs.len(), current_header.height, + current_header_hash ); + + let inputs_in_block = self + .db + .fetch_inputs_in_block(current_header_hash) + .await + .rpc_status_internal_error(LOG_TARGET)?; if tx.is_closed() { - debug!( - target: LOG_TARGET, - "Peer '{}' exited UTXO sync session early", self.peer_node_id - ); + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); break; } - let utxos = outputs_with_statuses - .into_iter() - .filter_map(|(output, spent)| { - // We only send unspent utxos - if spent { - None - } else { - match output.try_into() { - Ok(tx_ouput) => Some(Ok(SyncUtxosResponse { - output: Some(tx_ouput), - mined_header: current_header.hash().to_vec(), - })), - Err(err) => Some(Err(err)), - } - } - }) - .collect::, String>>() - .map_err(|err| RpcStatus::bad_request(&err))? - .into_iter() - .map(Ok); + let mut inputs = Vec::with_capacity(inputs_in_block.len()); + for input in inputs_in_block { + let output_from_current_tranche = if let Some(mined_info) = self + .db + .fetch_output(input.output_hash()) + .await + .rpc_status_internal_error(LOG_TARGET)? 
+ { + mined_info.mined_height >= start_header.height + } else { + false + }; + + if output_from_current_tranche { + trace!(target: LOG_TARGET, "Spent TXO (hash '{}') not sent to peer", input.output_hash().to_hex()); + } else { + let input_commitment = match self.db.fetch_output(input.output_hash()).await { + Ok(Some(o)) => o.output.commitment, + Ok(None) => { + return Err(RpcStatus::general(&format!( + "Mined info for input '{}' not found", + input.output_hash().to_hex() + ))) + }, + Err(e) => { + return Err(RpcStatus::general(&format!( + "Input '{}' not found ({})", + input.output_hash().to_hex(), + e + ))) + }, + }; + trace!(target: LOG_TARGET, "Spent TXO (commitment '{}') to peer", input_commitment.to_hex()); + inputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Commitment(input_commitment.as_bytes().to_vec())), + mined_header: current_header_hash.to_vec(), + })); + } + } + debug!( + target: LOG_TARGET, + "Adding {} inputs in response for block #{} '{}'", inputs.len(), + current_header.height, + current_header_hash + ); + + let mut txos = Vec::with_capacity(outputs.len() + inputs.len()); + txos.append(&mut outputs); + txos.append(&mut inputs); + let txos = txos.into_iter(); // Ensure task stops if the peer prematurely stops their RPC session - let utxos_len = utxos.len(); - if utils::mpsc::send_all(tx, utxos).await.is_err() { + let txos_len = txos.len(); + if utils::mpsc::send_all(tx, txos).await.is_err() { break; } debug!( target: LOG_TARGET, - "Streamed {} utxos in {:.2?} (including stream backpressure)", - utxos_len, + "Streamed {} TXOs in {:.2?} (including stream backpressure)", + txos_len, timer.elapsed() ); @@ -217,7 +282,7 @@ where B: BlockchainBackend + 'static debug!( target: LOG_TARGET, - "UTXO sync completed to Header hash = {}", + "TXO sync completed to Header hash = {}", current_header.hash().to_hex() ); diff --git a/base_layer/core/src/base_node/sync/sync_peer.rs b/base_layer/core/src/base_node/sync/sync_peer.rs index 70d9b83df5..52877c627c 100644 
--- a/base_layer/core/src/base_node/sync/sync_peer.rs +++ b/base_layer/core/src/base_node/sync/sync_peer.rs @@ -135,6 +135,8 @@ mod test { use super::*; mod sort_by_latency { + use primitive_types::U256; + use tari_common_types::types::FixedHash; use tari_comms::types::{CommsPublicKey, CommsSecretKey}; use tari_crypto::keys::{PublicKey, SecretKey}; @@ -147,7 +149,12 @@ mod test { let pk = CommsPublicKey::from_secret_key(&sk); let node_id = NodeId::from_key(&pk); let latency_option = latency.map(|latency| Duration::from_millis(latency as u64)); - PeerChainMetadata::new(node_id, ChainMetadata::empty(), latency_option).into() + PeerChainMetadata::new( + node_id, + ChainMetadata::new(0, FixedHash::zero(), 0, 0, U256::zero(), 0), + latency_option, + ) + .into() } #[test] diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index e108dae80a..5d15a9b668 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -26,7 +26,7 @@ use primitive_types::U256; use rand::{rngs::OsRng, RngCore}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{BlockHash, Commitment, HashOutput, PublicKey, Signature}, }; use tari_utilities::epoch_time::EpochTime; @@ -59,9 +59,10 @@ use crate::{ }, common::rolling_vec::RollingVec, proof_of_work::{PowAlgorithm, TargetDifficultyWindow}, - transactions::transaction_components::{TransactionKernel, TransactionOutput}, + transactions::transaction_components::{OutputType, TransactionInput, TransactionKernel, TransactionOutput}, OutputSmt, }; + const LOG_TARGET: &str = "c::bn::async_db"; fn trace_log(name: &str, f: F) -> R @@ -154,15 +155,23 @@ impl AsyncBlockchainDb { //---------------------------------- TXO --------------------------------------------// + make_async_fn!(fetch_output(output_hash: HashOutput) -> Option, "fetch_output"); + + 
make_async_fn!(fetch_input(output_hash: HashOutput) -> Option, "fetch_input"); + + make_async_fn!(fetch_unspent_output_hash_by_commitment(commitment: Commitment) -> Option, "fetch_unspent_output_by_commitment"); + make_async_fn!(fetch_outputs_with_spend_status_at_tip(hashes: Vec) -> Vec>, "fetch_outputs_with_spend_status_at_tip"); make_async_fn!(fetch_outputs_mined_info(hashes: Vec) -> Vec>, "fetch_outputs_mined_info"); make_async_fn!(fetch_inputs_mined_info(hashes: Vec) -> Vec>, "fetch_inputs_mined_info"); - make_async_fn!(fetch_outputs_in_block_with_spend_state(hash: HashOutput, spend_header: Option) -> Vec<(TransactionOutput, bool)>, "fetch_outputs_in_block_with_spend_state"); + make_async_fn!(fetch_outputs_in_block_with_spend_state(header_hash: HashOutput, spend_status_at_header: Option) -> Vec<(TransactionOutput, bool)>, "fetch_outputs_in_block_with_spend_state"); - make_async_fn!(fetch_outputs_in_block(hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + make_async_fn!(fetch_outputs_in_block(header_hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + + make_async_fn!(fetch_inputs_in_block(header_hash: HashOutput) -> Vec, "fetch_inputs_in_block"); make_async_fn!(utxo_count() -> usize, "utxo_count"); @@ -350,6 +359,22 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } + pub fn prune_output_from_all_dbs( + &mut self, + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + ) -> &mut Self { + self.transaction + .prune_output_from_all_dbs(output_hash, commitment, output_type); + self + } + + pub fn delete_all_kernerls_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.transaction.delete_all_kernerls_in_block(block_hash); + self + } + pub fn update_block_accumulated_data_via_horizon_sync( &mut self, header_hash: HashOutput, diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index d291a136a6..895982a371 100644 --- 
a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -3,7 +3,7 @@ use tari_common_types::{ chain_metadata::ChainMetadata, - types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{Commitment, HashOutput, PublicKey, Signature}, }; use super::TemplateRegistrationEntry; @@ -91,7 +91,7 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option, ) -> Result, ChainStorageError>; /// Fetch a specific output. Returns the output diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index e3e4ab071d..b8aa04e586 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -383,12 +383,24 @@ where B: BlockchainBackend db.fetch_chain_metadata() } - pub fn fetch_unspent_output_by_commitment( + /// Returns a copy of the current output mined info + pub fn fetch_output(&self, output_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_output(&output_hash) + } + + /// Returns a copy of the current input mined info + pub fn fetch_input(&self, output_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_input(&output_hash) + } + + pub fn fetch_unspent_output_hash_by_commitment( &self, - commitment: &Commitment, + commitment: Commitment, ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_unspent_output_hash_by_commitment(commitment) + db.fetch_unspent_output_hash_by_commitment(&commitment) } /// Return a list of matching utxos, with each being `None` if not found. 
If found, the transaction @@ -456,16 +468,21 @@ where B: BlockchainBackend pub fn fetch_outputs_in_block_with_spend_state( &self, - hash: HashOutput, - spend_status_at_header: Option, + header_hash: HashOutput, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_outputs_in_block_with_spend_state(&hash, spend_status_at_header) + db.fetch_outputs_in_block_with_spend_state(&header_hash, spend_status_at_header) + } + + pub fn fetch_outputs_in_block(&self, header_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_outputs_in_block(&header_hash) } - pub fn fetch_outputs_in_block(&self, hash: HashOutput) -> Result, ChainStorageError> { + pub fn fetch_inputs_in_block(&self, header_hash: HashOutput) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_outputs_in_block(&hash) + db.fetch_inputs_in_block(&header_hash) } /// Returns the number of UTXOs in the current unspent set @@ -2331,7 +2348,7 @@ fn find_strongest_orphan_tip( // block height will also be discarded. 
fn cleanup_orphans(db: &mut T, orphan_storage_capacity: usize) -> Result<(), ChainStorageError> { let metadata = db.fetch_chain_metadata()?; - let horizon_height = metadata.horizon_block_height(metadata.best_block_height()); + let horizon_height = metadata.pruned_height_at_given_chain_tip(metadata.best_block_height()); db.delete_oldest_orphans(horizon_height, orphan_storage_capacity) } diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 6ca0d7bf52..8ad752a8ee 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -33,7 +33,7 @@ use tari_utilities::hex::Hex; use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{error::ChainStorageError, HorizonData, Reorg}, - transactions::transaction_components::{TransactionKernel, TransactionOutput}, + transactions::transaction_components::{OutputType, TransactionKernel, TransactionOutput}, OutputSmt, }; @@ -132,6 +132,26 @@ impl DbTransaction { self } + pub fn prune_output_from_all_dbs( + &mut self, + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + ) -> &mut Self { + self.operations.push(WriteOperation::PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + }); + self + } + + pub fn delete_all_kernerls_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.operations + .push(WriteOperation::DeleteAllKernelsInBlock { block_hash }); + self + } + pub fn delete_all_inputs_in_block(&mut self, block_hash: BlockHash) -> &mut Self { self.operations .push(WriteOperation::DeleteAllInputsInBlock { block_hash }); @@ -304,6 +324,14 @@ pub enum WriteOperation { PruneOutputsSpentAtHash { block_hash: BlockHash, }, + PruneOutputFromAllDbs { + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + }, + DeleteAllKernelsInBlock { + block_hash: 
BlockHash, + }, DeleteAllInputsInBlock { block_hash: BlockHash, }, @@ -387,6 +415,18 @@ impl fmt::Display for WriteOperation { write!(f, "Update Block data for block {}", header_hash) }, PruneOutputsSpentAtHash { block_hash } => write!(f, "Prune output(s) at hash: {}", block_hash), + PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + } => write!( + f, + "Prune output from all dbs, hash : {}, commitment: {},output_type: {}", + output_hash, + commitment.to_hex(), + output_type, + ), + DeleteAllKernelsInBlock { block_hash } => write!(f, "Delete kernels in block {}", block_hash), DeleteAllInputsInBlock { block_hash } => write!(f, "Delete outputs in block {}", block_hash), SetAccumulatedDataForOrphan(accumulated_data) => { write!(f, "Set accumulated data for orphan {}", accumulated_data) diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index ea4c7ba449..35c8a3a532 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -97,6 +97,7 @@ use crate::{ transactions::{ aggregated_body::AggregateBody, transaction_components::{ + OutputType, SpentOutput, TransactionInput, TransactionKernel, @@ -391,6 +392,16 @@ impl LMDBDatabase { PruneOutputsSpentAtHash { block_hash } => { self.prune_outputs_spent_at_hash(&write_txn, block_hash)?; }, + PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + } => { + self.prune_output_from_all_dbs(&write_txn, output_hash, commitment, *output_type)?; + }, + DeleteAllKernelsInBlock { block_hash } => { + self.delete_all_kernels_in_block(&write_txn, block_hash)?; + }, DeleteAllInputsInBlock { block_hash } => { self.delete_all_inputs_in_block(&write_txn, block_hash)?; }, @@ -516,11 +527,6 @@ impl LMDBDatabase { ] } - fn prune_output(&self, txn: &WriteTransaction<'_>, key: OutputKey) -> Result<(), ChainStorageError> { - lmdb_delete(txn, &self.utxos_db, 
&key.convert_to_comp_key(), "utxos_db")?; - Ok(()) - } - fn insert_output( &self, txn: &WriteTransaction<'_>, @@ -1405,21 +1411,93 @@ impl LMDBDatabase { let inputs = lmdb_fetch_matching_after::(write_txn, &self.inputs_db, block_hash.as_slice())?; - for input in inputs { + for input_data in inputs { + let input = input_data.input; + // From 'utxo_commitment_index::utxo_commitment_index' + if let SpentOutput::OutputData { commitment, .. } = input.spent_output.clone() { + debug!(target: LOG_TARGET, "Pruning output from 'utxo_commitment_index': key '{}'", commitment.to_hex()); + lmdb_delete( + write_txn, + &self.utxo_commitment_index, + commitment.as_bytes(), + "utxo_commitment_index", + )?; + } + // From 'utxos_db::utxos_db' + if let Some(key_bytes) = + lmdb_get::<_, Vec>(write_txn, &self.txos_hash_to_index_db, input.output_hash().as_slice())? + { + let mut buffer = [0u8; 32]; + buffer.copy_from_slice(&key_bytes[0..32]); + let key = OutputKey::new(&FixedHash::from(buffer), &input.output_hash())?; + debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; + }; + // From 'txos_hash_to_index_db::utxos_db' + debug!( + target: LOG_TARGET, + "Pruning output from 'txos_hash_to_index_db': key '{}'", + input.output_hash().to_hex() + ); lmdb_delete( write_txn, &self.txos_hash_to_index_db, - input.hash.as_slice(), + input.output_hash().as_slice(), "utxos_db", )?; - let key = OutputKey::new(block_hash, &input.hash)?; - debug!(target: LOG_TARGET, "Pruning output: {:?}", key); - self.prune_output(write_txn, key)?; } Ok(()) } + fn prune_output_from_all_dbs( + &self, + write_txn: &WriteTransaction<'_>, + output_hash: &HashOutput, + commitment: &Commitment, + output_type: OutputType, + ) -> Result<(), ChainStorageError> { + match lmdb_get::<_, Vec>(write_txn, &self.txos_hash_to_index_db, output_hash.as_slice())? 
{ + Some(key_bytes) => { + if !matches!(output_type, OutputType::Burn) { + debug!(target: LOG_TARGET, "Pruning output from 'utxo_commitment_index': key '{}'", commitment.to_hex()); + lmdb_delete( + write_txn, + &self.utxo_commitment_index, + commitment.as_bytes(), + "utxo_commitment_index", + )?; + } + debug!(target: LOG_TARGET, "Pruning output from 'txos_hash_to_index_db': key '{}'", output_hash.to_hex()); + lmdb_delete( + write_txn, + &self.txos_hash_to_index_db, + output_hash.as_slice(), + "utxos_db", + )?; + + let mut buffer = [0u8; 32]; + buffer.copy_from_slice(&key_bytes[0..32]); + let key = OutputKey::new(&FixedHash::from(buffer), output_hash)?; + debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; + }, + None => return Err(ChainStorageError::InvalidOperation("Output key not found".to_string())), + } + + Ok(()) + } + + fn delete_all_kernels_in_block( + &self, + txn: &WriteTransaction<'_>, + block_hash: &BlockHash, + ) -> Result<(), ChainStorageError> { + self.delete_block_kernels(txn, block_hash.as_slice())?; + debug!(target: LOG_TARGET, "Deleted kernels in block {}", block_hash.to_hex()); + Ok(()) + } + #[allow(clippy::ptr_arg)] fn fetch_orphan(&self, txn: &ConstTransaction<'_>, hash: &HashOutput) -> Result, ChainStorageError> { let val: Option = lmdb_get(txn, &self.orphans_db, hash.deref())?; @@ -1879,23 +1957,23 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_outputs_in_block_with_spend_state( &self, - header_hash: &HashOutput, - spend_status_at_header: Option, + previous_header_hash: &HashOutput, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; let mut outputs: Vec<(TransactionOutput, bool)> = - lmdb_fetch_matching_after::(&txn, &self.utxos_db, header_hash.deref())? + lmdb_fetch_matching_after::(&txn, &self.utxos_db, previous_header_hash.deref())? 
.into_iter() .map(|row| (row.output, false)) .collect(); - if let Some(header) = spend_status_at_header { + if let Some(header_hash) = spend_status_at_header { let header_height = - self.fetch_height_from_hash(&txn, header_hash)? + self.fetch_height_from_hash(&txn, &header_hash)? .ok_or(ChainStorageError::ValueNotFound { entity: "Header", field: "hash", - value: header.to_hex(), + value: header_hash.to_hex(), })?; for output in &mut outputs { let hash = output.0.hash(); @@ -1906,7 +1984,7 @@ impl BlockchainBackend for LMDBDatabase { ChainStorageError::ValueNotFound { entity: "input", field: "hash", - value: header.to_hex(), + value: header_hash.to_hex(), }, )?; if input.spent_height <= header_height { @@ -1945,10 +2023,13 @@ impl BlockchainBackend for LMDBDatabase { lmdb_fetch_matching_after(&txn, &self.utxos_db, header_hash.as_slice()) } - fn fetch_inputs_in_block(&self, header_hash: &HashOutput) -> Result, ChainStorageError> { + fn fetch_inputs_in_block( + &self, + previous_header_hash: &HashOutput, + ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; Ok( - lmdb_fetch_matching_after(&txn, &self.inputs_db, header_hash.as_slice())? + lmdb_fetch_matching_after(&txn, &self.inputs_db, previous_header_hash.as_slice())? 
.into_iter() .map(|f: TransactionInputRowData| f.input) .collect(), diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index d60d6cfa03..87338619ea 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -32,7 +32,7 @@ use tari_common::configuration::Network; use tari_common_types::{ chain_metadata::ChainMetadata, tari_address::TariAddress, - types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{Commitment, HashOutput, PublicKey, Signature}, }; use tari_storage::lmdb_store::LMDBConfig; use tari_test_utils::paths::create_temporary_data_path; @@ -277,7 +277,7 @@ impl BlockchainBackend for TempDatabase { fn fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { self.db .as_ref() diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index b4f11bc64b..1c9f038df1 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -191,6 +191,7 @@ fn update_genesis_block_mmr_roots(template: NewBlockTemplate) -> Result( consensus: &ConsensusManager, achieved_difficulty: Difficulty, key_manager: &MemoryDbKeyManager, -) -> Result { +) -> Result<(ChainBlock, WalletOutput), ChainStorageError> { append_block_with_coinbase(db, prev_block, txns, consensus, achieved_difficulty, key_manager) .await - .map(|(b, _)| b) + .map(|(b, wo)| (b, wo)) } /// Create a new block with the provided transactions and add a coinbase output. 
The new MMR roots are calculated, and @@ -577,7 +578,7 @@ pub async fn construct_chained_blocks( let mut prev_block = block0; let mut blocks = Vec::new(); for _i in 0..n { - let block = append_block(db, &prev_block, vec![], consensus, Difficulty::min(), key_manager) + let (block, _) = append_block(db, &prev_block, vec![], consensus, Difficulty::min(), key_manager) .await .unwrap(); prev_block = block.clone(); diff --git a/base_layer/core/tests/helpers/nodes.rs b/base_layer/core/tests/helpers/nodes.rs index 2417789169..207373969c 100644 --- a/base_layer/core/tests/helpers/nodes.rs +++ b/base_layer/core/tests/helpers/nodes.rs @@ -41,7 +41,7 @@ use tari_core::{ LocalNodeCommsInterface, StateMachineHandle, }, - chain_storage::{BlockchainDatabase, Validators}, + chain_storage::{BlockchainDatabase, BlockchainDatabaseConfig, Validators}, consensus::{ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, mempool::{ service::{LocalMempoolService, MempoolHandle}, @@ -52,7 +52,7 @@ use tari_core::{ OutboundMempoolServiceInterface, }, proof_of_work::randomx_factory::RandomXFactory, - test_helpers::blockchain::{create_store_with_consensus_and_validators, TempDatabase}, + test_helpers::blockchain::{create_store_with_consensus_and_validators_and_config, TempDatabase}, validation::{ mocks::MockValidator, transaction::TransactionChainLinkedValidator, @@ -186,7 +186,11 @@ impl BaseNodeBuilder { /// Build the test base node and start its services. 
#[allow(clippy::redundant_closure)] - pub async fn start(self, data_path: &str) -> (NodeInterfaces, ConsensusManager) { + pub async fn start( + self, + data_path: &str, + blockchain_db_config: BlockchainDatabaseConfig, + ) -> (NodeInterfaces, ConsensusManager) { let validators = self.validators.unwrap_or_else(|| { Validators::new( MockValidator::new(true), @@ -198,7 +202,11 @@ impl BaseNodeBuilder { let consensus_manager = self .consensus_manager .unwrap_or_else(|| ConsensusManagerBuilder::new(network).build().unwrap()); - let blockchain_db = create_store_with_consensus_and_validators(consensus_manager.clone(), validators); + let blockchain_db = create_store_with_consensus_and_validators_and_config( + consensus_manager.clone(), + validators, + blockchain_db_config, + ); let mempool_validator = TransactionChainLinkedValidator::new(blockchain_db.clone(), consensus_manager.clone()); let mempool = Mempool::new( self.mempool_config.unwrap_or_default(), @@ -234,127 +242,53 @@ pub async fn wait_until_online(nodes: &[&NodeInterfaces]) { } } -// Creates a network with two Base Nodes where each node in the network knows the other nodes in the network. 
-#[allow(dead_code)] -pub async fn create_network_with_2_base_nodes(data_path: &str) -> (NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - - let network = Network::LocalNet; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity.clone()) - .with_peers(vec![bob_node_identity.clone()]) - .start(data_path) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity) - .with_peers(vec![alice_node_identity]) - .with_consensus_manager(consensus_manager) - .start(data_path) - .await; - - wait_until_online(&[&alice_node, &bob_node]).await; - - (alice_node, bob_node, consensus_manager) -} - -// Creates a network with two Base Nodes where each node in the network knows the other nodes in the network. -#[allow(dead_code)] -pub async fn create_network_with_2_base_nodes_with_config>( - mempool_service_config: MempoolServiceConfig, - liveness_service_config: LivenessConfig, - p2p_config: P2pConfig, +// Creates a network with multiple Base Nodes where each node in the network knows the other nodes in the network. 
+pub async fn create_network_with_multiple_base_nodes_with_config>( + mempool_service_configs: Vec, + liveness_service_configs: Vec, + blockchain_db_configs: Vec, + p2p_configs: Vec, consensus_manager: ConsensusManager, data_path: P, -) -> (NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - let network = Network::LocalNet; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity.clone()) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_p2p_config(p2p_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("alice").as_os_str().to_str().unwrap()) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity) - .with_peers(vec![alice_node_identity]) - .with_mempool_service_config(mempool_service_config) - .with_liveness_service_config(liveness_service_config) - .with_p2p_config(p2p_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("bob").as_os_str().to_str().unwrap()) - .await; - - wait_until_online(&[&alice_node, &bob_node]).await; - - (alice_node, bob_node, consensus_manager) -} - -// Creates a network with three Base Nodes where each node in the network knows the other nodes in the network. 
-#[allow(dead_code)] -pub async fn create_network_with_3_base_nodes( - data_path: &str, -) -> (NodeInterfaces, NodeInterfaces, NodeInterfaces, ConsensusManager) { - let network = Network::LocalNet; - let consensus_manager = ConsensusManagerBuilder::new(network).build().unwrap(); - create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - consensus_manager, - data_path, - ) - .await -} - -// Creates a network with three Base Nodes where each node in the network knows the other nodes in the network. -#[allow(dead_code)] -pub async fn create_network_with_3_base_nodes_with_config>( - mempool_service_config: MempoolServiceConfig, - liveness_service_config: LivenessConfig, - consensus_manager: ConsensusManager, - data_path: P, -) -> (NodeInterfaces, NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - let carol_node_identity = random_node_identity(); - let network = Network::LocalNet; - - log::info!( - "Alice = {}, Bob = {}, Carol = {}", - alice_node_identity.node_id().short_str(), - bob_node_identity.node_id().short_str(), - carol_node_identity.node_id().short_str() - ); - let (carol_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(carol_node_identity.clone()) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("carol").as_os_str().to_str().unwrap()) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity.clone()) - .with_peers(vec![carol_node_identity.clone()]) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_consensus_manager(consensus_manager) - 
.start(data_path.as_ref().join("bob").as_os_str().to_str().unwrap()) - .await; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity) - .with_peers(vec![bob_node_identity, carol_node_identity]) - .with_mempool_service_config(mempool_service_config) - .with_liveness_service_config(liveness_service_config) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("alice").as_os_str().to_str().unwrap()) - .await; + network: Network, +) -> (Vec, ConsensusManager) { + let num_of_nodes = mempool_service_configs.len(); + if num_of_nodes != liveness_service_configs.len() || + num_of_nodes != blockchain_db_configs.len() || + num_of_nodes != p2p_configs.len() + { + panic!("create_network_with_multiple_base_nodes_with_config: All configs must be the same length"); + } + let mut node_identities = Vec::with_capacity(num_of_nodes); + for i in 0..num_of_nodes { + node_identities.push(random_node_identity()); + log::info!( + "node identity {} = `{}`", + i + 1, + node_identities[node_identities.len() - 1].node_id().short_str() + ); + } + let mut node_interfaces = Vec::with_capacity(num_of_nodes); + for i in 0..num_of_nodes { + let (node, _) = BaseNodeBuilder::new(network.into()) + .with_node_identity(node_identities[i].clone()) + .with_peers(node_identities.iter().take(i).cloned().collect()) + .with_mempool_service_config(mempool_service_configs[i].clone()) + .with_liveness_service_config(liveness_service_configs[i].clone()) + .with_p2p_config(p2p_configs[i].clone()) + .with_consensus_manager(consensus_manager.clone()) + .start( + data_path.as_ref().join(i.to_string()).as_os_str().to_str().unwrap(), + blockchain_db_configs[i], + ) + .await; + node_interfaces.push(node); + } - wait_until_online(&[&alice_node, &bob_node, &carol_node]).await; + let node_interface_refs = node_interfaces.iter().collect::>(); + wait_until_online(node_interface_refs.as_slice()).await; - (alice_node, bob_node, carol_node, 
consensus_manager) + (node_interfaces, consensus_manager) } // Helper function for creating a random node indentity. diff --git a/base_layer/core/tests/helpers/sync.rs b/base_layer/core/tests/helpers/sync.rs index 2fe36d6578..a15cf7981f 100644 --- a/base_layer/core/tests/helpers/sync.rs +++ b/base_layer/core/tests/helpers/sync.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use tari_common::configuration::Network; use tari_common_types::types::HashOutput; @@ -28,19 +28,32 @@ use tari_comms::peer_manager::NodeId; use tari_core::{ base_node::{ chain_metadata_service::PeerChainMetadata, - state_machine_service::states::{BlockSync, HeaderSyncState, StateEvent, StatusInfo}, + state_machine_service::states::{ + BlockSync, + DecideNextSync, + HeaderSyncState, + HorizonStateSync, + StateEvent, + StatusInfo, + }, sync::SyncPeer, BaseNodeStateMachine, BaseNodeStateMachineConfig, SyncValidators, }, blocks::ChainBlock, - chain_storage::DbTransaction, + chain_storage::{BlockchainDatabaseConfig, DbTransaction}, consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder}, mempool::MempoolServiceConfig, proof_of_work::{randomx_factory::RandomXFactory, Difficulty}, test_helpers::blockchain::TempDatabase, - transactions::key_manager::{create_memory_db_key_manager, MemoryDbKeyManager}, + transactions::{ + key_manager::{create_memory_db_key_manager, MemoryDbKeyManager}, + tari_amount::T, + test_helpers::schema_to_transaction, + transaction_components::{Transaction, WalletOutput}, + }, + txn_schema, validation::mocks::MockValidator, }; use tari_p2p::{services::liveness::LivenessConfig, P2pConfig}; @@ -50,11 +63,12 @@ use tokio::sync::{broadcast, watch}; use crate::helpers::{ block_builders::{append_block, create_genesis_block}, - 
nodes::{create_network_with_2_base_nodes_with_config, NodeInterfaces}, + nodes::{create_network_with_multiple_base_nodes_with_config, NodeInterfaces}, }; static EMISSION: [u64; 2] = [10, 10]; +/// Helper function to initialize header sync with a single peer pub fn initialize_sync_headers_with_ping_pong_data( local_node_interfaces: &NodeInterfaces, peer_node_interfaces: &NodeInterfaces, @@ -69,6 +83,7 @@ pub fn initialize_sync_headers_with_ping_pong_data( ) } +/// Helper function to initialize header sync with a single peer pub async fn sync_headers_execute( state_machine: &mut BaseNodeStateMachine, header_sync: &mut HeaderSyncState, @@ -76,6 +91,7 @@ pub async fn sync_headers_execute( header_sync.next_event(state_machine).await } +/// Helper function to initialize block sync with a single peer pub fn initialize_sync_blocks(peer_node_interfaces: &NodeInterfaces) -> BlockSync { BlockSync::from(vec![SyncPeer::from(PeerChainMetadata::new( peer_node_interfaces.node_identity.node_id().clone(), @@ -84,6 +100,7 @@ pub fn initialize_sync_blocks(peer_node_interfaces: &NodeInterfaces) -> BlockSyn ))]) } +/// Helper function to initialize block sync with a single peer pub async fn sync_blocks_execute( state_machine: &mut BaseNodeStateMachine, block_sync: &mut BlockSync, @@ -91,67 +108,108 @@ pub async fn sync_blocks_execute( block_sync.next_event(state_machine).await } -pub async fn create_network_with_local_and_peer_nodes() -> ( - BaseNodeStateMachine, - NodeInterfaces, - NodeInterfaces, +/// Helper function to decide what to do next +pub async fn decide_horizon_sync( + local_state_machine: &mut BaseNodeStateMachine, + local_header_sync: HeaderSyncState, +) -> StateEvent { + let mut next_sync = DecideNextSync::from(local_header_sync.clone()); + next_sync.next_event(local_state_machine).await +} + +/// Helper function to initialize horizon state sync with a single peer +pub fn initialize_horizon_sync_without_header_sync(peer_node_interfaces: &NodeInterfaces) -> 
HorizonStateSync { + HorizonStateSync::from(vec![SyncPeer::from(PeerChainMetadata::new( + peer_node_interfaces.node_identity.node_id().clone(), + peer_node_interfaces.blockchain_db.get_chain_metadata().unwrap(), + None, + ))]) +} + +/// Helper function to initialize horizon state sync with a single peer +pub async fn horizon_sync_execute( + state_machine: &mut BaseNodeStateMachine, + horizon_sync: &mut HorizonStateSync, +) -> StateEvent { + horizon_sync.next_event(state_machine).await +} + +/// Helper function to create a network with multiple nodes +pub async fn create_network_with_multiple_nodes( + blockchain_db_configs: Vec, +) -> ( + Vec>, + Vec, ChainBlock, ConsensusManager, MemoryDbKeyManager, + WalletOutput, ) { + let num_nodes = blockchain_db_configs.len(); + if num_nodes < 2 { + panic!("Must have at least 2 nodes"); + } let network = Network::LocalNet; let temp_dir = tempdir().unwrap(); let key_manager = create_memory_db_key_manager(); let consensus_constants = ConsensusConstantsBuilder::new(network) .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) .build(); - let (initial_block, _) = create_genesis_block(&consensus_constants, &key_manager).await; + let (initial_block, coinbase_wallet_output) = create_genesis_block(&consensus_constants, &key_manager).await; let consensus_manager = ConsensusManagerBuilder::new(network) .add_consensus_constants(consensus_constants) .with_block(initial_block.clone()) .build() .unwrap(); - let (local_node, peer_node, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, - P2pConfig::default(), + let (node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); num_nodes], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + num_nodes 
+ ], + blockchain_db_configs, + vec![P2pConfig::default(); num_nodes], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; let shutdown = Shutdown::new(); - let (state_change_event_publisher, _) = broadcast::channel(10); - let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); - // Alice needs a state machine for header sync - let local_state_machine = BaseNodeStateMachine::new( - local_node.blockchain_db.clone().into(), - local_node.local_nci.clone(), - local_node.comms.connectivity(), - local_node.comms.peer_manager(), - local_node.chain_metadata_handle.get_event_stream(), - BaseNodeStateMachineConfig::default(), - SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), - status_event_sender, - state_change_event_publisher, - RandomXFactory::default(), - consensus_manager.clone(), - shutdown.to_signal(), - ); + let mut state_machines = Vec::with_capacity(num_nodes); + for node_interface in node_interfaces.iter().take(num_nodes) { + let (state_change_event_publisher, _) = broadcast::channel(10); + let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); + state_machines.push(BaseNodeStateMachine::new( + node_interface.blockchain_db.clone().into(), + node_interface.local_nci.clone(), + node_interface.comms.connectivity(), + node_interface.comms.peer_manager(), + node_interface.chain_metadata_handle.get_event_stream(), + BaseNodeStateMachineConfig::default(), + SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), + status_event_sender, + state_change_event_publisher, + RandomXFactory::default(), + consensus_manager.clone(), + shutdown.to_signal(), + )); + } ( - local_state_machine, - local_node, - peer_node, + state_machines, + node_interfaces, initial_block, consensus_manager, key_manager, + coinbase_wallet_output, ) } +/// Helper enum to specify what to delete #[allow(dead_code)] #[derive(Debug)] pub enum WhatToDelete { @@ -174,7 +232,7 @@ fn 
delete_block(txn: &mut DbTransaction, node: &NodeInterfaces, blocks: &[ChainB ); } -// Delete blocks and headers in reverse order; the first block in the slice wil not be deleted +/// Delete blocks and headers in reverse order; the first block in the slice will not be deleted pub fn delete_some_blocks_and_headers( blocks_with_anchor: &[ChainBlock], instruction: WhatToDelete, @@ -229,6 +287,7 @@ pub fn delete_some_blocks_and_headers( } } +/// Set the best block in the blockchain_db #[allow(dead_code)] pub fn set_best_block(block: &ChainBlock, previous_block_hash: &HashOutput, node: &NodeInterfaces) { let mut txn = DbTransaction::new(); @@ -242,47 +301,59 @@ pub fn set_best_block(block: &ChainBlock, previous_block_hash: &HashOutput, node node.blockchain_db.write(txn).unwrap(); } +/// Add some existing blocks to the blockchain_db pub fn add_some_existing_blocks(blocks: &[ChainBlock], node: &NodeInterfaces) { for block in blocks { let _res = node.blockchain_db.add_block(block.block().clone().into()).unwrap(); } } -// Return blocks added, including the start block +/// Return blocks and coinbases added, including the start block and coinbase pub async fn create_and_add_some_blocks( node: &NodeInterfaces, start_block: &ChainBlock, + start_coinbase: &WalletOutput, number_of_blocks: usize, consensus_manager: &ConsensusManager, key_manager: &MemoryDbKeyManager, difficulties: &[u64], -) -> Vec { - if number_of_blocks != difficulties.len() { + transactions: &Option>>, +) -> (Vec, Vec) { + let transactions = if let Some(val) = transactions { + val.clone() + } else { + vec![vec![]; number_of_blocks] + }; + if number_of_blocks != difficulties.len() || number_of_blocks != transactions.len() { panic!( - "Number of blocks ({}) and difficulties length ({}) must be equal", + "Number of blocks ({}), transactions length ({}) and difficulties length ({}) must be equal", number_of_blocks, + transactions.len(), difficulties.len() ); } let mut blocks = vec![start_block.clone()]; + let mut 
coinbases = vec![start_coinbase.clone()]; let mut prev_block = start_block.clone(); - for item in difficulties.iter().take(number_of_blocks) { - prev_block = append_block( + for (item, txns) in difficulties.iter().zip(transactions.iter()) { + let (new_block, coinbase) = append_block( &node.blockchain_db, &prev_block, - vec![], + txns.clone(), consensus_manager, Difficulty::from_u64(*item).unwrap(), key_manager, ) .await .unwrap(); - blocks.push(prev_block.clone()); + prev_block = new_block.clone(); + blocks.push(new_block.clone()); + coinbases.push(coinbase.clone()); } - blocks + (blocks, coinbases) } -// We give some time for the peer to be banned as it is an async process +/// We give some time for the peer to be banned as it is an async process pub async fn wait_for_is_peer_banned(this_node: &NodeInterfaces, peer_node_id: &NodeId, seconds: u64) -> bool { let interval_ms = 100; let intervals = seconds * 1000 / interval_ms; @@ -300,3 +371,143 @@ pub async fn wait_for_is_peer_banned(this_node: &NodeInterfaces, peer_node_id: & } false } + +/// Condensed format of the state machine state for display +pub fn state_event(event: &StateEvent) -> String { + match event { + StateEvent::Initialized => "Initialized".to_string(), + StateEvent::HeadersSynchronized(_, _) => "HeadersSynchronized".to_string(), + StateEvent::HeaderSyncFailed(_) => "HeaderSyncFailed".to_string(), + StateEvent::ProceedToHorizonSync(_) => "ProceedToHorizonSync".to_string(), + StateEvent::ProceedToBlockSync(_) => "ProceedToBlockSync".to_string(), + StateEvent::HorizonStateSynchronized => "HorizonStateSynchronized".to_string(), + StateEvent::HorizonStateSyncFailure => "HorizonStateSyncFailure".to_string(), + StateEvent::BlocksSynchronized => "BlocksSynchronized".to_string(), + StateEvent::BlockSyncFailed => "BlockSyncFailed".to_string(), + StateEvent::FallenBehind(_) => "FallenBehind".to_string(), + StateEvent::NetworkSilence => "NetworkSilence".to_string(), + StateEvent::FatalError(_) => 
"FatalError".to_string(), + StateEvent::Continue => "Continue".to_string(), + StateEvent::UserQuit => "UserQuit".to_string(), + } +} + +/// Return blocks and coinbases added, including the start block and coinbase +pub async fn create_block_chain_with_transactions( + node: &NodeInterfaces, + initial_block: &ChainBlock, + initial_coinbase: &WalletOutput, + consensus_manager: &ConsensusManager, + key_manager: &MemoryDbKeyManager, + intermediate_height: u64, + number_of_blocks: usize, + spend_genesis_coinbase_in_block: usize, + follow_up_transaction_in_block: usize, + follow_up_coinbases_to_spend: usize, +) -> (Vec, Vec) { + assert!(spend_genesis_coinbase_in_block > 1); + assert!((spend_genesis_coinbase_in_block as u64) < intermediate_height); + assert!(follow_up_transaction_in_block > spend_genesis_coinbase_in_block + 1); + assert!((follow_up_transaction_in_block as u64) > intermediate_height); + assert!(number_of_blocks as u64 > follow_up_transaction_in_block as u64 + intermediate_height + 1); + let add_blocks_a = spend_genesis_coinbase_in_block - 1; + let add_blocks_b = follow_up_transaction_in_block - 1 - add_blocks_a; + let add_blocks_c = number_of_blocks - add_blocks_a - add_blocks_b; + assert!(follow_up_coinbases_to_spend > add_blocks_a); + assert!(follow_up_coinbases_to_spend < follow_up_transaction_in_block); + + // Create a blockchain with some blocks to enable spending the genesys coinbase early on + let (blocks_a, coinbases_a) = create_and_add_some_blocks( + node, + initial_block, + initial_coinbase, + add_blocks_a, + consensus_manager, + key_manager, + &vec![3; add_blocks_a], + &None, + ) + .await; + assert_eq!(node.blockchain_db.get_height().unwrap(), add_blocks_a as u64); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + add_blocks_a as u64 + ); + // Add a transaction to spend the genesys coinbase + let schema = txn_schema!( + from: vec![initial_coinbase.clone()], + to: vec![1 * T; 10] + ); + let (txns_genesis_coinbase, _outputs) 
= schema_to_transaction(&[schema], key_manager).await; + let mut txns_all = vec![vec![]; add_blocks_b]; + txns_all[0] = txns_genesis_coinbase + .into_iter() + .map(|t| Arc::try_unwrap(t).unwrap()) + .collect::>(); + // Expand the blockchain with the genesys coinbase spend transaction + let (blocks_b, coinbases_b) = create_and_add_some_blocks( + node, + &blocks_a[blocks_a.len() - 1], + &coinbases_a[coinbases_a.len() - 1], + add_blocks_b, + consensus_manager, + key_manager, + &vec![3; add_blocks_b], + &Some(txns_all), + ) + .await; + assert_eq!( + node.blockchain_db.get_height().unwrap(), + (add_blocks_a + add_blocks_b) as u64 + ); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + (add_blocks_a + add_blocks_b) as u64 + ); + // Add a transaction to spend some more coinbase outputs + let mut coinbases_to_spend = Vec::with_capacity(follow_up_coinbases_to_spend); + for coinbase in coinbases_a.iter().skip(1) + // Skip the genesys coinbase + { + coinbases_to_spend.push(coinbase.clone()); + } + for coinbase in coinbases_b + .iter() + .skip(1) // Skip the last coinbase of the previously added blocks + .take(follow_up_coinbases_to_spend - coinbases_to_spend.len()) + { + coinbases_to_spend.push(coinbase.clone()); + } + assert_eq!(coinbases_to_spend.len(), follow_up_coinbases_to_spend); + let schema = txn_schema!( + from: coinbases_to_spend, + to: vec![1 * T; 20] + ); + let (txns_additional_coinbases, _outputs) = schema_to_transaction(&[schema], key_manager).await; + let mut txns_all = vec![vec![]; add_blocks_c]; + txns_all[0] = txns_additional_coinbases + .into_iter() + .map(|t| Arc::try_unwrap(t).unwrap()) + .collect::>(); + // Expand the blockchain with the spend transaction + let (blocks_c, coinbases_c) = create_and_add_some_blocks( + node, + &blocks_b[blocks_b.len() - 1], + &coinbases_b[coinbases_b.len() - 1], + add_blocks_c, + consensus_manager, + key_manager, + &vec![3; add_blocks_c], + &Some(txns_all), + ) + .await; + 
assert_eq!(node.blockchain_db.get_height().unwrap(), number_of_blocks as u64); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + number_of_blocks as u64 + ); + let blocks = [&blocks_a[..], &blocks_b[1..], &blocks_c[1..]].concat(); + let coinbases = [&coinbases_a[..], &coinbases_b[1..], &coinbases_c[1..]].concat(); + + (blocks, coinbases) +} diff --git a/base_layer/core/tests/tests/base_node_rpc.rs b/base_layer/core/tests/tests/base_node_rpc.rs index e19a7afe8f..bc8f0c39fb 100644 --- a/base_layer/core/tests/tests/base_node_rpc.rs +++ b/base_layer/core/tests/tests/base_node_rpc.rs @@ -41,6 +41,7 @@ use tari_core::{ sync::rpc::BaseNodeSyncRpcService, }, blocks::ChainBlock, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, proto::{ base_node::{FetchMatchingUtxos, Signatures as SignaturesProto, SyncUtxosByBlockRequest}, @@ -94,7 +95,7 @@ async fn setup() -> ( .unwrap(); let (mut base_node, _consensus_manager) = BaseNodeBuilder::new(network) .with_consensus_manager(consensus_manager.clone()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; base_node.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, diff --git a/base_layer/core/tests/tests/block_sync.rs b/base_layer/core/tests/tests/block_sync.rs index 9011a4b276..ae22cb32b6 100644 --- a/base_layer/core/tests/tests/block_sync.rs +++ b/base_layer/core/tests/tests/block_sync.rs @@ -20,21 +20,40 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use tari_core::base_node::state_machine_service::states::StateEvent; +use tari_core::{base_node::state_machine_service::states::StateEvent, chain_storage::BlockchainDatabaseConfig}; -use crate::helpers::{sync, sync::WhatToDelete}; +use crate::helpers::{ + sync, + sync::{state_event, WhatToDelete}, +}; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_block_sync_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some block to Bob's chain - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 5, &consensus_manager, &key_manager, &[3; 5]).await; + let (_blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 5, + &consensus_manager, + &key_manager, + &[3; 5], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 5); // Alice attempts header sync @@ -78,17 +97,26 @@ async fn test_block_sync_peer_supplies_no_blocks_with_ban() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + 
sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some block to Bob's chain - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); @@ -129,17 +157,26 @@ async fn test_block_sync_peer_supplies_not_all_blocks_with_ban() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some block to Bob's chain - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); @@ -174,3 +211,176 @@ async fn test_block_sync_peer_supplies_not_all_blocks_with_ban() { // Bob will be banned assert!(sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); } + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn 
test_block_sync_with_conbase_spend_happy_path_1() { + //` cargo test --release --test core_integration_tests + //` tests::block_sync::test_block_sync_with_conbase_spend_happy_path_1 > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Bob (archival node) and Carol (archival node) + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Carol is an archival node + BlockchainDatabaseConfig::default(), + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut carol_state_machine = state_machines.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 4; + let (blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + 3, + 10, // > follow_up_transaction_in_block + intermediate_height + 1 + 2, // < intermediate_height, + 5, // > intermediate_height + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 1 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[1..=10], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 1); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 1. 
Carol attempts header sync sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 1); + + // 2. Carol attempts block sync from Bob to the tip (to height 1) + println!("\n2. Carol attempts block sync from Bob to the tip (to height 1)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[2..=2], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 2); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 3. Carol attempts header sync sync from Bob + println!("\n3. 
Carol attempts header sync sync from Bob\n + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 2); + + // 4. Carol attempts block sync from Bob to the tip (to height 2) + println!("\n4. Carol attempts block sync from Bob to the tip (to height 2)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_block_sync_with_conbase_spend_happy_path_2() { + //` cargo test --release --test core_integration_tests + //` tests::block_sync::test_block_sync_with_conbase_spend_happy_path_2 > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Bob (archival node) and Carol (archival node) + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Carol is an archival node + BlockchainDatabaseConfig::default(), + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut 
carol_state_machine = state_machines.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesys coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 4; + let (_blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + 3, + 10, // > follow_up_transaction_in_block + intermediate_height + 1 + 2, // < intermediate_height, + 5, // > intermediate_height + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // 1. Carol attempts header sync sync from Bob + println!("\n1. Carol attempts header sync sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 10); + + // 2. Carol attempts block sync from Bob to the tip (to height 10) + println!("\n2. 
Carol attempts block sync from Bob to the tip (to height 10)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); +} diff --git a/base_layer/core/tests/tests/header_sync.rs b/base_layer/core/tests/tests/header_sync.rs index 5745f24125..7e5125b75d 100644 --- a/base_layer/core/tests/tests/header_sync.rs +++ b/base_layer/core/tests/tests/header_sync.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use tari_core::base_node::{state_machine_service::states::StateEvent, sync::HeaderSyncStatus}; +use tari_core::{ + base_node::{state_machine_service::states::StateEvent, sync::HeaderSyncStatus}, + chain_storage::BlockchainDatabaseConfig, +}; use crate::helpers::{sync, sync::WhatToDelete}; @@ -30,12 +33,28 @@ async fn test_header_sync_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add 1 block to Bob's chain - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); // Alice attempts header sync, still on the genesys block, headers will be lagging @@ -74,8 +93,17 @@ async fn test_header_sync_happy_path() { } // Bob adds another block - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 1, &consensus_manager, &key_manager, &[3]).await; + let (_blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); // Alice attempts 
header sync, still on the genesys block, headers will be lagging @@ -102,25 +130,56 @@ async fn test_header_sync_with_fork_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add 1 block to Bob's chain - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); // Bob adds another block - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); // Alice adds 3 (different) blocks, with POW on par with Bob's chain, but with greater height - let _alice_blocks = - sync::create_and_add_some_blocks(&alice_node, &initial_block, 3, &consensus_manager, &key_manager, &[ - 3, 2, 1, - ]) - .await; + let _alice_blocks = sync::create_and_add_some_blocks( + &alice_node, + &initial_block, + &initial_coinbase, + 3, + 
&consensus_manager, + &key_manager, + &[3, 2, 1], + &None, + ) + .await; assert_eq!(alice_node.blockchain_db.get_height().unwrap(), 3); assert_eq!( alice_node @@ -148,8 +207,17 @@ async fn test_header_sync_with_fork_happy_path() { assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); // Bob adds more blocks and draws ahead of Alice - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 2, &consensus_manager, &key_manager, &[3; 2]).await; + let _blocks = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 2, + &consensus_manager, + &key_manager, + &[3; 2], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 4); // Alice attempts header sync to Bob's chain with higher POW, headers will be lagging with reorg steps @@ -176,17 +244,26 @@ async fn test_header_sync_uneven_headers_and_blocks_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain, with more headers than blocks - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; sync::delete_some_blocks_and_headers(&blocks[5..=10], WhatToDelete::Blocks, &bob_node); @@ -224,17 
+301,26 @@ async fn test_header_sync_uneven_headers_and_blocks_peer_lies_about_pow_no_ban() // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain, with more headers than blocks - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; sync::delete_some_blocks_and_headers(&blocks[5..=10], WhatToDelete::Blocks, &bob_node); @@ -287,12 +373,28 @@ async fn test_header_sync_even_headers_and_blocks_peer_lies_about_pow_with_ban() // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain - let blocks = - sync::create_and_add_some_blocks(&bob_node, 
&initial_block, 6, &consensus_manager, &key_manager, &[3; 6]).await; + let (blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 6, + &consensus_manager, + &key_manager, + &[3; 6], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 6); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 6); @@ -333,12 +435,28 @@ async fn test_header_sync_even_headers_and_blocks_peer_metadata_improve_with_reo // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain - let blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 6, &consensus_manager, &key_manager, &[3; 6]).await; + let (blocks, coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 6, + &consensus_manager, + &key_manager, + &[3; 6], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 6); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 6); @@ -351,8 +469,17 @@ async fn test_header_sync_even_headers_and_blocks_peer_metadata_improve_with_reo let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); // Bob's chain will reorg with improved metadata sync::delete_some_blocks_and_headers(&blocks[4..=6], WhatToDelete::Blocks, &bob_node); - let 
_blocks = - sync::create_and_add_some_blocks(&bob_node, &blocks[4], 3, &consensus_manager, &key_manager, &[3; 3]).await; + let _blocks = sync::create_and_add_some_blocks( + &bob_node, + &blocks[4], + &coinbases[4], + 3, + &consensus_manager, + &key_manager, + &[3; 3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 7); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 7); let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; diff --git a/base_layer/core/tests/tests/horizon_sync.rs b/base_layer/core/tests/tests/horizon_sync.rs new file mode 100644 index 0000000000..df83120a60 --- /dev/null +++ b/base_layer/core/tests/tests/horizon_sync.rs @@ -0,0 +1,664 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::cmp::min; + +use tari_core::{ + base_node::state_machine_service::states::{HorizonStateSync, StateEvent}, + chain_storage::BlockchainDatabaseConfig, +}; + +use crate::helpers::{ + sync, + sync::{decide_horizon_sync, state_event, WhatToDelete}, +}; + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_horizon_sync_from_archival_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_horizon_sync_from_archival_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) + let pruning_horizon = 5; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesys coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 15; + let (blocks, coinbases) = 
sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + pruning_horizon, + 30, // > follow_up_transaction_in_block + pruning_horizon + 1 + 3, // < pruning_horizon + 16, // > pruning_horizon + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 10 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[10..=30], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 10); + + // 1. Alice attempts horizon sync without having done header sync + println!("\n1. Alice attempts horizon sync without having done header sync\n"); + + let mut horizon_sync = sync::initialize_horizon_sync_without_header_sync(&bob_node); + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!(alice_node.blockchain_db.get_height().unwrap(), 0); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Alice does header sync (to height 10) + println!("\n2. Alice does header sync (to height 10)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 10); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 3. 
Alice attempts horizon sync after header sync (to height 5; includes genesys block UTXO spend) + println!("\n3. Alice attempts horizon sync after header sync (to height 5; includes genesys block UTXO spend)\n"); + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync.clone()).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts horizon sync again without any change in the blockchain + println!("\n4. Alice attempts horizon sync again without any change in the blockchain\n"); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("4. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 5. Alice attempts block sync to the tip (to height 10) + println!("\n5. Alice attempts block sync to the tip (to height 10)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks (containing the block with the spend transaction at height 16) + sync::add_some_existing_blocks(&blocks[11..=25], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 25); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 25); + + // 6. Alice does header sync to the new height (to height 25) + println!("\n6. 
Alice does header sync to the new height (to height 25)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 25); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 7. Alice attempts horizon sync to the new pruning height (to height 20 - STXOs should be pruned) Outputs created + // after height 10 and spent up to height 20 with corresponding inputs should not be streamed; we do not have way + // to verify this except looking at the detail log files. + println!("\n7. Alice attempts horizon sync to the new pruning height (to height 20 - STXOs should be pruned)\n"); + let spent_coinbases = coinbases + .iter() + .skip(1) + .take(10) // To current height + .collect::>(); + for output in &spent_coinbases { + let output_hash = output.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = output.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_some()); + } + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("7. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + for output in &spent_coinbases { + let output_hash = output.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + let commitment = output.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + } + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks (containing the block with the spend transaction at height 16) + sync::add_some_existing_blocks(&blocks[26..=30], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 30); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 30); + + // 8. Alice does header sync to the new height (to height 30) + println!("\n8. Alice does header sync to the new height (to height 30)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 30); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 9. Alice attempts horizon sync to the new pruning height (to height 25) + println!("\n9. 
Alice attempts horizon sync to the new pruning height (to height 25)\n"); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("9. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_horizon_sync_from_prune_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_horizon_sync_from_prune_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) and Carol (pruning node) + let pruning_horizon_alice = 4; + let pruning_horizon_carol = 12; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Alice is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_alice, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Carol is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_carol, + pruning_interval: 5, + track_reorgs: false, + 
cleanup_orphans_at_startup: false, + }, + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let mut carol_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesys coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 5; + let (blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + min(pruning_horizon_alice, pruning_horizon_carol), + 28, // > follow_up_transaction_in_block + pruning_horizon_carol + 1 + 2, // < pruning_horizon_alice, < pruning_horizon_carol + 14, // > pruning_horizon_alice, > pruning_horizon_carol + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 8 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[8..=28], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 8); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 8); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 1. Alice attempts initial horizon sync from Bob (to pruning height 4; includes genesys block UTXO spend) + println!( + "\n1. 
Alice attempts initial horizon sync from Bob (to pruning height 4; includes genesys block UTXO spend)\n" + ); + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob.clone()).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 8); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("1. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Carol attempts initial horizon sync from Bob with inadequate height + println!("\n2. 
Carol attempts initial horizon sync from Bob with inadequate height\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 8); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + match event { + StateEvent::ProceedToBlockSync(_) => println!("Carol chose `ProceedToBlockSync` instead"), + _ => panic!("2. Carol should not choose '{:?}'", event), + } + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[9..=13], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 13); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 13); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 3. Alice attempts horizon sync from Bob (to pruning height 9) + println!("\n3. Alice attempts horizon sync from Bob (to pruning height 9)\n"); + + let mut header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 13); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts block sync from Bob to the tip (to height 13) + println!("\n4. Alice attempts block sync from Bob to the tip (to height 13)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 5 Carol attempts initial horizon sync from Alice with adequate height (but Alice is not an archival node) + println!( + "\n5. 
Carol attempts initial horizon sync from Alice with adequate height (but Alice is not an archival \ + node)\n" + ); + + let mut header_sync_carol_from_alice = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &alice_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_alice).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 13); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_alice).await; + match event { + StateEvent::Continue => println!("Carol chose `Continue` instead"), + _ => panic!("5. Carol should not choose '{:?}'", event), + } + // Alice will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, alice_node.node_identity.node_id(), 1).await); + + // 6. Carol attempts initial horizon sync from Bob with adequate height (to pruning height 1) + println!("\n6. Carol attempts initial horizon sync from Bob with adequate height (to height 1)\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 13); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("6. 
Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[14..=18], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 18); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 18); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 7. Alice attempts horizon sync from Bob (to pruning height 14) + println!("\n7. Alice attempts horizon sync from Bob (to pruning height 14)\n"); + + let mut header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 18); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("7. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 8. Alice attempts block sync from Bob to the tip (to height 18) + println!("\n8. Alice attempts block sync from Bob to the tip (to height 18)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 9. Carol attempts horizon sync from Alice with inadequate pruning horizon (to height 6) + println!("\n9. 
Carol attempts horizon sync from Alice with inadequate pruning horizon (to height 6)\n"); + + let mut header_sync_carol_from_alice = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &alice_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_alice).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 18); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_alice).await; + match event { + StateEvent::Continue => println!("Carol chose `Continue` instead"), + _ => panic!("9. Carol should not choose '{:?}'", event), + } + // Alice will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, alice_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[14..=22], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 22); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 22); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 10. Carol attempts horizon sync from Bob (to pruning height 10) + println!("\n10. 
Carol attempts horizon sync from Bob (to pruning height 10)\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 22); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("10. Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 11. Carol attempts block sync from Bob to the tip (to height 22) + println!("\n11. 
Carol attempts block sync from Bob to the tip (to height 22)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 12. Alice attempts horizon sync from Carol with adequate pruning horizon (to height 18) + println!("\n12. Alice attempts horizon sync from Carol with adequate pruning horizon (to height 18)\n"); + + let mut header_sync_alice_from_carol = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &carol_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_carol).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 22); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_carol).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("12. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + // Carol will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, carol_node.node_identity.node_id(), 1).await); +} diff --git a/base_layer/core/tests/tests/mempool.rs b/base_layer/core/tests/tests/mempool.rs index 0bb1d7a6d3..8601ea4c95 100644 --- a/base_layer/core/tests/tests/mempool.rs +++ b/base_layer/core/tests/tests/mempool.rs @@ -28,6 +28,7 @@ use tari_common_types::types::{Commitment, PrivateKey, PublicKey, Signature}; use tari_comms_dht::domain_message::OutboundDomainMessage; use tari_core::{ base_node::state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager}, mempool::{Mempool, MempoolConfig, MempoolServiceConfig, TxStorageResponse}, proof_of_work::Difficulty, @@ -87,7 +88,7 @@ use crate::helpers::{ generate_block, generate_new_block, }, - nodes::{create_network_with_2_base_nodes_with_config, create_network_with_3_base_nodes_with_config}, + nodes::create_network_with_multiple_base_nodes_with_config, sample_blockchains::{create_new_blockchain, create_new_blockchain_with_constants}, }; @@ -1053,14 +1054,21 @@ async fn receive_and_propagate_transaction() { .with_block(block0) .build() .unwrap(); - let (mut alice_node, mut bob_node, mut carol_node, _consensus_manager) = - create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - 
consensus_manager, - temp_dir.path().to_str().unwrap(), - ) - .await; + + let (mut node_interfaces, _consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 3], + vec![LivenessConfig::default(); 3], + vec![BlockchainDatabaseConfig::default(); 3], + vec![P2pConfig::default(); 3], + consensus_manager, + temp_dir.path().to_str().unwrap(), + network, + ) + .await; + let mut alice_node = node_interfaces.remove(0); + let mut bob_node = node_interfaces.remove(0); + let mut carol_node = node_interfaces.remove(0); + alice_node.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, state_info: StateInfo::Listening(ListeningInfo::new(true)), @@ -1722,14 +1730,20 @@ async fn block_event_and_reorg_event_handling() { .with_block(block0.clone()) .build() .unwrap(); - let (mut alice, mut bob, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - P2pConfig::default(), + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 2], + vec![LivenessConfig::default(); 2], + vec![BlockchainDatabaseConfig::default(); 2], + vec![P2pConfig::default(); 2], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let mut alice = node_interfaces.remove(0); + let mut bob = node_interfaces.remove(0); + alice.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, state_info: StateInfo::Listening(ListeningInfo::new(true)), diff --git a/base_layer/core/tests/tests/mod.rs b/base_layer/core/tests/tests/mod.rs index e36b646680..5e3ade249b 100644 --- a/base_layer/core/tests/tests/mod.rs +++ b/base_layer/core/tests/tests/mod.rs @@ -27,6 +27,7 @@ mod base_node_rpc; mod block_sync; mod block_validation; mod header_sync; +mod horizon_sync; mod mempool; mod node_comms_interface; mod node_service; diff --git 
a/base_layer/core/tests/tests/node_comms_interface.rs b/base_layer/core/tests/tests/node_comms_interface.rs index fb1753d8ea..4480cfce56 100644 --- a/base_layer/core/tests/tests/node_comms_interface.rs +++ b/base_layer/core/tests/tests/node_comms_interface.rs @@ -464,7 +464,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { randomx_factory, ); - let block1 = append_block( + let (block1, _) = append_block( &store, &block0, vec![], @@ -474,7 +474,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block2 = append_block( + let (block2, _) = append_block( &store, &block1, vec![], @@ -484,7 +484,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block3 = append_block( + let (block3, _) = append_block( &store, &block2, vec![], @@ -494,7 +494,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block4 = append_block( + let (block4, _) = append_block( &store, &block3, vec![], @@ -504,7 +504,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let _block5 = append_block( + let (_block5, _) = append_block( &store, &block4, vec![], diff --git a/base_layer/core/tests/tests/node_service.rs b/base_layer/core/tests/tests/node_service.rs index 2e037d0b09..c03a0d7b23 100644 --- a/base_layer/core/tests/tests/node_service.rs +++ b/base_layer/core/tests/tests/node_service.rs @@ -31,6 +31,7 @@ use tari_core::{ state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, }, blocks::{ChainBlock, NewBlock}, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, mempool::TxStorageResponse, proof_of_work::{randomx_factory::RandomXFactory, Difficulty, PowAlgorithm}, @@ -104,25 +105,37 @@ async fn propagate_and_forward_many_valid_blocks() { let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) 
.with_node_identity(alice_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![alice_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity.clone()) .with_peers(vec![bob_node_identity.clone()]) .with_consensus_manager(rules) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut dan_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(dan_node_identity) .with_peers(vec![carol_node_identity, bob_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("dan").to_str().unwrap()) + .start( + temp_dir.path().join("dan").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; wait_until_online(&[&alice_node, &bob_node, &carol_node, &dan_node]).await; @@ -166,7 +179,8 @@ async fn propagate_and_forward_many_valid_blocks() { &key_manager, ) .await - .unwrap(), + .unwrap() + .0, ); blocks .extend(construct_chained_blocks(&alice_node.blockchain_db, blocks[0].clone(), &rules, 5, &key_manager).await); @@ -210,6 +224,7 @@ async fn propagate_and_forward_many_valid_blocks() { static EMISSION: [u64; 2] = [10, 10]; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] +#[allow(clippy::too_many_lines)] async fn propagate_and_forward_invalid_block_hash() { // Alice will propagate a "made up" block hash to Bob, Bob will request the block from Alice. 
Alice will not be able // to provide the block and so Bob will not propagate the hash further to Carol. @@ -234,19 +249,28 @@ async fn propagate_and_forward_invalid_block_hash() { let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![alice_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity) .with_peers(vec![bob_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; wait_until_online(&[&alice_node, &bob_node, &carol_node]).await; @@ -276,7 +300,7 @@ async fn propagate_and_forward_invalid_block_hash() { ) .await; let txs = txs.into_iter().map(|tx| (*tx).clone()).collect(); - let block1 = append_block( + let (block1, _) = append_block( &alice_node.blockchain_db, &block0, txs, @@ -361,7 +385,10 @@ async fn propagate_and_forward_invalid_block() { let (mut dan_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(dan_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("dan").to_str().unwrap()) + .start( + temp_dir.path().join("dan").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity.clone()) 
@@ -372,20 +399,29 @@ async fn propagate_and_forward_invalid_block() { mock_validator.clone(), stateless_block_validator.clone(), ) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![dan_node_identity]) .with_consensus_manager(rules) .with_validators(mock_validator.clone(), mock_validator, stateless_block_validator) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity) .with_peers(vec![bob_node_identity, carol_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; alice_node @@ -423,7 +459,7 @@ async fn propagate_and_forward_invalid_block() { // This is a valid block, however Bob, Carol and Dan's block validator is set to always reject the block // after fetching it. 
- let block1 = append_block( + let (block1, _) = append_block( &alice_node.blockchain_db, &block0, vec![], @@ -485,14 +521,14 @@ async fn local_get_metadata() { let network = Network::LocalNet; let key_manager = create_memory_db_key_manager(); let (mut node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let db = &node.blockchain_db; let block0 = db.fetch_block(0, true).unwrap().try_into_chain_block().unwrap(); - let block1 = append_block(db, &block0, vec![], &consensus_manager, Difficulty::min(), &key_manager) + let (block1, _) = append_block(db, &block0, vec![], &consensus_manager, Difficulty::min(), &key_manager) .await .unwrap(); - let block2 = append_block(db, &block1, vec![], &consensus_manager, Difficulty::min(), &key_manager) + let (block2, _) = append_block(db, &block1, vec![], &consensus_manager, Difficulty::min(), &key_manager) .await .unwrap(); @@ -517,7 +553,7 @@ async fn local_get_new_block_template_and_get_new_block() { .unwrap(); let (mut node, _rules) = BaseNodeBuilder::new(network.into()) .with_consensus_manager(rules) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let schema = [ @@ -566,7 +602,7 @@ async fn local_get_new_block_with_zero_conf() { HeaderFullValidator::new(rules.clone(), difficulty_calculator), BlockBodyInternalConsistencyValidator::new(rules, true, factories.clone()), ) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let (tx01, tx01_out) = spend_utxos( @@ -652,7 +688,7 @@ async fn local_get_new_block_with_combined_transaction() { HeaderFullValidator::new(rules.clone(), difficulty_calculator), BlockBodyInternalConsistencyValidator::new(rules, true, factories.clone()), ) - .start(temp_dir.path().to_str().unwrap()) + 
.start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let (tx01, tx01_out) = spend_utxos( @@ -718,7 +754,7 @@ async fn local_submit_block() { let network = Network::LocalNet; let key_manager = create_memory_db_key_manager(); let (mut node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let db = &node.blockchain_db; diff --git a/base_layer/core/tests/tests/node_state_machine.rs b/base_layer/core/tests/tests/node_state_machine.rs index 55e68c79de..01761fba4f 100644 --- a/base_layer/core/tests/tests/node_state_machine.rs +++ b/base_layer/core/tests/tests/node_state_machine.rs @@ -36,6 +36,7 @@ use tari_core::{ }, SyncValidators, }, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManagerBuilder}, mempool::MempoolServiceConfig, proof_of_work::{randomx_factory::RandomXFactory, Difficulty}, @@ -58,8 +59,7 @@ use crate::helpers::{ block_builders::{append_block, chain_block, create_genesis_block}, chain_metadata::MockChainMetadata, nodes::{ - create_network_with_2_base_nodes_with_config, - create_network_with_3_base_nodes_with_config, + create_network_with_multiple_base_nodes_with_config, random_node_identity, wait_until_online, BaseNodeBuilder, @@ -81,17 +81,26 @@ async fn test_listening_lagging() { .with_block(prev_block.clone()) .build() .unwrap(); - let (alice_node, bob_node, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, - P2pConfig::default(), + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 2], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; 
+ 2 + ], + vec![BlockchainDatabaseConfig::default(); 2], + vec![P2pConfig::default(); 2], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let alice_node = node_interfaces.remove(0); + let bob_node = node_interfaces.remove(0); + let shutdown = Shutdown::new(); let (state_change_event_publisher, _) = broadcast::channel(10); let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); @@ -117,7 +126,7 @@ async fn test_listening_lagging() { let mut bob_local_nci = bob_node.local_nci; // Bob Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &bob_db, &prev_block, vec![], @@ -143,6 +152,7 @@ async fn test_listening_lagging() { assert!(matches!(next_event, StateEvent::FallenBehind(_))); } +#[allow(clippy::too_many_lines)] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_listening_initial_fallen_behind() { let network = Network::LocalNet; @@ -157,23 +167,34 @@ async fn test_listening_initial_fallen_behind() { .with_block(gen_block.clone()) .build() .unwrap(); - let (alice_node, bob_node, charlie_node, consensus_manager) = create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 3], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + 3 + ], + vec![BlockchainDatabaseConfig::default(); 3], + vec![P2pConfig::default(); 3], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let alice_node = node_interfaces.remove(0); + let bob_node = node_interfaces.remove(0); + let charlie_node = node_interfaces.remove(0); + let shutdown = Shutdown::new(); let bob_db = bob_node.blockchain_db; let mut bob_local_nci = 
bob_node.local_nci; // Bob Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &bob_db, &gen_block, vec![], @@ -196,7 +217,7 @@ async fn test_listening_initial_fallen_behind() { let mut charlie_local_nci = charlie_node.local_nci; // charlie Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &charlie_db, &gen_block, vec![], @@ -256,7 +277,7 @@ async fn test_listening_initial_fallen_behind() { async fn test_event_channel() { let temp_dir = tempdir().unwrap(); let (node, consensus_manager) = BaseNodeBuilder::new(Network::Esmeralda.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; // let shutdown = Shutdown::new(); let db = create_test_blockchain_db(); diff --git a/base_layer/mmr/src/sparse_merkle_tree/tree.rs b/base_layer/mmr/src/sparse_merkle_tree/tree.rs index caa2b38102..922b4396b4 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/tree.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/tree.rs @@ -229,7 +229,7 @@ impl> SparseMerkleTree { Ok(result) } - /// Update and existing node at location `key` in the tree, or, if the key does not exist, insert a new node at + /// Update an existing node at location `key` in the tree, or, if the key does not exist, insert a new node at /// location `key` instead. Returns `Ok(UpdateResult::Updated)` if the node was updated, or /// `Ok(UpdateResult::Inserted)` if the node was inserted. ///