diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index 7e10d59745..007fee029a 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -29,8 +29,8 @@ use crate::{ }, sync::rpc, }, - blocks::BlockHeader, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree, PrunedOutput}, + blocks::{BlockHeader, UpdateBlockAccumulatedData}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree}, proto::base_node::{ sync_utxo as proto_sync_utxo, sync_utxos_response::UtxoOrDeleted, @@ -44,16 +44,10 @@ use crate::{ use croaring::Bitmap; use futures::StreamExt; use log::*; -use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, -}; -use tari_common_types::types::{HashDigest, RangeProofService}; +use std::convert::{TryFrom, TryInto}; +use tari_common_types::types::{Commitment, HashDigest, RangeProofService}; use tari_comms::PeerConnection; -use tari_crypto::{ - commitment::HomomorphicCommitment, - tari_utilities::{hex::Hex, Hashable}, -}; +use tari_crypto::tari_utilities::{hex::Hex, Hashable}; use tari_mmr::{MerkleMountainRange, MutableMmr}; const LOG_TARGET: &str = "c::bn::state_machine_service::states::horizon_state_sync"; @@ -65,6 +59,8 @@ pub struct HorizonStateSynchronization<'a, B: BlockchainBackend> { prover: &'a RangeProofService, num_kernels: u64, num_outputs: u64, + kernel_sum: Commitment, + utxo_sum: Commitment, } impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { @@ -81,6 +77,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { prover, num_kernels: 0, num_outputs: 0, + kernel_sum: Default::default(), + utxo_sum: Default::default(), } } @@ -119,6 +117,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { + self.initialize().await?; debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); @@ -126,12 +125,30 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + async fn initialize(&mut self) -> Result<(), HorizonSyncError> { + let metadata = self.db().get_chain_metadata().await?; + let data = self + .db() + .fetch_block_accumulated_data(metadata.best_block().clone()) + .await?; + self.utxo_sum = data.utxo_sum().clone(); + self.kernel_sum = data.kernel_sum().clone(); + debug!( + target: LOG_TARGET, + "Loaded utxo_sum = {}, kernel_sum = {}", + self.utxo_sum.to_hex(), + self.kernel_sum.to_hex() + ); + Ok(()) + } + async fn synchronize_kernels( &mut self, client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { let local_num_kernels = self.db().fetch_mmr_size(MmrTree::Kernel).await?; + let metadata = self.db().get_chain_metadata().await?; let remote_num_kernels = to_header.kernel_mmr_size; self.num_kernels = remote_num_kernels; @@ -192,6 +209,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .map_err(HorizonSyncError::InvalidKernelSignature)?; 
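+ // Each kernel's excess is folded into the running kernel_sum as the kernel is
+ // inserted below, replacing the full chain re-scan previously performed when
+ // finalizing horizon sync.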
kernels.push(kernel.clone()); + self.kernel_sum = &self.kernel_sum + &kernel.excess; + trace!(target: LOG_TARGET, "kernel_sum = {}", self.kernel_sum.to_hex()); txn.insert_kernel_via_horizon_sync(kernel, current_header.hash().clone(), mmr_position as u32); if mmr_position == current_header.header().kernel_mmr_size - 1 { debug!( @@ -221,11 +240,16 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }); } - txn.update_pruned_hash_set( - MmrTree::Kernel, + let kernel_hash_set = kernel_mmr.get_pruned_hash_set()?; + txn.update_block_accumulated_data_via_horizon_sync( current_header.hash().clone(), - kernel_mmr.get_pruned_hash_set()?, + UpdateBlockAccumulatedData { + kernel_sum: Some(self.kernel_sum.clone()), + kernel_hash_set: Some(kernel_hash_set), + ..Default::default() + }, ); + txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; if mmr_position < end - 1 { @@ -258,6 +282,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ) -> Result<(), HorizonSyncError> { let local_num_outputs = self.db().fetch_mmr_size(MmrTree::Utxo).await?; + let metadata = self.db().get_chain_metadata().await?; + let remote_num_outputs = to_header.output_mmr_size; self.num_outputs = remote_num_outputs; @@ -322,10 +348,11 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let block_data = db .fetch_block_accumulated_data(current_header.header().prev_hash.clone()) .await?; - let (_, output_pruned_set, rp_pruned_set, mut full_bitmap) = block_data.dissolve(); + let (_, output_pruned_set, witness_pruned_set, _) = block_data.dissolve(); + let mut full_bitmap = self.db().fetch_deleted_bitmap_at_tip().await?.into_bitmap(); let mut output_mmr = MerkleMountainRange::<HashDigest, _>::new(output_pruned_set); - let mut witness_mmr = MerkleMountainRange::<HashDigest, _>::new(rp_pruned_set); + let mut witness_mmr = MerkleMountainRange::<HashDigest, _>::new(witness_pruned_set); while let Some(response) = output_stream.next().await { let res: SyncUtxosResponse = response?; @@ -356,6 +383,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { output_hashes.push(output.hash()); witness_hashes.push(output.witness_hash()); unpruned_outputs.push(output.clone()); + self.utxo_sum = &self.utxo_sum + &output.commitment; txn.insert_output_via_horizon_sync( output, current_header.hash().clone(), @@ -415,8 +443,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { witness_mmr.push(hash)?; } - // Check that the difference bitmap is excessively large. Bitmap::deserialize panics if greater than - // isize::MAX, however isize::MAX is still an inordinate amount of data. An + // Check that the difference bitmap isn't excessively large. Bitmap::deserialize panics if greater + // than isize::MAX, however isize::MAX is still an inordinate amount of data. An // arbitrary 4 MiB limit is used.
const MAX_DIFF_BITMAP_BYTE_LEN: usize = 4 * 1024 * 1024; if diff_bitmap.len() > MAX_DIFF_BITMAP_BYTE_LEN { @@ -471,14 +499,19 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } txn.update_deleted_bitmap(diff_bitmap.clone()); - txn.update_pruned_hash_set(MmrTree::Utxo, current_header.hash().clone(), pruned_output_set); - txn.update_pruned_hash_set( - MmrTree::Witness, + + let witness_hash_set = witness_mmr.get_pruned_hash_set()?; + txn.update_block_accumulated_data_via_horizon_sync( current_header.hash().clone(), - witness_mmr.get_pruned_hash_set()?, + UpdateBlockAccumulatedData { + utxo_sum: Some(self.utxo_sum.clone()), + utxo_hash_set: Some(pruned_output_set), + witness_hash_set: Some(witness_hash_set), + deleted_diff: Some(diff_bitmap.into()), + ..Default::default() + }, ); - txn.update_block_accumulated_data_with_deleted_diff(current_header.hash().clone(), diff_bitmap); - + txn.set_pruned_height(metadata.pruned_height(), self.kernel_sum.clone(), self.utxo_sum.clone()); txn.commit().await?; current_header = db.fetch_chain_header(current_header.height() + 1).await?; @@ -518,99 +551,25 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { async fn finalize_horizon_sync(&mut self) -> Result<(), HorizonSyncError> { debug!(target: LOG_TARGET, "Validating horizon state"); - let info = HorizonSyncInfo::new( + self.shared.set_state_info(StateInfo::HorizonSync(HorizonSyncInfo::new( vec![self.sync_peer.peer_node_id().clone()], HorizonSyncStatus::Finalizing, - ); - self.shared.set_state_info(StateInfo::HorizonSync(info)); + ))); let header = self.db().fetch_chain_header(self.horizon_sync_height).await?; - let mut pruned_utxo_sum = HomomorphicCommitment::default(); - let mut pruned_kernel_sum = HomomorphicCommitment::default(); - - let mut prev_mmr = 0; - let mut prev_kernel_mmr = 0; - let bitmap = Arc::new( - self.db() - .fetch_complete_deleted_bitmap_at(header.hash().clone()) - .await? 
- .into_bitmap(), - ); - let expected_prev_best_block = self.shared.db.get_chain_metadata().await?.best_block().clone(); - for h in 0..=header.height() { - let curr_header = self.db().fetch_chain_header(h).await?; - - trace!( - target: LOG_TARGET, - "Fetching utxos from db: height:{}, header.output_mmr:{}, prev_mmr:{}, end:{}", - curr_header.height(), - curr_header.header().output_mmr_size, - prev_mmr, - curr_header.header().output_mmr_size - 1 - ); - let (utxos, _) = self - .db() - .fetch_utxos_by_mmr_position(prev_mmr, curr_header.header().output_mmr_size - 1, bitmap.clone()) - .await?; - trace!( - target: LOG_TARGET, - "Fetching kernels from db: height:{}, header.kernel_mmr:{}, prev_mmr:{}, end:{}", - curr_header.height(), - curr_header.header().kernel_mmr_size, - prev_kernel_mmr, - curr_header.header().kernel_mmr_size - 1 - ); - let kernels = self - .db() - .fetch_kernels_by_mmr_position(prev_kernel_mmr, curr_header.header().kernel_mmr_size - 1) - .await?; - - let mut utxo_sum = HomomorphicCommitment::default(); - debug!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len()); - debug!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); - let mut prune_counter = 0; - for u in utxos { - match u { - PrunedOutput::NotPruned { output } => { - utxo_sum = &output.commitment + &utxo_sum; - }, - _ => { - prune_counter += 1; - }, - } - } - if prune_counter > 0 { - debug!(target: LOG_TARGET, "Pruned {} outputs", prune_counter); - } - prev_mmr = curr_header.header().output_mmr_size; - - pruned_utxo_sum = &utxo_sum + &pruned_utxo_sum; - - for k in kernels { - pruned_kernel_sum = &k.excess + &pruned_kernel_sum; - } - prev_kernel_mmr = curr_header.header().kernel_mmr_size; - - trace!( - target: LOG_TARGET, - "Height: {} Kernel sum:{:?} Pruned UTXO sum: {:?}", - h, - pruned_kernel_sum, - pruned_utxo_sum - ); - } self.shared .sync_validators .final_horizon_state .validate( - &*self.db().clone().into_inner().db_read_access()?, + &*self.db().inner().db_read_access()?, header.height(), - &pruned_utxo_sum, - &pruned_kernel_sum, + &self.utxo_sum, + &self.kernel_sum, ) .map_err(HorizonSyncError::FinalStateValidationFailed)?; + let metadata = self.db().get_chain_metadata().await?; info!( target: LOG_TARGET, "Horizon state validation succeeded! Committing horizon state." 
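The hunk above removes the O(n) per-block recomputation of the pruned sums; `utxo_sum` and `kernel_sum` now arrive in `finalize_horizon_sync` as running totals maintained during kernel and output sync. A minimal sketch of the invariant the final validator relies on (illustrative only, not part of the patch; it assumes only `Commitment`'s homomorphic addition, which the patch itself uses):

    use tari_common_types::types::Commitment;

    // Folding the streamed values must reproduce the incrementally tracked sums:
    // kernel_sum == sum of every synced kernel.excess,
    // utxo_sum == sum of every unpruned synced output.commitment.
    fn fold_sums(excesses: &[Commitment], commitments: &[Commitment]) -> (Commitment, Commitment) {
        let kernel_sum = excesses.iter().fold(Commitment::default(), |acc, e| &acc + e);
        let utxo_sum = commitments.iter().fold(Commitment::default(), |acc, c| &acc + c);
        (kernel_sum, utxo_sum)
    }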
@@ -621,9 +580,9 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { header.height(), header.hash().clone(), header.accumulated_data().total_accumulated_difficulty, - expected_prev_best_block, + metadata.best_block().clone(), ) - .set_pruned_height(header.height(), pruned_kernel_sum, pruned_utxo_sum) + .set_pruned_height(header.height(), self.kernel_sum.clone(), self.utxo_sum.clone()) .commit() .await?; diff --git a/base_layer/core/src/blocks/accumulated_data.rs b/base_layer/core/src/blocks/accumulated_data.rs index c14f494c2e..fb7aaeec3a 100644 --- a/base_layer/core/src/blocks/accumulated_data.rs +++ b/base_layer/core/src/blocks/accumulated_data.rs @@ -49,29 +49,32 @@ use tari_mmr::{pruned_hashset::PrunedHashSet, ArrayLike}; const LOG_TARGET: &str = "c::bn::acc_data"; -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct BlockAccumulatedData { pub(crate) kernels: PrunedHashSet, pub(crate) outputs: PrunedHashSet, + pub(crate) witness: PrunedHashSet, pub(crate) deleted: DeletedBitmap, - pub(crate) range_proofs: PrunedHashSet, pub(crate) kernel_sum: Commitment, + pub(crate) utxo_sum: Commitment, } impl BlockAccumulatedData { pub fn new( kernels: PrunedHashSet, outputs: PrunedHashSet, - range_proofs: PrunedHashSet, + witness: PrunedHashSet, deleted: Bitmap, - total_kernel_sum: Commitment, + kernel_sum: Commitment, + utxo_sum: Commitment, ) -> Self { Self { kernels, outputs, - range_proofs, + witness, deleted: DeletedBitmap { deleted }, - kernel_sum: total_kernel_sum, + kernel_sum, + utxo_sum, } } @@ -79,13 +82,22 @@ impl BlockAccumulatedData { &self.deleted.deleted } + pub fn set_deleted(&mut self, deleted: DeletedBitmap) -> &mut Self { + self.deleted = deleted; + self + } + pub fn dissolve(self) -> (PrunedHashSet, PrunedHashSet, PrunedHashSet, Bitmap) { - (self.kernels, self.outputs, self.range_proofs, self.deleted.deleted) + (self.kernels, self.outputs, self.witness, self.deleted.deleted) } pub fn kernel_sum(&self) -> &Commitment { &self.kernel_sum } + + pub fn utxo_sum(&self) -> &Commitment { + &self.utxo_sum + } } impl Default for BlockAccumulatedData { @@ -96,8 +108,9 @@ impl Default for BlockAccumulatedData { deleted: DeletedBitmap { deleted: Bitmap::create(), }, - range_proofs: Default::default(), + witness: Default::default(), kernel_sum: Default::default(), + utxo_sum: Default::default(), } } } @@ -110,11 +123,21 @@ impl Display for BlockAccumulatedData { self.outputs.len().unwrap_or(0), self.deleted.deleted.cardinality(), self.kernels.len().unwrap_or(0), - self.range_proofs.len().unwrap_or(0) + self.witness.len().unwrap_or(0) ) } } +#[derive(Debug, Clone, Default)] +pub struct UpdateBlockAccumulatedData { + pub kernel_hash_set: Option<PrunedHashSet>, + pub utxo_hash_set: Option<PrunedHashSet>, + pub witness_hash_set: Option<PrunedHashSet>, + pub deleted_diff: Option<DeletedBitmap>, + pub utxo_sum: Option<Commitment>, + pub kernel_sum: Option<Commitment>, +} + /// Wrapper struct to serialize and deserialize Bitmap #[derive(Debug, Clone)] pub struct DeletedBitmap { diff --git a/base_layer/core/src/blocks/mod.rs b/base_layer/core/src/blocks/mod.rs index 19a49c3071..3b7eb851f1 100644 --- a/base_layer/core/src/blocks/mod.rs +++ b/base_layer/core/src/blocks/mod.rs @@ -30,6 +30,7 @@ pub use accumulated_data::{ ChainHeader, CompleteDeletedBitmap, DeletedBitmap, + UpdateBlockAccumulatedData, }; mod error; diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index bf7c9b4a06..5a31c60bb6 100644 --- a/base_layer/core/src/chain_storage/async_db.rs
+++ b/base_layer/core/src/chain_storage/async_db.rs @@ -32,6 +32,7 @@ use crate::{ DeletedBitmap, HistoricalBlock, NewBlockTemplate, + UpdateBlockAccumulatedData, }, chain_storage::{ blockchain_database::MmrRoots, @@ -61,7 +62,6 @@ use tari_common_types::{ chain_metadata::ChainMetadata, types::{BlockHash, Commitment, HashOutput, Signature}, }; -use tari_mmr::pruned_hashset::PrunedHashSet; const LOG_TARGET: &str = "c::bn::async_db"; @@ -145,7 +145,7 @@ impl<B: BlockchainBackend + 'static> AsyncBlockchainDb<B> { //---------------------------------- Metadata --------------------------------------------// make_async_fn!(get_chain_metadata() -> ChainMetadata, "get_chain_metadata"); - make_async_fn!(fetch_horizon_data() -> Option<HorizonData>, "fetch_horizon_data"); + make_async_fn!(fetch_horizon_data() -> HorizonData, "fetch_horizon_data"); //---------------------------------- TXO --------------------------------------------// make_async_fn!(fetch_utxo(hash: HashOutput) -> Option<PrunedOutput>, "fetch_utxo"); @@ -280,11 +280,11 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { &mut self, height: u64, hash: HashOutput, - accumulated_data: u128, + accumulated_difficulty: u128, expected_prev_best_block: HashOutput, ) -> &mut Self { self.transaction - .set_best_block(height, hash, accumulated_data, expected_prev_best_block); + .set_best_block(height, hash, accumulated_difficulty, expected_prev_best_block); self } @@ -328,23 +328,12 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } - pub fn update_pruned_hash_set( + pub fn update_block_accumulated_data_via_horizon_sync( &mut self, - mmr_tree: MmrTree, header_hash: HashOutput, - pruned_hash_set: PrunedHashSet, + values: UpdateBlockAccumulatedData, ) -> &mut Self { - self.transaction - .update_pruned_hash_set(mmr_tree, header_hash, pruned_hash_set); - self - } - - pub fn update_block_accumulated_data_with_deleted_diff( - &mut self, - header_hash: HashOutput, - deleted: Bitmap, - ) -> &mut Self { - self.transaction.update_deleted_with_diff(header_hash, deleted); + self.transaction.update_block_accumulated_data(header_hash, values); self } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 52c1ce2099..d45c8e5fca 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -31,6 +31,7 @@ use crate::{ DeletedBitmap, HistoricalBlock, NewBlockTemplate, + UpdateBlockAccumulatedData, }, chain_storage::{ consts::{ @@ -208,8 +209,25 @@ where B: BlockchainBackend }; if is_empty { info!(target: LOG_TARGET, "Blockchain db is empty.
Adding genesis block."); - let genesis_block = blockchain_db.consensus_manager.get_genesis_block(); - blockchain_db.insert_block(Arc::new(genesis_block))?; + let genesis_block = Arc::new(blockchain_db.consensus_manager.get_genesis_block()); + blockchain_db.insert_block(genesis_block.clone())?; + let mut txn = DbTransaction::new(); + let utxo_sum = genesis_block.block().body.outputs().iter().map(|k| &k.commitment).sum(); + let kernel_sum = genesis_block.block().body.kernels().iter().map(|k| &k.excess).sum(); + txn.update_block_accumulated_data(genesis_block.hash().clone(), UpdateBlockAccumulatedData { + kernel_hash_set: None, + utxo_hash_set: None, + witness_hash_set: None, + deleted_diff: None, + utxo_sum: Some(utxo_sum), + kernel_sum: Some(kernel_sum), + }); + txn.set_pruned_height( + 0, + genesis_block.block().body.kernels().iter().map(|k| &k.excess).sum(), + genesis_block.block().body.outputs().iter().map(|o| &o.commitment).sum(), + ); + blockchain_db.write(txn)?; blockchain_db.store_pruning_horizon(config.pruning_horizon)?; } if cleanup_orphans_at_startup { @@ -573,7 +591,7 @@ where B: BlockchainBackend /// Returns the sum of all kernels pub fn fetch_kernel_commitment_sum(&self, at_hash: &HashOutput) -> Result { - Ok(self.fetch_block_accumulated_data(at_hash.clone())?.kernel_sum) + Ok(self.fetch_block_accumulated_data(at_hash.clone())?.kernel_sum().clone()) } /// Returns `n` hashes from height _h - offset_ where _h_ is the tip header height back to `h - n - offset`. @@ -972,9 +990,9 @@ where B: BlockchainBackend rewind_to_hash(&mut *db, hash) } - pub fn fetch_horizon_data(&self) -> Result, ChainStorageError> { + pub fn fetch_horizon_data(&self) -> Result { let db = self.db_read_access()?; - db.fetch_horizon_data() + Ok(db.fetch_horizon_data()?.unwrap_or_default()) } pub fn fetch_complete_deleted_bitmap_at( @@ -1071,7 +1089,7 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul let BlockAccumulatedData { kernels, outputs, - range_proofs, + witness: range_proofs, .. } = db .fetch_block_accumulated_data(&header.prev_hash)? 
@@ -2034,6 +2052,7 @@ fn cleanup_orphans<T: BlockchainBackend>(db: &mut T, orphan_storage_capacity: us db.delete_oldest_orphans(horizon_height, orphan_storage_capacity) } + fn prune_database_if_needed<T: BlockchainBackend>( db: &mut T, pruning_horizon: u64, @@ -2067,6 +2086,7 @@ fn prune_database_if_needed<T: BlockchainBackend>( )?; let mut txn = DbTransaction::new(); for block_to_prune in (last_pruned + 1)..abs_pruning_horizon { + let header = db.fetch_chain_header_by_height(block_to_prune)?; let curr_block = db.fetch_block_accumulated_data_by_height(block_to_prune).or_not_found( "BlockAccumulatedData", "height", @@ -2074,12 +2094,20 @@ fn prune_database_if_needed<T: BlockchainBackend>( )?; // Note, this could actually be done in one step instead of each block, since deleted is // accumulated - let inputs_to_prune = curr_block.deleted.bitmap().clone() - last_block.deleted.bitmap(); + let output_mmr_positions = curr_block.deleted() - last_block.deleted(); last_block = curr_block; - txn.prune_outputs_and_update_horizon(inputs_to_prune.to_vec(), block_to_prune); + txn.prune_outputs_at_positions(output_mmr_positions.to_vec()); + txn.delete_all_inputs_in_block(header.hash().clone()); } + txn.set_pruned_height( + abs_pruning_horizon - 1, + last_block.kernel_sum().clone(), + last_block.utxo_sum().clone(), + ); + // TODO: prune block accumulated data + db.write(txn)?; } diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 970988d655..01e7bcb78d 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -20,8 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{ - blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader}, - chain_storage::{error::ChainStorageError, MmrTree}, + blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, + chain_storage::error::ChainStorageError, transactions::transaction::{TransactionKernel, TransactionOutput}, }; use croaring::Bitmap; @@ -35,7 +35,6 @@ use tari_crypto::tari_utilities::{ hex::{to_hex, Hex}, Hashable, }; -use tari_mmr::pruned_hashset::PrunedHashSet; #[derive(Debug)] pub struct DbTransaction { @@ -142,31 +141,26 @@ impl DbTransaction { self } - pub fn update_pruned_hash_set( - &mut self, - mmr_tree: MmrTree, - header_hash: HashOutput, - pruned_hash_set: PrunedHashSet, - ) -> &mut Self { - self.operations.push(WriteOperation::UpdatePrunedHashSet { - mmr_tree, - header_hash, - pruned_hash_set: Box::new(pruned_hash_set), + pub fn prune_outputs_at_positions(&mut self, output_mmr_positions: Vec<u32>) -> &mut Self { + self.operations.push(WriteOperation::PruneOutputsAtMmrPositions { + output_positions: output_mmr_positions, }); self } - pub fn prune_outputs_and_update_horizon(&mut self, output_mmr_positions: Vec<u32>, horizon: u64) -> &mut Self { - self.operations.push(WriteOperation::PruneOutputsAndUpdateHorizon { - output_positions: output_mmr_positions, - horizon, - }); + pub fn delete_all_inputs_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.operations + .push(WriteOperation::DeleteAllInputsInBlock { block_hash }); self } - pub fn update_deleted_with_diff(&mut self, header_hash: HashOutput, deleted: Bitmap) -> &mut Self { + pub fn update_block_accumulated_data( + &mut self, + header_hash: HashOutput, + values: UpdateBlockAccumulatedData, + ) -> &mut Self { self.operations - .push(WriteOperation::UpdateDeletedBlockAccumulatedDataWithDiff { header_hash, deleted }); + .push(WriteOperation::UpdateBlockAccumulatedData { header_hash, values }); self } @@ -298,25 +292,18 @@ pub enum WriteOperation { DeleteOrphanChainTip(HashOutput), InsertOrphanChainTip(HashOutput), InsertMoneroSeedHeight(Vec<u8>, u64), - UpdatePrunedHashSet { - mmr_tree: MmrTree, - header_hash: HashOutput, - pruned_hash_set: Box<PrunedHashSet>, - }, - UpdateDeletedBlockAccumulatedDataWithDiff { + UpdateBlockAccumulatedData { header_hash: HashOutput, - deleted: Bitmap, + values: UpdateBlockAccumulatedData, }, UpdateDeletedBitmap { deleted: Bitmap, }, - PruneOutputsAndUpdateHorizon { + PruneOutputsAtMmrPositions { output_positions: Vec<u32>, - horizon: u64, }, - UpdateKernelSum { - header_hash: HashOutput, - kernel_sum: Commitment, + DeleteAllInputsInBlock { + block_hash: BlockHash, }, SetAccumulatedDataForOrphan(BlockHeaderAccumulatedData), SetBestBlock { @@ -383,14 +370,6 @@ impl fmt::Display for WriteOperation { write!(f, "Insert Monero seed string {} for height: {}", data.to_hex(), height) }, InsertChainOrphanBlock(block) => write!(f, "InsertChainOrphanBlock({})", block.hash().to_hex()), - UpdatePrunedHashSet { - mmr_tree, header_hash, .. - } => write!( - f, - "Update pruned hash set: {} header: {}", - mmr_tree, - header_hash.to_hex() - ), InsertPrunedOutput { header_hash: _, header_height: _, @@ -398,23 +377,14 @@ impl fmt::Display for WriteOperation { witness_hash: _, mmr_position: _, } => write!(f, "Insert pruned output"), - UpdateDeletedBlockAccumulatedDataWithDiff { - header_hash: _, - deleted: _, - } => write!(f, "Add deleted data for block"), + UpdateBlockAccumulatedData { header_hash, ..
} => { + write!(f, "Update block accumulated data for block {}", header_hash.to_hex()) + }, UpdateDeletedBitmap { deleted } => { write!(f, "Merge deleted bitmap at tip ({} new indexes)", deleted.cardinality()) }, - PruneOutputsAndUpdateHorizon { - output_positions, - horizon, - } => write!( - f, - "Prune {} outputs and set horizon to {}", - output_positions.len(), - horizon - ), - UpdateKernelSum { header_hash, .. } => write!(f, "Update kernel sum for block: {}", header_hash.to_hex()), + PruneOutputsAtMmrPositions { output_positions } => write!(f, "Prune {} output(s)", output_positions.len()), + DeleteAllInputsInBlock { block_hash } => write!(f, "Delete inputs in block {}", block_hash.to_hex()), SetAccumulatedDataForOrphan(accumulated_data) => { write!(f, "Set accumulated data for orphan {}", accumulated_data) }, diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index 14cbd7a53f..c776ec2222 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -169,13 +169,7 @@ pub trait OrNotFound<T> { impl<T> OrNotFound<T> for Result<Option<T>, ChainStorageError> { fn or_not_found(self, entity: &'static str, field: &'static str, value: String) -> Result<T, ChainStorageError> { - match self { - Ok(inner) => match inner { - None => Err(ChainStorageError::ValueNotFound { entity, field, value }), - Some(v) => Ok(v), - }, - Err(err) => Err(err), - } + self.and_then(|inner| inner.ok_or(ChainStorageError::ValueNotFound { entity, field, value })) } } diff --git a/base_layer/core/src/chain_storage/horizon_data.rs b/base_layer/core/src/chain_storage/horizon_data.rs index 6213d490f3..ae6a120bb4 100644 --- a/base_layer/core/src/chain_storage/horizon_data.rs +++ b/base_layer/core/src/chain_storage/horizon_data.rs @@ -21,9 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use serde::{Deserialize, Serialize}; use tari_common_types::types::Commitment; -use tari_crypto::tari_utilities::ByteArray; -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, Default)] pub struct HorizonData { kernel_sum: Commitment, utxo_sum: Commitment, @@ -35,10 +34,7 @@ impl HorizonData { } pub fn zero() -> Self { - HorizonData { - kernel_sum: Commitment::from_bytes(&[0u8; 32]).expect("Could not create commitment"), - utxo_sum: Commitment::from_bytes(&[0u8; 32]).expect("Could not create commitment"), - } + Default::default() } pub fn kernel_sum(&self) -> &Commitment { diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 0b8e6333af..5184a744ec 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -33,6 +33,7 @@ use crate::{ ChainBlock, ChainHeader, DeletedBitmap, + UpdateBlockAccumulatedData, }, chain_storage::{ db_transaction::{DbKey, DbTransaction, DbValue, WriteOperation}, @@ -85,7 +86,7 @@ use tari_common_types::{ types::{BlockHash, Commitment, HashDigest, HashOutput, Signature, BLOCK_HASH_LENGTH}, }; use tari_crypto::tari_utilities::{hash::Hashable, hex::Hex, ByteArray}; -use tari_mmr::{pruned_hashset::PrunedHashSet, Hash, MerkleMountainRange, MutableMmr}; +use tari_mmr::{Hash, MerkleMountainRange, MutableMmr}; use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; type DatabaseRef = Arc<Database<'static>>; @@ -311,32 +312,19 @@ impl LMDBDatabase { self.insert_orphan_block(&write_txn, chain_block.block())?; self.set_accumulated_data_for_orphan(&write_txn, chain_block.accumulated_data())?; }, - UpdatePrunedHashSet { - mmr_tree, - header_hash, - pruned_hash_set, - } => { - self.update_pruned_hash_set(&write_txn, *mmr_tree, header_hash, (**pruned_hash_set).clone())?; - }, - UpdateDeletedBlockAccumulatedDataWithDiff { header_hash, deleted } => { - self.update_deleted_block_accumulated_data_with_diff(&write_txn, header_hash, deleted.clone())?; + UpdateBlockAccumulatedData { header_hash, values } => { + self.update_block_accumulated_data(&write_txn, header_hash, values.clone())?; }, UpdateDeletedBitmap { deleted } => { let mut bitmap = self.load_deleted_bitmap_model(&write_txn)?; bitmap.merge(deleted)?; bitmap.finish()?; }, - PruneOutputsAndUpdateHorizon { - output_positions, - horizon, - } => { - self.prune_outputs_and_update_horizon(&write_txn, output_positions, *horizon)?; + PruneOutputsAtMmrPositions { output_positions } => { + self.prune_outputs_at_positions(&write_txn, output_positions)?; }, - UpdateKernelSum { - header_hash, - kernel_sum, - } => { - self.update_block_accumulated_data_kernel_sum(&write_txn, header_hash, kernel_sum.clone())?; + DeleteAllInputsInBlock { block_hash } => { + self.delete_all_inputs_in_block(&write_txn, block_hash)?; }, SetBestBlock { height, @@ -1040,24 +1028,24 @@ impl LMDBDatabase { self.fetch_block_accumulated_data(&*txn, header.height - 1)? .ok_or_else(|| ChainStorageError::ValueNotFound { entity: "BlockAccumulatedData", - field: "prev_hash", - value: header.prev_hash.to_hex(), + field: "height", + value: (header.height - 1).to_string(), })? }; - let mut total_kernel_sum = Commitment::default(); - let mut total_utxo_sum = Commitment::default(); let BlockAccumulatedData { kernels: pruned_kernel_set, outputs: pruned_output_set, - range_proofs: pruned_proof_set, + witness: pruned_proof_set, + mut kernel_sum, + mut utxo_sum, ..
} = data; let mut kernel_mmr = MerkleMountainRange::<HashDigest, _>::new(pruned_kernel_set); for kernel in kernels { - total_kernel_sum = &total_kernel_sum + &kernel.excess; + kernel_sum = &kernel_sum + &kernel.excess; let pos = kernel_mmr.push(kernel.hash())?; trace!( target: LOG_TARGET, @@ -1070,7 +1058,7 @@ impl LMDBDatabase { let mut output_mmr = MutableMmr::<HashDigest, _>::new(pruned_output_set, Bitmap::create())?; let mut witness_mmr = MerkleMountainRange::<HashDigest, _>::new(pruned_proof_set); for output in outputs { - total_utxo_sum = &total_utxo_sum + &output.commitment; + utxo_sum = &utxo_sum + &output.commitment; output_mmr.push(output.hash())?; witness_mmr.push(output.witness_hash())?; debug!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex()); @@ -1084,7 +1072,7 @@ impl LMDBDatabase { } for input in inputs { - total_utxo_sum = &total_utxo_sum - &input.commitment; + utxo_sum = &utxo_sum - &input.commitment; let index = self .fetch_mmr_leaf_index(&**txn, MmrTree::Utxo, &input.output_hash())? .ok_or(ChainStorageError::UnspendableInput)?; @@ -1119,7 +1107,8 @@ impl LMDBDatabase { output_mmr.mmr().get_pruned_hash_set()?, witness_mmr.get_pruned_hash_set()?, deleted_at_current_height, - total_kernel_sum, + kernel_sum, + utxo_sum, ), )?; @@ -1142,31 +1131,11 @@ impl LMDBDatabase { ) } - fn update_block_accumulated_data_kernel_sum( - &self, - write_txn: &WriteTransaction<'_>, - header_hash: &HashOutput, - kernel_sum: Commitment, - ) -> Result<(), ChainStorageError> { - let height = self.fetch_height_from_hash(write_txn, header_hash).or_not_found( - "BlockHash", - "hash", - header_hash.to_hex(), - )?; - let mut block_accum_data = self - .fetch_block_accumulated_data(write_txn, height)? - .unwrap_or_default(); - - block_accum_data.kernel_sum = kernel_sum; - lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; - Ok(()) - } - - fn update_deleted_block_accumulated_data_with_diff( + fn update_block_accumulated_data( &self, write_txn: &WriteTransaction<'_>, header_hash: &HashOutput, - deleted: Bitmap, + values: UpdateBlockAccumulatedData, ) -> Result<(), ChainStorageError> { let height = self.fetch_height_from_hash(write_txn, header_hash).or_not_found( "BlockHash", @@ -1175,10 +1144,28 @@ )?; let mut block_accum_data = self - .fetch_block_accumulated_data(write_txn, height)? + .fetch_block_accumulated_data(&*write_txn, height)?
.unwrap_or_default(); - block_accum_data.deleted = deleted.into(); + if let Some(deleted_diff) = values.deleted_diff { + block_accum_data.deleted = deleted_diff; + } + if let Some(utxo_sum) = values.utxo_sum { + block_accum_data.utxo_sum = utxo_sum; + } + if let Some(kernel_sum) = values.kernel_sum { + block_accum_data.kernel_sum = kernel_sum; + } + if let Some(kernel_hash_set) = values.kernel_hash_set { + block_accum_data.kernels = kernel_hash_set; + } + if let Some(utxo_hash_set) = values.utxo_hash_set { + block_accum_data.outputs = utxo_hash_set; + } + if let Some(witness_hash_set) = values.witness_hash_set { + block_accum_data.witness = witness_hash_set; + } + lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; Ok(()) } @@ -1206,36 +1193,20 @@ impl LMDBDatabase { Ok(()) } - fn update_pruned_hash_set( + fn delete_all_inputs_in_block( &self, - write_txn: &WriteTransaction<'_>, - mmr_tree: MmrTree, - header_hash: &HashOutput, - pruned_hash_set: PrunedHashSet, + txn: &WriteTransaction<'_>, + block_hash: &BlockHash, ) -> Result<(), ChainStorageError> { - let height = self.fetch_height_from_hash(write_txn, header_hash).or_not_found( - "BlockHash", - "hash", - header_hash.to_hex(), - )?; - let mut block_accum_data = self - .fetch_block_accumulated_data(write_txn, height)? - .unwrap_or_default(); - match mmr_tree { - MmrTree::Kernel => block_accum_data.kernels = pruned_hash_set, - MmrTree::Utxo => block_accum_data.outputs = pruned_hash_set, - MmrTree::Witness => block_accum_data.range_proofs = pruned_hash_set, - } - - lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; + let inputs = lmdb_delete_keys_starting_with::<Vec<TransactionInput>>(&txn, &self.inputs_db, block_hash.to_hex().as_str())?; + debug!(target: LOG_TARGET, "Deleted {} input(s)", inputs.len()); Ok(()) } - fn prune_outputs_and_update_horizon( + fn prune_outputs_at_positions( &self, write_txn: &WriteTransaction<'_>, output_positions: &[u32], - horizon: u64, ) -> Result<(), ChainStorageError> { for pos in output_positions { let (_height, hash) = lmdb_first_after::<_, (u64, Vec<u8>)>( @@ -1249,12 +1220,6 @@ impl LMDBDatabase { self.prune_output(write_txn, &key)?; } - self.set_metadata( - write_txn, - MetadataKey::PrunedHeight, - MetadataValue::PrunedHeight(horizon), - )?; - Ok(()) } diff --git a/base_layer/core/src/proof_of_work/randomx_factory.rs b/base_layer/core/src/proof_of_work/randomx_factory.rs index 383bc0417f..79909c679b 100644 --- a/base_layer/core/src/proof_of_work/randomx_factory.rs +++ b/base_layer/core/src/proof_of_work/randomx_factory.rs @@ -15,7 +15,7 @@ pub struct RandomXVMInstance { // The cache and dataset for the VM need to be stored together with it since they are not // mix and match. instance: Arc<Mutex<(RandomXVM, RandomXCache, Option<RandomXDataset>)>>, - flags: RandomXFlag, + _flags: RandomXFlag, } impl RandomXVMInstance { @@ -48,7 +48,7 @@ impl RandomXVMInstance { Ok(Self { instance: Arc::new(Mutex::new((vm, cache, None))), - flags, + _flags: flags, }) } diff --git a/base_layer/core/src/transactions/aggregated_body.rs b/base_layer/core/src/transactions/aggregated_body.rs index a7f3134678..685ab6c47b 100644 --- a/base_layer/core/src/transactions/aggregated_body.rs +++ b/base_layer/core/src/transactions/aggregated_body.rs @@ -451,11 +451,7 @@ impl AggregateBody { fn validate_range_proofs(&self, range_proof_service: &RangeProofService) -> Result<(), TransactionError> { trace!(target: LOG_TARGET, "Checking range proofs"); for o in &self.outputs { - if !o.verify_range_proof(range_proof_service)?
{ - return Err(TransactionError::ValidationError( - "Range proof could not be verified".into(), - )); - } + o.verify_range_proof(range_proof_service)?; } Ok(()) } diff --git a/base_layer/core/src/transactions/coinbase_builder.rs b/base_layer/core/src/transactions/coinbase_builder.rs index 3c75d7fa84..90041e5876 100644 --- a/base_layer/core/src/transactions/coinbase_builder.rs +++ b/base_layer/core/src/transactions/coinbase_builder.rs @@ -323,7 +323,7 @@ mod test { assert!(factories .commitment .open_value(&p.spend_key, block_reward.into(), utxo.commitment())); - assert!(utxo.verify_range_proof(&factories.range_proof).unwrap()); + utxo.verify_range_proof(&factories.range_proof).unwrap(); assert!(utxo.features.flags.contains(OutputFlags::COINBASE_OUTPUT)); assert_eq!( tx.body.check_coinbase_output( diff --git a/base_layer/core/src/transactions/transaction.rs b/base_layer/core/src/transactions/transaction.rs index 9c59bbaddf..9dd488922a 100644 --- a/base_layer/core/src/transactions/transaction.rs +++ b/base_layer/core/src/transactions/transaction.rs @@ -263,6 +263,8 @@ pub enum TransactionError { NoSignatureError, #[error("A range proof construction or verification has produced an error: {0}")] RangeProofError(#[from] RangeProofError), + #[error("Range proof verification has failed")] + InvalidRangeProof, #[error("An error occurred while performing a commitment signature: {0}")] SigningError(#[from] CommitmentSignatureError), #[error("Invalid kernel in body")] @@ -687,8 +689,12 @@ impl TransactionOutput { } /// Verify that range proof is valid - pub fn verify_range_proof(&self, prover: &RangeProofService) -> Result<bool, TransactionError> { - Ok(prover.verify(&self.proof.0, &self.commitment)) + pub fn verify_range_proof(&self, prover: &RangeProofService) -> Result<(), TransactionError> { + if prover.verify(&self.proof.0, &self.commitment) { + Ok(()) + } else { + Err(TransactionError::InvalidRangeProof) + } } /// Verify that the metadata signature is valid @@ -1417,6 +1423,7 @@ mod test { test_helpers, test_helpers::{TestParams, UtxoTestParams}, transaction::OutputFeatures, + transaction::TransactionError, }, txn_schema, }; @@ -1477,7 +1484,7 @@ mod test { }); let script = unblinded_output1.script.clone(); let tx_output1 = unblinded_output1.as_transaction_output(&factories).unwrap(); - assert!(tx_output1.verify_range_proof(&factories.range_proof).unwrap()); + tx_output1.verify_range_proof(&factories.range_proof).unwrap(); let unblinded_output2 = test_params_2.create_unblinded_output(UtxoTestParams { value: (2u64.pow(32) + 1u64).into(), @@ -1517,7 +1524,9 @@ mod test { ) .unwrap(), ); - assert!(!tx_output3.verify_range_proof(&factories.range_proof).unwrap()); + let err = tx_output3.verify_range_proof(&factories.range_proof).unwrap_err(); + assert!(matches!(err, TransactionError::InvalidRangeProof)); } #[test] diff --git a/base_layer/core/src/transactions/transaction_protocol/recipient.rs b/base_layer/core/src/transactions/transaction_protocol/recipient.rs index c3c6f99ac1..f8cecc7c1e 100644 --- a/base_layer/core/src/transactions/transaction_protocol/recipient.rs +++ b/base_layer/core/src/transactions/transaction_protocol/recipient.rs @@ -263,7 +263,7 @@ mod test { assert!(factories .commitment .open_value(&p.spend_key, 500, &data.output.commitment)); - assert!(data.output.verify_range_proof(&factories.range_proof).unwrap()); + data.output.verify_range_proof(&factories.range_proof).unwrap(); let r_sum = &msg.public_nonce + &p.public_nonce; let e =
build_challenge(&r_sum, &m); let s = Signature::sign(p.spend_key.clone(), p.nonce, &e).unwrap(); diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs index b38577a2ad..8f32e5358f 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -385,11 +385,7 @@ impl SenderTransactionProtocol { ) -> Result<(), TPE> { match &mut self.state { SenderState::CollectingSingleSignature(info) => { - if !rec.output.verify_range_proof(prover)? { - return Err(TPE::ValidationError( - "Recipient output range proof failed to verify".into(), - )); - } + rec.output.verify_range_proof(prover)?; // Consolidate transaction info info.outputs.push(rec.output.clone()); @@ -1027,13 +1023,12 @@ mod test { // Receiver gets message, deserializes it etc, and creates his response let bob_info = SingleReceiverTransactionProtocol::create(&msg, b.nonce, b.spend_key, features, &factories, None).unwrap(); // Alice gets message back, deserializes it, etc - match alice.add_single_recipient_info(bob_info, &factories.range_proof) { - Ok(_) => panic!("Range proof should have failed to verify"), - Err(e) => assert_eq!( - e, - TransactionProtocolError::ValidationError("Recipient output range proof failed to verify".into()) - ), - } + let err = alice + .add_single_recipient_info(bob_info, &factories.range_proof) + .unwrap_err(); + // The failure surfaces as TransactionError::InvalidRangeProof wrapped by the protocol error + assert!(matches!(err, TransactionProtocolError::TransactionBuildError(_))); } #[test] diff --git a/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs b/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs index ab26e3e80c..3a15d6a134 100644 --- a/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs +++ b/base_layer/core/src/transactions/transaction_protocol/single_receiver.rs @@ -219,10 +219,7 @@ mod test { factories.commitment.open_value(&k, info.amount.into(), &out.commitment), "Output commitment is invalid" ); - assert!( - out.verify_range_proof(&factories.range_proof).unwrap(), - "Range proof is invalid" - ); + out.verify_range_proof(&factories.range_proof).unwrap(); assert!(out.features.flags.is_empty(), "Output features flags have changed"); } } diff --git a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs index ef45b0a449..71780fb9a3 100644 --- a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs +++ b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs @@ -70,7 +70,6 @@ pub const LOG_TARGET: &str = "c::tx::tx_protocol::tx_initializer"; /// methods, you can call `build()` which will return a #[derive(Debug, Clone)] pub struct SenderTransactionInitializer { - consensus_constants: ConsensusConstants, num_recipients: usize, amounts: FixedSet<MicroTari>, lock_height: Option<u64>, @@ -113,7 +112,6 @@ impl SenderTransactionInitializer { pub fn new(num_recipients: usize, consensus_constants: ConsensusConstants) -> Self { Self { fee:
Fee::new(*consensus_constants.transaction_weight()), - consensus_constants, num_recipients, amounts: FixedSet::new(num_recipients), lock_height: None, diff --git a/base_layer/p2p/src/dns/client.rs b/base_layer/p2p/src/dns/client.rs index 78093186ef..8a875e9429 100644 --- a/base_layer/p2p/src/dns/client.rs +++ b/base_layer/p2p/src/dns/client.rs @@ -106,7 +106,7 @@ impl DnsClient { #[derive(Clone)] pub struct Client<C> { inner: C, - shutdown: Arc<Shutdown>, + _shutdown: Arc<Shutdown>, } impl Client { @@ -121,7 +121,7 @@ impl Client { Ok(Self { inner: client, - shutdown: Arc::new(shutdown), + _shutdown: Arc::new(shutdown), }) } } @@ -135,7 +135,7 @@ impl Client { Ok(Self { inner: client, - shutdown: Arc::new(shutdown), + _shutdown: Arc::new(shutdown), }) } } @@ -165,7 +165,7 @@ mod mock { let client = MockClientHandle::mock(messages); Ok(Self { inner: client, - shutdown: Arc::new(Shutdown::new()), + _shutdown: Arc::new(Shutdown::new()), }) } } diff --git a/base_layer/p2p/src/dns/mock.rs b/base_layer/p2p/src/dns/mock.rs index 9dec5155cb..1cd0c1bbad 100644 --- a/base_layer/p2p/src/dns/mock.rs +++ b/base_layer/p2p/src/dns/mock.rs @@ -34,7 +34,7 @@ use trust_dns_client::{ #[derive(Clone)] pub struct MockClientHandle<O> { messages: Arc>>, - on_send: O, + _on_send: O, } impl MockClientHandle { @@ -44,7 +44,7 @@ ... MockClientHandle { messages: Arc::new(messages), - on_send: DefaultOnSend, + _on_send: DefaultOnSend, } } } diff --git a/base_layer/p2p/src/services/liveness/state.rs b/base_layer/p2p/src/services/liveness/state.rs index 383652a2ab..f61f14a295 100644 --- a/base_layer/p2p/src/services/liveness/state.rs +++ b/base_layer/p2p/src/services/liveness/state.rs @@ -74,7 +74,6 @@ pub struct LivenessState { pongs_received: usize, pings_sent: usize, pongs_sent: usize, - num_active_peers: usize, local_metadata: Metadata, } diff --git a/common/src/build/protobuf.rs b/common/src/build/protobuf.rs index 6898052e94..875320a468 100644 --- a/common/src/build/protobuf.rs +++ b/common/src/build/protobuf.rs @@ -24,9 +24,12 @@ where P: AsRef<Path> + Display { .output() .unwrap(); - if !out.status.success() { - panic!("status: {} - {}", out.status, String::from_utf8_lossy(&out.stderr)); - } + assert!( + out.status.success(), + "status: {} - {}", + out.status, + String::from_utf8_lossy(&out.stderr) + ); } } diff --git a/comms/dht/src/network_discovery/discovering.rs b/comms/dht/src/network_discovery/discovering.rs index e15e96ed8f..93cb55a5c7 100644 --- a/comms/dht/src/network_discovery/discovering.rs +++ b/comms/dht/src/network_discovery/discovering.rs @@ -41,10 +41,8 @@ const LOG_TARGET: &str = "comms::dht::network_discovery"; pub(super) struct Discovering { params: DiscoveryParams, context: NetworkDiscoveryContext, - candidate_peers: Vec<NodeId>, stats: DhtNetworkDiscoveryRoundInfo, neighbourhood_threshold: NodeDistance, - excluded_peers: Vec<NodeId>, } impl Discovering { @@ -52,10 +50,8 @@ impl Discovering { Self { params, context, - candidate_peers: Vec::new(), stats: Default::default(), neighbourhood_threshold: NodeDistance::max_distance(), - excluded_peers: Vec::new(), } } diff --git a/comms/src/multiplexing/yamux.rs b/comms/src/multiplexing/yamux.rs index 54696cecd7..811f485390 100644 --- a/comms/src/multiplexing/yamux.rs +++ b/comms/src/multiplexing/yamux.rs @@ -144,7 +144,7 @@ impl Control { let stream = self.inner.open_stream().await?; Ok(Substream { stream: stream.compat(), - counter_guard, + _counter_guard: counter_guard, }) } @@ -193,7 +193,7 @@ impl Stream for IncomingSubstreams { match futures::ready!(Pin::new(&mut
self.inner).poll_recv(cx)) { Some(stream) => Poll::Ready(Some(Substream { stream: stream.compat(), - counter_guard: self.substream_counter.new_guard(), + _counter_guard: self.substream_counter.new_guard(), })), None => Poll::Ready(None), } @@ -209,7 +209,7 @@ impl Drop for IncomingSubstreams { #[derive(Debug)] pub struct Substream { stream: Compat<yamux::Stream>, - counter_guard: AtomicRefCounterGuard, + _counter_guard: AtomicRefCounterGuard, } impl StreamId for Substream {
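A closing note on the `TransactionOutput::verify_range_proof` change: it now returns `Result<(), TransactionError>` instead of `Result<bool, TransactionError>`, with a failed proof surfacing as `TransactionError::InvalidRangeProof`. A migration sketch for call sites (illustrative only; `check_output` is a hypothetical helper, not part of the patch):

    use tari_common_types::types::RangeProofService;
    use crate::transactions::transaction::{TransactionError, TransactionOutput};

    // Before: if !output.verify_range_proof(prover)? { return Err(...); }
    // After: a failed proof is already an error value, so `?` propagates it directly.
    fn check_output(output: &TransactionOutput, prover: &RangeProofService) -> Result<(), TransactionError> {
        output.verify_range_proof(prover)?;
        Ok(())
    }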