diff --git a/applications/tari_app_grpc/src/conversions/aggregate_body.rs b/applications/tari_app_grpc/src/conversions/aggregate_body.rs index eb5b22b27f..a548227034 100644 --- a/applications/tari_app_grpc/src/conversions/aggregate_body.rs +++ b/applications/tari_app_grpc/src/conversions/aggregate_body.rs @@ -31,22 +31,14 @@ impl TryFrom<AggregateBody> for grpc::AggregateBody { type Error = String; fn try_from(source: AggregateBody) -> Result<Self, Self::Error> { + let (inputs, outputs, kernels) = source.dissolve(); Ok(Self { - inputs: source - .inputs() - .iter() - .map(|input| grpc::TransactionInput::try_from(input.clone())) - .collect::<Result<Vec<_>, _>>()?, - outputs: source - .outputs() - .iter() - .map(|output| grpc::TransactionOutput::from(output.clone())) - .collect(), - kernels: source - .kernels() - .iter() - .map(|kernel| grpc::TransactionKernel::from(kernel.clone())) - .collect(), + inputs: inputs + .into_iter() + .map(grpc::TransactionInput::try_from) + .collect::<Result<Vec<_>, _>>()?, + outputs: outputs.into_iter().map(grpc::TransactionOutput::from).collect(), + kernels: kernels.into_iter().map(grpc::TransactionKernel::from).collect(), }) } } diff --git a/applications/tari_app_grpc/src/conversions/transaction.rs b/applications/tari_app_grpc/src/conversions/transaction.rs index 4e208fa3c1..fffb851ef7 100644 --- a/applications/tari_app_grpc/src/conversions/transaction.rs +++ b/applications/tari_app_grpc/src/conversions/transaction.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::convert::{TryFrom, TryInto}; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, +}; use tari_common_types::transaction::{TransactionDirection, TransactionStatus, TxId}; use tari_core::transactions::transaction::Transaction; @@ -41,6 +44,21 @@ impl TryFrom<Transaction> for grpc::Transaction { } } +impl TryFrom<Arc<Transaction>> for grpc::Transaction { + type Error = String; + + fn try_from(source: Arc<Transaction>) -> Result<Self, Self::Error> { + match Arc::try_unwrap(source) { + Ok(tx) => tx.try_into(), + Err(tx) => Ok(Self { + offset: Vec::from(tx.offset.as_bytes()), + body: Some(tx.body.clone().try_into()?), + script_offset: Vec::from(tx.script_offset.as_bytes()), + }), + } + } +} + impl TryFrom<grpc::Transaction> for Transaction { type Error = String; diff --git a/applications/tari_base_node/src/builder.rs b/applications/tari_base_node/src/builder.rs index df0b976c97..4bad86c03b 100644 --- a/applications/tari_base_node/src/builder.rs +++ b/applications/tari_base_node/src/builder.rs @@ -215,7 +215,10 @@ async fn build_node_context( cleanup_orphans_at_startup: bool, ) -> Result { //---------------------------------- Blockchain --------------------------------------------// - + debug!( + target: LOG_TARGET, + "Building base node context for {} network", config.network + ); let rules = ConsensusManager::builder(config.network).build(); let factories = CryptoFactories::default(); let randomx_factory = RandomXFactory::new(config.max_randomx_vms); @@ -250,7 +253,7 @@ async fn build_node_context( Box::new(TxInputAndMaturityValidator::new(blockchain_db.clone())), Box::new(TxConsensusValidator::new(blockchain_db.clone())), ]); - let mempool = Mempool::new(MempoolConfig::default(), rules.clone(), Arc::new(mempool_validator)); + let mempool = Mempool::new(MempoolConfig::default(), rules.clone(), Box::new(mempool_validator)); //---------------------------------- Base Node --------------------------------------------// debug!(target: LOG_TARGET, "Creating base node state
machine."); diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 1d7c044316..ac9c6882ae 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -36,11 +36,7 @@ use tari_app_utilities::consts; use tari_common_types::types::{Commitment, PublicKey, Signature}; use tari_comms::{Bytes, CommsNode}; use tari_core::{ - base_node::{ - comms_interface::{Broadcast, CommsInterfaceError}, - LocalNodeCommsInterface, - StateMachineHandle, - }, + base_node::{comms_interface::CommsInterfaceError, LocalNodeCommsInterface, StateMachineHandle}, blocks::{Block, BlockHeader, NewBlockTemplate}, chain_storage::{ChainStorageError, PrunedOutput}, consensus::{emission::Emission, ConsensusManager, NetworkConsensus}, @@ -722,7 +718,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { let mut handler = self.node_service.clone(); let block_hash = handler - .submit_block(block, Broadcast::from(true)) + .submit_block(block) .await .map_err(|e| Status::internal(e.to_string()))?; diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index 47957c1152..f8faa5c431 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -182,18 +182,17 @@ impl wallet_server::Wallet for WalletGrpcServer { request: Request, ) -> Result, Status> { let request = request.into_inner(); - let mut tx_service = self.get_transaction_service(); - let response = tx_service + + let coinbase = tx_service .generate_coinbase_transaction(request.reward.into(), request.fee.into(), request.height) - .await; + .await + .map_err(|err| Status::unknown(err.to_string()))?; - match response { - Ok(resp) => Ok(Response::new(GetCoinbaseResponse { - transaction: Some(resp.try_into().map_err(Status::internal)?), - })), - Err(err) => Err(Status::unknown(err.to_string())), - } + let coinbase = coinbase.try_into().map_err(Status::internal)?; + Ok(Response::new(GetCoinbaseResponse { + transaction: Some(coinbase), + })) } async fn send_sha_atomic_swap_transaction( diff --git a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs index 2674aa4d5f..315dad68cc 100644 --- a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs @@ -57,22 +57,24 @@ impl TransactionsTab { .constraints([pending_constraint, completed_constraint].as_ref()) .split(area); + self.draw_pending_transactions(f, list_areas[0], app_state); + self.draw_completed_transactions(f, list_areas[1], app_state); + } + + fn draw_pending_transactions(&mut self, f: &mut Frame, area: Rect, app_state: &AppState) + where B: Backend { let style = if self.selected_tx_list == SelectedTransactionList::PendingTxs { Style::default().fg(Color::Magenta).add_modifier(Modifier::BOLD) } else { Style::default().fg(Color::White).add_modifier(Modifier::BOLD) }; - let block = Block::default() - .borders(Borders::ALL) - .title(Span::styled("(P)ending Transactions", style)); - f.render_widget(block, list_areas[0]); - self.draw_pending_transactions(f, list_areas[0], app_state); - self.draw_completed_transactions(f, list_areas[1], app_state); - } + let title = 
Block::default().borders(Borders::ALL).title(Span::styled( + format!("(P)ending Transactions ({}) ", app_state.get_pending_txs().len()), + style, + )); + f.render_widget(title, area); - fn draw_pending_transactions<B>(&mut self, f: &mut Frame<B>, area: Rect, app_state: &AppState) - where B: Backend { // Pending Transactions self.pending_list_state.set_num_items(app_state.get_pending_txs().len()); let mut pending_list_state = self @@ -155,9 +157,10 @@ impl TransactionsTab { } else { Style::default().fg(Color::White).add_modifier(Modifier::BOLD) }; - let block = Block::default() - .borders(Borders::ALL) - .title(Span::styled("Completed (T)ransactions", style)); + let block = Block::default().borders(Borders::ALL).title(Span::styled( + format!("Completed (T)ransactions ({}) ", app_state.get_completed_txs().len()), + style, + )); f.render_widget(block, area); let completed_txs = app_state.get_completed_txs(); diff --git a/base_layer/core/src/base_node/chain_metadata_service/service.rs b/base_layer/core/src/base_node/chain_metadata_service/service.rs index ff6ed26843..00e8ee61e4 100644 --- a/base_layer/core/src/base_node/chain_metadata_service/service.rs +++ b/base_layer/core/src/base_node/chain_metadata_service/service.rs @@ -134,8 +134,8 @@ impl ChainMetadataService { /// Handle BlockEvents async fn handle_block_event(&mut self, event: &BlockEvent) -> Result<(), ChainMetadataSyncError> { match event { - BlockEvent::ValidBlockAdded(_, BlockAddResult::Ok(_), _) | - BlockEvent::ValidBlockAdded(_, BlockAddResult::ChainReorg { .. }, _) | + BlockEvent::ValidBlockAdded(_, BlockAddResult::Ok(_)) | + BlockEvent::ValidBlockAdded(_, BlockAddResult::ChainReorg { .. }) | BlockEvent::BlockSyncComplete(_) => { self.update_liveness_chain_metadata().await?; }, diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 61abc6bf30..ac3d7e5566 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -20,10 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{ - fmt::{Display, Error, Formatter}, - sync::Arc, -}; +use std::sync::Arc; use log::*; use strum_macros::Display; @@ -44,7 +41,7 @@ use crate::{ blocks::{Block, BlockHeader, ChainBlock, NewBlock, NewBlockTemplate}, chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, PrunedOutput}, consensus::{ConsensusConstants, ConsensusManager}, - mempool::{async_mempool, Mempool}, + mempool::Mempool, proof_of_work::{Difficulty, PowAlgorithm}, transactions::transaction::TransactionKernel, }; @@ -56,42 +53,12 @@ const MAX_HEADERS_PER_RESPONSE: u32 = 100; /// Broadcast is to notify subscribers if this is a valid propagated block event #[derive(Debug, Clone, Display)] pub enum BlockEvent { - ValidBlockAdded(Arc<Block>, BlockAddResult, Broadcast), - AddBlockFailed(Arc<Block>, Broadcast), + ValidBlockAdded(Arc<Block>, BlockAddResult), + AddBlockFailed(Arc<Block>), BlockSyncComplete(Arc<ChainBlock>), BlockSyncRewind(Vec<Arc<ChainBlock>>), } -/// Used to notify if the block event is for a propagated block.
-#[derive(Debug, Clone, Copy)] -pub struct Broadcast(bool); - -impl Broadcast { - #[inline] - pub fn is_true(&self) -> bool { - self.0 - } -} - -#[allow(clippy::identity_op)] -impl Display for Broadcast { - fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { - write!(f, "Broadcast[{}]", self.0) - } -} - -impl From<Broadcast> for bool { - fn from(v: Broadcast) -> Self { - v.0 - } -} - -impl From<bool> for Broadcast { - fn from(v: bool) -> Self { - Broadcast(v) - } -} - /// The InboundNodeCommsInterface is used to handle all received inbound requests from remote nodes. pub struct InboundNodeCommsHandlers<T> { block_event_sender: BlockEventSender, @@ -341,7 +308,13 @@ where T: BlockchainBackend + 'static request.max_weight }; - let transactions = async_mempool::retrieve(self.mempool.clone(), asking_weight) + debug!( + target: LOG_TARGET, + "Fetching transactions with a maximum weight of {} for the template", asking_weight + ); + let transactions = self + .mempool + .retrieve(asking_weight) .await? .into_iter() .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone())) @@ -350,7 +323,7 @@ where T: BlockchainBackend + 'static debug!( target: LOG_TARGET, "Adding {} transaction(s) to new block template", - transactions.len() + transactions.len(), ); let prev_hash = header.prev_hash.clone(); @@ -364,13 +337,23 @@ where T: BlockchainBackend + 'static ); debug!( target: LOG_TARGET, - "New block template requested at height {}", block_template.header.height, + "New block template requested at height {}, weight: {}", + block_template.header.height, + block_template.body.calculate_weight(constants.transaction_weight()) ); trace!(target: LOG_TARGET, "{}", block_template); Ok(NodeCommsResponse::NewBlockTemplate(block_template)) }, NodeCommsRequest::GetNewBlock(block_template) => { let block = self.blockchain_db.prepare_new_block(block_template).await?; + let constants = self.consensus_manager.consensus_constants(block.header.height); + debug!( + target: LOG_TARGET, + "Prepared new block from template (hash: {}, weight: {}, {})", + block.hash().to_hex(), + block.body.calculate_weight(constants.transaction_weight()), + block.body.to_counts_string() + ); Ok(NodeCommsResponse::NewBlock { success: true, error: None, @@ -506,7 +489,7 @@ where T: BlockchainBackend + 'static match block.pop() { Some(block) => { - self.handle_block(Arc::new(block.try_into_block()?), true.into(), Some(source_peer)) + self.handle_block(Arc::new(block.try_into_block()?), Some(source_peer)) .await?; Ok(()) }, @@ -529,7 +512,6 @@ where T: BlockchainBackend + 'static pub async fn handle_block( &self, block: Arc<Block>, - broadcast: Broadcast, source_peer: Option<NodeId>, ) -> Result<BlockHash, CommsInterfaceError> { let block_hash = block.hash(); @@ -560,9 +542,9 @@ where T: BlockchainBackend + 'static self.blockchain_db.cleanup_orphans().await?; - self.publish_block_event(BlockEvent::ValidBlockAdded(block, block_add_result, broadcast)); + self.publish_block_event(BlockEvent::ValidBlockAdded(block, block_add_result)); - if should_propagate && broadcast.is_true() { + if should_propagate { info!( target: LOG_TARGET, "Propagate block ({}) to network.", @@ -582,7 +564,7 @@ where T: BlockchainBackend + 'static block_hash.to_hex(), e ); - self.publish_block_event(BlockEvent::AddBlockFailed(block, broadcast)); + self.publish_block_event(BlockEvent::AddBlockFailed(block)); Err(CommsInterfaceError::ChainStorageError(e)) }, } diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index 5e21ccb5fd..8a91c01263
100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -34,7 +34,6 @@ use crate::{ comms_request::GetNewBlockTemplateRequest, error::CommsInterfaceError, BlockEvent, - Broadcast, NodeCommsRequest, NodeCommsResponse, }, @@ -52,7 +51,7 @@ pub type BlockEventReceiver = broadcast::Receiver<Arc<BlockEvent>>; #[derive(Clone)] pub struct LocalNodeCommsInterface { request_sender: SenderService<NodeCommsRequest, Result<NodeCommsResponse, CommsInterfaceError>>, - block_sender: SenderService<(Block, Broadcast), Result<BlockHash, CommsInterfaceError>>, + block_sender: SenderService<Block, Result<BlockHash, CommsInterfaceError>>, block_event_sender: BlockEventSender, } @@ -60,7 +59,7 @@ impl LocalNodeCommsInterface { /// Construct a new LocalNodeCommsInterface with the specified SenderService. pub fn new( request_sender: SenderService<NodeCommsRequest, Result<NodeCommsResponse, CommsInterfaceError>>, - block_sender: SenderService<(Block, Broadcast), Result<BlockHash, CommsInterfaceError>>, + block_sender: SenderService<Block, Result<BlockHash, CommsInterfaceError>>, block_event_sender: BlockEventSender, ) -> Self { Self { @@ -178,9 +177,9 @@ impl LocalNodeCommsInterface { } } - /// Submit a block to the base node service. Internal_only flag will prevent propagation. - pub async fn submit_block(&mut self, block: Block, propagate: Broadcast) -> Result<BlockHash, CommsInterfaceError> { - self.block_sender.call((block, propagate)).await? + /// Submit a block to the base node service. + pub async fn submit_block(&mut self, block: Block) -> Result<BlockHash, CommsInterfaceError> { + self.block_sender.call(block).await? } pub fn publish_block_event(&self, event: BlockEvent) -> usize { diff --git a/base_layer/core/src/base_node/comms_interface/mod.rs b/base_layer/core/src/base_node/comms_interface/mod.rs index 66185bdb01..e29e71a4c4 100644 --- a/base_layer/core/src/base_node/comms_interface/mod.rs +++ b/base_layer/core/src/base_node/comms_interface/mod.rs @@ -30,7 +30,7 @@ mod error; pub use error::CommsInterfaceError; mod inbound_handlers; -pub use inbound_handlers::{BlockEvent, Broadcast, InboundNodeCommsHandlers}; +pub use inbound_handlers::{BlockEvent, InboundNodeCommsHandlers}; mod local_interface; pub use local_interface::{BlockEventReceiver, BlockEventSender, LocalNodeCommsInterface}; diff --git a/base_layer/core/src/base_node/service/service.rs b/base_layer/core/src/base_node/service/service.rs index 830068f633..dc4998261c 100644 --- a/base_layer/core/src/base_node/service/service.rs +++ b/base_layer/core/src/base_node/service/service.rs @@ -49,13 +49,7 @@ use tokio::{ use crate::{ base_node::{ - comms_interface::{ - Broadcast, - CommsInterfaceError, - InboundNodeCommsHandlers, - NodeCommsRequest, - NodeCommsResponse, - }, + comms_interface::{CommsInterfaceError, InboundNodeCommsHandlers, NodeCommsRequest, NodeCommsResponse}, service::error::BaseNodeServiceError, state_machine_service::states::StateInfo, StateMachineHandle, @@ -158,7 +152,7 @@ where B: BlockchainBackend + 'static SInRes: Stream>, SBlockIn: Stream>, SLocalReq: Stream<Item = RequestContext<NodeCommsRequest, Result<NodeCommsResponse, CommsInterfaceError>>>, - SLocalBlock: Stream<Item = RequestContext<(Block, Broadcast), Result<BlockHash, CommsInterfaceError>>>, + SLocalBlock: Stream<Item = RequestContext<Block, Result<BlockHash, CommsInterfaceError>>>, { let outbound_request_stream = streams.outbound_request_stream.fuse(); pin_mut!(outbound_request_stream); @@ -208,7 +202,7 @@ where B: BlockchainBackend + 'static // Incoming block messages from the Comms layer Some(block_msg) = inbound_block_stream.next() => { - self.spawn_handle_incoming_block(block_msg).await; + self.spawn_handle_incoming_block(block_msg); } // Incoming local request messages from the LocalNodeCommsInterface and other local services @@ -310,7 +304,7 @@ where B: BlockchainBackend + 'static }); } - async fn spawn_handle_incoming_block(&self, new_block: DomainMessage<NewBlock>) { + fn spawn_handle_incoming_block(&self, new_block: DomainMessage<NewBlock>) { // Determine if we are
bootstrapped let status_watch = self.state_machine_handle.get_status_info_watch(); @@ -357,14 +351,11 @@ where B: BlockchainBackend + 'static }); } - fn spawn_handle_local_block( - &self, - block_context: RequestContext<(Block, Broadcast), Result>, - ) { + fn spawn_handle_local_block(&self, block_context: RequestContext>) { let inbound_nch = self.inbound_nch.clone(); task::spawn(async move { - let ((block, broadcast), reply_tx) = block_context.split(); - let result = reply_tx.send(inbound_nch.handle_block(Arc::new(block), broadcast, None).await); + let (block, reply_tx) = block_context.split(); + let result = reply_tx.send(inbound_nch.handle_block(Arc::new(block), None).await); if let Err(e) = result { error!( diff --git a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs index f38e0e9c5b..0b25135cb9 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs @@ -79,7 +79,6 @@ impl BlockSync { local_nci.publish_block_event(BlockEvent::ValidBlockAdded( block.block().clone().into(), BlockAddResult::Ok(block), - false.into(), )); let _ = status_event_sender.send(StatusInfo { diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 9970806e3b..5a8ba4cae8 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -172,7 +172,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ // Check for reorgs during sync while let Ok(block_event) = block_event_stream.try_recv() { - if let BlockEvent::ValidBlockAdded(_, BlockAddResult::ChainReorg { removed, .. }, _) = + if let BlockEvent::ValidBlockAdded(_, BlockAddResult::ChainReorg { removed, .. }) = &*block_event { if let Some(reorg_block) = removed diff --git a/base_layer/core/src/blocks/block.rs b/base_layer/core/src/blocks/block.rs index 24182f54d7..e2fce4cf35 100644 --- a/base_layer/core/src/blocks/block.rs +++ b/base_layer/core/src/blocks/block.rs @@ -63,8 +63,8 @@ pub enum BlockValidationError { expected: u64, actual: u64, }, - #[error("The block weight is above the maximum")] - BlockTooLarge, + #[error("The block weight ({actual_weight}) is above the maximum ({max_weight})")] + BlockTooLarge { actual_weight: u64, max_weight: u64 }, } /// A Tari block. Blocks are linked together into a blockchain. 
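Note on the BlockValidationError change above: the BlockTooLarge variant now carries both the offending weight and the maximum, so the error message can report the two numbers directly. A minimal sketch of how a block-body validator might construct the richer variant (the validator side is not part of this excerpt; check_block_weight is a hypothetical helper shown only for illustration):

fn check_block_weight(actual_weight: u64, max_weight: u64) -> Result<(), BlockValidationError> {
    // Reject a block whose calculated body weight exceeds the consensus maximum,
    // reporting both values through the new struct variant.
    if actual_weight > max_weight {
        return Err(BlockValidationError::BlockTooLarge { actual_weight, max_weight });
    }
    Ok(())
}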
diff --git a/base_layer/core/src/blocks/new_block_template.rs b/base_layer/core/src/blocks/new_block_template.rs index 35d26c49fd..0e1c507e99 100644 --- a/base_layer/core/src/blocks/new_block_template.rs +++ b/base_layer/core/src/blocks/new_block_template.rs @@ -56,15 +56,17 @@ impl NewBlockTemplate { } impl Display for NewBlockTemplate { - fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { - fmt.write_str("----------------- Block template-----------------\n")?; - fmt.write_str("--- Header ---\n")?; - fmt.write_str(&format!("{}\n", self.header))?; - fmt.write_str("--- Body ---\n")?; - fmt.write_str(&format!("{}\n", self.body))?; - fmt.write_str(&format!( - "Target difficulty: {}\nReward: {}\nTotal fees: {}\n", + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + writeln!(f, "----------------- Block template-----------------")?; + writeln!(f, "--- Header ---")?; + writeln!(f, "{}", self.header)?; + writeln!(f, "--- Body ---")?; + writeln!(f, "{}", self.body)?; + writeln!( + f, + "Target difficulty: {}\nReward: {}\nTotal fees: {}", self.target_difficulty, self.reward, self.total_fees - )) + )?; + Ok(()) } } diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index a638433517..d75b654057 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -386,9 +386,13 @@ mod add_block { let (blocks, outputs) = add_many_chained_blocks(5, &db); let prev_block = blocks.last().unwrap(); - - let (txns, tx_outputs) = - schema_to_transaction(&[txn_schema!(from: vec![outputs[0].clone()], to: vec![500 * T])]); + // Used to help identify the output we're interrogating in this test + let features = OutputFeatures::with_maturity(1); + let (txns, tx_outputs) = schema_to_transaction(&[txn_schema!( + from: vec![outputs[0].clone()], + to: vec![500 * T], + features: features + )]); let mut prev_utxo = tx_outputs[0].clone(); let (block, _) = create_next_block(&db, prev_block, txns); @@ -402,11 +406,18 @@ mod add_block { to_outputs: vec![prev_utxo.clone()], fee: 5.into(), lock_height: 0, - features: Default::default(), + features, script: tari_crypto::script![Nop], input_data: None, }]); - let commitment_hex = txns[0].body.outputs()[0].commitment.to_hex(); + let commitment_hex = txns[0] + .body + .outputs() + .iter() + .find(|o| o.features.maturity == 1) + .unwrap() + .commitment + .to_hex(); let (block, _) = create_next_block(&db, &prev_block, txns); let err = db.add_block(block.clone()).unwrap_err(); @@ -421,8 +432,11 @@ mod add_block { let block = db.add_block(block).unwrap().assert_added(); let prev_block = block.to_arc_block(); - // Different maturity so that the output hash is different in txo_hash_to_index_db - prev_utxo.features = OutputFeatures::with_maturity(1); + // Different metadata so that the output hash is different in txo_hash_to_index_db + prev_utxo.features = OutputFeatures { + metadata: vec![1], + ..Default::default() + }; // Now we can reuse a commitment let (txns, _) = schema_to_transaction(&[TransactionSchema { from: vec![outputs[1].clone()], diff --git a/base_layer/core/src/consensus/consensus_constants.rs b/base_layer/core/src/consensus/consensus_constants.rs index 3c9ac62ec7..cc3ba0a93b 100644 --- a/base_layer/core/src/consensus/consensus_constants.rs +++ b/base_layer/core/src/consensus/consensus_constants.rs @@ -24,13 +24,14 @@ use std::{collections::HashMap, ops::Add}; use 
chrono::{DateTime, Duration, Utc}; use tari_common::configuration::Network; -use tari_crypto::tari_utilities::epoch_time::EpochTime; +use tari_crypto::{script, tari_utilities::epoch_time::EpochTime}; use crate::{ - consensus::network::NetworkConsensus, + consensus::{network::NetworkConsensus, ConsensusEncodingSized}, proof_of_work::{Difficulty, PowAlgorithm}, transactions::{ tari_amount::{uT, MicroTari, T}, + transaction::OutputFeatures, weight::TransactionWeight, }, }; @@ -140,7 +141,11 @@ impl ConsensusConstants { } pub fn coinbase_weight(&self) -> u64 { - self.transaction_weight.calculate(1, 0, 1, 0) + // TODO: We do not know what script, features etc a coinbase has - this should be max coinbase size? + let metadata_size = self.transaction_weight.round_up_metadata_size( + script![Nop].consensus_encode_exact_size() + OutputFeatures::default().consensus_encode_exact_size(), + ); + self.transaction_weight.calculate(1, 0, 1, metadata_size) } /// The amount of PoW algorithms used by the Tari chain. @@ -234,7 +239,7 @@ impl ConsensusConstants { max_randomx_seed_height: u64::MAX, proof_of_work: algos, faucet_value: (5000 * 4000) * T, - transaction_weight: TransactionWeight::v2(), + transaction_weight: TransactionWeight::latest(), max_script_byte_size: 2048, }] } diff --git a/base_layer/core/src/mempool/async_mempool.rs b/base_layer/core/src/mempool/async_mempool.rs deleted file mode 100644 index a6c360950f..0000000000 --- a/base_layer/core/src/mempool/async_mempool.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2019. The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use std::sync::Arc; - -use tari_common_types::types::Signature; - -use crate::{ - blocks::Block, - mempool::{error::MempoolError, Mempool, StateResponse, StatsResponse, TxStorageResponse}, - transactions::transaction::Transaction, -}; - -macro_rules! 
make_async { - ($fn:ident($($param1:ident:$ptype1:ty,$param2:ident:$ptype2:ty),+) -> $rtype:ty) => { - pub async fn $fn(mp: Mempool, $($param1: $ptype1, $param2: $ptype2),+) -> Result<$rtype, MempoolError> { - let mut mdc = vec![]; - log_mdc::iter(|k, v| mdc.push((k.to_owned(), v.to_owned()))); - tokio::task::spawn_blocking(move || { - log_mdc::extend(mdc.clone()); - mp.$fn($($param1,$param2),+) - }) - .await - .or_else(|err| Err(MempoolError::BlockingTaskSpawnError(err.to_string()))) - .and_then(|inner_result| inner_result) - } - }; - - ($fn:ident($($param:ident:$ptype:ty),+) -> $rtype:ty) => { - pub async fn $fn(mp: Mempool, $($param: $ptype),+) -> Result<$rtype, MempoolError> { - let mut mdc = vec![]; - log_mdc::iter(|k, v| mdc.push((k.to_owned(), v.to_owned()))); - tokio::task::spawn_blocking(move || { - log_mdc::extend(mdc.clone()); - mp.$fn($($param),+) - }) - .await - .or_else(|err| Err(MempoolError::BlockingTaskSpawnError(err.to_string()))) - .and_then(|inner_result| inner_result) - } - }; - - ($fn:ident() -> $rtype:ty) => { - pub async fn $fn(mp: Mempool) -> Result<$rtype, MempoolError> { - let mut mdc = vec![]; - log_mdc::iter(|k, v| mdc.push((k.to_owned(), v.to_owned()))); - tokio::task::spawn_blocking(move || { - log_mdc::extend(mdc.clone()); - mp.$fn() - }) - .await - .or_else(|err| Err(MempoolError::BlockingTaskSpawnError(err.to_string()))) - .and_then(|inner_result| inner_result) - } - }; -} - -make_async!(insert(tx: Arc) -> TxStorageResponse); -make_async!(process_published_block(published_block: Arc) -> ()); -make_async!(process_reorg(removed_blocks: Vec>, new_blocks: Vec>) -> ()); -make_async!(snapshot() -> Vec>); -make_async!(retrieve(total_weight: u64) -> Vec>); -make_async!(has_tx_with_excess_sig(excess_sig: Signature) -> TxStorageResponse); -make_async!(stats() -> StatsResponse); -make_async!(state() -> StateResponse); diff --git a/base_layer/core/src/mempool/error.rs b/base_layer/core/src/mempool/error.rs index e2ac8dc415..9d40fcfe5b 100644 --- a/base_layer/core/src/mempool/error.rs +++ b/base_layer/core/src/mempool/error.rs @@ -47,4 +47,6 @@ pub enum MempoolError { BackendError(String), #[error("Internal reply channel error: `{0}`")] TransportChannelError(#[from] TransportChannelError), + #[error("The transaction did not contain any kernels")] + TransactionNoKernels, } diff --git a/base_layer/core/src/mempool/mempool.rs b/base_layer/core/src/mempool/mempool.rs index 1788803865..1dbc9aee2a 100644 --- a/base_layer/core/src/mempool/mempool.rs +++ b/base_layer/core/src/mempool/mempool.rs @@ -20,9 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use tari_common_types::types::Signature; +use tokio::sync::RwLock; use crate::{ blocks::Block, @@ -48,11 +49,11 @@ pub struct Mempool { } impl Mempool { - /// Create a new Mempool with an UnconfirmedPool, OrphanPool, PendingPool and ReOrgPool. + /// Create a new Mempool with an UnconfirmedPool and ReOrgPool. pub fn new( config: MempoolConfig, rules: ConsensusManager, - validator: Arc, + validator: Box, ) -> Self { Self { pool_storage: Arc::new(RwLock::new(MempoolStorage::new(config, rules, validator))), @@ -61,73 +62,57 @@ impl Mempool { /// Insert an unconfirmed transaction into the Mempool. 
The transaction *MUST* have passed through the validation /// pipeline already and will thus always be internally consistent by this stage - pub fn insert(&self, tx: Arc<Transaction>) -> Result<TxStorageResponse, MempoolError> { - self.pool_storage - .write() - .map_err(|e| MempoolError::BackendError(e.to_string()))? - .insert(tx) + pub async fn insert(&self, tx: Arc<Transaction>) -> Result<TxStorageResponse, MempoolError> { + self.pool_storage.write().await.insert(tx) } /// Update the Mempool based on the received published block. - pub fn process_published_block(&self, published_block: Arc<Block>) -> Result<(), MempoolError> { - self.pool_storage - .write() - .map_err(|e| MempoolError::BackendError(e.to_string()))? - .process_published_block(published_block) + pub async fn process_published_block(&self, published_block: &Block) -> Result<(), MempoolError> { + self.pool_storage.write().await.process_published_block(published_block) } /// In the event of a ReOrg, resubmit all ReOrged transactions into the Mempool and process each newly introduced /// block from the latest longest chain. - pub fn process_reorg( + pub async fn process_reorg( &self, removed_blocks: Vec<Arc<Block>>, new_blocks: Vec<Arc<Block>>, ) -> Result<(), MempoolError> { self.pool_storage .write() - .map_err(|e| MempoolError::BackendError(e.to_string()))? - .process_reorg(removed_blocks, new_blocks) + .await + .process_reorg(&removed_blocks, &new_blocks) } /// Returns all unconfirmed transactions stored in the Mempool, except the transactions stored in the ReOrgPool. // TODO: Investigate returning an iterator rather than a large vector of transactions - pub fn snapshot(&self) -> Result<Vec<Arc<Transaction>>, MempoolError> { - self.pool_storage - .read() - .map_err(|e| MempoolError::BackendError(e.to_string()))? - .snapshot() + pub async fn snapshot(&self) -> Result<Vec<Arc<Transaction>>, MempoolError> { + self.pool_storage.read().await.snapshot() } /// Returns a list of transactions ranked by transaction priority up to a given weight. /// Only transactions that fit into a block will be returned - pub fn retrieve(&self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, MempoolError> { - self.pool_storage - .write() - .map_err(|e| MempoolError::BackendError(e.to_string()))? - .retrieve(total_weight) + pub async fn retrieve(&self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, MempoolError> { + self.pool_storage.write().await.retrieve(total_weight) + } + + /// Check if the specified excess signature is found in the Mempool. + pub async fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> Result<TxStorageResponse, MempoolError> { + Ok(self.pool_storage.read().await.has_tx_with_excess_sig(excess_sig)) } /// Check if the specified transaction is stored in the Mempool. - pub fn has_tx_with_excess_sig(&self, excess_sig: Signature) -> Result<TxStorageResponse, MempoolError> { - self.pool_storage - .read() - .map_err(|e| MempoolError::BackendError(e.to_string()))? - .has_tx_with_excess_sig(excess_sig) + pub async fn has_transaction(&self, tx: &Transaction) -> Result<TxStorageResponse, MempoolError> { + self.pool_storage.read().await.has_transaction(tx) } /// Gathers and returns the stats of the Mempool. - pub fn stats(&self) -> Result<StatsResponse, MempoolError> { - self.pool_storage - .write() - .map_err(|e| MempoolError::BackendError(e.to_string()))? - .stats() + pub async fn stats(&self) -> Result<StatsResponse, MempoolError> { + self.pool_storage.write().await.stats() } /// Gathers and returns a breakdown of all the transactions in the Mempool. - pub fn state(&self) -> Result<StateResponse, MempoolError> { - self.pool_storage - .write() - .map_err(|e| MempoolError::BackendError(e.to_string()))?
- .state() + pub async fn state(&self) -> Result { + self.pool_storage.write().await.state() } } diff --git a/base_layer/core/src/mempool/mempool_storage.rs b/base_layer/core/src/mempool/mempool_storage.rs index 8cce61d712..c77883aabf 100644 --- a/base_layer/core/src/mempool/mempool_storage.rs +++ b/base_layer/core/src/mempool/mempool_storage.rs @@ -24,7 +24,7 @@ use std::sync::Arc; use log::*; use tari_common_types::types::Signature; -use tari_crypto::tari_utilities::{hex::Hex, Hashable}; +use tari_crypto::tari_utilities::hex::Hex; use crate::{ blocks::Block, @@ -50,7 +50,7 @@ pub const LOG_TARGET: &str = "c::mp::mempool_storage"; pub struct MempoolStorage { unconfirmed_pool: UnconfirmedPool, reorg_pool: ReorgPool, - validator: Arc, + validator: Box, rules: ConsensusManager, } @@ -59,7 +59,7 @@ impl MempoolStorage { pub fn new( config: MempoolConfig, rules: ConsensusManager, - validator: Arc, + validator: Box, ) -> Self { Self { unconfirmed_pool: UnconfirmedPool::new(config.unconfirmed_pool), @@ -79,17 +79,17 @@ impl MempoolStorage { .kernels() .first() .map(|k| k.excess_sig.get_signature().to_hex()) - .unwrap_or_else(|| "None".into()) + .unwrap_or_else(|| "None?!".into()) ); match self.validator.validate(&tx) { Ok(()) => { - let weight = *self.get_transaction_weight(0); + let weight = self.get_transaction_weight(0); self.unconfirmed_pool.insert(tx, None, &weight)?; Ok(TxStorageResponse::UnconfirmedPool) }, Err(ValidationError::UnknownInputs(dependent_outputs)) => { - if self.unconfirmed_pool.verify_outputs_exist(&dependent_outputs) { - let weight = *self.get_transaction_weight(0); + if self.unconfirmed_pool.contains_all_outputs(&dependent_outputs) { + let weight = self.get_transaction_weight(0); self.unconfirmed_pool.insert(tx, Some(dependent_outputs), &weight)?; Ok(TxStorageResponse::UnconfirmedPool) } else { @@ -116,8 +116,8 @@ impl MempoolStorage { } } - fn get_transaction_weight(&self, height: u64) -> &TransactionWeight { - self.rules.consensus_constants(height).transaction_weight() + fn get_transaction_weight(&self, height: u64) -> TransactionWeight { + *self.rules.consensus_constants(height).transaction_weight() } // Insert a set of new transactions into the UTxPool. @@ -129,13 +129,13 @@ impl MempoolStorage { } /// Update the Mempool based on the received published block. - pub fn process_published_block(&mut self, published_block: Arc) -> Result<(), MempoolError> { + pub fn process_published_block(&mut self, published_block: &Block) -> Result<(), MempoolError> { trace!(target: LOG_TARGET, "Mempool processing new block: {}", published_block); // Move published txs to ReOrgPool and discard double spends - self.reorg_pool.insert_txs( - self.unconfirmed_pool - .remove_published_and_discard_deprecated_transactions(&published_block), - )?; + let removed_transactions = self + .unconfirmed_pool + .remove_published_and_discard_deprecated_transactions(published_block); + self.reorg_pool.insert_txs(removed_transactions)?; Ok(()) } @@ -144,27 +144,10 @@ impl MempoolStorage { /// block from the latest longest chain. 
pub fn process_reorg( &mut self, - removed_blocks: Vec<Arc<Block>>, - new_blocks: Vec<Arc<Block>>, + removed_blocks: &[Arc<Block>], + new_blocks: &[Arc<Block>], ) -> Result<(), MempoolError> { debug!(target: LOG_TARGET, "Mempool processing reorg"); - for block in &removed_blocks { - debug!( - target: LOG_TARGET, - "Mempool processing reorg removed block {} ({})", - block.header.height, - block.header.hash().to_hex(), - ); - } - for block in &new_blocks { - debug!( - target: LOG_TARGET, - "Mempool processing reorg added new block {} ({})", - block.header.height, - block.header.hash().to_hex(), - ); - } - let previous_tip = removed_blocks.last().map(|block| block.header.height); let new_tip = new_blocks.last().map(|block| block.header.height); @@ -176,7 +159,7 @@ impl MempoolStorage { // Remove re-orged transactions from reorg pool and re-submit them to the unconfirmed mempool let removed_txs = self .reorg_pool - .remove_reorged_txs_and_discard_double_spends(removed_blocks, &new_blocks)?; + .remove_reorged_txs_and_discard_double_spends(removed_blocks, new_blocks)?; self.insert_txs(removed_txs)?; // Update the Mempool based on the received set of new blocks. for block in new_blocks { @@ -215,24 +198,56 @@ impl MempoolStorage { } /// Returns a list of transactions ranked by transaction priority up to a given weight. - /// Will only return transactions that will fit into a block + /// Will only return transactions that will fit into the given weight pub fn retrieve(&mut self, total_weight: u64) -> Result<Vec<Arc<Transaction>>, MempoolError> { let results = self.unconfirmed_pool.highest_priority_txs(total_weight)?; self.insert_txs(results.transactions_to_insert)?; Ok(results.retrieved_transactions) } - /// Check if the specified transaction is stored in the Mempool. - pub fn has_tx_with_excess_sig(&self, excess_sig: Signature) -> Result<TxStorageResponse, MempoolError> { - if self.unconfirmed_pool.has_tx_with_excess_sig(&excess_sig) { - Ok(TxStorageResponse::UnconfirmedPool) - } else if self.reorg_pool.has_tx_with_excess_sig(&excess_sig)? { - Ok(TxStorageResponse::ReorgPool) + /// Check if the specified excess signature is found in the Mempool. + pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> TxStorageResponse { + if self.unconfirmed_pool.has_tx_with_excess_sig(excess_sig) { + TxStorageResponse::UnconfirmedPool + } else if self.reorg_pool.has_tx_with_excess_sig(excess_sig) { + TxStorageResponse::ReorgPool } else { - Ok(TxStorageResponse::NotStored) + TxStorageResponse::NotStored } } + /// Check if the specified transaction is stored in the Mempool.
+ pub fn has_transaction(&self, tx: &Transaction) -> Result { + tx.body + .kernels() + .iter() + .fold(None, |stored, kernel| { + if stored.is_none() { + return Some(self.has_tx_with_excess_sig(&kernel.excess_sig)); + } + let stored = stored.unwrap(); + match (self.has_tx_with_excess_sig(&kernel.excess_sig), stored) { + // All (so far) in unconfirmed pool + (TxStorageResponse::UnconfirmedPool, TxStorageResponse::UnconfirmedPool) => { + Some(TxStorageResponse::UnconfirmedPool) + }, + // Some kernels from the transaction have already been processed, and others exist in the + // unconfirmed pool, therefore this specific transaction has not been stored (already spent) + (TxStorageResponse::UnconfirmedPool, TxStorageResponse::ReorgPool) | + (TxStorageResponse::ReorgPool, TxStorageResponse::UnconfirmedPool) => { + Some(TxStorageResponse::NotStoredAlreadySpent) + }, + // All (so far) in reorg pool + (TxStorageResponse::ReorgPool, TxStorageResponse::ReorgPool) => Some(TxStorageResponse::ReorgPool), + // Not stored + (TxStorageResponse::UnconfirmedPool, other) | + (TxStorageResponse::ReorgPool, other) | + (other, _) => Some(other), + } + }) + .ok_or(MempoolError::TransactionNoKernels) + } + // Returns the total number of transactions in the Mempool. fn len(&self) -> Result { Ok(self.unconfirmed_pool.len()) @@ -240,7 +255,7 @@ impl MempoolStorage { /// Gathers and returns the stats of the Mempool. pub fn stats(&mut self) -> Result { - let weight = *self.get_transaction_weight(0); + let weight = self.get_transaction_weight(0); Ok(StatsResponse { total_txs: self.len()?, unconfirmed_txs: self.unconfirmed_pool.len(), diff --git a/base_layer/core/src/mempool/mod.rs b/base_layer/core/src/mempool/mod.rs index 8947dd133a..4f18b8e5d2 100644 --- a/base_layer/core/src/mempool/mod.rs +++ b/base_layer/core/src/mempool/mod.rs @@ -47,10 +47,6 @@ pub use rpc::{MempoolRpcClient, MempoolRpcServer, MempoolRpcService, MempoolServ #[cfg(feature = "base_node")] mod unconfirmed_pool; -// public modules -#[cfg(feature = "base_node")] -pub mod async_mempool; - // Public re-exports #[cfg(feature = "base_node")] pub use error::MempoolError; @@ -160,9 +156,3 @@ impl Display for TxStorageResponse { fmt.write_str(storage) } } - -/// Events that can be published on state changes of the Mempool -#[derive(Debug, Clone)] -pub enum MempoolStateEvent { - Updated, -} diff --git a/base_layer/core/src/mempool/priority/error.rs b/base_layer/core/src/mempool/priority/error.rs deleted file mode 100644 index 10ac5ce58e..0000000000 --- a/base_layer/core/src/mempool/priority/error.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use tari_crypto::tari_utilities::message_format::MessageFormatError; -use thiserror::Error; - -#[derive(Debug, Error)] -pub enum PriorityError { - #[error("Message format error: `{0}`")] - MessageFormatError(#[from] MessageFormatError), -} diff --git a/base_layer/core/src/mempool/priority/mod.rs b/base_layer/core/src/mempool/priority/mod.rs index 5e8ef8fd1a..4db052729f 100644 --- a/base_layer/core/src/mempool/priority/mod.rs +++ b/base_layer/core/src/mempool/priority/mod.rs @@ -20,8 +20,5 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -mod error; -pub use error::PriorityError; - mod prioritized_transaction; pub use prioritized_transaction::{FeePriority, PrioritizedTransaction}; diff --git a/base_layer/core/src/mempool/priority/prioritized_transaction.rs b/base_layer/core/src/mempool/priority/prioritized_transaction.rs index fbe579658a..381717ba02 100644 --- a/base_layer/core/src/mempool/priority/prioritized_transaction.rs +++ b/base_layer/core/src/mempool/priority/prioritized_transaction.rs @@ -20,70 +20,87 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::sync::Arc; +use std::{ + fmt::{Display, Formatter}, + sync::Arc, +}; -use tari_common_types::types::HashOutput; -use tari_crypto::tari_utilities::message_format::MessageFormat; +use tari_common_types::types::{HashOutput, PrivateKey, PublicKey}; +use tari_utilities::{hex::Hex, ByteArray}; -use crate::{ - mempool::priority::PriorityError, - transactions::{transaction::Transaction, weight::TransactionWeight}, -}; +use crate::transactions::{transaction::Transaction, weight::TransactionWeight}; /// Create a unique unspent transaction priority based on the transaction fee, maturity of the oldest input UTXO and the /// excess_sig. The excess_sig is included to ensure that the priority key is unique so it can be used with a BTreeMap. /// Normally, duplicate keys will be overwritten in a BTreeMap.
-#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)] +#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] pub struct FeePriority(Vec<u8>); impl FeePriority { - pub fn try_construct(transaction: &Transaction, weight: u64) -> Result<Self, PriorityError> { + pub fn new(transaction: &Transaction, weight: u64) -> Self { // The weights have been normalised, so the fee priority is now equal to the fee per gram ± a few pct points - let fee_per_byte = ((transaction.body.get_total_fee().as_u64() as f64 / weight as f64) * 1000.0) as usize; // Include 3 decimal places before flooring - let mut fee_priority = fee_per_byte.to_binary()?; - fee_priority.reverse(); // Requires Big-endian for BtreeMap sorting - - let mut maturity_priority = (u64::MAX - transaction.min_input_maturity()).to_binary()?; - maturity_priority.reverse(); // Requires Big-endian for BtreeMap sorting + // Include 3 decimal places before flooring + let fee_per_byte = ((transaction.body.get_total_fee().as_u64() as f64 / weight as f64) * 1000.0) as u64; + // Big-endian byte order is used so that the lexicographic ordering of the Vec<u8> key matches numeric + // ordering; the unconfirmed pool's BTreeMap then iterates from lowest to highest priority + let fee_priority = fee_per_byte.to_be_bytes(); + let maturity_priority = (u64::MAX - transaction.min_input_maturity()).to_be_bytes(); - let mut priority = fee_priority; - priority.append(&mut maturity_priority); - priority.append(&mut transaction.body.kernels()[0].excess_sig.to_binary()?); - Ok(Self(priority)) - } -} - -impl Clone for FeePriority { - fn clone(&self) -> Self { - FeePriority(self.0.clone()) + let mut priority = vec![0u8; 8 + 8 + 64]; + priority[..8].copy_from_slice(&fee_priority[..]); + priority[8..16].copy_from_slice(&maturity_priority[..]); + // Use the aggregate signature and nonce: if a transaction has multiple kernels, the aggregate differs + // unless all the kernels are identical, so the priority key stays unique per transaction. + let (agg_sig, agg_nonce) = transaction + .body + .kernels() + .iter() + .map(|k| (k.excess_sig.get_signature(), k.excess_sig.get_public_nonce())) + .fold( + (PrivateKey::default(), PublicKey::default()), + |(agg_sk, agg_nonce), (sig, nonce)| (agg_sk + sig, agg_nonce + nonce), + ); + priority[16..48].copy_from_slice(agg_sig.as_bytes()); + priority[48..80].copy_from_slice(agg_nonce.as_bytes()); + Self(priority) + } } /// A prioritized transaction includes a transaction and the calculated priority of the transaction.
#[derive(Clone)] pub struct PrioritizedTransaction { + pub key: usize, pub transaction: Arc, pub priority: FeePriority, pub weight: u64, - pub depended_output_hashes: Vec, + pub dependent_output_hashes: Vec, } impl PrioritizedTransaction { - pub fn try_construct( + pub fn new( + key: usize, weighting: &TransactionWeight, transaction: Arc, dependent_outputs: Option>, - ) -> Result { - let depended_output_hashes = match dependent_outputs { - Some(v) => v, - None => Vec::new(), - }; + ) -> PrioritizedTransaction { let weight = transaction.calculate_weight(weighting); - Ok(Self { - priority: FeePriority::try_construct(&transaction, weight)?, + Self { + key, + priority: FeePriority::new(&transaction, weight), weight, transaction, - depended_output_hashes, - }) + dependent_output_hashes: dependent_outputs.unwrap_or_default(), + } + } +} + +impl Display for PrioritizedTransaction { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let sig_hex = self + .transaction + .first_kernel_excess_sig() + .map(|sig| sig.get_signature().to_hex()) + .unwrap_or_else(|| "No kernels!".to_string()); + write!(f, "{} (weight: {}, internal key: {})", sig_hex, self.weight, self.key) } } diff --git a/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs b/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs index d21be83e4a..20f521a0b4 100644 --- a/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs +++ b/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs @@ -86,15 +86,15 @@ impl ReorgPool { } /// Check if a transaction is stored in the ReorgPool - pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> Result { - Ok(self.pool_storage.has_tx_with_excess_sig(excess_sig)) + pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> bool { + self.pool_storage.has_tx_with_excess_sig(excess_sig) } /// Remove the transactions from the ReorgPool that were used in provided removed blocks. The transactions can be /// resubmitted to the Unconfirmed Pool. 
pub fn remove_reorged_txs_and_discard_double_spends( &mut self, - removed_blocks: Vec>, + removed_blocks: &[Arc], new_blocks: &[Arc], ) -> Result>, ReorgPoolError> { Ok(self @@ -144,41 +144,21 @@ mod test { .insert_txs(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]) .unwrap(); // Check that oldest utx was removed to make room for new incoming transactions - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig) - .unwrap(),); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig)); // Check that transactions that have been in the pool for longer than their Time-to-live have been removed thread::sleep(Duration::from_millis(51)); reorg_pool.insert_txs(vec![tx5.clone(), tx6.clone()]).unwrap(); assert_eq!(reorg_pool.len().unwrap(), 2); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig) - .unwrap(),); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig)); } #[test] @@ -208,26 +188,14 @@ mod test { .unwrap(); // Oldest transaction tx1 is removed to make space for new incoming transactions assert_eq!(reorg_pool.len().unwrap(), 5); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig) - .unwrap(),); - - let reorg_blocks = vec![ + assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig)); + 
assert!(reorg_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig)); + + let reorg_blocks = &[ create_orphan_block(3000, vec![(*tx3).clone(), (*tx4).clone()], &consensus).into(), create_orphan_block(4000, vec![(*tx1).clone(), (*tx2).clone()], &consensus).into(), ]; @@ -241,23 +209,11 @@ mod test { assert!(removed_txs.contains(&tx4)); assert_eq!(reorg_pool.len().unwrap(), 2); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(!reorg_pool - .has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig) - .unwrap(),); - assert!(reorg_pool - .has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig) - .unwrap(),); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig)); } } diff --git a/base_layer/core/src/mempool/reorg_pool/reorg_pool_storage.rs b/base_layer/core/src/mempool/reorg_pool/reorg_pool_storage.rs index 32ec156c62..d6184ca0fc 100644 --- a/base_layer/core/src/mempool/reorg_pool/reorg_pool_storage.rs +++ b/base_layer/core/src/mempool/reorg_pool/reorg_pool_storage.rs @@ -23,8 +23,9 @@ use std::sync::Arc; use log::*; -use tari_common_types::types::Signature; +use tari_common_types::types::{PrivateKey, Signature}; use tari_crypto::tari_utilities::hex::Hex; +use tari_utilities::Hashable; use ttl_cache::TtlCache; use crate::{blocks::Block, mempool::reorg_pool::reorg_pool::ReorgPoolConfig, transactions::transaction::Transaction}; @@ -39,7 +40,7 @@ pub const LOG_TARGET: &str = "c::mp::reorg_pool::reorg_pool_storage"; /// oldest transactions will be removed to make space for incoming transactions. pub struct ReorgPoolStorage { config: ReorgPoolConfig, - txs_by_signature: TtlCache>, + txs_by_signature: TtlCache>, } impl ReorgPoolStorage { @@ -54,16 +55,21 @@ impl ReorgPoolStorage { /// Insert a new transaction into the ReorgPoolStorage. Published transactions will have a limited Time-to-live in /// the ReorgPoolStorage and will be discarded once the Time-to-live threshold has been reached. 
pub fn insert(&mut self, tx: Arc) { - let tx_key = tx.body.kernels()[0].excess_sig.clone(); - let _ = self - .txs_by_signature - .insert(tx_key.clone(), tx.clone(), self.config.tx_ttl); - debug!( - target: LOG_TARGET, - "Inserted transaction with signature {} into reorg pool:", - tx_key.get_signature().to_hex() - ); - trace!(target: LOG_TARGET, "{}", tx); + match tx.first_kernel_excess_sig() { + Some(excess_sig) => { + let tx_key = excess_sig.get_signature(); + let tx_key_hex = tx_key.to_hex(); + trace!(target: LOG_TARGET, "{}", tx); + self.txs_by_signature.insert(tx_key.clone(), tx, self.config.tx_ttl); + debug!( + target: LOG_TARGET, + "Inserted transaction with signature {} into reorg pool:", tx_key_hex + ); + }, + None => { + warn!(target: LOG_TARGET, "Inserted transaction without kernels!"); + }, + } } /// Insert a set of new transactions into the ReorgPoolStorage @@ -75,14 +81,14 @@ impl ReorgPoolStorage { /// Check if a transaction is stored in the ReorgPoolStorage pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> bool { - self.txs_by_signature.contains_key(excess_sig) + self.txs_by_signature.contains_key(excess_sig.get_signature()) } /// Remove double-spends from the ReorgPool. These transactions were orphaned by the provided published /// block. Check if any of the transactions in the ReorgPool has inputs that was spent by the provided /// published block. fn discard_double_spends(&mut self, published_block: &Block) { - let mut removed_tx_keys: Vec = Vec::new(); + let mut removed_tx_keys = Vec::new(); for (tx_key, ptx) in self.txs_by_signature.iter() { for input in ptx.body.inputs() { if published_block.body.inputs().contains(input) { @@ -96,7 +102,7 @@ impl ReorgPoolStorage { trace!( target: LOG_TARGET, "Removed double spend tx from reorg pool: {}", - tx_key.get_signature().to_hex() + tx_key.to_hex() ); } } @@ -105,17 +111,29 @@ impl ReorgPoolStorage { /// can be resubmitted to the Unconfirmed Pool. 
pub fn remove_reorged_txs_and_discard_double_spends( &mut self, - removed_blocks: Vec>, + removed_blocks: &[Arc], new_blocks: &[Arc], ) -> Vec> { for block in new_blocks { + debug!( + target: LOG_TARGET, + "Mempool processing reorg added new block {} ({})", + block.header.height, + block.header.hash().to_hex(), + ); self.discard_double_spends(block); } - let mut removed_txs: Vec> = Vec::new(); - for block in &removed_blocks { + let mut removed_txs = Vec::new(); + for block in removed_blocks { + debug!( + target: LOG_TARGET, + "Mempool processing reorg removed block {} ({})", + block.header.height, + block.header.hash().to_hex(), + ); for kernel in block.body.kernels() { - if let Some(removed_tx) = self.txs_by_signature.remove(&kernel.excess_sig) { + if let Some(removed_tx) = self.txs_by_signature.remove(kernel.excess_sig.get_signature()) { trace!(target: LOG_TARGET, "Removed tx from reorg pool: {:?}", removed_tx); removed_txs.push(removed_tx); } diff --git a/base_layer/core/src/mempool/service/error.rs b/base_layer/core/src/mempool/service/error.rs index b85e7063d1..522658858d 100644 --- a/base_layer/core/src/mempool/service/error.rs +++ b/base_layer/core/src/mempool/service/error.rs @@ -50,4 +50,6 @@ pub enum MempoolServiceError { BroadcastFailed, #[error("Conversion error: '{0}'")] ConversionError(String), + #[error("Transaction has no kernels")] + TransactionNoKernels, } diff --git a/base_layer/core/src/mempool/service/inbound_handlers.rs b/base_layer/core/src/mempool/service/inbound_handlers.rs index 05655f7415..8bc69d454f 100644 --- a/base_layer/core/src/mempool/service/inbound_handlers.rs +++ b/base_layer/core/src/mempool/service/inbound_handlers.rs @@ -25,16 +25,13 @@ use std::sync::Arc; use log::*; use tari_comms::peer_manager::NodeId; use tari_crypto::tari_utilities::hex::Hex; -use tokio::sync::broadcast; use crate::{ base_node::comms_interface::BlockEvent, chain_storage::BlockAddResult, mempool::{ - async_mempool, service::{MempoolRequest, MempoolResponse, MempoolServiceError, OutboundMempoolServiceInterface}, Mempool, - MempoolStateEvent, TxStorageResponse, }, transactions::transaction::Transaction, @@ -46,23 +43,14 @@ pub const LOG_TARGET: &str = "c::mp::service::inbound_handlers"; /// nodes. #[derive(Clone)] pub struct MempoolInboundHandlers { - event_publisher: broadcast::Sender, mempool: Mempool, outbound_nmi: OutboundMempoolServiceInterface, } impl MempoolInboundHandlers { /// Construct the MempoolInboundHandlers. - pub fn new( - event_publisher: broadcast::Sender, - mempool: Mempool, - outbound_nmi: OutboundMempoolServiceInterface, - ) -> Self { - Self { - event_publisher, - mempool, - outbound_nmi, - } + pub fn new(mempool: Mempool, outbound_nmi: OutboundMempoolServiceInterface) -> Self { + Self { mempool, outbound_nmi } } /// Handle inbound Mempool service requests from remote nodes and local services. 
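Two patterns recur in the reorg-pool hunks above: the `TtlCache` is now keyed on the excess signature's scalar (`PrivateKey`) rather than the whole `Signature`, and `insert` refuses kernel-less transactions instead of panicking on `kernels()[0]`. A minimal sketch of that combination, assuming the `ttl_cache` crate's `TtlCache::new`, `insert`, and `contains_key` as used above; a fixed byte array and `String` are hypothetical stand-ins for `PrivateKey` and `Arc<Transaction>`:

```rust
// Sketch only, not the actual ReorgPoolStorage implementation.
use std::time::Duration;

use ttl_cache::TtlCache;

type Scalar = [u8; 32]; // stand-in for the signature scalar (PrivateKey)

struct ReorgCache {
    txs_by_signature: TtlCache<Scalar, String>, // String stands in for Arc<Transaction>
    tx_ttl: Duration,
}

impl ReorgCache {
    fn new(capacity: usize, tx_ttl: Duration) -> Self {
        Self {
            txs_by_signature: TtlCache::new(capacity),
            tx_ttl,
        }
    }

    // Mirrors the new `insert`: key on the first kernel's excess-signature
    // scalar; a transaction with no kernels is rejected with a warning
    // instead of indexing into an empty kernel list.
    fn insert(&mut self, excess_sig_scalar: Option<Scalar>, tx: String) {
        match excess_sig_scalar {
            Some(key) => {
                self.txs_by_signature.insert(key, tx, self.tx_ttl);
            },
            None => eprintln!("not caching a transaction without kernels"),
        }
    }

    fn has_tx_with_excess_sig(&self, excess_sig_scalar: &Scalar) -> bool {
        self.txs_by_signature.contains_key(excess_sig_scalar)
    }
}
```

Keying on the scalar alone matches how lookups arrive (`excess_sig.get_signature()`), and the TTL bounds how long reorged transactions are retained for possible resubmission.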
@@ -70,14 +58,10 @@ impl MempoolInboundHandlers {
        debug!(target: LOG_TARGET, "Handling remote request: {}", request);
        use MempoolRequest::*;
        match request {
-            GetStats => Ok(MempoolResponse::Stats(
-                async_mempool::stats(self.mempool.clone()).await?,
-            )),
-            GetState => Ok(MempoolResponse::State(
-                async_mempool::state(self.mempool.clone()).await?,
-            )),
+            GetStats => Ok(MempoolResponse::Stats(self.mempool.stats().await?)),
+            GetState => Ok(MempoolResponse::State(self.mempool.state().await?)),
            GetTxStateByExcessSig(excess_sig) => Ok(MempoolResponse::TxStorage(
-                async_mempool::has_tx_with_excess_sig(self.mempool.clone(), excess_sig).await?,
+                self.mempool.has_tx_with_excess_sig(&excess_sig).await?,
            )),
            SubmitTransaction(tx) => {
                debug!(
@@ -109,18 +93,20 @@
        self.submit_transaction(tx, exclude_peers).await.map(|_| ())
    }

-    // Submits a transaction to the mempool and propagate valid transactions.
+    /// Submits a transaction to the mempool and propagates valid transactions.
    async fn submit_transaction(
        &mut self,
        tx: Transaction,
        exclude_peers: Vec<NodeId>,
    ) -> Result<TxStorageResponse, MempoolServiceError> {
        trace!(target: LOG_TARGET, "submit_transaction: {}.", tx);
-        let tx_storage =
-            async_mempool::has_tx_with_excess_sig(self.mempool.clone(), tx.body.kernels()[0].excess_sig.clone())
-                .await?;
-        let kernel_excess_sig = tx.body.kernels()[0].excess_sig.get_signature().to_hex();
+        let tx_storage = self.mempool.has_transaction(&tx).await?;
+        let kernel_excess_sig = tx
+            .first_kernel_excess_sig()
+            .ok_or(MempoolServiceError::TransactionNoKernels)?
+            .get_signature()
+            .to_hex();
        if tx_storage.is_stored() {
            debug!(
                target: LOG_TARGET,
@@ -128,7 +114,8 @@
            );
            return Ok(tx_storage);
        }
-        match async_mempool::insert(self.mempool.clone(), Arc::new(tx.clone())).await {
+        let tx = Arc::new(tx);
+        match self.mempool.insert(tx.clone()).await {
            Ok(tx_storage) => {
                debug!(
                    target: LOG_TARGET,
@@ -152,35 +139,24 @@
    pub async fn handle_block_event(&mut self, block_event: &BlockEvent) -> Result<(), MempoolServiceError> {
        use BlockEvent::*;
        match block_event {
-            ValidBlockAdded(block, BlockAddResult::Ok(_), broadcast) => {
-                async_mempool::process_published_block(self.mempool.clone(), block.clone()).await?;
-                if broadcast.is_true() {
-                    let _ = self.event_publisher.send(MempoolStateEvent::Updated);
-                }
+            ValidBlockAdded(block, BlockAddResult::Ok(_)) => {
+                self.mempool.process_published_block(block).await?;
            },
-            ValidBlockAdded(_, BlockAddResult::ChainReorg { added, removed }, broadcast) => {
-                async_mempool::process_reorg(
-                    self.mempool.clone(),
-                    removed.iter().map(|b| b.to_arc_block()).collect(),
-                    added.iter().map(|b| b.to_arc_block()).collect(),
-                )
-                .await?;
-                if broadcast.is_true() {
-                    let _ = self.event_publisher.send(MempoolStateEvent::Updated);
-                }
+            ValidBlockAdded(_, BlockAddResult::ChainReorg { added, removed }) => {
+                self.mempool
+                    .process_reorg(
+                        removed.iter().map(|b| b.to_arc_block()).collect(),
+                        added.iter().map(|b| b.to_arc_block()).collect(),
+                    )
+                    .await?;
            },
            BlockSyncRewind(removed_blocks) if !removed_blocks.is_empty() => {
-                async_mempool::process_reorg(
-                    self.mempool.clone(),
-                    removed_blocks.iter().map(|b| b.to_arc_block()).collect(),
-                    vec![],
-                )
-                .await?;
-                let _ = self.event_publisher.send(MempoolStateEvent::Updated);
+                self.mempool
+                    .process_reorg(removed_blocks.iter().map(|b| b.to_arc_block()).collect(), vec![])
+                    .await?;
            },
            BlockSyncComplete(tip_block) => {
-                async_mempool::process_published_block(self.mempool.clone(),
tip_block.to_arc_block()).await?; - let _ = self.event_publisher.send(MempoolStateEvent::Updated); + self.mempool.process_published_block(tip_block.block()).await?; }, _ => {}, } diff --git a/base_layer/core/src/mempool/service/initializer.rs b/base_layer/core/src/mempool/service/initializer.rs index 9573ebb056..e416d2757b 100644 --- a/base_layer/core/src/mempool/service/initializer.rs +++ b/base_layer/core/src/mempool/service/initializer.rs @@ -38,7 +38,7 @@ use tari_service_framework::{ ServiceInitializer, ServiceInitializerContext, }; -use tokio::sync::{broadcast, mpsc}; +use tokio::sync::mpsc; use crate::{ base_node::{comms_interface::LocalNodeCommsInterface, StateMachineHandle}, @@ -153,17 +153,11 @@ impl ServiceInitializer for MempoolServiceInitializer { let (outbound_tx_sender, outbound_tx_stream) = mpsc::unbounded_channel(); let (outbound_request_sender_service, outbound_request_stream) = reply_channel::unbounded(); let (local_request_sender_service, local_request_stream) = reply_channel::unbounded(); - let (mempool_state_event_publisher, _) = broadcast::channel(100); let outbound_mp_interface = OutboundMempoolServiceInterface::new(outbound_request_sender_service, outbound_tx_sender); - let local_mp_interface = - LocalMempoolService::new(local_request_sender_service, mempool_state_event_publisher.clone()); + let local_mp_interface = LocalMempoolService::new(local_request_sender_service); let config = self.config; - let inbound_handlers = MempoolInboundHandlers::new( - mempool_state_event_publisher, - self.mempool.clone(), - outbound_mp_interface.clone(), - ); + let inbound_handlers = MempoolInboundHandlers::new(self.mempool.clone(), outbound_mp_interface.clone()); // Register handle to OutboundMempoolServiceInterface before waiting for handles to be ready context.register_handle(outbound_mp_interface); diff --git a/base_layer/core/src/mempool/service/local_service.rs b/base_layer/core/src/mempool/service/local_service.rs index 1d58faec03..ae6645da98 100644 --- a/base_layer/core/src/mempool/service/local_service.rs +++ b/base_layer/core/src/mempool/service/local_service.rs @@ -22,12 +22,10 @@ use tari_common_types::types::Signature; use tari_service_framework::{reply_channel::SenderService, Service}; -use tokio::sync::broadcast; use crate::{ mempool::{ service::{MempoolRequest, MempoolResponse, MempoolServiceError}, - MempoolStateEvent, StateResponse, StatsResponse, TxStorageResponse, @@ -47,7 +45,6 @@ pub type LocalMempoolRequester = SenderService, } impl LocalMempoolService { @@ -57,18 +54,8 @@ impl LocalMempoolService { /// /// To make things a little more ergonomic, the channel handling is done for you in the other member functions, /// such that the request behaves like a standard future. 
- pub fn new( - request_sender: LocalMempoolRequester, - mempool_state_event_stream: broadcast::Sender, - ) -> Self { - LocalMempoolService { - request_sender, - mempool_state_event_stream, - } - } - - pub fn get_mempool_state_event_stream(&self) -> broadcast::Receiver { - self.mempool_state_event_stream.subscribe() + pub fn new(request_sender: LocalMempoolRequester) -> Self { + LocalMempoolService { request_sender } } /// Returns a future that resolves to the current mempool statistics @@ -119,7 +106,7 @@ impl LocalMempoolService { mod test { use futures::StreamExt; use tari_service_framework::reply_channel::{unbounded, Receiver}; - use tokio::{sync::broadcast, task}; + use tokio::task; use crate::mempool::{ service::{local_service::LocalMempoolService, MempoolRequest, MempoolResponse}, @@ -151,9 +138,8 @@ mod test { #[tokio::test] async fn mempool_stats() { - let (event_publisher, _) = broadcast::channel(100); let (tx, rx) = unbounded(); - let mut service = LocalMempoolService::new(tx, event_publisher); + let mut service = LocalMempoolService::new(tx); task::spawn(mock_handler(rx)); let stats = service.get_mempool_stats().await; let stats = stats.expect("get_mempool_stats should have succeeded"); @@ -162,9 +148,8 @@ mod test { #[tokio::test] async fn mempool_stats_from_multiple() { - let (event_publisher, _) = broadcast::channel(100); let (tx, rx) = unbounded(); - let mut service = LocalMempoolService::new(tx, event_publisher); + let mut service = LocalMempoolService::new(tx); let mut service2 = service.clone(); task::spawn(mock_handler(rx)); let stats = service.get_mempool_stats().await; diff --git a/base_layer/core/src/mempool/service/outbound_interface.rs b/base_layer/core/src/mempool/service/outbound_interface.rs index 95cbca53aa..315457eff8 100644 --- a/base_layer/core/src/mempool/service/outbound_interface.rs +++ b/base_layer/core/src/mempool/service/outbound_interface.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use std::sync::Arc; + use log::*; use tari_common_types::types::Signature; use tari_comms::peer_manager::NodeId; @@ -42,14 +44,14 @@ pub const LOG_TARGET: &str = "c::mp::service::outbound_interface"; #[derive(Clone)] pub struct OutboundMempoolServiceInterface { request_sender: SenderService>, - tx_sender: UnboundedSender<(Transaction, Vec)>, + tx_sender: UnboundedSender<(Arc, Vec)>, } impl OutboundMempoolServiceInterface { /// Construct a new OutboundMempoolServiceInterface with the specified SenderService. pub fn new( request_sender: SenderService>, - tx_sender: UnboundedSender<(Transaction, Vec)>, + tx_sender: UnboundedSender<(Arc, Vec)>, ) -> Self { Self { request_sender, @@ -70,7 +72,7 @@ impl OutboundMempoolServiceInterface { /// Transmit a transaction to remote base nodes, excluding the provided peers. 
pub async fn propagate_tx( &mut self, - transaction: Transaction, + transaction: Arc, exclude_peers: Vec, ) -> Result<(), MempoolServiceError> { self.tx_sender.send((transaction, exclude_peers)).or_else(|e| { diff --git a/base_layer/core/src/mempool/service/service.rs b/base_layer/core/src/mempool/service/service.rs index 44569bcef4..cb6edd6393 100644 --- a/base_layer/core/src/mempool/service/service.rs +++ b/base_layer/core/src/mempool/service/service.rs @@ -68,7 +68,7 @@ const LOG_TARGET: &str = "c::mempool::service::service"; /// A convenience struct to hold all the Mempool service streams pub struct MempoolStreams { pub outbound_request_stream: SOutReq, - pub outbound_tx_stream: mpsc::UnboundedReceiver<(Transaction, Vec)>, + pub outbound_tx_stream: mpsc::UnboundedReceiver<(Arc, Vec)>, pub inbound_request_stream: SInReq, pub inbound_response_stream: SInRes, pub inbound_transaction_stream: STxIn, @@ -167,7 +167,7 @@ impl MempoolService { // Incoming transaction messages from the Comms layer Some(transaction_msg) = inbound_transaction_stream.next() => { - self.spawn_handle_incoming_tx(transaction_msg).await; + self.spawn_handle_incoming_tx(transaction_msg); } // Incoming local request messages from the LocalMempoolServiceInterface and other local services @@ -228,7 +228,7 @@ impl MempoolService { }); } - fn spawn_handle_outbound_tx(&self, tx: Transaction, excluded_peers: Vec) { + fn spawn_handle_outbound_tx(&self, tx: Arc, excluded_peers: Vec) { let outbound_message_service = self.outbound_message_service.clone(); task::spawn(async move { let result = handle_outbound_tx(outbound_message_service, tx, excluded_peers).await; @@ -264,7 +264,7 @@ impl MempoolService { }); } - async fn spawn_handle_incoming_tx(&self, tx_msg: DomainMessage) { + fn spawn_handle_incoming_tx(&self, tx_msg: DomainMessage) { // Determine if we are bootstrapped let status_watch = self.state_machine.get_status_info_watch(); @@ -489,7 +489,7 @@ async fn handle_request_timeout( async fn handle_outbound_tx( mut outbound_message_service: OutboundMessageRequester, - tx: Transaction, + tx: Arc, exclude_peers: Vec, ) -> Result<(), MempoolServiceError> { let result = outbound_message_service diff --git a/base_layer/core/src/mempool/sync_protocol/error.rs b/base_layer/core/src/mempool/sync_protocol/error.rs index e529db7644..99663b3c57 100644 --- a/base_layer/core/src/mempool/sync_protocol/error.rs +++ b/base_layer/core/src/mempool/sync_protocol/error.rs @@ -41,4 +41,8 @@ pub enum MempoolProtocolError { DecodeFailed { peer: NodeId, source: prost::DecodeError }, #[error("Wire message from `{peer}` failed to convert to local type: {message}")] MessageConversionFailed { peer: NodeId, message: String }, + #[error("Send timeout occurred")] + SendTimeout, + #[error("Receive timeout occurred")] + RecvTimeout, } diff --git a/base_layer/core/src/mempool/sync_protocol/mod.rs b/base_layer/core/src/mempool/sync_protocol/mod.rs index 302596a3f6..136a64b19d 100644 --- a/base_layer/core/src/mempool/sync_protocol/mod.rs +++ b/base_layer/core/src/mempool/sync_protocol/mod.rs @@ -70,6 +70,7 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, }, + time::Duration, }; use error::MempoolProtocolError; @@ -87,15 +88,17 @@ use tari_comms::{ Bytes, PeerConnection, }; -use tari_crypto::tari_utilities::{hex::Hex, ByteArray}; +use tari_crypto::tari_utilities::hex::Hex; +use tari_utilities::ByteArray; use tokio::{ io::{AsyncRead, AsyncWrite}, sync::Semaphore, task, + time, }; use crate::{ - mempool::{async_mempool, proto, Mempool, MempoolServiceConfig}, + 
mempool::{proto, Mempool, MempoolServiceConfig}, proto as shared_proto, transactions::transaction::Transaction, }; @@ -303,12 +306,12 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin self.peer_node_id.short_str() ); - let transactions = async_mempool::snapshot(self.mempool.clone()).await?; + let transactions = self.mempool.snapshot().await?; let items = transactions .iter() .take(self.config.initial_sync_max_transactions) - .map(|txn| txn.body.kernels()[0].excess_sig.get_signature().to_vec()) - .map(|sig| sig.to_vec()) + .filter_map(|txn| txn.first_kernel_excess_sig()) + .map(|excess| excess.get_signature().to_vec()) .collect(); let inventory = proto::TransactionInventory { items }; @@ -389,7 +392,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin inventory.items.len() ); - let transactions = async_mempool::snapshot(self.mempool.clone()).await?; + let transactions = self.mempool.snapshot().await?; let mut duplicate_inventory_items = Vec::new(); let (transactions, _) = transactions.into_iter().partition::, _>(|transaction| { @@ -503,12 +506,12 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin self.peer_node_id.short_str() ); - let store_state = async_mempool::has_tx_with_excess_sig(self.mempool.clone(), excess_sig.clone()).await?; + let store_state = self.mempool.has_transaction(&txn).await?; if store_state.is_stored() { return Ok(()); } - let stored_result = async_mempool::insert(self.mempool.clone(), Arc::new(txn)).await?; + let stored_result = self.mempool.insert(Arc::new(txn)).await?; if stored_result.is_stored() { debug!( target: LOG_TARGET, @@ -529,7 +532,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin async fn write_transactions(&mut self, transactions: Vec>) -> Result<(), MempoolProtocolError> { let txns = transactions.into_iter().take(self.config.initial_sync_max_transactions) .filter_map(|txn| { - match shared_proto::types::Transaction::try_from((*txn).clone()) { + match shared_proto::types::Transaction::try_from(txn) { Ok(txn) => Some(proto::TransactionItem { transaction: Some(txn), }), @@ -548,10 +551,9 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin } async fn read_message(&mut self) -> Result { - let msg = self - .framed - .next() + let msg = time::timeout(Duration::from_secs(10), self.framed.next()) .await + .map_err(|_| MempoolProtocolError::RecvTimeout)? 
.ok_or_else(|| MempoolProtocolError::SubstreamClosed(self.peer_node_id.clone()))??; T::decode(&mut msg.freeze()).map_err(|err| MempoolProtocolError::DecodeFailed { @@ -571,7 +573,12 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin } async fn write_message(&mut self, message: T) -> Result<(), MempoolProtocolError> { - self.framed.send(message.to_encoded_bytes().into()).await?; + time::timeout( + Duration::from_secs(10), + self.framed.send(message.to_encoded_bytes().into()), + ) + .await + .map_err(|_| MempoolProtocolError::SendTimeout)??; Ok(()) } } diff --git a/base_layer/core/src/mempool/sync_protocol/test.rs b/base_layer/core/src/mempool/sync_protocol/test.rs index ffdf42e1fe..80ebcc2e20 100644 --- a/base_layer/core/src/mempool/sync_protocol/test.rs +++ b/base_layer/core/src/mempool/sync_protocol/test.rs @@ -44,7 +44,6 @@ use tokio::{ use crate::{ consensus::ConsensusManager, mempool::{ - async_mempool, proto, sync_protocol::{MempoolPeerProtocol, MempoolSyncProtocol, MAX_FRAME_SIZE, MEMPOOL_SYNC_PROTOCOL}, Mempool, @@ -62,22 +61,22 @@ pub fn create_transactions(n: usize) -> Vec { .collect() } -fn new_mempool_with_transactions(n: usize) -> (Mempool, Vec) { +async fn new_mempool_with_transactions(n: usize) -> (Mempool, Vec) { let mempool = Mempool::new( Default::default(), ConsensusManager::builder(Network::LocalNet).build(), - Arc::new(MockValidator::new(true)), + Box::new(MockValidator::new(true)), ); let transactions = create_transactions(n); for txn in &transactions { - mempool.insert(Arc::new(txn.clone())).unwrap(); + mempool.insert(Arc::new(txn.clone())).await.unwrap(); } (mempool, transactions) } -fn setup( +async fn setup( num_txns: usize, ) -> ( ProtocolNotificationTx, @@ -87,7 +86,7 @@ fn setup( ) { let (protocol_notif_tx, protocol_notif_rx) = mpsc::channel(1); let (connectivity_events_tx, connectivity_events_rx) = broadcast::channel(10); - let (mempool, transactions) = new_mempool_with_transactions(num_txns); + let (mempool, transactions) = new_mempool_with_transactions(num_txns).await; let protocol = MempoolSyncProtocol::new( Default::default(), protocol_notif_rx, @@ -102,7 +101,7 @@ fn setup( #[tokio::test] async fn empty_set() { - let (_, connectivity_events_tx, mempool1, _) = setup(0); + let (_, connectivity_events_tx, mempool1, _) = setup(0).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -117,22 +116,22 @@ async fn empty_set() { let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); - let (mempool2, _) = new_mempool_with_transactions(0); + let (mempool2, _) = new_mempool_with_transactions(0).await; MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_responder() .await .unwrap(); - let transactions = mempool2.snapshot().unwrap(); + let transactions = mempool2.snapshot().await.unwrap(); assert_eq!(transactions.len(), 0); - let transactions = mempool1.snapshot().unwrap(); + let transactions = mempool1.snapshot().await.unwrap(); assert_eq!(transactions.len(), 0); } #[tokio::test] async fn synchronise() { - let (_, connectivity_events_tx, mempool1, transactions1) = setup(5); + let (_, connectivity_events_tx, mempool1, transactions1) = setup(5).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -147,18 +146,18 @@ async fn synchronise() { let substream = 
node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); - let (mempool2, transactions2) = new_mempool_with_transactions(3); + let (mempool2, transactions2) = new_mempool_with_transactions(3).await; MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_responder() .await .unwrap(); - let transactions = get_snapshot(&mempool2); + let transactions = get_snapshot(&mempool2).await; assert_eq!(transactions.len(), 8); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); - let transactions = get_snapshot(&mempool1); + let transactions = get_snapshot(&mempool1).await; assert_eq!(transactions.len(), 8); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); @@ -166,7 +165,7 @@ async fn synchronise() { #[tokio::test] async fn duplicate_set() { - let (_, connectivity_events_tx, mempool1, transactions1) = setup(2); + let (_, connectivity_events_tx, mempool1, transactions1) = setup(2).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -181,19 +180,19 @@ async fn duplicate_set() { let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); - let (mempool2, transactions2) = new_mempool_with_transactions(1); - mempool2.insert(Arc::new(transactions1[0].clone())).unwrap(); + let (mempool2, transactions2) = new_mempool_with_transactions(1).await; + mempool2.insert(Arc::new(transactions1[0].clone())).await.unwrap(); MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_responder() .await .unwrap(); - let transactions = get_snapshot(&mempool2); + let transactions = get_snapshot(&mempool2).await; assert_eq!(transactions.len(), 3); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); - let transactions = get_snapshot(&mempool1); + let transactions = get_snapshot(&mempool1).await; assert_eq!(transactions.len(), 3); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); assert!(transactions2.iter().all(|txn| transactions.contains(txn))); @@ -201,7 +200,7 @@ async fn duplicate_set() { #[tokio::test] async fn responder() { - let (protocol_notif, _, _, transactions1) = setup(2); + let (protocol_notif, _, _, transactions1) = setup(2).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -215,17 +214,15 @@ async fn responder() { .await .unwrap(); - let (mempool2, transactions2) = new_mempool_with_transactions(1); - async_mempool::insert(mempool2.clone(), Arc::new(transactions1[0].clone())) - .await - .unwrap(); + let (mempool2, transactions2) = new_mempool_with_transactions(1).await; + mempool2.insert(Arc::new(transactions1[0].clone())).await.unwrap(); let framed = framing::canonical(sock_out, MAX_FRAME_SIZE); MempoolPeerProtocol::new(Default::default(), framed, node2.node_id().clone(), mempool2.clone()) .start_initiator() .await .unwrap(); - let transactions = get_snapshot(&mempool2); + let transactions = get_snapshot(&mempool2).await; assert_eq!(transactions.len(), 3); assert!(transactions1.iter().all(|txn| transactions.contains(txn))); 
assert!(transactions2.iter().all(|txn| transactions.contains(txn))); @@ -237,7 +234,7 @@ async fn responder() { #[tokio::test] async fn initiator_messages() { - let (protocol_notif, _, _, transactions1) = setup(2); + let (protocol_notif, _, _, transactions1) = setup(2).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -272,7 +269,7 @@ async fn initiator_messages() { #[tokio::test] async fn responder_messages() { - let (_, connectivity_events_tx, _, transactions1) = setup(1); + let (_, connectivity_events_tx, _, transactions1) = setup(1).await; let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -321,8 +318,15 @@ async fn responder_messages() { assert!(framed.next().await.is_none()); } -fn get_snapshot(mempool: &Mempool) -> Vec { - mempool.snapshot().unwrap().iter().map(|t| &**t).cloned().collect() +async fn get_snapshot(mempool: &Mempool) -> Vec { + mempool + .snapshot() + .await + .unwrap() + .iter() + .map(|t| &**t) + .cloned() + .collect() } async fn read_message(reader: &mut S) -> T diff --git a/base_layer/core/src/mempool/unconfirmed_pool/error.rs b/base_layer/core/src/mempool/unconfirmed_pool/error.rs index 828c073e32..2f7906173f 100644 --- a/base_layer/core/src/mempool/unconfirmed_pool/error.rs +++ b/base_layer/core/src/mempool/unconfirmed_pool/error.rs @@ -22,14 +22,10 @@ use thiserror::Error; -use crate::mempool::priority::PriorityError; - #[derive(Debug, Error)] pub enum UnconfirmedPoolError { #[error("The HashMap and BTreeMap are out of sync")] StorageOutofSync, - #[error("Priority error: `{0}`")] - PriorityError(#[from] PriorityError), #[error("Transaction has no kernels")] TransactionNoKernels, } diff --git a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs index ef2a1f6f38..d32514763d 100644 --- a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs +++ b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs @@ -27,7 +27,7 @@ use std::{ use log::*; use serde::{Deserialize, Serialize}; -use tari_common_types::types::{HashOutput, Signature}; +use tari_common_types::types::{HashOutput, PrivateKey, Signature}; use tari_crypto::tari_utilities::{hex::Hex, Hashable}; use crate::{ @@ -42,6 +42,8 @@ use crate::{ pub const LOG_TARGET: &str = "c::mp::unconfirmed_pool::unconfirmed_pool_storage"; +type TransactionKey = usize; + /// Configuration for the UnconfirmedPool #[derive(Clone, Copy, Serialize, Deserialize)] pub struct UnconfirmedPoolConfig { @@ -71,9 +73,11 @@ impl Default for UnconfirmedPoolConfig { /// these containers. 
pub struct UnconfirmedPool {
    config: UnconfirmedPoolConfig,
-    txs_by_signature: HashMap<Signature, PrioritizedTransaction>,
-    txs_by_priority: BTreeMap<FeePriority, Signature>,
-    txs_by_output: HashMap<HashOutput, Vec<Signature>>,
+    key_counter: usize,
+    tx_by_key: HashMap<TransactionKey, PrioritizedTransaction>,
+    txs_by_signature: HashMap<PrivateKey, Vec<TransactionKey>>,
+    tx_by_priority: BTreeMap<FeePriority, TransactionKey>,
+    txs_by_output: HashMap<HashOutput, Vec<TransactionKey>>,
}

// helper class to reduce type complexity
@@ -87,23 +91,14 @@ impl UnconfirmedPool {
    pub fn new(config: UnconfirmedPoolConfig) -> Self {
        Self {
            config,
+            key_counter: 0,
+            tx_by_key: HashMap::new(),
            txs_by_signature: HashMap::new(),
-            txs_by_priority: BTreeMap::new(),
+            tx_by_priority: BTreeMap::new(),
            txs_by_output: HashMap::new(),
        }
    }

-    fn lowest_priority(&self) -> &FeePriority {
-        self.txs_by_priority.iter().next().unwrap().0
-    }
-
-    fn remove_lowest_priority_tx(&mut self) {
-        if let Some((priority, sig)) = self.txs_by_priority.iter().next().map(|(p, s)| (p.clone(), s.clone())) {
-            self.txs_by_signature.remove(&sig);
-            self.txs_by_priority.remove(&priority);
-        }
-    }
-
    /// Insert a new transaction into the UnconfirmedPool. Low priority transactions will be removed to make space for
    /// higher priority transactions. The lowest priority transactions will be removed when the maximum capacity is
    /// reached and the new transaction has a higher priority than the currently stored lowest priority transaction.
@@ -111,50 +106,47 @@
        &mut self,
        tx: Arc<Transaction>,
        dependent_outputs: Option<Vec<HashOutput>>,
-        transaction_weight: &TransactionWeight,
+        transaction_weighting: &TransactionWeight,
    ) -> Result<(), UnconfirmedPoolError> {
-        let tx_key = tx
-            .first_kernel_excess_sig()
-            .ok_or(UnconfirmedPoolError::TransactionNoKernels)?;
-
-        if self.txs_by_signature.contains_key(tx_key) {
+        if tx
+            .body
+            .kernels()
+            .iter()
+            .all(|k| self.txs_by_signature.contains_key(k.excess_sig.get_signature()))
+        {
            return Ok(());
        }
-        let prioritized_tx = PrioritizedTransaction::try_construct(transaction_weight, tx.clone(), dependent_outputs)?;
-        if self.txs_by_signature.len() >= self.config.storage_capacity {
+        let new_key = self.get_next_key();
+        let prioritized_tx = PrioritizedTransaction::new(new_key, transaction_weighting, tx, dependent_outputs);
+        if self.tx_by_key.len() >= self.config.storage_capacity {
            if prioritized_tx.priority < *self.lowest_priority() {
                return Ok(());
            }
            self.remove_lowest_priority_tx();
        }
-        self.txs_by_priority
-            .insert(prioritized_tx.priority.clone(), tx_key.clone());
-        self.txs_by_signature.insert(tx_key.clone(), prioritized_tx);
-        for output in tx.body.outputs().clone() {
-            self.txs_by_output
-                .entry(output.hash())
-                .or_default()
-                .push(tx_key.clone());
+
+        self.tx_by_priority.insert(prioritized_tx.priority.clone(), new_key);
+        for output in prioritized_tx.transaction.body.outputs() {
+            self.txs_by_output.entry(output.hash()).or_default().push(new_key);
+        }
+        for kernel in prioritized_tx.transaction.body.kernels() {
+            let sig = kernel.excess_sig.get_signature();
+            self.txs_by_signature.entry(sig.clone()).or_default().push(new_key);
+        }
+
        debug!(
            target: LOG_TARGET,
-            "Inserted transaction with signature {} into unconfirmed pool:",
-            tx_key.get_signature().to_hex()
+            "Inserted transaction {} into unconfirmed pool", prioritized_tx
        );
+        self.tx_by_key.insert(new_key, prioritized_tx);

-        trace!(target: LOG_TARGET, "insert: {}", tx);
        Ok(())
    }
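The `insert` above enforces the capacity rule described in its doc comment: at capacity, an incoming transaction is dropped if it ranks below the current lowest priority; otherwise the lowest-priority entry is evicted first. A toy sketch of just that rule (assumptions: unique `u64` priorities where higher wins; `String` stands in for the transaction):

```rust
// Toy model of the eviction rule, not the actual UnconfirmedPool.
use std::collections::BTreeMap;

struct EvictingPool {
    capacity: usize,
    by_priority: BTreeMap<u64, String>,
}

impl EvictingPool {
    fn insert(&mut self, priority: u64, tx: String) {
        if self.by_priority.len() >= self.capacity {
            // BTreeMap iterates in ascending key order, so the first key is
            // the lowest priority currently stored.
            let lowest = *self.by_priority.keys().next().expect("capacity > 0");
            if priority < lowest {
                // Incoming transaction ranks below everything stored: drop it.
                return;
            }
            self.by_priority.remove(&lowest);
        }
        self.by_priority.insert(priority, tx);
    }
}
```

The real pool stores a `TransactionKey` as the `BTreeMap` value and evicts through `remove_transaction`, so the signature and output indexes stay in sync with the primary store.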
    /// This will search the unconfirmed pool for the set of outputs and return true if all of them are found
-    pub fn verify_outputs_exist(&mut self, outputs: &[HashOutput]) -> bool {
-        for hash in outputs {
-            if !self.txs_by_output.contains_key(hash) {
-                return false;
-            }
-        }
-        true
+    pub fn contains_all_outputs(&mut self, outputs: &[HashOutput]) -> bool {
+        outputs.iter().all(|hash| self.txs_by_output.contains_key(hash))
    }

    /// Insert a set of new transactions into the UnconfirmedPool
@@ -162,52 +154,51 @@
    pub fn insert_many<I: IntoIterator<Item = Arc<Transaction>>>(
        &mut self,
        txs: I,
-        transaction_weight: &TransactionWeight,
+        transaction_weighting: &TransactionWeight,
    ) -> Result<(), UnconfirmedPoolError> {
        for tx in txs.into_iter() {
-            self.insert(tx, None, transaction_weight)?;
+            self.insert(tx, None, transaction_weighting)?;
        }
        Ok(())
    }

    /// Check if a transaction is available in the UnconfirmedPool
    pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> bool {
-        self.txs_by_signature.contains_key(excess_sig)
+        self.txs_by_signature.contains_key(excess_sig.get_signature())
    }

    /// Returns a set of the highest priority unconfirmed transactions that can be included in a block
    pub fn highest_priority_txs(&mut self, total_weight: u64) -> Result<RetrieveResults, UnconfirmedPoolError> {
        let mut selected_txs = HashMap::new();
-        let mut curr_weight: u64 = 0;
-        let mut curr_skip_count: usize = 0;
+        let mut curr_weight = 0;
+        let mut curr_skip_count = 0;
        let mut transactions_to_remove_and_recheck = Vec::new();
-        for (_, tx_key) in self.txs_by_priority.iter().rev() {
+        let mut potential_transactions_to_remove_and_recheck = Vec::new();
+        for (_, tx_key) in self.tx_by_priority.iter().rev() {
            if selected_txs.contains_key(tx_key) {
                continue;
            }
+
            let prioritized_transaction = self
-                .txs_by_signature
+                .tx_by_key
                .get(tx_key)
                .ok_or(UnconfirmedPoolError::StorageOutofSync)?;
            let mut total_transaction_weight = 0;
-            let mut potential_transactions_to_insert = HashMap::new();
-            let mut potential_transactions_to_remove_and_recheck = Vec::new();
-            self.get_all_dependant_transactions(
+            let mut candidate_transactions_to_select = HashMap::new();
+            self.get_all_dependent_transactions(
                prioritized_transaction,
-                &mut potential_transactions_to_insert,
+                &mut candidate_transactions_to_select,
                &mut potential_transactions_to_remove_and_recheck,
                &selected_txs,
                &mut total_transaction_weight,
            )?;
-            if curr_weight + total_transaction_weight <= total_weight &&
-                potential_transactions_to_remove_and_recheck.is_empty()
+            let total_weight_after_candidates = curr_weight + total_transaction_weight;
+            if total_weight_after_candidates <= total_weight && potential_transactions_to_remove_and_recheck.is_empty()
            {
-                if !UnconfirmedPool::find_duplicate_input(&selected_txs, &potential_transactions_to_insert) {
+                if !UnconfirmedPool::find_duplicate_input(&selected_txs, &candidate_transactions_to_select) {
                    curr_weight += total_transaction_weight;
-                    for (key, transaction) in potential_transactions_to_insert {
-                        selected_txs.insert((key).clone(), transaction.transaction.clone());
-                    }
+                    selected_txs.extend(candidate_transactions_to_select);
                }
            } else {
                transactions_to_remove_and_recheck.append(&mut potential_transactions_to_remove_and_recheck);
@@ -219,50 +210,48 @@
            }
        }
        // we need to remove all transactions that need to be rechecked.
-        for transaction in &transactions_to_remove_and_recheck {
-            let key = transaction
-                .first_kernel_excess_sig()
-                .ok_or(UnconfirmedPoolError::TransactionNoKernels)?;
-            debug!(
-                target: LOG_TARGET,
-                "Removing transaction with key {} from unconfirmed pool because it needs re-evaluation",
-                key.get_signature().to_hex()
-            );
-            self.delete_transaction(key);
+        debug!(
+            target: LOG_TARGET,
+            "Removing {} transaction(s) from unconfirmed pool because they need re-evaluation",
+            transactions_to_remove_and_recheck.len()
+        );
+        for (tx_key, _) in &transactions_to_remove_and_recheck {
+            self.remove_transaction(*tx_key);
        }
+
        let results = RetrieveResults {
            retrieved_transactions: selected_txs.into_values().collect(),
-            transactions_to_insert: transactions_to_remove_and_recheck,
+            transactions_to_insert: transactions_to_remove_and_recheck
+                .into_iter()
+                .map(|(_, tx)| tx)
+                .collect(),
        };
        Ok(results)
    }

-    fn get_all_dependant_transactions(
+    fn get_all_dependent_transactions(
        &self,
        transaction: &PrioritizedTransaction,
-        required_transactions: &mut HashMap<Signature, PrioritizedTransaction>,
-        transactions_to_delete: &mut Vec<Arc<Transaction>>,
-        already_selected_txs: &HashMap<Signature, Arc<Transaction>>,
+        required_transactions: &mut HashMap<TransactionKey, Arc<Transaction>>,
+        transactions_to_delete: &mut Vec<(TransactionKey, Arc<Transaction>)>,
+        selected_txs: &HashMap<TransactionKey, Arc<Transaction>>,
        total_weight: &mut u64,
    ) -> Result<(), UnconfirmedPoolError> {
-        for dependant_output in &transaction.depended_output_hashes {
-            match self.txs_by_output.get(dependant_output) {
+        for dependent_output in &transaction.dependent_output_hashes {
+            match self.txs_by_output.get(dependent_output) {
                Some(signatures) => {
-                    let highest_signature = self.find_highest_priority_transaction(signatures)?;
-                    if !already_selected_txs.contains_key(&highest_signature) {
-                        let dependant_transaction = self
-                            .txs_by_signature
-                            .get(&highest_signature)
-                            .ok_or(UnconfirmedPoolError::StorageOutofSync)?;
-                        self.get_all_dependant_transactions(
-                            dependant_transaction,
+                    let dependent_transaction = self.find_highest_priority_transaction(signatures)?;
+                    if !selected_txs.contains_key(&dependent_transaction.key) {
+                        self.get_all_dependent_transactions(
+                            dependent_transaction,
                            required_transactions,
                            transactions_to_delete,
-                            already_selected_txs,
+                            selected_txs,
                            total_weight,
                        )?;
+
                        if !transactions_to_delete.is_empty() {
-                            transactions_to_delete.push(transaction.transaction.clone());
+                            transactions_to_delete.push((transaction.key, transaction.transaction.clone()));
                            break;
                        }
                    }
@@ -271,51 +260,50 @@
                    // this transaction requires an output that the mempool does not currently have, but did have at
                    // some point.
This means that we need to remove this transaction and re // validate it - transactions_to_delete.push(transaction.transaction.clone()); + transactions_to_delete.push((transaction.key, transaction.transaction.clone())); break; }, } } - let key = transaction - .transaction - .first_kernel_excess_sig() - .ok_or(UnconfirmedPoolError::TransactionNoKernels)?; if required_transactions - .insert(key.clone(), (*transaction).clone()) + .insert(transaction.key, transaction.transaction.clone()) .is_none() { *total_weight += transaction.weight; - }; + } Ok(()) } - fn find_highest_priority_transaction(&self, signatures: &[Signature]) -> Result { - let mut highest_signature = signatures[0].clone(); - for signature in signatures.iter().skip(1) { - let transaction = self - .txs_by_signature - .get(signature) - .ok_or(UnconfirmedPoolError::StorageOutofSync)?; - let current_transaction = self - .txs_by_signature - .get(&highest_signature) - .ok_or(UnconfirmedPoolError::StorageOutofSync)?; - if transaction.priority > current_transaction.priority { - highest_signature = signature.clone(); + fn find_highest_priority_transaction( + &self, + keys: &[TransactionKey], + ) -> Result<&PrioritizedTransaction, UnconfirmedPoolError> { + if keys.is_empty() { + return Err(UnconfirmedPoolError::StorageOutofSync); + } + + let mut highest_transaction = self + .tx_by_key + .get(&keys[0]) + .ok_or(UnconfirmedPoolError::StorageOutofSync)?; + for key in keys.iter().skip(1) { + let transaction = self.tx_by_key.get(key).ok_or(UnconfirmedPoolError::StorageOutofSync)?; + if transaction.priority > highest_transaction.priority { + highest_transaction = transaction; } } - Ok(highest_signature) + Ok(highest_transaction) } // This will search a Vec> for duplicate inputs of a tx fn find_duplicate_input( - current_transactions: &HashMap>, - transactions_to_insert: &HashMap, + current_transactions: &HashMap>, + transactions_to_insert: &HashMap>, ) -> bool { - for (_, transaction_to_insert) in transactions_to_insert.iter() { + for (_, tx_to_insert) in transactions_to_insert.iter() { for (_, transaction) in current_transactions.iter() { for input in transaction.body.inputs() { - for tx_input in transaction_to_insert.transaction.body.inputs() { + for tx_input in tx_to_insert.body.inputs() { if tx_input.output_hash() == input.output_hash() { return true; } @@ -326,17 +314,25 @@ impl UnconfirmedPool { false } + fn lowest_priority(&self) -> &FeePriority { + self.tx_by_priority + .keys() + .next() + .expect("lowest_priority called on empty mempool") + } + + fn remove_lowest_priority_tx(&mut self) { + if let Some(tx_key) = self.tx_by_priority.values().next().copied() { + self.remove_transaction(tx_key); + } + } + /// Remove all current mempool transactions from the UnconfirmedPoolStorage, returning that which have been removed pub fn drain_all_mempool_transactions(&mut self) -> Vec> { - let mempool_txs: Vec> = self - .txs_by_signature - .drain() - .map(|(_key, val)| val.transaction) - .collect(); - self.txs_by_priority.clear(); + self.txs_by_signature.clear(); + self.tx_by_priority.clear(); self.txs_by_output.clear(); - - mempool_txs + self.tx_by_key.drain().map(|(_, val)| val.transaction).collect() } /// Remove all published transactions from the UnconfirmedPoolStorage and discard deprecated transactions @@ -350,41 +346,54 @@ impl UnconfirmedPool { published_block.header.height, published_block.header.hash().to_hex(), ); - // We need to make sure that none of the transactions in the block remains in the mempool - let mut transactions_to_remove = 
Vec::new(); - published_block.body.kernels().iter().for_each(|kernel| { - transactions_to_remove.push(kernel.excess_sig.clone()); - }); - let mut removed_transactions = self.delete_transactions(&transactions_to_remove); - // Remove all other deprecated transactions that cannot be valid anymore - removed_transactions.append(&mut self.remove_deprecated_transactions(published_block)); - removed_transactions - } + // Remove all transactions that contain the kernels found in this block + let mut to_remove = published_block + .body + .kernels() + .iter() + .map(|kernel| kernel.excess_sig.get_signature()) + .filter_map(|sig| self.txs_by_signature.get(sig)) + .flatten() + .copied() + .collect::>(); - // Remove all deprecated transactions from the UnconfirmedPool by scanning inputs and outputs. - fn remove_deprecated_transactions(&mut self, published_block: &Block) -> Vec> { - let mut transaction_keys_to_remove = Vec::new(); - for (tx_key, ptx) in self.txs_by_signature.iter() { - if UnconfirmedPool::find_matching_block_input(ptx, published_block) { - transaction_keys_to_remove.push(tx_key.clone()) - } - } - published_block.body.outputs().iter().for_each(|output| { - if let Some(signatures) = self.txs_by_output.get(&output.hash()) { - for signature in signatures { - transaction_keys_to_remove.push(signature.clone()) - } - } - }); - debug!( - target: LOG_TARGET, - "Removing transactions containing duplicated commitments from unconfirmed pool" + let mut removed_transactions = to_remove + .iter() + .filter_map(|key| self.remove_transaction(*key)) + .collect::>(); + + // Reuse the buffer, clear is very cheap + to_remove.clear(); + + // Remove all transactions that contain the inputs found in this block + to_remove.extend( + self.tx_by_key + .iter() + .filter(|(_, tx)| UnconfirmedPool::find_matching_block_input(tx, published_block)) + .map(|(key, _)| *key), + ); + + removed_transactions.extend(to_remove.iter().filter_map(|key| self.remove_transaction(*key))); + to_remove.clear(); + + // Remove all transactions that contain the outputs found in this block + to_remove.extend( + published_block + .body + .outputs() + .iter() + .filter_map(|output| self.txs_by_output.get(&output.hash())) + .flatten() + .copied(), ); - self.delete_transactions(&transaction_keys_to_remove) + + removed_transactions.extend(to_remove.iter().filter_map(|key| self.remove_transaction(*key))); + + removed_transactions } - // This is a helper function that searches a block and transaction for matching inputs + /// Searches a block and transaction for matching inputs fn find_matching_block_input(transaction: &PrioritizedTransaction, published_block: &Block) -> bool { for input in transaction.transaction.body.inputs() { for published_input in published_block.body.inputs() { @@ -396,55 +405,55 @@ impl UnconfirmedPool { false } - fn delete_transactions(&mut self, signature: &[Signature]) -> Vec> { - let mut removed_txs: Vec> = Vec::new(); - for tx_key in signature { - debug!( - target: LOG_TARGET, - "Removing transaction with key {} from unconfirmed pool", - tx_key.get_signature().to_hex() - ); - if let Some(transaction) = self.delete_transaction(tx_key) { - removed_txs.push(transaction); + /// Ensures that all transactions are safely deleted in order and from all storage + fn remove_transaction(&mut self, tx_key: TransactionKey) -> Option> { + let prioritized_transaction = self.tx_by_key.remove(&tx_key)?; + + self.tx_by_priority.remove(&prioritized_transaction.priority); + + for kernel in 
prioritized_transaction.transaction.body.kernels() { + let sig = kernel.excess_sig.get_signature(); + if let Some(keys) = self.txs_by_signature.get_mut(sig) { + let pos = keys.iter().position(|k| *k == tx_key).expect("mempool out of sync"); + keys.remove(pos); + if keys.is_empty() { + self.txs_by_signature.remove(sig); + } } } - removed_txs - } - // Helper function to ensure that all transactions are safely deleted in order and from all storage - fn delete_transaction(&mut self, signature: &Signature) -> Option> { - if let Some(prioritized_transaction) = self.txs_by_signature.remove(signature) { - self.txs_by_priority.remove(&prioritized_transaction.priority); - for output in prioritized_transaction.transaction.as_ref().body.outputs() { - let key = output.hash(); - if let Some(signatures) = self.txs_by_output.get_mut(&key) { - signatures.retain(|x| x != signature); - if signatures.is_empty() { - self.txs_by_output.remove(&key); - } + for output in prioritized_transaction.transaction.body.outputs() { + let output_hash = output.hash(); + if let Some(keys) = self.txs_by_output.get_mut(&output_hash) { + if let Some(pos) = keys.iter().position(|k| *k == tx_key) { + keys.remove(pos); + } + if keys.is_empty() { + self.txs_by_output.remove(&output_hash); } } - trace!( - target: LOG_TARGET, - "Deleted transaction: {}", - &prioritized_transaction.transaction - ); - return Some(prioritized_transaction.transaction); } - None + trace!( + target: LOG_TARGET, + "Deleted transaction: {}", + &prioritized_transaction.transaction + ); + Some(prioritized_transaction.transaction) } /// Remove all unconfirmed transactions that have become time locked. This can happen when the chain height was /// reduced on some reorgs. - pub fn remove_timelocked(&mut self, tip_height: u64) -> Vec> { - let mut removed_tx_keys: Vec = Vec::new(); - for (tx_key, ptx) in self.txs_by_signature.iter() { - if ptx.transaction.min_spendable_height() > tip_height + 1 { - removed_tx_keys.push(tx_key.clone()); - } - } + pub fn remove_timelocked(&mut self, tip_height: u64) { debug!(target: LOG_TARGET, "Removing time-locked inputs from unconfirmed pool"); - self.delete_transactions(&removed_tx_keys) + let to_remove = self + .tx_by_key + .iter() + .filter(|(_, ptx)| ptx.transaction.min_spendable_height() > tip_height + 1) + .map(|(k, _)| *k) + .collect::>(); + for tx_key in to_remove { + self.remove_transaction(tx_key); + } } /// Returns the total number of unconfirmed transactions stored in the UnconfirmedPool. @@ -454,28 +463,35 @@ impl UnconfirmedPool { /// Returns all transaction stored in the UnconfirmedPool. pub fn snapshot(&self) -> Vec> { - self.txs_by_signature - .iter() - .map(|(_, ptx)| ptx.transaction.clone()) - .collect() + self.tx_by_key.iter().map(|(_, ptx)| ptx.transaction.clone()).collect() } /// Returns the total weight of all transactions stored in the pool. 
    pub fn calculate_weight(&self, transaction_weight: &TransactionWeight) -> u64 {
-        self.txs_by_signature.iter().fold(0, |weight, (_, ptx)| {
+        self.tx_by_key.values().fold(0, |weight, ptx| {
            weight + ptx.transaction.calculate_weight(transaction_weight)
        })
    }

-    #[cfg(test)]
    /// Returns false if there are any inconsistencies in the internal mempool state, otherwise true
-    fn check_status(&self) -> bool {
-        if self.txs_by_priority.len() != self.txs_by_signature.len() {
-            return false;
-        }
-        self.txs_by_priority
-            .iter()
-            .all(|(_, tx_key)| self.txs_by_signature.contains_key(tx_key))
+    #[cfg(test)]
+    fn check_data_consistency(&self) -> bool {
+        self.tx_by_priority.len() == self.tx_by_key.len() &&
+            self.tx_by_priority
+                .values()
+                .all(|tx_key| self.tx_by_key.contains_key(tx_key)) &&
+            self.txs_by_signature
+                .values()
+                .all(|tx_keys| tx_keys.iter().all(|tx_key| self.tx_by_key.contains_key(tx_key))) &&
+            self.txs_by_output
+                .values()
+                .all(|tx_keys| tx_keys.iter().all(|tx_key| self.tx_by_key.contains_key(tx_key)))
+    }
+
+    fn get_next_key(&mut self) -> usize {
+        let key = self.key_counter;
+        self.key_counter = (self.key_counter + 1) % usize::MAX;
+        key
    }
}

@@ -504,19 +520,12 @@
    fn test_find_duplicate_input() {
        let tx1 = Arc::new(tx!(MicroTari(5000), fee: MicroTari(50), inputs: 2, outputs: 1).0);
        let tx2 = Arc::new(tx!(MicroTari(5000), fee: MicroTari(50), inputs: 2, outputs: 1).0);
-        let tx_weight = TransactionWeight::latest();
        let mut tx_pool = HashMap::new();
        let mut tx1_pool = HashMap::new();
        let mut tx2_pool = HashMap::new();
-        tx_pool.insert(tx1.first_kernel_excess_sig().unwrap().clone(), tx1.clone());
-        tx1_pool.insert(
-            tx1.first_kernel_excess_sig().unwrap().clone(),
-            PrioritizedTransaction::try_construct(&tx_weight, tx1.clone(), None).unwrap(),
-        );
-        tx2_pool.insert(
-            tx2.first_kernel_excess_sig().unwrap().clone(),
-            PrioritizedTransaction::try_construct(&tx_weight, tx2.clone(), None).unwrap(),
-        );
+        tx_pool.insert(0usize, tx1.clone());
+        tx1_pool.insert(1usize, tx1);
+        tx2_pool.insert(2usize, tx2);
        assert!(
            UnconfirmedPool::find_duplicate_input(&tx_pool, &tx1_pool),
            "Duplicate was not found"
        );
@@ -548,11 +557,11 @@
        )
        .unwrap();
        // Check that lowest priority tx was removed to make room for new incoming transactions
-        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig),);
-        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig),);
-        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig),);
-        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig),);
-        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig),);
+        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig));
+        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig));
+        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig));
+        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig));
+        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig));
        // Retrieve the set of highest priority unspent transactions
        let desired_weight =
            tx1.calculate_weight(&tx_weight) + tx3.calculate_weight(&tx_weight) + tx5.calculate_weight(&tx_weight);
@@ -564,21 +573,21 @@
        // Note that transaction tx5 could not be included as its weight was too big to fit into the remaining allocated
        // space; the second best transaction was then included
-        assert!(unconfirmed_pool.check_status());
+        assert!(unconfirmed_pool.check_data_consistency());
    }
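The storage layout these tests exercise is worth spelling out: a single monotonically increasing `TransactionKey` owns each transaction in `tx_by_key`, and every other map is a secondary index of keys, which is exactly what `check_data_consistency` above verifies. A minimal self-contained sketch of that shape (hypothetical `String`/`u64` stand-ins for the transaction and signature-scalar types):

```rust
// Sketch of the key-indexed layout, not the actual UnconfirmedPool.
use std::collections::{BTreeMap, HashMap};

type TxKey = usize;

#[derive(Default)]
struct IndexedPool {
    key_counter: TxKey,
    tx_by_key: HashMap<TxKey, String>,          // primary store
    tx_by_priority: BTreeMap<u64, TxKey>,       // ordered for eviction/selection
    txs_by_signature: HashMap<u64, Vec<TxKey>>, // secondary index per kernel
}

impl IndexedPool {
    fn insert(&mut self, priority: u64, sig: u64, tx: String) -> TxKey {
        let key = self.key_counter;
        self.key_counter = self.key_counter.wrapping_add(1);
        self.tx_by_priority.insert(priority, key);
        self.txs_by_signature.entry(sig).or_default().push(key);
        self.tx_by_key.insert(key, tx);
        key
    }

    // Mirrors `check_data_consistency`: every index entry must point back
    // at a live entry in the primary store, and the priority index must be
    // exactly one entry per stored transaction.
    fn is_consistent(&self) -> bool {
        self.tx_by_priority.len() == self.tx_by_key.len() &&
            self.tx_by_priority.values().all(|k| self.tx_by_key.contains_key(k)) &&
            self.txs_by_signature
                .values()
                .all(|keys| keys.iter().all(|k| self.tx_by_key.contains_key(k)))
    }
}
```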

    #[test]
    fn test_double_spend_inputs() {
-        let (tx1, _, _) = tx!(MicroTari(5_000), fee: MicroTari(50), inputs: 1, outputs: 1);
+        let (tx1, _, _) = tx!(MicroTari(5_000), fee: MicroTari(10), inputs: 1, outputs: 1);
        const INPUT_AMOUNT: MicroTari = MicroTari(5_000);
-        let (tx2, inputs, _) = tx!(INPUT_AMOUNT, fee: MicroTari(20), inputs: 1, outputs: 1);
+        let (tx2, inputs, _) = tx!(INPUT_AMOUNT, fee: MicroTari(5), inputs: 1, outputs: 1);

        let test_params = TestParams::new();
        let mut stx_builder = SenderTransactionProtocol::builder(0, create_consensus_constants(0));
        stx_builder
            .with_lock_height(0)
-            .with_fee_per_gram(20.into())
+            .with_fee_per_gram(5.into())
            .with_offset(Default::default())
            .with_private_nonce(test_params.nonce.clone())
            .with_change_secret(test_params.change_spend_key.clone());
@@ -587,7 +596,13 @@
        let double_spend_utxo = tx2.body.inputs().first().unwrap().clone();
        let double_spend_input = inputs.first().unwrap().clone();

-        let estimated_fee = Fee::new(TransactionWeight::latest()).calculate(20.into(), 1, 1, 1, 0);
+        let estimated_fee = Fee::new(TransactionWeight::latest()).calculate(
+            5.into(),
+            1,
+            1,
+            1,
+            test_params.get_size_for_default_metadata(1),
+        );

        let utxo = test_params.create_unblinded_output(UtxoTestParams {
            value: INPUT_AMOUNT - estimated_fee,
@@ -677,7 +692,7 @@
        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig),);
        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig),);

-        assert!(unconfirmed_pool.check_status());
+        assert!(unconfirmed_pool.check_data_consistency());
    }

    #[test]
@@ -719,14 +734,14 @@
        let _ = unconfirmed_pool.remove_published_and_discard_deprecated_transactions(&published_block);

        // Double spends are discarded
-        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig),);
-        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig),);
-        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig),);
-        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig),);
-        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig),);
-        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig),);
+        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig));
+        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig));
+        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig));
+        assert!(unconfirmed_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig));
+        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig));
+        assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig));

-        assert!(unconfirmed_pool.check_status());
+        assert!(unconfirmed_pool.check_data_consistency());
    }
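The three-phase removal in `remove_published_and_discard_deprecated_transactions` earlier in this file collects keys into a reusable buffer before mutating, because removing entries while iterating the same maps would invalidate the borrows. A compact sketch of that collect-then-remove pattern, with hypothetical stand-in types rather than the real pool:

```rust
// Sketch of the collect-then-remove pattern: separate read phase and
// mutate phase, skipping keys that were already removed.
use std::collections::HashMap;

fn remove_matching(
    store: &mut HashMap<usize, String>,
    index: &HashMap<u64, Vec<usize>>,
    published_sigs: &[u64],
) -> Vec<String> {
    // Phase 1: collect the keys while all borrows are read-only...
    let to_remove: Vec<usize> = published_sigs
        .iter()
        .filter_map(|sig| index.get(sig))
        .flatten()
        .copied()
        .collect();
    // Phase 2: ...then mutate; `filter_map` drops keys already gone.
    to_remove
        .iter()
        .filter_map(|key| store.remove(key))
        .collect()
}
```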
    #[test]
@@ -760,14 +775,15 @@
        for txn in txns {
            for output in txn.as_ref().body.outputs() {
-                assert!(unconfirmed_pool.verify_outputs_exist(&[output.hash()]));
-                let signatures_by_output = unconfirmed_pool.txs_by_output.get(&output.hash()).unwrap();
+                assert!(unconfirmed_pool.contains_all_outputs(&[output.hash()]));
+                let keys_by_output = unconfirmed_pool.txs_by_output.get(&output.hash()).unwrap();
                // Each output must be referenced by two transactions
-                assert_eq!(signatures_by_output.len(), 2);
-                // Verify kernel signature present at least once
+                assert_eq!(keys_by_output.len(), 2);
+                // Verify kernel signature present exactly once
                let mut found = 0u8;
-                for signature in signatures_by_output {
-                    if signature == &txn.as_ref().body.kernels()[0].excess_sig {
+                for key in keys_by_output {
+                    let found_tx = &unconfirmed_pool.tx_by_key.get(key).unwrap().transaction;
+                    if *found_tx == txn {
                        found += 1;
                    }
                }
@@ -776,8 +792,20 @@
        }

        // Remove some transactions
-        unconfirmed_pool.delete_transaction(&tx1.body.kernels()[0].excess_sig);
-        unconfirmed_pool.delete_transaction(&tx4.body.kernels()[0].excess_sig);
+        let k = *unconfirmed_pool
+            .txs_by_signature
+            .get(tx1.first_kernel_excess_sig().unwrap().get_signature())
+            .unwrap()
+            .first()
+            .unwrap();
+        unconfirmed_pool.remove_transaction(k);
+        let k = *unconfirmed_pool
+            .txs_by_signature
+            .get(tx4.first_kernel_excess_sig().unwrap().get_signature())
+            .unwrap()
+            .first()
+            .unwrap();
+        unconfirmed_pool.remove_transaction(k);

        let txns = vec![
            Arc::new(tx2),
@@ -786,13 +814,16 @@
        ];
        for txn in txns {
            for output in txn.as_ref().body.outputs() {
-                let signatures_by_output = unconfirmed_pool.txs_by_output.get(&output.hash()).unwrap();
+                let keys_by_output = unconfirmed_pool.txs_by_output.get(&output.hash()).unwrap();
                // Each output must be referenced by one transaction
-                assert_eq!(signatures_by_output.len(), 1);
+                assert_eq!(keys_by_output.len(), 1);
                // Verify kernel signature present exactly once
-                for signature in signatures_by_output {
-                    assert_eq!(signature, &txn.as_ref().body.kernels()[0].excess_sig);
-                }
+                let key = keys_by_output.first().unwrap();
+                let found_tx = &unconfirmed_pool.tx_by_key.get(key).unwrap().transaction;
+                assert_eq!(
+                    found_tx.first_kernel_excess_sig().unwrap(),
+                    txn.first_kernel_excess_sig().unwrap()
+                );
            }
        }
    }
diff --git a/base_layer/core/src/proto/transaction.rs b/base_layer/core/src/proto/transaction.rs
index 1e87c90c63..e0c44ebb66 100644
--- a/base_layer/core/src/proto/transaction.rs
+++ b/base_layer/core/src/proto/transaction.rs
@@ -22,7 +22,10 @@
 //!
Impls for transaction proto -use std::convert::{TryFrom, TryInto}; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, +}; use tari_common_types::types::{BlindingFactor, BulletRangeProof, Commitment, PublicKey}; use tari_crypto::{ @@ -459,3 +462,18 @@ impl TryFrom for proto::types::Transaction { }) } } + +impl TryFrom> for proto::types::Transaction { + type Error = String; + + fn try_from(tx: Arc) -> Result { + match Arc::try_unwrap(tx) { + Ok(tx) => Ok(tx.try_into()?), + Err(tx) => Ok(Self { + offset: Some(tx.offset.clone().into()), + body: Some(tx.body.clone().try_into()?), + script_offset: Some(tx.script_offset.clone().into()), + }), + } + } +} diff --git a/base_layer/core/src/transactions/aggregated_body.rs b/base_layer/core/src/transactions/aggregated_body.rs index ba1d31782f..6adfda9d52 100644 --- a/base_layer/core/src/transactions/aggregated_body.rs +++ b/base_layer/core/src/transactions/aggregated_body.rs @@ -44,23 +44,20 @@ use tari_crypto::{ tari_utilities::hex::Hex, }; -use crate::{ - consensus::ConsensusEncodingSized, - transactions::{ - crypto_factories::CryptoFactories, - tari_amount::MicroTari, - transaction::{ - KernelFeatures, - KernelSum, - OutputFlags, - Transaction, - TransactionError, - TransactionInput, - TransactionKernel, - TransactionOutput, - }, - weight::TransactionWeight, +use crate::transactions::{ + crypto_factories::CryptoFactories, + tari_amount::MicroTari, + transaction::{ + KernelFeatures, + KernelSum, + OutputFlags, + Transaction, + TransactionError, + TransactionInput, + TransactionKernel, + TransactionOutput, }, + weight::TransactionWeight, }; pub const LOG_TARGET: &str = "c::tx::aggregated_body"; @@ -480,20 +477,11 @@ impl AggregateBody { /// Returns the weight in grams of a body pub fn calculate_weight(&self, transaction_weight: &TransactionWeight) -> u64 { - let metadata_byte_size = self.sum_metadata_size(); - transaction_weight.calculate( - self.kernels().len(), - self.inputs().len(), - self.outputs().len(), - metadata_byte_size, - ) + transaction_weight.calculate_body(self) } pub fn sum_metadata_size(&self) -> usize { - self.outputs - .iter() - .map(|o| o.features.consensus_encode_exact_size() + o.script.consensus_encode_exact_size()) - .sum() + self.outputs.iter().map(|o| o.get_metadata_size()).sum() } pub fn is_sorted(&self) -> bool { diff --git a/base_layer/core/src/transactions/fee.rs b/base_layer/core/src/transactions/fee.rs index 295dfc4055..2028ea4dc1 100644 --- a/base_layer/core/src/transactions/fee.rs +++ b/base_layer/core/src/transactions/fee.rs @@ -23,6 +23,7 @@ use std::cmp::max; use super::{tari_amount::MicroTari, weight::TransactionWeight}; +use crate::transactions::aggregated_body::AggregateBody; #[derive(Debug, Clone, Copy)] pub struct Fee(TransactionWeight); @@ -35,7 +36,7 @@ impl Fee { } /// Computes the absolute transaction fee given the fee-per-gram, and the size of the transaction - /// NB: Each fee calculation should be done independently. No commutative, associative or distributive properties + /// NB: Each fee calculation should be done per transaction. No commutative, associative or distributive properties /// are guaranteed to hold between calculations. for e.g. 
fee(1,1,1,4) + fee(1,1,1,12) != fee(1,1,1,16) pub fn calculate( &self, @@ -43,11 +44,16 @@ impl Fee { num_kernels: usize, num_inputs: usize, num_outputs: usize, - total_metadata_byte_size: usize, + rounded_metadata_byte_size: usize, ) -> MicroTari { let weight = self - .0 - .calculate(num_kernels, num_inputs, num_outputs, total_metadata_byte_size); + .weighting() + .calculate(num_kernels, num_inputs, num_outputs, rounded_metadata_byte_size); + MicroTari::from(weight) * fee_per_gram + } + + pub fn calculate_body(&self, fee_per_gram: MicroTari, body: &AggregateBody) -> MicroTari { + let weight = self.weighting().calculate_body(body); MicroTari::from(weight) * fee_per_gram } @@ -55,6 +61,10 @@ impl Fee { pub fn normalize(fee: MicroTari) -> MicroTari { max(Self::MINIMUM_TRANSACTION_FEE, fee) } + + pub fn weighting(&self) -> &TransactionWeight { + &self.0 + } } impl From<TransactionWeight> for Fee { diff --git a/base_layer/core/src/transactions/test_helpers.rs b/base_layer/core/src/transactions/test_helpers.rs index d80c8b472a..cf4304ec4e 100644 --- a/base_layer/core/src/transactions/test_helpers.rs +++ b/base_layer/core/src/transactions/test_helpers.rs @@ -176,6 +176,12 @@ impl TestParams { let input = unblinded.as_transaction_input(&self.commitment_factory).unwrap(); (input, unblinded) } + + pub fn get_size_for_default_metadata(&self, num_outputs: usize) -> usize { + self.fee().weighting().round_up_metadata_size( + script![Nop].consensus_encode_exact_size() + OutputFeatures::default().consensus_encode_exact_size(), + ) * num_outputs + } } impl Default for TestParams { @@ -344,7 +350,9 @@ pub struct TransactionSchema { } fn default_metadata_byte_size() -> usize { - OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size() + TransactionWeight::latest().round_up_metadata_size( + OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size(), + ) } /// Create an unconfirmed transaction for testing with a valid fee, unique access_sig, random inputs and outputs, the @@ -531,11 +539,11 @@ pub fn spend_utxos(schema: TransactionSchema) -> (Transaction, Vec (Transaction, Vec usize { + self.features.consensus_encode_exact_size() + self.script.consensus_encode_exact_size() + } } /// Implement the canonical hashing function for TransactionOutput for use in ordering.
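The non-distributive behaviour called out in the doc comment above falls out of the integer division of metadata bytes into grams. A minimal, self-contained sketch (the helper and `main` below are illustrative only, not library code; 16 mirrors the v2 `metadata_bytes_per_gram` value used in this change):

// Metadata contributes floor(bytes / per_gram) grams to the weight, so
// splitting the same bytes across two fee calculations can lose grams.
fn metadata_grams(metadata_bytes: u64, bytes_per_gram: u64) -> u64 {
    metadata_bytes / bytes_per_gram
}

fn main() {
    let per_gram = 16;
    // fee(1,1,1,4) + fee(1,1,1,12): both metadata terms floor to 0 grams...
    assert_eq!(metadata_grams(4, per_gram) + metadata_grams(12, per_gram), 0);
    // ...while fee(1,1,1,16) picks up a full gram, hence the inequality.
    assert_eq!(metadata_grams(16, per_gram), 1);
}

Rounding each output's metadata size up to a multiple of per_gram before summing, as `get_size_for_default_metadata` and `round_up_metadata_size` do above, is what keeps per-transaction and per-block totals in agreement.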
diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs index 9810550736..33ae7b2435 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -571,20 +571,24 @@ impl SenderTransactionProtocol { let result = self .validate() .and_then(|_| Self::build_transaction(info, features, factories)); - if let Err(e) = result { - self.state = SenderState::Failed(e.clone()); - return Err(e); - } - let transaction = result.unwrap(); - let result = transaction - .validate_internal_consistency(true, factories, None, prev_header, height) - .map_err(TPE::TransactionBuildError); - if let Err(e) = result { - self.state = SenderState::Failed(e.clone()); - return Err(e); + match result { + Ok(mut transaction) => { + transaction.body.sort(); + let result = transaction + .validate_internal_consistency(true, factories, None, prev_header, height) + .map_err(TPE::TransactionBuildError); + if let Err(e) = result { + self.state = SenderState::Failed(e.clone()); + return Err(e); + } + self.state = SenderState::FinalizedTransaction(transaction); + Ok(()) + }, + Err(e) => { + self.state = SenderState::Failed(e.clone()); + Err(e) + }, } - self.state = SenderState::FinalizedTransaction(transaction); - Ok(()) }, _ => Err(TPE::InvalidStateError), } @@ -932,7 +936,9 @@ mod test { let (utxo, input) = create_test_input(MicroTari(25000), 0, &factories.commitment); let mut builder = SenderTransactionProtocol::builder(1, create_consensus_constants(0)); let script = script!(Nop); - let fee = builder.fee().calculate(MicroTari(20), 1, 1, 2, 0); + let expected_fee = builder + .fee() + .calculate(MicroTari(20), 1, 1, 2, a.get_size_for_default_metadata(2)); let features = OutputFeatures::default(); builder .with_lock_height(0) @@ -992,7 +998,7 @@ mod test { assert!(alice.is_finalized()); let tx = alice.get_transaction().unwrap(); assert_eq!(tx.offset, a.offset); - assert_eq!(tx.body.kernels()[0].fee, fee); + assert_eq!(tx.body.kernels()[0].fee, expected_fee); assert_eq!(tx.body.inputs().len(), 1); assert_eq!(tx.body.inputs()[0], utxo); assert_eq!(tx.body.outputs().len(), 2); diff --git a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs index d5d2ee36c6..87ae83f90a 100644 --- a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs +++ b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs @@ -276,25 +276,34 @@ impl SenderTransactionInitializer { fn get_total_metadata_size_for_outputs(&self) -> usize { let mut size = 0; - size += self.get_output_features().consensus_encode_exact_size() * self.num_recipients; size += self .sender_custom_outputs .iter() - .map(|o| o.features.consensus_encode_exact_size() + o.script.consensus_encode_exact_size()) + .map(|o| { + self.fee.weighting().round_up_metadata_size( + o.features.consensus_encode_exact_size() + o.script.consensus_encode_exact_size(), + ) + }) .sum::(); + // TODO: implement iter for FixedSet to avoid the clone size += self .recipient_scripts .clone() .into_vec() .iter() - .map(|script| script.consensus_encode_exact_size()) + .map(|script| { + self.fee.weighting().round_up_metadata_size( + self.get_recipient_output_features().consensus_encode_exact_size() + + script.consensus_encode_exact_size(), + ) + }) .sum::(); size } - fn 
get_output_features(&self) -> OutputFeatures { + fn get_recipient_output_features(&self) -> OutputFeatures { Default::default() } @@ -310,27 +319,29 @@ impl SenderTransactionInitializer { let total_amount = self.amounts.sum().ok_or("Not all amounts have been provided")?; let fee_per_gram = self.fee_per_gram.ok_or("Fee per gram was not provided")?; + // We require scripts for each recipient to be set to calculate the fee + if !self.recipient_scripts.is_full() { + return Err(format!( + "{} recipient script(s) are required", + self.recipient_scripts.size() + )); + } + let metadata_size_without_change = self.get_total_metadata_size_for_outputs(); let fee_without_change = self.fee() .calculate(fee_per_gram, 1, num_inputs, num_outputs, metadata_size_without_change); - let output_features = self.get_output_features(); + let output_features = self.get_recipient_output_features(); let change_metadata_size = self .change_script .as_ref() .map(|script| script.consensus_encode_exact_size()) .unwrap_or(0) + output_features.consensus_encode_exact_size(); + let change_metadata_size = self.fee().weighting().round_up_metadata_size(change_metadata_size); - let fee_with_change = self.fee().calculate( - fee_per_gram, - 1, - num_inputs, - num_outputs + 1, - metadata_size_without_change + change_metadata_size, - ); - let extra_fee = fee_with_change - fee_without_change; + let change_fee = self.fee().calculate(fee_per_gram, 0, 0, 1, change_metadata_size); // Subtract with a check on going negative let total_input_value = total_to_self + total_amount + fee_without_change; let change_amount = total_being_spent.checked_sub(total_input_value); @@ -341,7 +352,7 @@ impl SenderTransactionInitializer { )), Some(MicroTari(0)) => Ok((fee_without_change, MicroTari(0), None)), Some(v) => { - let change_amount = v.checked_sub(extra_fee); + let change_amount = v.checked_sub(change_fee); let change_sender_offset_private_key = PrivateKey::random(&mut OsRng); self.change_sender_offset_private_key = Some(change_sender_offset_private_key.clone()); // TODO: Add unique id if needed @@ -385,7 +396,7 @@ impl SenderTransactionInitializer { metadata_signature, 0, ); - Ok((fee_with_change, v, Some(change_unblinded_output))) + Ok((fee_without_change + change_fee, v, Some(change_unblinded_output))) }, } }, @@ -716,7 +727,9 @@ mod test { PrivateKey::random(&mut OsRng), ) .with_change_script(script, ExecutionStack::default(), PrivateKey::default()); - let expected_fee = builder.fee().calculate(MicroTari(20), 1, 1, 2, 0); + let expected_fee = builder + .fee() + .calculate(MicroTari(20), 1, 1, 2, p.get_size_for_default_metadata(2)); // We needed a change input, so this should fail let err = builder.build::(&factories, None, Some(u64::MAX)).unwrap_err(); assert_eq!(err.message, "Change spending key was not provided"); @@ -744,15 +757,21 @@ mod test { // Create some inputs let factories = CryptoFactories::default(); let p = TestParams::new(); - let (utxo, input) = create_test_input(MicroTari(500), 0, &factories.commitment); + let (utxo, input) = create_test_input(MicroTari(5000), 0, &factories.commitment); let constants = create_consensus_constants(0); - let expected_fee = Fee::from(*constants.transaction_weight()).calculate(MicroTari(4), 1, 1, 1, 0); + let expected_fee = Fee::from(*constants.transaction_weight()).calculate( + MicroTari(4), + 1, + 1, + 1, + p.get_size_for_default_metadata(1), + ); let output = create_unblinded_output( TariScript::default(), OutputFeatures::default(), p.clone(), - MicroTari(500) - expected_fee, + MicroTari(5000) 
- expected_fee, ); // Start the builder let mut builder = SenderTransactionInitializer::new(0, constants); @@ -867,7 +886,9 @@ mod test { // Create some inputs let factories = CryptoFactories::default(); let p = TestParams::new(); - let tx_fee = p.fee().calculate(MicroTari(1), 1, 1, 1, 0); + let tx_fee = p + .fee() + .calculate(MicroTari(1), 1, 1, 1, p.get_size_for_default_metadata(1)); let (utxo, input) = create_test_input(500 * uT + tx_fee, 0, &factories.commitment); let script = script!(Nop); let output = create_unblinded_output(script.clone(), OutputFeatures::default(), p.clone(), MicroTari(500)); @@ -926,7 +947,7 @@ mod test { let err = builder.build::(&factories, None, Some(u64::MAX)).unwrap_err(); assert_eq!( err.message, - "You are spending (471 µT) more than you're providing (400 µT)." + "You are spending (472 µT) more than you're providing (400 µT)." ); } @@ -987,7 +1008,13 @@ mod test { let script = script!(Nop); let constants = create_consensus_constants(0); - let expected_fee = Fee::from(*constants.transaction_weight()).calculate(fee_per_gram, 1, 2, 3, 0); + let expected_fee = Fee::from(*constants.transaction_weight()).calculate( + fee_per_gram, + 1, + 2, + 3, + p.get_size_for_default_metadata(3), + ); let output = create_unblinded_output( script.clone(), OutputFeatures::default(), diff --git a/base_layer/core/src/transactions/weight.rs b/base_layer/core/src/transactions/weight.rs index be0f4e54e2..e9c8e35e1b 100644 --- a/base_layer/core/src/transactions/weight.rs +++ b/base_layer/core/src/transactions/weight.rs @@ -48,8 +48,8 @@ impl WeightParams { pub const fn v2() -> Self { Self { - kernel_weight: 10, // ajd. +2 - input_weight: 8, // ajd. -3 + kernel_weight: 10, // adj. +2 + input_weight: 8, // adj. -3 output_weight: 53, // SAFETY: the value isn't 0. NonZeroU64::new(x).expect(...) is not const so cannot be used in const fn metadata_bytes_per_gram: Some(unsafe { NonZeroU64::new_unchecked(16) }), @@ -76,13 +76,15 @@ impl TransactionWeight { Self(WeightParams::v2()) } - /// Calculate the weight of a transaction based on the number of inputs and outputs + /// Calculate the weight in grams of a transaction based on the number of kernels, inputs, outputs and rounded-up + /// metadata size. Warning: the _per output_ rounded-up metadata size must be used, or the calculation will be + /// incorrect. If possible, use calculate_body instead to ensure correctness. pub fn calculate( &self, num_kernels: usize, num_inputs: usize, num_outputs: usize, - metadata_byte_size: usize, + rounded_up_metadata_byte_size: usize, ) -> u64 { let params = self.params(); params.kernel_weight * num_kernels as u64 + @@ -90,19 +92,52 @@ impl TransactionWeight { params.output_weight * num_outputs as u64 + params .metadata_bytes_per_gram - .map(|per_gram| metadata_byte_size as u64 / per_gram.get()) + .map(|per_gram| rounded_up_metadata_byte_size as u64 / per_gram.get()) .unwrap_or(0) } pub fn calculate_body(&self, body: &AggregateBody) -> u64 { + let rounded_up_metadata_size = self.calculate_normalised_total_metadata_size(body); self.calculate( body.kernels().len(), body.inputs().len(), body.outputs().len(), - body.sum_metadata_size(), + rounded_up_metadata_size, ) } + fn calculate_normalised_total_metadata_size(&self, body: &AggregateBody) -> usize { + // When calculating the total block size vs each individual transaction, the div operator in `calculate` above + // will yield a different result due to integer rounding.
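+ // For example (illustrative numbers only): with p = 16, outputs of 4 and 12
+ // metadata bytes weigh (4 + 12) / 16 = 1 gram in aggregate, but
+ // 4/16 + 12/16 = 0 grams when each transaction is weighed on its own.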
+ // In general, where s_n is the metadata size of the nth output and p is per_gram: + // (∑ s_i) / p != (s_1 / p) + (s_2 / p) + ... + (s_n / p) + // We round up each output to the nearest multiple of p here to account for this. + + body.outputs() + .iter() + .map(|o| { + let actual_size = o.get_metadata_size(); + // round up each output to the nearest multiple of metadata_bytes_per_gram + self.round_up_metadata_size(actual_size) + }) + .sum() + } + + pub fn round_up_metadata_size(&self, metadata_size: usize) -> usize { + self.params() + .metadata_bytes_per_gram + .and_then(|per_gram| { + let per_gram = per_gram.get() as usize; + let rem = metadata_size % per_gram; + if rem == 0 { + Some(metadata_size) + } else { + metadata_size.checked_add(per_gram - rem) + } + }) + .unwrap_or(metadata_size) + } + pub fn params(&self) -> &WeightParams { &self.0 } @@ -113,3 +148,18 @@ impl From<WeightParams> for TransactionWeight { Self(params) } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn round_up_metadata_size() { + let weighting = TransactionWeight::latest(); + assert_eq!(weighting.round_up_metadata_size(0), 0); + assert_eq!(weighting.round_up_metadata_size(1), 16); + assert_eq!(weighting.round_up_metadata_size(16), 16); + assert_eq!(weighting.round_up_metadata_size(17), 32); + assert_eq!(weighting.round_up_metadata_size(usize::MAX), usize::MAX); + } +} diff --git a/base_layer/core/src/validation/block_validators/orphan.rs b/base_layer/core/src/validation/block_validators/orphan.rs index 39d9dced38..34621e3069 100644 --- a/base_layer/core/src/validation/block_validators/orphan.rs +++ b/base_layer/core/src/validation/block_validators/orphan.rs @@ -28,8 +28,14 @@ use crate::{ consensus::ConsensusManager, transactions::CryptoFactories, validation::{ - helpers, - helpers::{check_accounting_balance, check_block_weight, check_coinbase_output, check_sorting_and_duplicates}, + helpers::{ + check_accounting_balance, + check_block_weight, + check_coinbase_output, + check_kernel_lock_height, + check_maturity, + check_sorting_and_duplicates, + }, OrphanValidation, ValidationError, }, @@ -68,7 +74,12 @@ impl OrphanValidation for OrphanBlockValidator { } let block_id = if cfg!(debug_assertions) { - format!("block #{} ({})", height, block.hash().to_hex()) + format!( + "block #{} {} ({})", + height, + block.hash().to_hex(), + block.body.to_counts_string() + ) } else { format!("block #{}", height) }; @@ -82,7 +93,7 @@ impl OrphanValidation for OrphanBlockValidator { "Checking duplicate inputs and outputs on {}", block_id ); - check_sorting_and_duplicates(block)?; + check_sorting_and_duplicates(&block.body)?; trace!( target: LOG_TARGET, "SV - No duplicate inputs / outputs for {} ", @@ -90,8 +101,8 @@ impl OrphanValidation for OrphanBlockValidator { ); // Check that the inputs are allowed to be spent - helpers::check_maturity(height, block.body.inputs())?; - helpers::check_kernel_lock_height(height, block.body.kernels())?; + check_maturity(height, block.body.inputs())?; + check_kernel_lock_height(height, block.body.kernels())?; trace!(target: LOG_TARGET, "SV - Output constraints are ok for {} ", &block_id); check_coinbase_output(block, &self.rules, &self.factories)?; trace!(target: LOG_TARGET, "SV - Coinbase output is ok for {} ", &block_id); diff --git a/base_layer/core/src/validation/helpers.rs b/base_layer/core/src/validation/helpers.rs index eac3a45068..bed2a1b805 100644 --- a/base_layer/core/src/validation/helpers.rs +++ b/base_layer/core/src/validation/helpers.rs @@ -210,7 +210,8 @@ pub fn check_target_difficulty( pub fn check_block_weight(block:
&Block, consensus_constants: &ConsensusConstants) -> Result<(), ValidationError> { // The genesis block may have a larger weight than other blocks are allowed to, so we have to exclude it here let block_weight = block.body.calculate_weight(consensus_constants.transaction_weight()); - if block_weight <= consensus_constants.get_max_block_transaction_weight() || block.header.height == 0 { + let max_weight = consensus_constants.get_max_block_transaction_weight(); + if block_weight <= max_weight || block.header.height == 0 { trace!( target: LOG_TARGET, "SV - Block contents for block #{} : {}; weight {}.", @@ -221,7 +222,11 @@ pub fn check_block_weight(block: &Block, consensus_constants: &ConsensusConstant Ok(()) } else { - Err(BlockValidationError::BlockTooLarge.into()) + Err(BlockValidationError::BlockTooLarge { + actual_weight: block_weight, + max_weight, + } + .into()) } } @@ -293,8 +298,7 @@ pub fn is_all_unique_and_sorted<'a, I: IntoIterator<Item = &'a T>, T: PartialOrd } // This function checks for duplicate inputs and outputs. There should be no duplicate inputs or outputs in a block -pub fn check_sorting_and_duplicates(block: &Block) -> Result<(), ValidationError> { - let body = &block.body; +pub fn check_sorting_and_duplicates(body: &AggregateBody) -> Result<(), ValidationError> { if !is_all_unique_and_sorted(body.inputs()) { return Err(ValidationError::UnsortedOrDuplicateInput); } diff --git a/base_layer/core/tests/base_node_rpc.rs b/base_layer/core/tests/base_node_rpc.rs index f9e7d1df5d..a5862156c8 100644 --- a/base_layer/core/tests/base_node_rpc.rs +++ b/base_layer/core/tests/base_node_rpc.rs @@ -28,7 +28,7 @@ use tari_common::configuration::Network; use tari_comms::protocol::rpc::mock::RpcRequestMock; use tari_core::{ base_node::{ - comms_interface::{Broadcast, LocalNodeCommsInterface}, + comms_interface::LocalNodeCommsInterface, proto::wallet_rpc::{ TxLocation, TxQueryBatchResponse, @@ -175,11 +175,7 @@ async fn test_base_node_wallet_rpc() { .prepare_new_block(chain_block(block0.block(), vec![tx1.clone()], &consensus_manager)) .unwrap(); - base_node - .local_nci - .submit_block(block1.clone(), Broadcast::from(true)) - .await - .unwrap(); + base_node.local_nci.submit_block(block1.clone()).await.unwrap(); // Check that submitting Tx2 will now be accepted let msg = TransactionProto::try_from(tx2).unwrap(); @@ -225,11 +221,7 @@ async fn test_base_node_wallet_rpc() { block2.header.output_mmr_size += 1; block2.header.kernel_mmr_size += 1; - base_node - .local_nci - .submit_block(block2, Broadcast::from(true)) - .await - .unwrap(); + base_node.local_nci.submit_block(block2).await.unwrap(); // Query Tx1 which should be in block 1 with 1 confirmation let msg = SignatureProto::from(tx1_sig.clone()); @@ -368,33 +360,21 @@ async fn test_sync_utxos_by_block() { .prepare_new_block(chain_block(block0.block(), vec![tx1.clone()], &consensus_manager)) .unwrap(); - base_node - .local_nci - .submit_block(block1.clone(), Broadcast::from(true)) - .await - .unwrap(); + base_node.local_nci.submit_block(block1.clone()).await.unwrap(); let block2 = base_node .blockchain_db .prepare_new_block(chain_block(&block1, vec![tx2], &consensus_manager)) .unwrap(); - base_node - .local_nci - .submit_block(block2.clone(), Broadcast::from(true)) - .await - .unwrap(); + base_node.local_nci.submit_block(block2.clone()).await.unwrap(); let block3 = base_node .blockchain_db .prepare_new_block(chain_block(&block2, vec![tx3], &consensus_manager)) .unwrap(); - base_node - .local_nci - .submit_block(block3.clone(), Broadcast::from(true)) - .await -
.unwrap(); + base_node.local_nci.submit_block(block3.clone()).await.unwrap(); // All blocks let msg = SyncUtxosByBlockRequest { diff --git a/base_layer/core/tests/helpers/nodes.rs b/base_layer/core/tests/helpers/nodes.rs index a0bdd07ab5..e8c089eac0 100644 --- a/base_layer/core/tests/helpers/nodes.rs +++ b/base_layer/core/tests/helpers/nodes.rs @@ -199,7 +199,7 @@ impl BaseNodeBuilder { let mempool = Mempool::new( self.mempool_config.unwrap_or_default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); let node_identity = self.node_identity.unwrap_or_else(|| random_node_identity()); let node_interfaces = setup_base_node_services( diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index 3230095068..9f3b2747b5 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -40,7 +40,6 @@ use tari_common_types::types::{Commitment, PrivateKey, PublicKey, Signature}; use tari_comms_dht::domain_message::OutboundDomainMessage; use tari_core::{ base_node::{ - comms_interface::Broadcast, service::BaseNodeServiceConfig, state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, }, @@ -68,16 +67,16 @@ use tempfile::tempdir; #[allow(dead_code)] mod helpers; -#[test] +#[tokio::test] #[allow(clippy::identity_op)] -fn test_insert_and_process_published_block() { +async fn test_insert_and_process_published_block() { let network = Network::LocalNet; let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); let mempool_validator = TxInputAndMaturityValidator::new(store.clone()); let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); // Create a block with 4 outputs let txs = vec![txn_schema!( @@ -112,99 +111,109 @@ fn test_insert_and_process_published_block() { let tx6 = txn_schema!(from: vec![outputs[1][3].clone()], to: vec![1 * T], fee: 25*uT, lock: 0, features: OutputFeatures::default()); let tx6 = spend_utxos(tx6).0; - mempool.insert(orphan.clone()).unwrap(); - mempool.insert(tx2.clone()).unwrap(); - mempool.insert(tx3.clone()).unwrap(); - mempool.insert(tx5.clone()).unwrap(); - mempool.process_published_block(blocks[1].to_arc_block()).unwrap(); + mempool.insert(orphan.clone()).await.unwrap(); + mempool.insert(tx2.clone()).await.unwrap(); + mempool.insert(tx3.clone()).await.unwrap(); + mempool.insert(tx5.clone()).await.unwrap(); + mempool.process_published_block(blocks[1].block()).await.unwrap(); assert_eq!( mempool - .has_tx_with_excess_sig(orphan.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&orphan.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::NotStored ); assert_eq!( mempool - .has_tx_with_excess_sig(tx2.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( mempool - .has_tx_with_excess_sig(tx3.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::NotStored ); assert_eq!( mempool - .has_tx_with_excess_sig(tx5.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::NotStored ); assert_eq!( mempool - .has_tx_with_excess_sig(tx6.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig) + .await .unwrap(), 
TxStorageResponse::NotStored ); - let snapshot_txs = mempool.snapshot().unwrap(); + let snapshot_txs = mempool.snapshot().await.unwrap(); assert_eq!(snapshot_txs.len(), 1); assert!(snapshot_txs.contains(&tx2)); - let stats = mempool.stats().unwrap(); + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.total_txs, 1); assert_eq!(stats.unconfirmed_txs, 1); assert_eq!(stats.reorg_txs, 0); - assert_eq!( - stats.total_weight, - consensus_manager - .consensus_constants(0) - .transaction_weight() - .calculate(1, 1, 2, 0) + let expected_weight = consensus_manager.consensus_constants(0).transaction_weight().calculate( + 1, + 1, + 2, + TestParams::new().get_size_for_default_metadata(2), ); + assert_eq!(stats.total_weight, expected_weight); // Spend tx2, so it goes in Reorg pool generate_block(&store, &mut blocks, vec![tx2.deref().clone()], &consensus_manager).unwrap(); - mempool.process_published_block(blocks[2].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[2].block()).await.unwrap(); assert_eq!( mempool - .has_tx_with_excess_sig(orphan.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&orphan.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::NotStored ); assert_eq!( mempool - .has_tx_with_excess_sig(tx2.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::ReorgPool ); assert_eq!( mempool - .has_tx_with_excess_sig(tx3.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::NotStored ); assert_eq!( mempool - .has_tx_with_excess_sig(tx5.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::NotStored ); assert_eq!( mempool - .has_tx_with_excess_sig(tx6.body.kernels()[0].excess_sig.clone()) + .has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig) + .await .unwrap(), TxStorageResponse::NotStored ); - let snapshot_txs = mempool.snapshot().unwrap(); + let snapshot_txs = mempool.snapshot().await.unwrap(); assert_eq!(snapshot_txs.len(), 0); - let stats = mempool.stats().unwrap(); + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.total_txs, 0); assert_eq!(stats.unconfirmed_txs, 0); assert_eq!(stats.reorg_txs, 1); @@ -220,7 +229,7 @@ async fn test_time_locked() { let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); // Create a block with 4 outputs let txs = vec![txn_schema!( @@ -228,7 +237,7 @@ async fn test_time_locked() { to: vec![2 * T, 2 * T, 2 * T, 2 * T], fee: 5*uT, lock: 0, features: OutputFeatures::default() )]; generate_new_block(&mut store, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[1].block()).await.unwrap(); // Block height should be 1 let mut tx2 = txn_schema!(from: vec![outputs[1][0].clone()], to: vec![1*T], fee: 20*uT, lock: 0, features: OutputFeatures::default()); tx2.lock_height = 3; @@ -246,17 +255,20 @@ async fn test_time_locked() { // Tx2 should not go in, but Tx3 should assert_eq!( - mempool.insert(tx2.clone()).unwrap(), + mempool.insert(tx2.clone()).await.unwrap(), TxStorageResponse::NotStoredTimeLocked ); - assert_eq!(mempool.insert(tx3.clone()).unwrap(), TxStorageResponse::UnconfirmedPool); + assert_eq!( + 
mempool.insert(tx3.clone()).await.unwrap(), + TxStorageResponse::UnconfirmedPool + ); // Spend tx3, so that the height of the chain will increase generate_block(&store, &mut blocks, vec![tx3.deref().clone()], &consensus_manager).unwrap(); - mempool.process_published_block(blocks[2].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[2].block()).await.unwrap(); // Block height increased, so tx2 should now go in. - assert_eq!(mempool.insert(tx2).unwrap(), TxStorageResponse::UnconfirmedPool); + assert_eq!(mempool.insert(tx2).await.unwrap(), TxStorageResponse::UnconfirmedPool); } #[tokio::test] @@ -268,7 +280,7 @@ async fn test_retrieve() { let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); let txs = vec![txn_schema!( from: vec![outputs[0][0].clone()], @@ -276,7 +288,7 @@ async fn test_retrieve() { )]; // "Mine" Block 1 generate_new_block(&mut store, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[1].block()).await.unwrap(); // 1-Block, 8 UTXOs, empty mempool let txs = vec![ txn_schema!(from: vec![outputs[1][0].clone()], to: vec![], fee: 30*uT, lock: 0, features: OutputFeatures::default()), @@ -293,19 +305,19 @@ async fn test_retrieve() { features: OutputFeatures::with_maturity(3)), ]; let (tx, utxos) = schema_to_transaction(&txs); - tx.iter().for_each(|t| { - mempool.insert(t.clone()).unwrap(); - }); + for t in &tx { + mempool.insert(t.clone()).await.unwrap(); + } // 1-block, 8 UTXOs, 8 txs in mempool let weighting = consensus_manager.consensus_constants(0).transaction_weight(); let weight = tx[6].calculate_weight(weighting) + tx[2].calculate_weight(weighting) + tx[3].calculate_weight(weighting); - let retrieved_txs = mempool.retrieve(weight).unwrap(); + let retrieved_txs = mempool.retrieve(weight).await.unwrap(); assert_eq!(retrieved_txs.len(), 3); assert!(retrieved_txs.contains(&tx[6])); assert!(retrieved_txs.contains(&tx[2])); assert!(retrieved_txs.contains(&tx[3])); - let stats = mempool.stats().unwrap(); + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 7); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 0); @@ -320,9 +332,9 @@ async fn test_retrieve() { // "Mine" block 2 generate_block(&store, &mut blocks, block2_txns, &consensus_manager).unwrap(); outputs.push(utxos); - mempool.process_published_block(blocks[2].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[2].block()).await.unwrap(); // 2-blocks, 2 unconfirmed txs in mempool - let stats = mempool.stats().unwrap(); + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 2); // assert_eq!(stats.timelocked_txs, 0); assert_eq!(stats.reorg_txs, 5); @@ -333,15 +345,15 @@ async fn test_retrieve() { txn_schema!(from: vec![outputs[2][8].clone()], to: vec![], fee: 40*uT, lock: 0, features: OutputFeatures::default()), ]; let (tx2, _) = schema_to_transaction(&txs); - tx2.iter().for_each(|t| { - mempool.insert(t.clone()).unwrap(); - }); + for t in &tx2 { + mempool.insert(t.clone()).await.unwrap(); + } // 2 blocks, 3 unconfirmed txs in mempool, 2 time locked // Top 2 txs are tx[3] (fee/g = 50) and tx2[1] (fee/g = 40). tx2[0] (fee/g = 80) is still not matured.
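As an aside on the selection asserted here, a sketch of the assumed priority metric (the helper is hypothetical, not the mempool's actual API; maturity, not priority, is what excludes tx2[0] despite its higher rate):

fn fee_per_gram(total_fee_ut: u64, weight_grams: u64) -> u64 {
    // higher fee per gram is retrieved first; guard against a zero weight
    total_fee_ut / weight_grams.max(1)
}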
let weight = tx[3].calculate_weight(weighting) + tx2[1].calculate_weight(weighting); - let retrieved_txs = mempool.retrieve(weight).unwrap(); - let stats = mempool.stats().unwrap(); + let retrieved_txs = mempool.retrieve(weight).await.unwrap(); + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 3); // assert_eq!(stats.timelocked_txs, 1); @@ -360,7 +372,7 @@ async fn test_zero_conf() { let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); let txs = vec![txn_schema!( from: vec![outputs[0][0].clone()], @@ -368,7 +380,7 @@ async fn test_zero_conf() { )]; // "Mine" Block 1 generate_new_block(&mut store, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[1].block()).await.unwrap(); // This transaction graph will be created, containing 3 levels of zero-conf transactions, inheriting dependent // outputs from multiple parents @@ -417,15 +429,15 @@ async fn test_zero_conf() { features: OutputFeatures::default() )); assert_eq!( - mempool.insert(Arc::new(tx01.clone())).unwrap(), + mempool.insert(Arc::new(tx01.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx03.clone())).unwrap(), + mempool.insert(Arc::new(tx03.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx04.clone())).unwrap(), + mempool.insert(Arc::new(tx04.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); @@ -457,19 +469,19 @@ async fn test_zero_conf() { features: OutputFeatures::default() )); assert_eq!( - mempool.insert(Arc::new(tx11.clone())).unwrap(), + mempool.insert(Arc::new(tx11.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx12.clone())).unwrap(), + mempool.insert(Arc::new(tx12.clone())).await.unwrap(), TxStorageResponse::NotStoredOrphan ); assert_eq!( - mempool.insert(Arc::new(tx13.clone())).unwrap(), + mempool.insert(Arc::new(tx13.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx14.clone())).unwrap(), + mempool.insert(Arc::new(tx14.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); @@ -502,19 +514,19 @@ async fn test_zero_conf() { features: OutputFeatures::default() )); assert_eq!( - mempool.insert(Arc::new(tx21.clone())).unwrap(), + mempool.insert(Arc::new(tx21.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx22.clone())).unwrap(), + mempool.insert(Arc::new(tx22.clone())).await.unwrap(), TxStorageResponse::NotStoredOrphan ); assert_eq!( - mempool.insert(Arc::new(tx23.clone())).unwrap(), + mempool.insert(Arc::new(tx23.clone())).await.unwrap(), TxStorageResponse::NotStoredOrphan ); assert_eq!( - mempool.insert(Arc::new(tx24.clone())).unwrap(), + mempool.insert(Arc::new(tx24.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); @@ -548,24 +560,27 @@ async fn test_zero_conf() { features: OutputFeatures::default() )); assert_eq!( - mempool.insert(Arc::new(tx31.clone())).unwrap(), + mempool.insert(Arc::new(tx31.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx32.clone())).unwrap(), + mempool.insert(Arc::new(tx32.clone())).await.unwrap(), TxStorageResponse::NotStoredOrphan ); assert_eq!( - mempool.insert(Arc::new(tx33.clone())).unwrap(), + 
mempool.insert(Arc::new(tx33.clone())).await.unwrap(), TxStorageResponse::NotStoredOrphan ); assert_eq!( - mempool.insert(Arc::new(tx34.clone())).unwrap(), + mempool.insert(Arc::new(tx34.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); // Try to retrieve all transactions in the mempool (a couple of our transactions should be missing from retrieved) - let retrieved_txs = mempool.retrieve(mempool.stats().unwrap().total_weight).unwrap(); + let retrieved_txs = mempool + .retrieve(mempool.stats().await.unwrap().total_weight) + .await + .unwrap(); assert_eq!(retrieved_txs.len(), 10); assert!(retrieved_txs.contains(&Arc::new(tx01.clone()))); assert!(!retrieved_txs.contains(&Arc::new(tx02.clone()))); // Missing @@ -586,35 +601,38 @@ async fn test_zero_conf() { // Submit the missing original transactions assert_eq!( - mempool.insert(Arc::new(tx02.clone())).unwrap(), + mempool.insert(Arc::new(tx02.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); // Re-submit failed zero-conf level 1 transactions assert_eq!( - mempool.insert(Arc::new(tx12.clone())).unwrap(), + mempool.insert(Arc::new(tx12.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); // Re-submit failed zero-conf level 2 transactions assert_eq!( - mempool.insert(Arc::new(tx22.clone())).unwrap(), + mempool.insert(Arc::new(tx22.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx23.clone())).unwrap(), + mempool.insert(Arc::new(tx23.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); // Re-submit failed zero-conf level 3 transactions assert_eq!( - mempool.insert(Arc::new(tx32.clone())).unwrap(), + mempool.insert(Arc::new(tx32.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool.insert(Arc::new(tx33.clone())).unwrap(), + mempool.insert(Arc::new(tx33.clone())).await.unwrap(), TxStorageResponse::UnconfirmedPool ); // Try to retrieve all transactions in the mempool (all transactions should be retrieved) - let retrieved_txs = mempool.retrieve(mempool.stats().unwrap().total_weight).unwrap(); + let retrieved_txs = mempool + .retrieve(mempool.stats().await.unwrap().total_weight) + .await + .unwrap(); assert_eq!(retrieved_txs.len(), 16); assert!(retrieved_txs.contains(&Arc::new(tx01.clone()))); assert!(retrieved_txs.contains(&Arc::new(tx02.clone()))); @@ -635,7 +653,8 @@ async fn test_zero_conf() { // Verify that a higher priority transaction is not retrieved due to its zero-conf dependency instead of the lowest // priority transaction - let retrieved_txs = mempool.retrieve(mempool.stats().unwrap().total_weight - 1).unwrap(); + let weight = mempool.stats().await.unwrap().total_weight - 1; + let retrieved_txs = mempool.retrieve(weight).await.unwrap(); assert_eq!(retrieved_txs.len(), 15); assert!(retrieved_txs.contains(&Arc::new(tx01))); assert!(retrieved_txs.contains(&Arc::new(tx02))); @@ -664,7 +683,7 @@ async fn test_reorg() { let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); // "Mine" Block 1 @@ -672,7 +691,7 @@ async fn test_reorg() { txn_schema!(from: vec![outputs[0][0].clone()], to: vec![1 * T, 1 * T], fee: 25*uT, lock: 0, features: OutputFeatures::default()), ]; generate_new_block(&mut db, &mut blocks, &mut outputs, txs, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[1].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[1].block()).await.unwrap(); // "Mine" block 2 let schemas = 
vec![ @@ -682,14 +701,14 @@ async fn test_reorg() { ]; let (txns2, utxos) = schema_to_transaction(&schemas); outputs.push(utxos); - txns2.iter().for_each(|tx| { - mempool.insert(tx.clone()).unwrap(); - }); - let stats = mempool.stats().unwrap(); + for tx in &txns2 { + mempool.insert(tx.clone()).await.unwrap(); + } + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 3); let txns2 = txns2.iter().map(|t| t.deref().clone()).collect(); generate_block(&db, &mut blocks, txns2, &consensus_manager).unwrap(); - mempool.process_published_block(blocks[2].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[2].block()).await.unwrap(); // "Mine" block 3 let schemas = vec![ @@ -699,9 +718,9 @@ async fn test_reorg() { ]; let (txns3, utxos) = schema_to_transaction(&schemas); outputs.push(utxos); - txns3.iter().for_each(|tx| { - mempool.insert(tx.clone()).unwrap(); - }); + for tx in &txns3 { + mempool.insert(tx.clone()).await.unwrap(); + } let txns3: Vec<Transaction> = txns3.iter().map(|t| t.deref().clone()).collect(); generate_block( @@ -711,9 +730,9 @@ async fn test_reorg() { &consensus_manager, ) .unwrap(); - mempool.process_published_block(blocks[3].to_arc_block()).unwrap(); + mempool.process_published_block(blocks[3].block()).await.unwrap(); - let stats = mempool.stats().unwrap(); + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 0); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 5); @@ -725,8 +744,9 @@ async fn test_reorg() { mempool .process_reorg(vec![blocks[3].to_arc_block()], vec![reorg_block3.into()]) + .await .unwrap(); - let stats = mempool.stats().unwrap(); + let stats = mempool.stats().await.unwrap(); assert_eq!(stats.unconfirmed_txs, 2); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 3); @@ -737,7 +757,7 @@ async fn test_reorg() { // test that process_reorg can handle the case when removed_blocks is empty // see https://github.com/tari-project/tari/issues/2101#issuecomment-680726940 - mempool.process_reorg(vec![], vec![reorg_block4.into()]).unwrap(); + mempool.process_reorg(vec![], vec![reorg_block4.into()]).await.unwrap(); } // TODO: This test returns 0 in the unconfirmed pool, so might not catch errors. It should be updated to return better @@ -774,13 +794,13 @@ async fn request_response_get_stats() { let (orphan2, _, _) = tx!(2*T, fee: 200*uT); let orphan2 = Arc::new(orphan2); - bob.mempool.insert(tx1).unwrap(); - bob.mempool.insert(orphan1).unwrap(); - bob.mempool.insert(orphan2).unwrap(); + bob.mempool.insert(tx1).await.unwrap(); + bob.mempool.insert(orphan1).await.unwrap(); + bob.mempool.insert(orphan2).await.unwrap(); // The coinbase tx cannot be spent until maturity, so txn1 will be in the timelocked pool. The other 2 txns are // orphans.
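A sketch of the maturity rule behind that expectation (the helper is hypothetical; the real check is part of transaction validation):

fn coinbase_spendable(coinbase_maturity: u64, tip_height: u64) -> bool {
    // a coinbase UTXO may only be spent once the tip reaches its maturity height
    tip_height >= coinbase_maturity
}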
- let stats = bob.mempool.stats().unwrap(); + let stats = bob.mempool.stats().await.unwrap(); assert_eq!(stats.total_txs, 0); assert_eq!(stats.unconfirmed_txs, 0); assert_eq!(stats.reorg_txs, 0); @@ -823,10 +843,10 @@ async fn request_response_get_tx_state_by_excess_sig() { let (orphan_tx, _, _) = tx!(1*T, fee: 100*uT); let tx = Arc::new(tx); let orphan_tx = Arc::new(orphan_tx); - bob_node.mempool.insert(tx.clone()).unwrap(); - carol_node.mempool.insert(tx.clone()).unwrap(); - bob_node.mempool.insert(orphan_tx.clone()).unwrap(); - carol_node.mempool.insert(orphan_tx.clone()).unwrap(); + bob_node.mempool.insert(tx.clone()).await.unwrap(); + carol_node.mempool.insert(tx.clone()).await.unwrap(); + bob_node.mempool.insert(orphan_tx.clone()).await.unwrap(); + carol_node.mempool.insert(orphan_tx.clone()).await.unwrap(); // Check that the transactions are in the expected pools. // Spending the coinbase utxo will be in the pending pool, because cb utxos have a maturity. @@ -908,8 +928,8 @@ async fn receive_and_propagate_transaction() { let (orphan, _, _) = tx!(1*T, fee: 100*uT); let tx_excess_sig = tx.body.kernels()[0].excess_sig.clone(); let orphan_excess_sig = orphan.body.kernels()[0].excess_sig.clone(); - assert!(alice_node.mempool.insert(Arc::new(tx.clone())).is_ok()); - assert!(alice_node.mempool.insert(Arc::new(orphan.clone())).is_ok()); + assert!(alice_node.mempool.insert(Arc::new(tx.clone())).await.is_ok()); + assert!(alice_node.mempool.insert(Arc::new(orphan.clone())).await.is_ok()); alice_node .outbound_message_service @@ -935,16 +955,13 @@ async fn receive_and_propagate_transaction() { .unwrap(); async_assert_eventually!( - bob_node.mempool.has_tx_with_excess_sig(tx_excess_sig.clone()).unwrap(), + bob_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await.unwrap(), expect = TxStorageResponse::NotStored, max_attempts = 20, interval = Duration::from_millis(1000) ); async_assert_eventually!( - carol_node - .mempool - .has_tx_with_excess_sig(tx_excess_sig.clone()) - .unwrap(), + carol_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await.unwrap(), expect = TxStorageResponse::NotStored, max_attempts = 10, interval = Duration::from_millis(1000) @@ -953,7 +970,8 @@ async fn receive_and_propagate_transaction() { async_assert_eventually!( carol_node .mempool - .has_tx_with_excess_sig(orphan_excess_sig.clone()) + .has_tx_with_excess_sig(&orphan_excess_sig) + .await .unwrap(), expect = TxStorageResponse::NotStored, max_attempts = 10, @@ -964,7 +982,8 @@ async fn receive_and_propagate_transaction() { async_assert_eventually!( bob_node .mempool - .has_tx_with_excess_sig(orphan_excess_sig.clone()) + .has_tx_with_excess_sig(&orphan_excess_sig) + .await .unwrap(), expect = TxStorageResponse::NotStored, ); @@ -985,7 +1004,7 @@ async fn consensus_validation_large_tx() { let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); // Create a block with 1 output let txs = vec![txn_schema!(from: vec![outputs[0][0].clone()], to: vec![5 * T])]; @@ -1081,7 +1100,7 @@ async fn consensus_validation_large_tx() { // check the tx weight is more than the max for 1 block assert!(weight > constants.get_max_block_transaction_weight()); - let response = mempool.insert(Arc::new(tx)).unwrap(); + let response = mempool.insert(Arc::new(tx)).await.unwrap(); // make sure the tx was not accepted into the mempool assert!(matches!(response, TxStorageResponse::NotStored)); } @@ -1099,7 +1118,7 @@ async fn consensus_validation_unique_id() 
{ let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); // Create a block with 5 outputs @@ -1130,7 +1149,7 @@ async fn consensus_validation_unique_id() { ); let (tx, _, _) = spend_utxos(tx); let tx = Arc::new(tx); - let response = mempool.insert(tx).unwrap(); + let response = mempool.insert(tx).await.unwrap(); assert!(matches!(response, TxStorageResponse::NotStoredConsensus)); // a different unique_id should be fine @@ -1146,7 +1165,7 @@ async fn consensus_validation_unique_id() { ); let (tx, _, _) = spend_utxos(tx); let tx = Arc::new(tx); - let response = mempool.insert(tx).unwrap(); + let response = mempool.insert(tx).await.unwrap(); assert!(matches!(response, TxStorageResponse::UnconfirmedPool)); // a different asset should also be fine @@ -1163,7 +1182,7 @@ async fn consensus_validation_unique_id() { ); let (tx, _, _) = spend_utxos(tx); let tx = Arc::new(tx); - let response = mempool.insert(tx).unwrap(); + let response = mempool.insert(tx).await.unwrap(); assert!(matches!(response, TxStorageResponse::UnconfirmedPool)); // a transaction containing duplicates should be rejected @@ -1180,7 +1199,7 @@ async fn consensus_validation_unique_id() { ); let (tx, _, _) = spend_utxos(tx); let tx = Arc::new(tx); - let response = mempool.insert(tx).unwrap(); + let response = mempool.insert(tx).await.unwrap(); dbg!(&response); assert!(matches!(response, TxStorageResponse::NotStoredConsensus)); } @@ -1277,43 +1296,31 @@ async fn block_event_and_reorg_event_handling() { .unwrap(); // Add one empty block, so the coinbase UTXO is no longer time-locked. - assert!(bob - .local_nci - .submit_block(empty_block.clone(), Broadcast::from(true)) - .await - .is_ok()); - assert!(alice - .local_nci - .submit_block(empty_block.clone(), Broadcast::from(true)) - .await - .is_ok()); - alice.mempool.insert(Arc::new(tx1.clone())).unwrap(); - bob.mempool.insert(Arc::new(tx1.clone())).unwrap(); + assert!(bob.local_nci.submit_block(empty_block.clone(),).await.is_ok()); + assert!(alice.local_nci.submit_block(empty_block.clone(),).await.is_ok()); + alice.mempool.insert(Arc::new(tx1.clone())).await.unwrap(); + bob.mempool.insert(Arc::new(tx1.clone())).await.unwrap(); let mut block1 = bob .blockchain_db .prepare_new_block(chain_block(&empty_block, vec![tx1], &consensus_manager)) .unwrap(); find_header_with_achieved_difficulty(&mut block1.header, Difficulty::from(1)); // Add Block1 - tx1 will be moved to the ReorgPool. 
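For context, the transition these assertions trace, as a simplified mirror of the storage responses used throughout this file (illustrative only; the real TxStorageResponse lives in the mempool crate and carries more variants):

#[derive(Debug, PartialEq)]
enum PoolLocation {
    UnconfirmedPool, // accepted and eligible for mining
    ReorgPool,       // seen in a published block; kept only to survive a reorg
}

fn on_block_published(loc: PoolLocation) -> PoolLocation {
    // a tx included in the new block moves from the unconfirmed to the reorg pool
    match loc {
        PoolLocation::UnconfirmedPool => PoolLocation::ReorgPool,
        other => other,
    }
}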
- assert!(bob - .local_nci - .submit_block(block1.clone(), Broadcast::from(true)) - .await - .is_ok()); + assert!(bob.local_nci.submit_block(block1.clone(),).await.is_ok()); async_assert_eventually!( - alice.mempool.has_tx_with_excess_sig(tx1_excess_sig.clone()).unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx1_excess_sig).await.unwrap(), expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) ); - alice.mempool.insert(Arc::new(tx2a.clone())).unwrap(); - alice.mempool.insert(Arc::new(tx3a.clone())).unwrap(); - alice.mempool.insert(Arc::new(tx2b.clone())).unwrap(); - alice.mempool.insert(Arc::new(tx3b.clone())).unwrap(); - bob.mempool.insert(Arc::new(tx2a.clone())).unwrap(); - bob.mempool.insert(Arc::new(tx3a.clone())).unwrap(); - bob.mempool.insert(Arc::new(tx2b.clone())).unwrap(); - bob.mempool.insert(Arc::new(tx3b.clone())).unwrap(); + alice.mempool.insert(Arc::new(tx2a.clone())).await.unwrap(); + alice.mempool.insert(Arc::new(tx3a.clone())).await.unwrap(); + alice.mempool.insert(Arc::new(tx2b.clone())).await.unwrap(); + alice.mempool.insert(Arc::new(tx3b.clone())).await.unwrap(); + bob.mempool.insert(Arc::new(tx2a.clone())).await.unwrap(); + bob.mempool.insert(Arc::new(tx3a.clone())).await.unwrap(); + bob.mempool.insert(Arc::new(tx2b.clone())).await.unwrap(); + bob.mempool.insert(Arc::new(tx3b.clone())).await.unwrap(); let mut block2a = bob .blockchain_db @@ -1328,34 +1335,30 @@ async fn block_event_and_reorg_event_handling() { find_header_with_achieved_difficulty(&mut block2b.header, Difficulty::from(10)); // Add Block2a - tx2b and tx3b will be discarded as double spends. - assert!(bob - .local_nci - .submit_block(block2a.clone(), Broadcast::from(true)) - .await - .is_ok()); + assert!(bob.local_nci.submit_block(block2a.clone(),).await.is_ok()); async_assert_eventually!( - bob.mempool.has_tx_with_excess_sig(tx2a_excess_sig.clone()).unwrap(), + bob.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await.unwrap(), expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) ); async_assert_eventually!( - alice.mempool.has_tx_with_excess_sig(tx2a_excess_sig.clone()).unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await.unwrap(), expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(tx3a_excess_sig.clone()).unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx3a_excess_sig).await.unwrap(), TxStorageResponse::ReorgPool ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(tx2b_excess_sig.clone()).unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx2b_excess_sig).await.unwrap(), TxStorageResponse::ReorgPool ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(tx3b_excess_sig.clone()).unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx3b_excess_sig).await.unwrap(), TxStorageResponse::ReorgPool ); } diff --git a/base_layer/core/tests/node_comms_interface.rs b/base_layer/core/tests/node_comms_interface.rs index 923dea8f4b..e76448dc73 100644 --- a/base_layer/core/tests/node_comms_interface.rs +++ b/base_layer/core/tests/node_comms_interface.rs @@ -20,8 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::sync::Arc; - use helpers::block_builders::append_block; use tari_common::configuration::Network; use tari_common_types::types::PublicKey; @@ -64,7 +62,7 @@ mod helpers; fn new_mempool() -> Mempool { let rules = create_consensus_rules(); let mempool_validator = MockValidator::new(true); - Mempool::new(MempoolConfig::default(), rules, Arc::new(mempool_validator)) + Mempool::new(MempoolConfig::default(), rules, Box::new(mempool_validator)) } #[tokio::test] @@ -324,7 +322,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { let mempool = Mempool::new( MempoolConfig::default(), consensus_manager.clone(), - Arc::new(mempool_validator), + Box::new(mempool_validator), ); let (block_event_sender, _) = broadcast::channel(50); let (request_sender, _) = reply_channel::unbounded(); diff --git a/base_layer/core/tests/node_service.rs b/base_layer/core/tests/node_service.rs index b13863753f..bed9fbdb07 100644 --- a/base_layer/core/tests/node_service.rs +++ b/base_layer/core/tests/node_service.rs @@ -32,7 +32,7 @@ use tari_common::configuration::Network; use tari_comms::protocol::messaging::MessagingEvent; use tari_core::{ base_node::{ - comms_interface::{BlockEvent, Broadcast, CommsInterfaceError}, + comms_interface::{BlockEvent, CommsInterfaceError}, service::BaseNodeServiceConfig, state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, }, @@ -159,17 +159,17 @@ async fn propagate_and_forward_many_valid_blocks() { tokio::join!(bob_block_event_fut, carol_block_event_fut, dan_block_event_fut); let block_hash = block.hash(); - if let BlockEvent::ValidBlockAdded(received_block, _, _) = &*bob_block_event.unwrap() { + if let BlockEvent::ValidBlockAdded(received_block, _) = &*bob_block_event.unwrap() { assert_eq!(&received_block.hash(), block_hash); } else { panic!("Bob's node did not receive and validate the expected block"); } - if let BlockEvent::ValidBlockAdded(received_block, _block_add_result, _) = &*carol_block_event.unwrap() { + if let BlockEvent::ValidBlockAdded(received_block, _block_add_result) = &*carol_block_event.unwrap() { assert_eq!(&received_block.hash(), block_hash); } else { panic!("Carol's node did not receive and validate the expected block"); } - if let BlockEvent::ValidBlockAdded(received_block, _block_add_result, _) = &*dan_block_event.unwrap() { + if let BlockEvent::ValidBlockAdded(received_block, _block_add_result) = &*dan_block_event.unwrap() { assert_eq!(&received_block.hash(), block_hash); } else { panic!("Dan's node did not receive and validate the expected block"); @@ -388,12 +388,12 @@ async fn propagate_and_forward_invalid_block() { let (bob_block_event, carol_block_event, dan_block_event) = tokio::join!(bob_block_event_fut, carol_block_event_fut, dan_block_event_fut); - if let BlockEvent::AddBlockFailed(received_block, _) = &*bob_block_event.unwrap() { + if let BlockEvent::AddBlockFailed(received_block) = &*bob_block_event.unwrap() { assert_eq!(&received_block.hash(), block1_hash); } else { panic!("Bob's node should have detected an invalid block"); } - if let BlockEvent::AddBlockFailed(received_block, _) = &*carol_block_event.unwrap() { + if let BlockEvent::AddBlockFailed(received_block) = &*carol_block_event.unwrap() { assert_eq!(&received_block.hash(), block1_hash); } else { panic!("Carol's node should have detected an invalid block"); @@ -479,8 +479,8 @@ async fn local_get_new_block_template_and_get_new_block() { txn_schema!(from: vec![outputs[2].clone()], to: vec![30_000 * uT, 40_000 * uT]), ]; let (txs, _) = schema_to_transaction(&schema); 
- assert!(node.mempool.insert(txs[0].clone()).is_ok()); - assert!(node.mempool.insert(txs[1].clone()).is_ok()); + node.mempool.insert(txs[0].clone()).await.unwrap(); + node.mempool.insert(txs[1].clone()).await.unwrap(); let block_template = node .local_nci @@ -528,11 +528,11 @@ async fn local_get_new_block_with_zero_conf() { txn_schema!(from: vec![outputs[2].clone()], to: vec![40_000 * uT], fee: 20*uT, lock: 0, features: OutputFeatures::default()), ); assert_eq!( - node.mempool.insert(Arc::new(tx01)).unwrap(), + node.mempool.insert(Arc::new(tx01)).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - node.mempool.insert(Arc::new(tx02)).unwrap(), + node.mempool.insert(Arc::new(tx02)).await.unwrap(), TxStorageResponse::UnconfirmedPool ); @@ -543,11 +543,11 @@ async fn local_get_new_block_with_zero_conf() { txn_schema!(from: tx02_out, to: vec![20_000 * uT], fee: 60*uT, lock: 0, features: OutputFeatures::default()), ); assert_eq!( - node.mempool.insert(Arc::new(tx11)).unwrap(), + node.mempool.insert(Arc::new(tx11)).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - node.mempool.insert(Arc::new(tx12)).unwrap(), + node.mempool.insert(Arc::new(tx12)).await.unwrap(), TxStorageResponse::UnconfirmedPool ); @@ -616,11 +616,11 @@ async fn local_get_new_block_with_combined_transaction() { let tx1 = tx01 + tx11; let tx2 = tx02 + tx12; assert_eq!( - node.mempool.insert(Arc::new(tx1)).unwrap(), + node.mempool.insert(Arc::new(tx1)).await.unwrap(), TxStorageResponse::UnconfirmedPool ); assert_eq!( - node.mempool.insert(Arc::new(tx2)).unwrap(), + node.mempool.insert(Arc::new(tx2)).await.unwrap(), TxStorageResponse::UnconfirmedPool ); @@ -666,13 +666,10 @@ async fn local_submit_block() { .unwrap(); block1.header.kernel_mmr_size += 1; block1.header.output_mmr_size += 1; - node.local_nci - .submit_block(block1.clone(), Broadcast::from(true)) - .await - .unwrap(); + node.local_nci.submit_block(block1.clone()).await.unwrap(); let event = event_stream_next(&mut event_stream, Duration::from_millis(20000)).await; - if let BlockEvent::ValidBlockAdded(received_block, result, _) = &*event.unwrap() { + if let BlockEvent::ValidBlockAdded(received_block, result) = &*event.unwrap() { assert_eq!(received_block.hash(), block1.hash()); result.assert_added(); } else { diff --git a/base_layer/core/tests/node_state_machine.rs b/base_layer/core/tests/node_state_machine.rs index 3335000e18..aeb7e42406 100644 --- a/base_layer/core/tests/node_state_machine.rs +++ b/base_layer/core/tests/node_state_machine.rs @@ -30,7 +30,6 @@ use helpers::{ use tari_common::configuration::Network; use tari_core::{ base_node::{ - comms_interface::Broadcast, service::BaseNodeServiceConfig, state_machine_service::{ states::{Listening, StateEvent, StatusInfo}, @@ -115,10 +114,7 @@ async fn test_listening_lagging() { .unwrap(); prev_block.header.output_mmr_size += 1; prev_block.header.kernel_mmr_size += 1; - bob_local_nci - .submit_block(prev_block, Broadcast::from(true)) - .await - .unwrap(); + bob_local_nci.submit_block(prev_block).await.unwrap(); assert_eq!(bob_db.get_height().unwrap(), 2); let next_event = time::timeout(Duration::from_secs(10), await_event_task) diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index d77e77ca01..1f539387a9 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -654,8 +654,13 @@ where ); // TODO: Include asset metadata here if 
diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs
index d77e77ca01..1f539387a9 100644
--- a/base_layer/wallet/src/output_manager_service/service.rs
+++ b/base_layer/wallet/src/output_manager_service/service.rs
@@ -654,8 +654,13 @@ where
     );
     // TODO: Include asset metadata here if required
     // We assume that default OutputFeatures and Nop TariScript is used
-    let metadata_byte_size =
-        OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size();
+    let metadata_byte_size = self
+        .resources
+        .consensus_constants
+        .transaction_weight()
+        .round_up_metadata_size(
+            OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size(),
+        );

     let utxo_selection = self
         .select_utxos(
@@ -698,8 +703,13 @@ where
         fee_per_gram,
     );
     let output_features = OutputFeatures::default();
-    let metadata_byte_size =
-        output_features.consensus_encode_exact_size() + recipient_script.consensus_encode_exact_size();
+    let metadata_byte_size = self
+        .resources
+        .consensus_constants
+        .transaction_weight()
+        .round_up_metadata_size(
+            output_features.consensus_encode_exact_size() + recipient_script.consensus_encode_exact_size(),
+        );

     let input_selection = self
         .select_utxos(
@@ -892,14 +902,17 @@ where
     ) -> Result<(TxId, Transaction), OutputManagerError> {
         let total_value = MicroTari(outputs.iter().fold(0u64, |running, out| running + out.value.as_u64()));
         let nop_script = script![Nop];
+        let weighting = self.resources.consensus_constants.transaction_weight();
         let metadata_byte_size = outputs.iter().fold(0usize, |total, output| {
             total +
-                output.features.consensus_encode_exact_size() +
-                output
-                    .script
-                    .as_ref()
-                    .unwrap_or(&nop_script)
-                    .consensus_encode_exact_size()
+                weighting.round_up_metadata_size(
+                    output.features.consensus_encode_exact_size() +
+                        output
+                            .script
+                            .as_ref()
+                            .unwrap_or(&nop_script)
+                            .consensus_encode_exact_size(),
+                )
         });
         let input_selection = self
             .select_utxos(
@@ -1053,7 +1066,13 @@ where
             unique_id: unique_id.clone(),
             ..Default::default()
         };
-        let metadata_byte_size = output_features.consensus_encode_exact_size() + script.consensus_encode_exact_size();
+        let metadata_byte_size = self
+            .resources
+            .consensus_constants
+            .transaction_weight()
+            .round_up_metadata_size(
+                output_features.consensus_encode_exact_size() + script.consensus_encode_exact_size(),
+            );

         let input_selection = self
             .select_utxos(
@@ -1290,8 +1309,9 @@ where
         trace!(target: LOG_TARGET, "We found {} UTXOs to select from", uo.len());

         // Assumes that default Outputfeatures are used for change utxo
-        let default_metadata_size =
-            OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size();
+        let default_metadata_size = fee_calc.weighting().round_up_metadata_size(
+            OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size(),
+        );
         let mut requires_change_output = false;
         for o in uo {
             utxos_total_value += o.unblinded_output.value;
@@ -1369,7 +1389,13 @@ where
         let output_count = split_count;
         let script = script!(Nop);
         let output_features = OutputFeatures::default();
-        let metadata_byte_size = output_features.consensus_encode_exact_size() + script.consensus_encode_exact_size();
+        let metadata_byte_size = self
+            .resources
+            .consensus_constants
+            .transaction_weight()
+            .round_up_metadata_size(
+                output_features.consensus_encode_exact_size() + script.consensus_encode_exact_size(),
+            );

         let total_split_amount = amount_per_split * split_count as u64;
         let input_selection = self
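Every metadata size estimate in the output manager is now passed through `round_up_metadata_size` on the consensus `TransactionWeight`, so the wallet's fee estimate cannot drift from the weight validators charge when script or feature encodings differ by a few bytes. A hypothetical sketch of such rounding; the real granularity lives in `TransactionWeight`, and the 16-byte figure here is purely an assumption for illustration:

```rust
// Hypothetical rounding helper; the actual granularity is defined by
// tari_core's TransactionWeight and is not necessarily 16 bytes.
const METADATA_GRANULARITY: usize = 16; // assumed for illustration

fn round_up_metadata_size(metadata_size: usize) -> usize {
    // Round up to the next multiple of the granularity so that two outputs
    // with slightly different script/feature sizes weigh the same.
    let rem = metadata_size % METADATA_GRANULARITY;
    if rem == 0 {
        metadata_size
    } else {
        metadata_size + (METADATA_GRANULARITY - rem)
    }
}

fn main() {
    // e.g. default features plus a Nop script encoding to 37 bytes would be
    // charged as 48 bytes of metadata weight under this assumed granularity.
    assert_eq!(round_up_metadata_size(37), 48);
    assert_eq!(round_up_metadata_size(48), 48);
}
```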
diff --git a/base_layer/wallet/tests/output_manager_service_tests/service.rs b/base_layer/wallet/tests/output_manager_service_tests/service.rs
index 5350139f15..eb8e939611 100644
--- a/base_layer/wallet/tests/output_manager_service_tests/service.rs
+++ b/base_layer/wallet/tests/output_manager_service_tests/service.rs
@@ -42,6 +42,7 @@ use tari_core::{
         test_helpers::{create_unblinded_output, TestParams as TestParamsHelpers},
         transaction::OutputFeatures,
         transaction_protocol::sender::TransactionSenderMessage,
+        weight::TransactionWeight,
         CryptoFactories,
         SenderTransactionProtocol,
     },
@@ -91,7 +92,9 @@ use crate::support::{
 };

 fn default_metadata_byte_size() -> usize {
-    OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size()
+    TransactionWeight::latest().round_up_metadata_size(
+        OutputFeatures::default().consensus_encode_exact_size() + script![Nop].consensus_encode_exact_size(),
+    )
 }

 #[allow(clippy::type_complexity)]
@@ -645,8 +648,9 @@ async fn send_no_change() {
     let fee_per_gram = MicroTari::from(4);
     let constants = create_consensus_constants(0);
-    let fee_without_change = Fee::new(*constants.transaction_weight()).calculate(fee_per_gram, 1, 2, 1, 0);
-    let value1 = 500;
+    let fee_without_change =
+        Fee::new(*constants.transaction_weight()).calculate(fee_per_gram, 1, 2, 1, default_metadata_byte_size());
+    let value1 = 5000;
     oms.add_output(
         create_unblinded_output(
             script!(Nop),
@@ -658,7 +662,7 @@
     )
     .await
     .unwrap();
-    let value2 = 800;
+    let value2 = 8000;
     oms.add_output(
         create_unblinded_output(
             script!(Nop),
@@ -1229,7 +1233,7 @@ async fn test_txo_validation() {
         balance.pending_incoming_balance,
         MicroTari::from(output1_value) - MicroTari::from(900_000) -
-            MicroTari::from(1240) + //Output4 = output 1 -900_000 and 1240 for fees
+            MicroTari::from(1260) + //Output4 = output 1 -900_000 and 1260 for fees
             MicroTari::from(8_000_000) +
             MicroTari::from(16_000_000)
     );
@@ -1367,7 +1371,7 @@
         balance.available_balance,
         MicroTari::from(output2_value) + MicroTari::from(output3_value) + MicroTari::from(output1_value) -
             MicroTari::from(900_000) -
-            MicroTari::from(1240) + //spent 900_000 and 1240 for fees
+            MicroTari::from(1260) + //spent 900_000 and 1260 for fees
             MicroTari::from(8_000_000) + //output 5
             MicroTari::from(16_000_000) // output 6
     );
@@ -1494,7 +1498,7 @@
     assert_eq!(balance.pending_outgoing_balance, MicroTari::from(output1_value));
     assert_eq!(
         balance.pending_incoming_balance,
-        MicroTari::from(output1_value) - MicroTari::from(901_240)
+        MicroTari::from(output1_value) - MicroTari::from(901_260)
     );
     assert_eq!(MicroTari::from(0), balance.time_locked_balance.unwrap());
@@ -1552,7 +1556,7 @@
     assert_eq!(
         balance.available_balance,
         MicroTari::from(output2_value) + MicroTari::from(output3_value) + MicroTari::from(output1_value) -
-            MicroTari::from(901_240)
+            MicroTari::from(901_260)
     );
     assert_eq!(balance.pending_outgoing_balance, MicroTari::from(0));
     assert_eq!(balance.pending_incoming_balance, MicroTari::from(0));
diff --git a/comms/src/lib.rs b/comms/src/lib.rs
index 463fc0aadd..682ec21014 100644
--- a/comms/src/lib.rs
+++ b/comms/src/lib.rs
@@ -62,7 +62,7 @@ pub mod utils;
 pub mod test_utils;

 //---------------------------------- Re-exports --------------------------------------------//
-// Rather than requiring dependant crates to import dependencies for use with `tari_comms` we re-export them here.
+// Rather than requiring dependent crates to import dependencies for use with `tari_comms` we re-export them here.
 pub mod multiaddr {
     // Re-export so that client code does not have to have multiaddr as a dependency
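The expected balances in `test_txo_validation` shift from 1240 to 1260 µT of fees because the rounded-up metadata adds weight to the transaction. A toy calculation of that effect; all constants below are made up for illustration, and the real formula lives in tari_core's `Fee`:

```rust
// Toy fee model: fee = fee_per_gram * total weight in grams. The grams and
// fee_per_gram values here are assumptions chosen only to reproduce the
// 1240 -> 1260 delta seen in the test, not the actual Tari constants.
fn fee(fee_per_gram: u64, weight_grams: u64) -> u64 {
    fee_per_gram * weight_grams
}

fn main() {
    let fee_per_gram = 20; // assumed
    // Before: the unrounded metadata contributed to a total weight of 62 grams.
    let before = fee(fee_per_gram, 62);
    // After: rounding the metadata size up adds one gram of weight, so the
    // fee rises by exactly one fee_per_gram unit.
    let after = fee(fee_per_gram, 63);
    assert_eq!(before, 1240);
    assert_eq!(after, 1260);
}
```

The same rounding explains the combined constants 901_240 becoming 901_260 (900_000 sent plus the fee), and `send_no_change` now passes `default_metadata_byte_size()` instead of 0 so its expected fee includes the rounded metadata as well, with the test values raised from 500/800 to 5000/8000 to cover it.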