Skip to content

Commit

Permalink
Fix issue where mempool is not cleared of invalid tx
Browse files Browse the repository at this point in the history
This commit fixes a logical error where a mempool reorg is not
being processed when more than one block is added and _no_ blocks are
removed. Previously when no blocks were removed, `handle_reorg` would
return `BlockAddResult::Ok` - even if multiple blocks were added. This
introduced a bug where transactions from the added blocks could be left
in the mempool. A test was added to `chain_storage` to ensure a reorg occurs in
this case.

As a consequence of now forcing a reorg even if no blocks are removed,
an additional issue was uncovered with `Mempool::process_reorg` causing
a panic when `removed_blocks` was empty. A test has been added to catch
this case, and the code refactored to remove calls to `expect` in favor
of explicitly handling the `Option`s.
  • Loading branch information
Byron Hambly committed Aug 27, 2020
1 parent 587e177 commit 3524fc3
Show file tree
Hide file tree
Showing 4 changed files with 130 additions and 30 deletions.
33 changes: 23 additions & 10 deletions base_layer/core/src/chain_storage/blockchain_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1582,25 +1582,38 @@ fn handle_reorg<T: BlockchainBackend>(
.height -
1;
let removed_blocks = reorganize_chain(db, block_validator, fork_height, reorg_chain)?;
if removed_blocks.is_empty() {
Ok(BlockAddResult::Ok)
} else {
debug!(
let num_removed_blocks = removed_blocks.len();
let num_added_blocks = added_blocks.len();

// reorg is required when any blocks are removed or more than one are added
// see https://github.com/tari-project/tari/issues/2101
if num_removed_blocks > 0 || num_added_blocks > 1 {
info!(
target: LOG_TARGET,
"Chain reorg processed from (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{})",
"Chain reorg required from {} to {} (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{}). Number of \
blocks to remove: {}, to add: {}.
",
tip_header,
fork_tip_header,
tip_header.pow,
tip_header.hash().to_hex(),
fork_tip_header.pow,
fork_tip_hash.to_hex()
);
info!(
target: LOG_TARGET,
"Reorg from ({}) to ({})", tip_header, fork_tip_header
fork_tip_hash.to_hex(),
num_removed_blocks,
num_added_blocks,
);
Ok(BlockAddResult::ChainReorg((
Box::new(removed_blocks),
Box::new(added_blocks),
)))
} else {
trace!(
target: LOG_TARGET,
"No reorg required. Number of blocks to remove: {}, to add: {}.",
num_removed_blocks,
num_added_blocks,
);
Ok(BlockAddResult::Ok)
}
}

Expand Down
43 changes: 23 additions & 20 deletions base_layer/core/src/mempool/mempool_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -166,32 +166,35 @@ where T: BlockchainBackend
);
}

let prev_tip_height = removed_blocks
.last()
.expect("Added empty set of blocks on reorg.")
.header
.height;
let new_tip_height = new_blocks
.last()
.expect("Removed empty set of blocks on reorg.")
.header
.height;
let previous_tip = removed_blocks.last().map(|block| block.header.height);
let new_tip = new_blocks.last().map(|block| block.header.height);

self.insert_txs(
self.reorg_pool
.remove_reorged_txs_and_discard_double_spends(removed_blocks, &new_blocks)?,
)?;
self.process_published_blocks(new_blocks)?;

if new_tip_height < prev_tip_height {
debug!(
target: LOG_TARGET,
"Checking for time locked transactions in unconfirmed pool as chain height was reduced from {} to {} \
during reorg.",
prev_tip_height,
new_tip_height,
);
self.pending_pool
.insert_txs(self.unconfirmed_pool.remove_timelocked(new_tip_height))?;
if let (Some(previous_tip_height), Some(new_tip_height)) = (previous_tip, new_tip) {
if new_tip_height < previous_tip_height {
debug!(
target: LOG_TARGET,
"Checking for time locked transactions in unconfirmed pool as chain height was reduced from {} to \
{} during reorg.",
previous_tip_height,
new_tip_height,
);
self.pending_pool
.insert_txs(self.unconfirmed_pool.remove_timelocked(new_tip_height))?;
} else {
debug!(
target: LOG_TARGET,
"No need to check for time locked transactions in unconfirmed pool. Previous tip height: {}. New \
tip height: {}.",
previous_tip_height,
new_tip_height,
);
}
}

Ok(())
Expand Down
76 changes: 76 additions & 0 deletions base_layer/core/tests/chain_storage_tests/chain_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -834,6 +834,82 @@ fn handle_reorg() {
assert!(store.fetch_orphan(blocks[4].hash()).is_ok()); // B4
}

#[test]
fn handle_reorg_with_no_removed_blocks() {
    // GB --> A1
    //          \--> B2 (?) --> B3
    // Initially, the main chain is GB->A1 with orphaned block B3. When B2 arrives late and is
    // added to the blockchain then a reorg is triggered and the main chain is reorganized to GB->A1->B2->B3.
    //
    // Regression test for https://github.com/tari-project/tari/issues/2101: a reorg must be
    // reported (as `BlockAddResult::ChainReorg`) even when no blocks are removed from the main
    // chain, so the mempool is told about all newly-added blocks.

    // Create Main Chain
    let network = Network::LocalNet;
    let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network);

    // Block A1
    let txs = vec![txn_schema!(
        from: vec![outputs[0][0].clone()],
        to: vec![10 * T, 10 * T, 10 * T, 10 * T]
    )];
    generate_new_block_with_achieved_difficulty(
        &mut store,
        &mut blocks,
        &mut outputs,
        txs,
        Difficulty::from(1),
        &consensus_manager.consensus_constants(),
    )
    .expect("Failed to add block A1 to the main chain");

    // Create Forked Chain 1, built in a separate in-memory DB seeded from the same genesis block.
    let consensus_manager_fork = ConsensusManagerBuilder::new(network)
        .with_block(blocks[0].clone())
        .build();
    let mut orphan1_store = create_mem_db(&consensus_manager_fork); // GB
    orphan1_store.add_block(blocks[1].clone()).unwrap(); // A1
    let mut orphan1_blocks = vec![blocks[0].clone(), blocks[1].clone()];
    let mut orphan1_outputs = vec![outputs[0].clone(), outputs[1].clone()];
    // Block B2
    let txs = vec![txn_schema!(from: vec![orphan1_outputs[1][0].clone()], to: vec![5 * T])];
    generate_new_block_with_achieved_difficulty(
        &mut orphan1_store,
        &mut orphan1_blocks,
        &mut orphan1_outputs,
        txs,
        Difficulty::from(1),
        &consensus_manager.consensus_constants(),
    )
    .expect("Failed to add block B2 to the forked chain");
    // Block B3
    let txs = vec![
        txn_schema!(from: vec![orphan1_outputs[1][3].clone()], to: vec![3 * T]),
        txn_schema!(from: vec![orphan1_outputs[2][0].clone()], to: vec![3 * T]),
    ];
    generate_new_block_with_achieved_difficulty(
        &mut orphan1_store,
        &mut orphan1_blocks,
        &mut orphan1_outputs,
        txs,
        Difficulty::from(1),
        &consensus_manager.consensus_constants(),
    )
    .expect("Failed to add block B3 to the forked chain");

    // Now add the fork blocks B3 and B2 (out of order) to the first DB and ensure a reorg.
    // see https://github.com/tari-project/tari/issues/2101#issuecomment-679188619
    store.add_block(orphan1_blocks[3].clone()).unwrap(); // B3
    let result = store.add_block(orphan1_blocks[2].clone()).unwrap(); // B2
    match result {
        BlockAddResult::Ok => panic!("Adding multiple blocks without removing any failed to cause a reorg!"),
        BlockAddResult::ChainReorg((removed, added)) => {
            // Two blocks (B2, B3) join the chain; nothing is removed from it.
            assert_eq!(added.len(), 2);
            assert_eq!(removed.len(), 0);
        },
        other => panic!("Unexpected result when adding B2: {:?}", other),
    }

    assert_eq!(store.fetch_tip_header().unwrap(), orphan1_blocks[3].header.clone());
}

#[test]
fn store_and_retrieve_blocks() {
let mmr_cache_config = MmrCacheConfig { rewind_hist_len: 2 };
Expand Down
8 changes: 8 additions & 0 deletions base_layer/core/tests/mempool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -397,6 +397,14 @@ fn test_reorg() {
assert_eq!(stats.unconfirmed_txs, 2);
assert_eq!(stats.timelocked_txs, 1);
assert_eq!(stats.published_txs, 3);

// "Mine" block 4
let template = chain_block(&blocks[3], vec![], consensus_manager.consensus_constants());
let reorg_block4 = db.calculate_mmr_roots(template).unwrap();

// test that process_reorg can handle the case when removed_blocks is empty
// see https://github.com/tari-project/tari/issues/2101#issuecomment-680726940
mempool.process_reorg(vec![], vec![reorg_block4]).unwrap();
}

#[test]
Expand Down

0 comments on commit 3524fc3

Please sign in to comment.