From a4c71b4bde640cc600e557f280f9f9e7f27e6a26 Mon Sep 17 00:00:00 2001 From: SW van Heerden Date: Wed, 16 Nov 2022 19:11:18 +0200 Subject: [PATCH 1/7] reset broken sync --- .../src/chain_storage/blockchain_backend.rs | 2 + .../src/chain_storage/blockchain_database.rs | 92 ++++++++++++++----- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 5 + .../core/src/test_helpers/blockchain.rs | 4 + 4 files changed, 78 insertions(+), 25 deletions(-) diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 9bcff45612..dea12c9747 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -154,6 +154,8 @@ pub trait BlockchainBackend: Send + Sync { /// Fetches an current tip orphan by hash or returns None if the orphan is not found or is not a tip of any /// alternate chain fn fetch_orphan_chain_tip_by_hash(&self, hash: &HashOutput) -> Result, ChainStorageError>; + /// Fetches all currently stored orphan tips, if none are stored, returns an empty vec. + fn fetch_all_orphan_chain_tips(&self) -> Result, ChainStorageError>; /// Fetch all orphans that have `hash` as a previous hash fn fetch_orphan_children_of(&self, hash: HashOutput) -> Result, ChainStorageError>; diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 8affd89bd6..4fb70eee93 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1111,6 +1111,20 @@ where B: BlockchainBackend rewind_to_hash(&mut *db, hash) } + /// This method will compare all chain tips the node currently knows about. This includes + /// all tips in the orphan pool and the main active chain. It will swap the main active + /// chain to the highest pow chain + /// This is typically used when an attempted sync failed to sync to the expected height and + /// we are not sure if the new chain is higher than the old one. 
+ pub fn swap_to_highest_pow_chain(&self) -> Result<(), ChainStorageError> { + let mut db = self.db_write_access()?; + swap_to_highest_pow_chain( + &mut *db, + &*self.validators.block, + self.consensus_manager.chain_strength_comparer(), + ) + } + pub fn fetch_horizon_data(&self) -> Result { let db = self.db_read_access()?; Ok(db.fetch_horizon_data()?.unwrap_or_default()) @@ -2053,31 +2067,59 @@ fn reorganize_chain( Ok(removed_blocks) } -// fn hydrate_block( -// backend: &mut T, -// block: Arc, -// ) -> Result, ChainStorageError> { -// if !block.block().body.has_compact_inputs() { -// return Ok(block); -// } -// -// for input in block.block().body.inputs() { -// let output = backend.fetch_mmr_leaf(MmrTree::Utxo, input.mmr_index())?; -// let output = output.ok_or_else(|| ChainStorageError::ValueNotFound { -// entity: "Output".to_string(), -// field: "mmr_index".to_string(), -// value: input.mmr_index().to_string(), -// })?; -// let output = TransactionOutput::try_from(output)?; -// let input = TransactionInput::new_with_commitment(input.features(), output.commitment()); -// block.block_mut().body_mut().add_input(input); -// } -// backend.fetch_unspent_output_hash_by_commitment() -// let block = hydrate_block_from_db(backend, block_hash, block.header().clone())?; -// txn.delete_orphan(block_hash); -// backend.write(txn)?; -// Ok(block) -// } + +fn swap_to_highest_pow_chain( + db: &mut T, + block_validator: &dyn PostOrphanBodyValidation, + chain_strength_comparer: &dyn ChainStrengthComparer, +) -> Result<(), ChainStorageError> { + let metadata = db.fetch_chain_metadata()?; + // lets clear out all remaining headers that done have a matching block + // rewind to height will first delete the headers, then try delete from blocks, if we call this to the current + // height it will only trim the extra headers with no blocks + rewind_to_height(db, metadata.height_of_longest_chain())?; + let all_orphan_tips = db.fetch_all_orphan_chain_tips()?; + if all_orphan_tips.is_empty() { + // we have no orphan chain tips, we have trimmed remaining headers, we are on the best tip we have, so lets + // return ok + return Ok(()); + } + // Check the accumulated difficulty of the best fork chain compared to the main chain. + let best_fork_header = find_strongest_orphan_tip(all_orphan_tips, chain_strength_comparer).ok_or_else(|| { + // This should never happen because a block is always added to the orphan pool before + // checking, but just in case + warn!( + target: LOG_TARGET, + "Unable to find strongest orphan tip`. 
This should never happen.", + ); + ChainStorageError::InvalidOperation("No chain tips found in orphan pool".to_string()) + })?; + let tip_header = db.fetch_tip_header()?; + match chain_strength_comparer.compare(&best_fork_header, &tip_header) { + Ordering::Greater => { + debug!( + target: LOG_TARGET, + "Fork chain (accum_diff:{}, hash:{}) is stronger than the current tip (#{} ({})).", + best_fork_header.accumulated_data().total_accumulated_difficulty, + best_fork_header.accumulated_data().hash.to_hex(), + tip_header.height(), + tip_header.hash().to_hex() + ); + }, + Ordering::Less | Ordering::Equal => { + return Ok(()); + }, + } + + let reorg_chain = get_orphan_link_main_chain(db, best_fork_header.hash())?; + let fork_hash = reorg_chain + .front() + .expect("The new orphan block should be in the queue") + .header() + .prev_hash; + reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?; + Ok(()) +} fn restore_reorged_chain( db: &mut T, diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 444e6370d0..c63ac706b5 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -2298,6 +2298,11 @@ impl BlockchainBackend for LMDBDatabase { Ok(Some(chain_header)) } + fn fetch_all_orphan_chain_tips(&self) -> Result, ChainStorageError> { + let txn = self.read_transaction()?; + lmdb_filter_map_values(&txn, &self.orphan_chain_tips_db, |tip| tip) + } + fn fetch_orphan_children_of(&self, parent_hash: HashOutput) -> Result, ChainStorageError> { trace!( target: LOG_TARGET, diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 6641c2d86f..d87828f32a 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -357,6 +357,10 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_orphan_chain_tip_by_hash(hash) } + fn fetch_all_orphan_chain_tips(&self) -> Result, ChainStorageError> { + self.db.as_ref().unwrap().fetch_all_orphan_chain_tips() + } + fn fetch_orphan_children_of(&self, hash: HashOutput) -> Result, ChainStorageError> { self.db.as_ref().unwrap().fetch_orphan_children_of(hash) } From deb848d9d203bd5bae39761d4b0109c8c936cc28 Mon Sep 17 00:00:00 2001 From: SW van Heerden Date: Thu, 17 Nov 2022 22:34:28 +0200 Subject: [PATCH 2/7] fix test --- .../src/chain_storage/blockchain_database.rs | 22 ++--- .../core/src/chain_storage/lmdb_db/lmdb_db.rs | 27 +++++- .../chain_storage_tests/chain_storage.rs | 88 +++++++++++++++++++ 3 files changed, 126 insertions(+), 11 deletions(-) diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 4fb70eee93..ea0c299323 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1702,6 +1702,7 @@ fn check_for_valid_height(db: &T, height: u64) -> Result<( /// Removes blocks from the db from current tip to specified height. /// Returns the blocks removed, ordered from tip to height. 
+#[allow(clippy::too_many_lines)] fn rewind_to_height(db: &mut T, height: u64) -> Result>, ChainStorageError> { let last_header = db.fetch_last_header()?; @@ -1770,7 +1771,8 @@ fn rewind_to_height(db: &mut T, height: u64) -> Result(db: &mut T, height: u64) -> Result( } } - if let Some(block) = removed_blocks.first() { - // insert the new orphan chain tip - let mut txn = DbTransaction::new(); - let hash = *block.hash(); - debug!(target: LOG_TARGET, "Inserting new orphan chain tip: {}", hash.to_hex()); - txn.insert_orphan_chain_tip(hash); - backend.write(txn)?; - } - Ok(removed_blocks) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index c63ac706b5..e4bbcf0d32 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -2300,7 +2300,32 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_all_orphan_chain_tips(&self) -> Result, ChainStorageError> { let txn = self.read_transaction()?; - lmdb_filter_map_values(&txn, &self.orphan_chain_tips_db, |tip| tip) + let tips: Vec = lmdb_filter_map_values(&txn, &self.orphan_chain_tips_db, Some)?; + let mut result = Vec::new(); + for hash in tips { + let orphan: Block = + lmdb_get(&txn, &self.orphans_db, hash.as_slice())?.ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "Orphan", + field: "hash", + value: hash.to_hex(), + })?; + + let accumulated_data = lmdb_get(&txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? + .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "Orphan accumulated data", + field: "hash", + value: hash.to_hex(), + })?; + let height = orphan.header.height; + let chain_header = ChainHeader::try_construct(orphan.header, accumulated_data).ok_or_else(|| { + ChainStorageError::DataInconsistencyDetected { + function: "fetch_all_orphan_chain_tips", + details: format!("Accumulated data mismatch at height #{}", height), + } + })?; + result.push(chain_header); + } + Ok(result) } fn fetch_orphan_children_of(&self, parent_hash: HashOutput) -> Result, ChainStorageError> { trace!( target: LOG_TARGET, diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 2c6bfe471e..641f5d2e64 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -539,6 +539,94 @@ fn test_handle_tip_reorg() { assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); } +#[test] +fn test_handle_tip_reset() { + // GB --> A1 --> A2(Low PoW) [Main Chain] + // \--> B2(Highest PoW) [Forked Chain] + // Initially, the main chain is GB->A1->A2.
B2 has a higher accumulated PoW and when B2 is added the main chain is + // reorged to GB->A1->B2 + + // Create Main Chain + let network = Network::LocalNet; + let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); + // Block A1 + let txs = vec![txn_schema!( + from: vec![outputs[0][0].clone()], + to: vec![10 * T, 10 * T, 10 * T, 10 * T] + )]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(1), + &consensus_manager, + ) + .unwrap(); + // Block A2 + let txs = vec![txn_schema!(from: vec![outputs[1][3].clone()], to: vec![6 * T])]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(3), + &consensus_manager, + ) + .unwrap(); + + // Create Forked Chain + + let mut orphan_store = create_store_with_consensus(consensus_manager.clone()); + orphan_store.add_block(blocks[1].to_arc_block()).unwrap(); + let mut orphan_blocks = vec![blocks[0].clone(), blocks[1].clone()]; + let mut orphan_outputs = vec![outputs[0].clone(), outputs[1].clone()]; + // Block B2 + let txs = vec![txn_schema!(from: vec![orphan_outputs[1][0].clone()], to: vec![5 * T])]; + generate_new_block_with_achieved_difficulty( + &mut orphan_store, + &mut orphan_blocks, + &mut orphan_outputs, + txs, + Difficulty::from(7), + &consensus_manager, + ) + .unwrap(); + + // Adding B2 to the main chain will produce a reorg to GB->A1->B2. + if let Ok(BlockAddResult::ChainReorg { .. }) = store.add_block(orphan_blocks[2].to_arc_block()) { + } else { + panic!(); + } + + assert_eq!(store.fetch_tip_header().unwrap().header().height, 2); + store.rewind_to_height(1).unwrap(); + assert_eq!(store.fetch_tip_header().unwrap().header().height, 1); + // both tips should be in the orphan pool + assert!(store.fetch_orphan(*orphan_blocks[2].hash()).is_ok()); + assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); + store.swap_to_highest_pow_chain().unwrap(); + // we should now be on B2 + + assert_eq!(store.fetch_tip_header().unwrap().header().height, 2); + assert_eq!(store.fetch_tip_header().unwrap().hash(), orphan_blocks[2].hash()); + assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); + + store.swap_to_highest_pow_chain().unwrap(); + // Chain should not have swapped + assert_eq!(store.fetch_tip_header().unwrap().hash(), orphan_blocks[2].hash()); + assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); + + // let's reset to A1 again + store.rewind_to_height(1).unwrap(); + assert_eq!(store.fetch_tip_header().unwrap().header().height, 1); + store.cleanup_all_orphans().unwrap(); + store.swap_to_highest_pow_chain().unwrap(); + // the current main chain should be the strongest, so the tip should remain A1
+ assert_eq!(store.fetch_tip_header().unwrap().header().height, 1); + assert_eq!(store.fetch_tip_header().unwrap().hash(), blocks[1].hash()); +} + #[test] #[allow(clippy::identity_op)] #[allow(clippy::too_many_lines)] From 6a132928b044d851c86abadfc06a5b18424f2437 Mon Sep 17 00:00:00 2001 From: SW van Heerden Date: Thu, 24 Nov 2022 15:25:02 +0200 Subject: [PATCH 3/7] add to async --- .../src/base_node/state_machine_service/states/block_sync.rs | 3 +++ base_layer/core/src/chain_storage/async_db.rs | 2 ++ 2 files changed, 5 insertions(+) diff --git a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs index 3c2ee26525..8b7815b096 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs @@ -116,6 +116,9 @@ impl BlockSync { }); log_mdc::extend(mdc); warn!(target: LOG_TARGET, "Block sync failed: {}", err); + if let Err(e) = shared.db.swap_to_highest_pow_chain().await{ + error!(target: LOG_TARGET, "Failed to reset chain to highest proof of work: {}", e); + } StateEvent::BlockSyncFailed }, } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index dff23d0536..f94efc1ac7 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -272,6 +272,8 @@ impl AsyncBlockchainDb { make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> Option<[u8;32]>, "get_shard_key"); make_async_fn!(fetch_template_registrations>(range: T) -> Vec, "fetch_template_registrations"); + + make_async_fn!(swap_to_highest_pow_chain() -> (), "swap to highest proof-of-work chain"); } impl From> for AsyncBlockchainDb { From ab9d92b9b1d6483d815c514442b2ee162ac9f4b7 Mon Sep 17 00:00:00 2001 From: SW van Heerden Date: Fri, 25 Nov 2022 10:05:58 +0200 Subject: [PATCH 4/7] fmt --- .../base_node/state_machine_service/states/block_sync.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs index 8b7815b096..b5e3127dc3 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs @@ -116,8 +116,11 @@ impl BlockSync { }); log_mdc::extend(mdc); warn!(target: LOG_TARGET, "Block sync failed: {}", err); - if let Err(e) = shared.db.swap_to_highest_pow_chain().await{ - error!(target: LOG_TARGET, "Failed to reset chain to highest proof of work: {}", e); + if let Err(e) = shared.db.swap_to_highest_pow_chain().await { + error!( + target: LOG_TARGET, + "Failed to reset chain to highest proof of work: {}", e + ); } StateEvent::BlockSyncFailed }, From fad6820566afe12c0121bc51a73c20b2ec074580 Mon Sep 17 00:00:00 2001 From: SW van Heerden Date: Fri, 25 Nov 2022 13:53:07 +0200 Subject: [PATCH 5/7] combione reset pow and add block --- .../src/chain_storage/blockchain_database.rs | 250 ++++++------------ .../tests/blockchain_database.rs | 86 ------ 2 files changed, 81 insertions(+), 255 deletions(-) diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index ea0c299323..cd51ad852b 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ 
b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1120,9 +1120,11 @@ where B: BlockchainBackend let mut db = self.db_write_access()?; swap_to_highest_pow_chain( &mut *db, + &self.config, &*self.validators.block, self.consensus_manager.chain_strength_comparer(), - ) + )?; + Ok(()) } pub fn fetch_horizon_data(&self) -> Result { @@ -1864,151 +1866,9 @@ fn handle_possible_reorg( new_block: Arc, ) -> Result { let db_height = db.fetch_chain_metadata()?.height_of_longest_chain(); - let new_block_hash = new_block.hash(); - - let new_tips = insert_orphan_and_find_new_tips(db, new_block.clone(), header_validator, difficulty_calculator)?; - debug!( - target: LOG_TARGET, - "Added candidate block #{} ({}) to the orphan database. Best height is {}. New tips found: {} ", - new_block.header.height, - new_block_hash.to_hex(), - db_height, - new_tips.len() - ); - - if new_tips.is_empty() { - debug!( - target: LOG_TARGET, - "No reorg required, could not construct complete chain using block #{} ({}).", - new_block.header.height, - new_block_hash.to_hex() - ); - return Ok(BlockAddResult::OrphanBlock); - } - - // Check the accumulated difficulty of the best fork chain compared to the main chain. - let fork_header = find_strongest_orphan_tip(new_tips, chain_strength_comparer).ok_or_else(|| { - // This should never happen because a block is always added to the orphan pool before - // checking, but just in case - warn!( - target: LOG_TARGET, - "Unable to find strongest orphan tip when adding block `{}`. This should never happen.", - new_block_hash.to_hex() - ); - ChainStorageError::InvalidOperation("No chain tips found in orphan pool".to_string()) - })?; - - let tip_header = db.fetch_tip_header()?; - if fork_header.hash() == &new_block_hash { - debug!( - target: LOG_TARGET, - "Comparing candidate block #{} (accum_diff:{}, hash:{}) to main chain #{} (accum_diff: {}, hash: ({})).", - new_block.header.height, - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - tip_header.header().height, - tip_header.accumulated_data().total_accumulated_difficulty, - tip_header.accumulated_data().hash.to_hex() - ); - } else { - debug!( - target: LOG_TARGET, - "Comparing fork (accum_diff:{}, hash:{}) with block #{} ({}) to main chain #{} (accum_diff: {}, hash: \ - ({})).", - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - new_block.header.height, - new_block_hash.to_hex(), - tip_header.header().height, - tip_header.accumulated_data().total_accumulated_difficulty, - tip_header.accumulated_data().hash.to_hex() - ); - } - - match chain_strength_comparer.compare(&fork_header, &tip_header) { - Ordering::Greater => { - debug!( - target: LOG_TARGET, - "Fork chain (accum_diff:{}, hash:{}) is stronger than the current tip (#{} ({})).", - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - tip_header.height(), - tip_header.hash().to_hex() - ); - }, - Ordering::Less | Ordering::Equal => { - debug!( - target: LOG_TARGET, - "Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker difficulty.", - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - new_block.header.height, - new_block_hash.to_hex(), - ); - debug!( - target: LOG_TARGET, - "Orphan block received: #{} ", new_block.header.height - ); - return Ok(BlockAddResult::OrphanBlock); - }, - } + insert_orphan_and_find_new_tips(db, 
new_block.clone(), header_validator, difficulty_calculator)?; - // TODO: We already have the first link in this chain, can be optimized to exclude it - let reorg_chain = get_orphan_link_main_chain(db, fork_header.hash())?; - - let fork_hash = reorg_chain - .front() - .expect("The new orphan block should be in the queue") - .header() - .prev_hash; - - let num_added_blocks = reorg_chain.len(); - let removed_blocks = reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?; - let num_removed_blocks = removed_blocks.len(); - - // reorg is required when any blocks are removed or more than one are added - // see https://github.com/tari-project/tari/issues/2101 - if num_removed_blocks > 0 || num_added_blocks > 1 { - if config.track_reorgs { - let mut txn = DbTransaction::new(); - txn.insert_reorg(Reorg::from_reorged_blocks(&reorg_chain, &removed_blocks)); - if let Err(e) = db.write(txn) { - error!(target: LOG_TARGET, "Failed to track reorg: {}", e); - } - } - - log!( - target: LOG_TARGET, - if num_removed_blocks > 1 { - Level::Warn - } else { - Level::Info - }, // We want a warning if the number of removed blocks is at least 2. - "Chain reorg required from {} to {} (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{}). Number of \ - blocks to remove: {}, to add: {}.", - tip_header.header().height, - fork_header.header().height, - tip_header.accumulated_data().total_accumulated_difficulty, - tip_header.accumulated_data().hash.to_hex(), - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - num_removed_blocks, - num_added_blocks, - ); - Ok(BlockAddResult::ChainReorg { - removed: removed_blocks, - added: reorg_chain.into(), - }) - } else { - trace!( - target: LOG_TARGET, - "No reorg required. Number of blocks to remove: {}, to add: {}.", - num_removed_blocks, - num_added_blocks, - ); - // NOTE: panic is not possible because get_orphan_link_main_chain cannot return an empty Vec (reorg_chain) - Ok(BlockAddResult::Ok(reorg_chain.front().unwrap().clone())) - } + swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer) } /// Reorganize the main chain with the provided fork chain, starting at the specified height. @@ -2072,11 +1932,12 @@ fn reorganize_chain( fn swap_to_highest_pow_chain( db: &mut T, + config: &BlockchainDatabaseConfig, block_validator: &dyn PostOrphanBodyValidation, chain_strength_comparer: &dyn ChainStrengthComparer, -) -> Result<(), ChainStorageError> { +) -> Result { let metadata = db.fetch_chain_metadata()?; - // lets clear out all remaining headers that done have a matching block + // lets clear out all remaining headers that dont have a matching block // rewind to height will first delete the headers, then try delete from blocks, if we call this to the current // height it will only trim the extra headers with no blocks rewind_to_height(db, metadata.height_of_longest_chain())?; @@ -2084,7 +1945,7 @@ fn swap_to_highest_pow_chain( if all_orphan_tips.is_empty() { // we have no orphan chain tips, we have trimmed remaining headers, we are on the best tip we have, so lets // return ok - return Ok(()); + return Ok(BlockAddResult::OrphanBlock); } // Check the accumulated difficulty of the best fork chain compared to the main chain. 
let best_fork_header = find_strongest_orphan_tip(all_orphan_tips, chain_strength_comparer).ok_or_else(|| { @@ -2109,7 +1970,15 @@ fn swap_to_highest_pow_chain( ); }, Ordering::Less | Ordering::Equal => { - return Ok(()); + debug!( + target: LOG_TARGET, + "Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker difficulty.", + best_fork_header.accumulated_data().total_accumulated_difficulty, + best_fork_header.accumulated_data().hash.to_hex(), + tip_header.header().height, + tip_header.hash().to_hex(), + ); + return Ok(BlockAddResult::OrphanBlock); }, } @@ -2119,8 +1988,54 @@ fn swap_to_highest_pow_chain( .expect("The new orphan block should be in the queue") .header() .prev_hash; - reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?; - Ok(()) + + let num_added_blocks = reorg_chain.len(); + let removed_blocks = reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?; + let num_removed_blocks = removed_blocks.len(); + + // reorg is required when any blocks are removed or more than one are added + // see https://github.com/tari-project/tari/issues/2101 + if num_removed_blocks > 0 || num_added_blocks > 1 { + if config.track_reorgs { + let mut txn = DbTransaction::new(); + txn.insert_reorg(Reorg::from_reorged_blocks(&reorg_chain, &removed_blocks)); + if let Err(e) = db.write(txn) { + error!(target: LOG_TARGET, "Failed to track reorg: {}", e); + } + } + + log!( + target: LOG_TARGET, + if num_removed_blocks > 1 { + Level::Warn + } else { + Level::Info + }, // We want a warning if the number of removed blocks is at least 2. + "Chain reorg required from {} to {} (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{}). Number of \ + blocks to remove: {}, to add: {}.", + tip_header.header().height, + best_fork_header.header().height, + tip_header.accumulated_data().total_accumulated_difficulty, + tip_header.accumulated_data().hash.to_hex(), + best_fork_header.accumulated_data().total_accumulated_difficulty, + best_fork_header.accumulated_data().hash.to_hex(), + num_removed_blocks, + num_added_blocks, + ); + Ok(BlockAddResult::ChainReorg { + removed: removed_blocks, + added: reorg_chain.into(), + }) + } else { + trace!( + target: LOG_TARGET, + "No reorg required. Number of blocks to remove: {}, to add: {}.", + num_removed_blocks, + num_added_blocks, + ); + // NOTE: panic is not possible because get_orphan_link_main_chain cannot return an empty Vec (reorg_chain) + Ok(BlockAddResult::Ok(reorg_chain.front().unwrap().clone())) + } } fn restore_reorged_chain( @@ -2154,12 +2069,12 @@ fn insert_orphan_and_find_new_tips( block: Arc, validator: &dyn HeaderValidation, difficulty_calculator: &DifficultyCalculator, -) -> Result, ChainStorageError> { +) -> Result<(), ChainStorageError> { let hash = block.hash(); // There cannot be any _new_ tips if we've seen this orphan block before if db.contains(&DbKey::OrphanBlock(hash))? { - return Ok(vec![]); + return Ok(()); } let parent = match db.fetch_orphan_chain_tip_by_hash(&block.header.prev_hash)? 
{ @@ -2206,7 +2121,7 @@ fn insert_orphan_and_find_new_tips( txn.insert_orphan(block); db.write(txn)?; } - return Ok(vec![]); + return Ok(()); }, }, }; @@ -2239,7 +2154,7 @@ fn insert_orphan_and_find_new_tips( } db.write(txn)?; - Ok(tips) + Ok(()) } // Find the tip set of any orphans that have hash as an ancestor @@ -2653,15 +2568,13 @@ mod test { let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block); let block = chain.get("A").unwrap().clone(); let mut access = db.db_write_access().unwrap(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert_eq!(chain.len(), 1); - assert_eq!(chain[0].hash(), block.hash()); let maybe_block = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap(); assert_eq!(maybe_block.unwrap().header(), block.header()); @@ -2679,24 +2592,22 @@ mod test { let mut access = db.db_write_access().unwrap(); let block_d2 = orphan_chain.get("D2").unwrap().clone(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block_d2.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert!(chain.is_empty()); let block_e2 = orphan_chain.get("E2").unwrap().clone(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block_e2.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert!(chain.is_empty()); let maybe_block = access.fetch_orphan_children_of(*block_d2.hash()).unwrap(); assert_eq!(maybe_block[0], *block_e2.to_arc_block()); @@ -2713,28 +2624,29 @@ mod test { let mut access = db.db_write_access().unwrap(); let block = orphan_chain.get("B2").unwrap().clone(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert_eq!(chain.len(), 1); - assert_eq!(chain[0].header(), block.header()); - assert_eq!(chain[0].accumulated_data().total_accumulated_difficulty, 4); - let fork_tip = access.fetch_orphan_chain_tip_by_hash(chain[0].hash()).unwrap().unwrap(); + let fork_tip = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap().unwrap(); assert_eq!(fork_tip, block.to_chain_header()); + assert_eq!(fork_tip.accumulated_data().total_accumulated_difficulty, 4); + let all_tips = access.fetch_all_orphan_chain_tips().unwrap().len(); + assert_eq!(all_tips, 1); // Insert again (block was received more than once), no new tips - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert_eq!(chain.len(), 0); + let all_tips = access.fetch_all_orphan_chain_tips().unwrap().len(); + assert_eq!(all_tips, 1); } } diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 879991fe1e..100a1564af 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -368,92 +368,6 @@ mod fetch_block_hashes_from_header_tip { } } -mod add_block { - use super::*; - - #[test] - fn it_rejects_duplicate_commitments_in_the_utxo_set() { - let db = setup(); - let (blocks, outputs) = add_many_chained_blocks(5, &db); - - let prev_block = blocks.last().unwrap(); - // Used to help identify the output we're interrogating in this test - let 
features = OutputFeatures { - maturity: 1, - ..Default::default() - }; - let (txns, tx_outputs) = schema_to_transaction(&[txn_schema!( - from: vec![outputs[0].clone()], - to: vec![500 * T], - features: features - )]); - let mut prev_utxo = tx_outputs[0].clone(); - - let (block, _) = create_next_block(&db, prev_block, txns); - db.add_block(block.clone()).unwrap().assert_added(); - - let prev_block = block; - - let (txns, _) = schema_to_transaction(&[TransactionSchema { - from: vec![outputs[1].clone()], - to: vec![], - to_outputs: vec![prev_utxo.clone()], - fee: 5.into(), - lock_height: 0, - features, - script: tari_script::script![Nop], - covenant: Default::default(), - input_data: None, - input_version: None, - output_version: None, - }]); - let commitment_hex = txns[0] - .body - .outputs() - .iter() - .find(|o| o.features.maturity == 1) - .unwrap() - .commitment - .to_hex(); - - let (block, _) = create_next_block(&db, &prev_block, txns); - let err = db.add_block(block.clone()).unwrap_err(); - unpack_enum!(ChainStorageError::KeyExists { key, .. } = err); - assert_eq!(key, commitment_hex); - // Check rollback - let header = db.fetch_header(block.header.height).unwrap(); - assert!(header.is_none()); - - let (txns, _) = schema_to_transaction(&[txn_schema!(from: vec![prev_utxo.clone()], to: vec![50 * T])]); - let (block, _) = create_next_block(&db, &prev_block, txns); - let block = db.add_block(block).unwrap().assert_added(); - let prev_block = block.to_arc_block(); - - // Different metadata so that the output hash is different in txo_hash_to_index_db - prev_utxo.features = OutputFeatures { - metadata: vec![1], - ..Default::default() - }; - // Now we can reuse a commitment - let (txns, _) = schema_to_transaction(&[TransactionSchema { - from: vec![outputs[1].clone()], - to: vec![], - to_outputs: vec![prev_utxo], - fee: 5.into(), - lock_height: 0, - features: Default::default(), - script: tari_script::script![Nop], - covenant: Default::default(), - input_data: None, - input_version: None, - output_version: None, - }]); - - let (block, _) = create_next_block(&db, &prev_block, txns); - db.add_block(block).unwrap().assert_added(); - } -} - mod get_stats { use super::*; From ede91747e27f64d23f710f294ca5273496c7afd6 Mon Sep 17 00:00:00 2001 From: SW van Heerden Date: Fri, 25 Nov 2022 14:10:03 +0200 Subject: [PATCH 6/7] remove unused --- base_layer/core/src/chain_storage/blockchain_database.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index cd51ad852b..5c9b2b819c 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1865,9 +1865,7 @@ fn handle_possible_reorg( chain_strength_comparer: &dyn ChainStrengthComparer, new_block: Arc, ) -> Result { - let db_height = db.fetch_chain_metadata()?.height_of_longest_chain(); insert_orphan_and_find_new_tips(db, new_block.clone(), header_validator, difficulty_calculator)?; - swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer) } From 64a5e73ea5198dd7e60b9d0caff2d3c035dc6596 Mon Sep 17 00:00:00 2001 From: SW van Heerden Date: Mon, 28 Nov 2022 12:07:21 +0200 Subject: [PATCH 7/7] clippy --- base_layer/core/src/chain_storage/blockchain_database.rs | 2 +- .../core/src/chain_storage/tests/blockchain_database.rs | 7 ++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs 
b/base_layer/core/src/chain_storage/blockchain_database.rs index 5c9b2b819c..8e5072f9ed 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1865,7 +1865,7 @@ fn handle_possible_reorg( chain_strength_comparer: &dyn ChainStrengthComparer, new_block: Arc, ) -> Result { - insert_orphan_and_find_new_tips(db, new_block.clone(), header_validator, difficulty_calculator)?; + insert_orphan_and_find_new_tips(db, new_block, header_validator, difficulty_calculator)?; swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer) } diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 100a1564af..62d9ee1374 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -22,9 +22,6 @@ // DAMAGE. use std::sync::Arc; -use tari_test_utils::unpack_enum; -use tari_utilities::hex::Hex; - use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainHeader, NewBlockTemplate}, chain_storage::{BlockchainDatabase, ChainStorageError}, @@ -36,8 +33,8 @@ use crate::{ }, transactions::{ tari_amount::T, - test_helpers::{schema_to_transaction, TransactionSchema}, - transaction_components::{OutputFeatures, Transaction, UnblindedOutput}, + test_helpers::schema_to_transaction, + transaction_components::{Transaction, UnblindedOutput}, }, txn_schema, };
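Note on the series (illustrative sketch only, not part of the patches): the recovery path added here reduces to a small tip-selection rule -- trim headers that have no matching block, collect every orphan chain tip, and reorg only when the strongest orphan tip has strictly more accumulated proof of work than the current main-chain tip; on a tie or a weaker fork the existing chain is kept. The self-contained Rust sketch below models that rule with a hypothetical Tip struct and select_tip function (plain accumulated difficulty stands in for the ChainStrengthComparer used by the real code):

// Hypothetical, simplified model of the decision made by swap_to_highest_pow_chain.
// Tip and select_tip are illustration-only names; they are not part of the patch series.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Tip {
    name: &'static str,
    accumulated_difficulty: u128,
}

/// Returns the tip the node should follow: the strongest orphan tip wins only if it is
/// strictly stronger than the current tip (the Ordering::Greater arm in the patch);
/// Less or Equal keeps the existing main chain.
fn select_tip(current: Tip, orphan_tips: Vec<Tip>) -> Tip {
    match orphan_tips.into_iter().max_by_key(|t| t.accumulated_difficulty) {
        Some(best_fork) if best_fork.accumulated_difficulty > current.accumulated_difficulty => best_fork,
        _ => current,
    }
}

fn main() {
    let a2 = Tip { name: "A2", accumulated_difficulty: 4 };
    let b2 = Tip { name: "B2", accumulated_difficulty: 8 };
    // A stronger fork tip triggers a swap, mirroring the reorg exercised in test_handle_tip_reset.
    assert_eq!(select_tip(a2.clone(), vec![b2.clone()]).name, "B2");
    // A weaker fork, or no orphan tips at all, leaves the current chain in place.
    assert_eq!(select_tip(b2.clone(), vec![a2.clone()]).name, "B2");
    assert_eq!(select_tip(a2, vec![]).name, "A2");
}

In the node itself this rule is reached through the async wrapper added in PATCH 3/7: when a block sync fails, the state machine calls shared.db.swap_to_highest_pow_chain().await and logs, rather than propagates, any error.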