diff --git a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs index 3c2ee26525..b5e3127dc3 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs @@ -116,6 +116,12 @@ impl BlockSync { }); log_mdc::extend(mdc); warn!(target: LOG_TARGET, "Block sync failed: {}", err); + if let Err(e) = shared.db.swap_to_highest_pow_chain().await { + error!( + target: LOG_TARGET, + "Failed to reset chain to highest proof of work: {}", e + ); + } StateEvent::BlockSyncFailed }, } diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index dff23d0536..f94efc1ac7 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -272,6 +272,8 @@ impl AsyncBlockchainDb { make_async_fn!(get_shard_key(height:u64, public_key: PublicKey) -> Option<[u8;32]>, "get_shard_key"); make_async_fn!(fetch_template_registrations>(range: T) -> Vec, "fetch_template_registrations"); + + make_async_fn!(swap_to_highest_pow_chain() -> (), "swap to highest proof-of-work chain"); } impl From> for AsyncBlockchainDb { diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 9bcff45612..dea12c9747 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -154,6 +154,8 @@ pub trait BlockchainBackend: Send + Sync { /// Fetches an current tip orphan by hash or returns None if the orphan is not found or is not a tip of any /// alternate chain fn fetch_orphan_chain_tip_by_hash(&self, hash: &HashOutput) -> Result, ChainStorageError>; + /// Fetches all currently stored orphan tips, if none are stored, returns an empty vec. 
+ fn fetch_all_orphan_chain_tips(&self) -> Result, ChainStorageError>; /// Fetch all orphans that have `hash` as a previous hash fn fetch_orphan_children_of(&self, hash: HashOutput) -> Result, ChainStorageError>; diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 8affd89bd6..8e5072f9ed 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -1111,6 +1111,22 @@ where B: BlockchainBackend rewind_to_hash(&mut *db, hash) } + /// This method will compare all chain tips the node currently knows about. This includes + /// all tips in the orphan pool and the main active chain. It will swap the main active + /// chain to the highest pow chain + /// This is typically used when an attempted sync failed to sync to the expected height and + /// we are not sure if the new chain is higher than the old one. + pub fn swap_to_highest_pow_chain(&self) -> Result<(), ChainStorageError> { + let mut db = self.db_write_access()?; + swap_to_highest_pow_chain( + &mut *db, + &self.config, + &*self.validators.block, + self.consensus_manager.chain_strength_comparer(), + )?; + Ok(()) + } + pub fn fetch_horizon_data(&self) -> Result { let db = self.db_read_access()?; Ok(db.fetch_horizon_data()?.unwrap_or_default()) @@ -1688,6 +1704,7 @@ fn check_for_valid_height(db: &T, height: u64) -> Result<( /// Removes blocks from the db from current tip to specified height. /// Returns the blocks removed, ordered from tip to height. 
+#[allow(clippy::too_many_lines)] fn rewind_to_height(db: &mut T, height: u64) -> Result>, ChainStorageError> { let last_header = db.fetch_last_header()?; @@ -1756,7 +1773,8 @@ fn rewind_to_height(db: &mut T, height: u64) -> Result(db: &mut T, height: u64) -> Result( chain_strength_comparer: &dyn ChainStrengthComparer, new_block: Arc, ) -> Result { - let db_height = db.fetch_chain_metadata()?.height_of_longest_chain(); - let new_block_hash = new_block.hash(); + insert_orphan_and_find_new_tips(db, new_block, header_validator, difficulty_calculator)?; + swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer) +} - let new_tips = insert_orphan_and_find_new_tips(db, new_block.clone(), header_validator, difficulty_calculator)?; +/// Reorganize the main chain with the provided fork chain, starting at the specified height. +/// Returns the blocks that were removed (if any), ordered from tip to fork (ie. height highest to lowest). +fn reorganize_chain( + backend: &mut T, + block_validator: &dyn PostOrphanBodyValidation, + fork_hash: HashOutput, + chain: &VecDeque>, +) -> Result>, ChainStorageError> { + let removed_blocks = rewind_to_hash(backend, fork_hash)?; debug!( target: LOG_TARGET, - "Added candidate block #{} ({}) to the orphan database. Best height is {}. New tips found: {} ", - new_block.header.height, - new_block_hash.to_hex(), - db_height, - new_tips.len() + "Validate and add {} chain block(s) from block {}. 
Rewound blocks: [{}]", + chain.len(), + fork_hash.to_hex(), + removed_blocks + .iter() + .map(|b| b.height().to_string()) + .collect::>() + .join(", ") ); - if new_tips.is_empty() { - debug!( - target: LOG_TARGET, - "No reorg required, could not construct complete chain using block #{} ({}).", - new_block.header.height, - new_block_hash.to_hex() - ); - return Ok(BlockAddResult::OrphanBlock); + for block in chain { + let mut txn = DbTransaction::new(); + let block_hash = *block.hash(); + txn.delete_orphan(block_hash); + let chain_metadata = backend.fetch_chain_metadata()?; + if let Err(e) = block_validator.validate_body_for_valid_orphan(backend, block, &chain_metadata) { + warn!( + target: LOG_TARGET, + "Orphan block {} ({}) failed validation during chain reorg: {:?}", + block.header().height, + block_hash.to_hex(), + e + ); + remove_orphan(backend, block_hash)?; + + info!(target: LOG_TARGET, "Restoring previous chain after failed reorg."); + restore_reorged_chain(backend, fork_hash, removed_blocks)?; + return Err(e.into()); + } + + insert_best_block(&mut txn, block.clone())?; + // Failed to store the block - this should typically never happen unless there is a bug in the validator + // (e.g. does not catch a double spend). In any case, we still need to restore the chain to a + // good state before returning. + if let Err(e) = backend.write(txn) { + warn!( + target: LOG_TARGET, + "Failed to commit reorg chain: {:?}. 
Restoring last chain.", e + ); + + restore_reorged_chain(backend, fork_hash, removed_blocks)?; + return Err(e); + } } + Ok(removed_blocks) +} + +fn swap_to_highest_pow_chain( + db: &mut T, + config: &BlockchainDatabaseConfig, + block_validator: &dyn PostOrphanBodyValidation, + chain_strength_comparer: &dyn ChainStrengthComparer, +) -> Result { + let metadata = db.fetch_chain_metadata()?; + // let's clear out all remaining headers that don't have a matching block + // rewind to height will first delete the headers, then try delete from blocks, if we call this to the current + // height it will only trim the extra headers with no blocks + rewind_to_height(db, metadata.height_of_longest_chain())?; + let all_orphan_tips = db.fetch_all_orphan_chain_tips()?; + if all_orphan_tips.is_empty() { + // we have no orphan chain tips, we have trimmed remaining headers, we are on the best tip we have, so let's + // return ok + return Ok(BlockAddResult::OrphanBlock); + } // Check the accumulated difficulty of the best fork chain compared to the main chain. - let fork_header = find_strongest_orphan_tip(new_tips, chain_strength_comparer).ok_or_else(|| { + let best_fork_header = find_strongest_orphan_tip(all_orphan_tips, chain_strength_comparer).ok_or_else(|| { // This should never happen because a block is always added to the orphan pool before // checking, but just in case warn!( target: LOG_TARGET, - "Unable to find strongest orphan tip when adding block `{}`. This should never happen.", - new_block_hash.to_hex() + "Unable to find strongest orphan tip. 
This should never happen.", ); ChainStorageError::InvalidOperation("No chain tips found in orphan pool".to_string()) })?; - let tip_header = db.fetch_tip_header()?; - if fork_header.hash() == &new_block_hash { - debug!( - target: LOG_TARGET, - "Comparing candidate block #{} (accum_diff:{}, hash:{}) to main chain #{} (accum_diff: {}, hash: ({})).", - new_block.header.height, - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - tip_header.header().height, - tip_header.accumulated_data().total_accumulated_difficulty, - tip_header.accumulated_data().hash.to_hex() - ); - } else { - debug!( - target: LOG_TARGET, - "Comparing fork (accum_diff:{}, hash:{}) with block #{} ({}) to main chain #{} (accum_diff: {}, hash: \ - ({})).", - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - new_block.header.height, - new_block_hash.to_hex(), - tip_header.header().height, - tip_header.accumulated_data().total_accumulated_difficulty, - tip_header.accumulated_data().hash.to_hex() - ); - } - - match chain_strength_comparer.compare(&fork_header, &tip_header) { + match chain_strength_comparer.compare(&best_fork_header, &tip_header) { Ordering::Greater => { debug!( target: LOG_TARGET, "Fork chain (accum_diff:{}, hash:{}) is stronger than the current tip (#{} ({})).", - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), + best_fork_header.accumulated_data().total_accumulated_difficulty, + best_fork_header.accumulated_data().hash.to_hex(), tip_header.height(), tip_header.hash().to_hex() ); @@ -1915,22 +1971,16 @@ fn handle_possible_reorg( debug!( target: LOG_TARGET, "Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker difficulty.", - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), - new_block.header.height, - new_block_hash.to_hex(), - ); - 
debug!( - target: LOG_TARGET, - "Orphan block received: #{} ", new_block.header.height + best_fork_header.accumulated_data().total_accumulated_difficulty, + best_fork_header.accumulated_data().hash.to_hex(), + tip_header.header().height, + tip_header.hash().to_hex(), ); return Ok(BlockAddResult::OrphanBlock); }, } - // TODO: We already have the first link in this chain, can be optimized to exclude it - let reorg_chain = get_orphan_link_main_chain(db, fork_header.hash())?; - + let reorg_chain = get_orphan_link_main_chain(db, best_fork_header.hash())?; let fork_hash = reorg_chain .front() .expect("The new orphan block should be in the queue") @@ -1962,11 +2012,11 @@ fn handle_possible_reorg( "Chain reorg required from {} to {} (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{}). Number of \ blocks to remove: {}, to add: {}.", tip_header.header().height, - fork_header.header().height, + best_fork_header.header().height, tip_header.accumulated_data().total_accumulated_difficulty, tip_header.accumulated_data().hash.to_hex(), - fork_header.accumulated_data().total_accumulated_difficulty, - fork_header.accumulated_data().hash.to_hex(), + best_fork_header.accumulated_data().total_accumulated_difficulty, + best_fork_header.accumulated_data().hash.to_hex(), num_removed_blocks, num_added_blocks, ); @@ -1986,99 +2036,6 @@ fn handle_possible_reorg( } } -/// Reorganize the main chain with the provided fork chain, starting at the specified height. -/// Returns the blocks that were removed (if any), ordered from tip to fork (ie. height highest to lowest). -fn reorganize_chain( - backend: &mut T, - block_validator: &dyn PostOrphanBodyValidation, - fork_hash: HashOutput, - chain: &VecDeque>, -) -> Result>, ChainStorageError> { - let removed_blocks = rewind_to_hash(backend, fork_hash)?; - debug!( - target: LOG_TARGET, - "Validate and add {} chain block(s) from block {}. 
Rewound blocks: [{}]", - chain.len(), - fork_hash.to_hex(), - removed_blocks - .iter() - .map(|b| b.height().to_string()) - .collect::>() - .join(", ") - ); - - for block in chain { - let mut txn = DbTransaction::new(); - let block_hash = *block.hash(); - txn.delete_orphan(block_hash); - let chain_metadata = backend.fetch_chain_metadata()?; - if let Err(e) = block_validator.validate_body_for_valid_orphan(backend, block, &chain_metadata) { - warn!( - target: LOG_TARGET, - "Orphan block {} ({}) failed validation during chain reorg: {:?}", - block.header().height, - block_hash.to_hex(), - e - ); - remove_orphan(backend, block_hash)?; - - info!(target: LOG_TARGET, "Restoring previous chain after failed reorg."); - restore_reorged_chain(backend, fork_hash, removed_blocks)?; - return Err(e.into()); - } - - insert_best_block(&mut txn, block.clone())?; - // Failed to store the block - this should typically never happen unless there is a bug in the validator - // (e.g. does not catch a double spend). In any case, we still need to restore the chain to a - // good state before returning. - if let Err(e) = backend.write(txn) { - warn!( - target: LOG_TARGET, - "Failed to commit reorg chain: {:?}. 
Restoring last chain.", e - ); - - restore_reorged_chain(backend, fork_hash, removed_blocks)?; - return Err(e); - } - } - - if let Some(block) = removed_blocks.first() { - // insert the new orphan chain tip - let mut txn = DbTransaction::new(); - let hash = *block.hash(); - debug!(target: LOG_TARGET, "Inserting new orphan chain tip: {}", hash.to_hex()); - txn.insert_orphan_chain_tip(hash); - backend.write(txn)?; - } - - Ok(removed_blocks) -} -// fn hydrate_block( -// backend: &mut T, -// block: Arc, -// ) -> Result, ChainStorageError> { -// if !block.block().body.has_compact_inputs() { -// return Ok(block); -// } -// -// for input in block.block().body.inputs() { -// let output = backend.fetch_mmr_leaf(MmrTree::Utxo, input.mmr_index())?; -// let output = output.ok_or_else(|| ChainStorageError::ValueNotFound { -// entity: "Output".to_string(), -// field: "mmr_index".to_string(), -// value: input.mmr_index().to_string(), -// })?; -// let output = TransactionOutput::try_from(output)?; -// let input = TransactionInput::new_with_commitment(input.features(), output.commitment()); -// block.block_mut().body_mut().add_input(input); -// } -// backend.fetch_unspent_output_hash_by_commitment() -// let block = hydrate_block_from_db(backend, block_hash, block.header().clone())?; -// txn.delete_orphan(block_hash); -// backend.write(txn)?; -// Ok(block) -// } - fn restore_reorged_chain( db: &mut T, to_hash: HashOutput, @@ -2110,12 +2067,12 @@ fn insert_orphan_and_find_new_tips( block: Arc, validator: &dyn HeaderValidation, difficulty_calculator: &DifficultyCalculator, -) -> Result, ChainStorageError> { +) -> Result<(), ChainStorageError> { let hash = block.hash(); // There cannot be any _new_ tips if we've seen this orphan block before if db.contains(&DbKey::OrphanBlock(hash))? { - return Ok(vec![]); + return Ok(()); } let parent = match db.fetch_orphan_chain_tip_by_hash(&block.header.prev_hash)? 
{ @@ -2162,7 +2119,7 @@ fn insert_orphan_and_find_new_tips( txn.insert_orphan(block); db.write(txn)?; } - return Ok(vec![]); + return Ok(()); }, }, }; @@ -2195,7 +2152,7 @@ fn insert_orphan_and_find_new_tips( } db.write(txn)?; - Ok(tips) + Ok(()) } // Find the tip set of any orphans that have hash as an ancestor @@ -2609,15 +2566,13 @@ mod test { let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block); let block = chain.get("A").unwrap().clone(); let mut access = db.db_write_access().unwrap(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert_eq!(chain.len(), 1); - assert_eq!(chain[0].hash(), block.hash()); let maybe_block = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap(); assert_eq!(maybe_block.unwrap().header(), block.header()); @@ -2635,24 +2590,22 @@ mod test { let mut access = db.db_write_access().unwrap(); let block_d2 = orphan_chain.get("D2").unwrap().clone(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block_d2.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert!(chain.is_empty()); let block_e2 = orphan_chain.get("E2").unwrap().clone(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block_e2.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert!(chain.is_empty()); let maybe_block = access.fetch_orphan_children_of(*block_d2.hash()).unwrap(); assert_eq!(maybe_block[0], *block_e2.to_arc_block()); @@ -2669,28 +2622,29 @@ mod test { let mut access = db.db_write_access().unwrap(); let block = orphan_chain.get("B2").unwrap().clone(); - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert_eq!(chain.len(), 1); - 
assert_eq!(chain[0].header(), block.header()); - assert_eq!(chain[0].accumulated_data().total_accumulated_difficulty, 4); - let fork_tip = access.fetch_orphan_chain_tip_by_hash(chain[0].hash()).unwrap().unwrap(); + let fork_tip = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap().unwrap(); assert_eq!(fork_tip, block.to_chain_header()); + assert_eq!(fork_tip.accumulated_data().total_accumulated_difficulty, 4); + let all_tips = access.fetch_all_orphan_chain_tips().unwrap().len(); + assert_eq!(all_tips, 1); // Insert again (block was received more than once), no new tips - let chain = insert_orphan_and_find_new_tips( + insert_orphan_and_find_new_tips( &mut *access, block.to_arc_block(), &validator, &db.difficulty_calculator, ) .unwrap(); - assert_eq!(chain.len(), 0); + let all_tips = access.fetch_all_orphan_chain_tips().unwrap().len(); + assert_eq!(all_tips, 1); } } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 444e6370d0..e4bbcf0d32 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -2298,6 +2298,36 @@ impl BlockchainBackend for LMDBDatabase { Ok(Some(chain_header)) } + fn fetch_all_orphan_chain_tips(&self) -> Result, ChainStorageError> { + let txn = self.read_transaction()?; + let tips: Vec = lmdb_filter_map_values(&txn, &self.orphan_chain_tips_db, Some)?; + let mut result = Vec::new(); + for hash in tips { + let orphan: Block = + lmdb_get(&txn, &self.orphans_db, hash.as_slice())?.ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "Orphan", + field: "hash", + value: hash.to_hex(), + })?; + + let accumulated_data = lmdb_get(&txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? 
+ .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "Orphan accumulated data", + field: "hash", + value: hash.to_hex(), + })?; + let height = orphan.header.height; + let chain_header = ChainHeader::try_construct(orphan.header, accumulated_data).ok_or_else(|| { + ChainStorageError::DataInconsistencyDetected { + function: "fetch_all_orphan_chain_tips", + details: format!("Accumulated data mismatch at height #{}", height), + } + })?; + result.push(chain_header); + } + Ok(result) + } + fn fetch_orphan_children_of(&self, parent_hash: HashOutput) -> Result, ChainStorageError> { trace!( target: LOG_TARGET, diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 879991fe1e..62d9ee1374 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -22,9 +22,6 @@ // DAMAGE. use std::sync::Arc; -use tari_test_utils::unpack_enum; -use tari_utilities::hex::Hex; - use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainHeader, NewBlockTemplate}, chain_storage::{BlockchainDatabase, ChainStorageError}, @@ -36,8 +33,8 @@ use crate::{ transactions::{ tari_amount::T, - test_helpers::{schema_to_transaction, TransactionSchema}, - transaction_components::{OutputFeatures, Transaction, UnblindedOutput}, + test_helpers::schema_to_transaction, + transaction_components::{Transaction, UnblindedOutput}, }, txn_schema, }; @@ -368,92 +365,6 @@ } } -mod add_block { - use super::*; - - #[test] - fn it_rejects_duplicate_commitments_in_the_utxo_set() { - let db = setup(); - let (blocks, outputs) = add_many_chained_blocks(5, &db); - - let prev_block = blocks.last().unwrap(); - // Used to help identify the output we're interrogating in this test - let features = OutputFeatures { - maturity: 1, - ..Default::default() - }; - let (txns, tx_outputs) = 
schema_to_transaction(&[txn_schema!( - from: vec![outputs[0].clone()], - to: vec![500 * T], - features: features - )]); - let mut prev_utxo = tx_outputs[0].clone(); - - let (block, _) = create_next_block(&db, prev_block, txns); - db.add_block(block.clone()).unwrap().assert_added(); - - let prev_block = block; - - let (txns, _) = schema_to_transaction(&[TransactionSchema { - from: vec![outputs[1].clone()], - to: vec![], - to_outputs: vec![prev_utxo.clone()], - fee: 5.into(), - lock_height: 0, - features, - script: tari_script::script![Nop], - covenant: Default::default(), - input_data: None, - input_version: None, - output_version: None, - }]); - let commitment_hex = txns[0] - .body - .outputs() - .iter() - .find(|o| o.features.maturity == 1) - .unwrap() - .commitment - .to_hex(); - - let (block, _) = create_next_block(&db, &prev_block, txns); - let err = db.add_block(block.clone()).unwrap_err(); - unpack_enum!(ChainStorageError::KeyExists { key, .. } = err); - assert_eq!(key, commitment_hex); - // Check rollback - let header = db.fetch_header(block.header.height).unwrap(); - assert!(header.is_none()); - - let (txns, _) = schema_to_transaction(&[txn_schema!(from: vec![prev_utxo.clone()], to: vec![50 * T])]); - let (block, _) = create_next_block(&db, &prev_block, txns); - let block = db.add_block(block).unwrap().assert_added(); - let prev_block = block.to_arc_block(); - - // Different metadata so that the output hash is different in txo_hash_to_index_db - prev_utxo.features = OutputFeatures { - metadata: vec![1], - ..Default::default() - }; - // Now we can reuse a commitment - let (txns, _) = schema_to_transaction(&[TransactionSchema { - from: vec![outputs[1].clone()], - to: vec![], - to_outputs: vec![prev_utxo], - fee: 5.into(), - lock_height: 0, - features: Default::default(), - script: tari_script::script![Nop], - covenant: Default::default(), - input_data: None, - input_version: None, - output_version: None, - }]); - - let (block, _) = create_next_block(&db, 
&prev_block, txns); - db.add_block(block).unwrap().assert_added(); - } -} - mod get_stats { use super::*; diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 6641c2d86f..d87828f32a 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -357,6 +357,10 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_orphan_chain_tip_by_hash(hash) } + fn fetch_all_orphan_chain_tips(&self) -> Result, ChainStorageError> { + self.db.as_ref().unwrap().fetch_all_orphan_chain_tips() + } + fn fetch_orphan_children_of(&self, hash: HashOutput) -> Result, ChainStorageError> { self.db.as_ref().unwrap().fetch_orphan_children_of(hash) } diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 2c6bfe471e..641f5d2e64 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -539,6 +539,94 @@ fn test_handle_tip_reorg() { assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); } +#[test] +fn test_handle_tip_reset() { + // GB --> A1 --> A2(Low PoW) [Main Chain] + // \--> B2(Highest PoW) [Forked Chain] + // Initially, the main chain is GB->A1->A2. 
B2 has a higher accumulated PoW and when B2 is added the main chain is + // reorged to GB->A1->B2 + + // Create Main Chain + let network = Network::LocalNet; + let (mut store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); + // Block A1 + let txs = vec![txn_schema!( + from: vec![outputs[0][0].clone()], + to: vec![10 * T, 10 * T, 10 * T, 10 * T] + )]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(1), + &consensus_manager, + ) + .unwrap(); + // Block A2 + let txs = vec![txn_schema!(from: vec![outputs[1][3].clone()], to: vec![6 * T])]; + generate_new_block_with_achieved_difficulty( + &mut store, + &mut blocks, + &mut outputs, + txs, + Difficulty::from(3), + &consensus_manager, + ) + .unwrap(); + + // Create Forked Chain + + let mut orphan_store = create_store_with_consensus(consensus_manager.clone()); + orphan_store.add_block(blocks[1].to_arc_block()).unwrap(); + let mut orphan_blocks = vec![blocks[0].clone(), blocks[1].clone()]; + let mut orphan_outputs = vec![outputs[0].clone(), outputs[1].clone()]; + // Block B2 + let txs = vec![txn_schema!(from: vec![orphan_outputs[1][0].clone()], to: vec![5 * T])]; + generate_new_block_with_achieved_difficulty( + &mut orphan_store, + &mut orphan_blocks, + &mut orphan_outputs, + txs, + Difficulty::from(7), + &consensus_manager, + ) + .unwrap(); + + // Adding B2 to the main chain will produce a reorg to GB->A1->B2. + if let Ok(BlockAddResult::ChainReorg { .. 
}) = store.add_block(orphan_blocks[2].to_arc_block()) { + } else { + panic!(); + } + + assert_eq!(store.fetch_tip_header().unwrap().header().height, 2); + store.rewind_to_height(1).unwrap(); + assert_eq!(store.fetch_tip_header().unwrap().header().height, 1); + // both tips should be in the orphan pool + assert!(store.fetch_orphan(*orphan_blocks[2].hash()).is_ok()); + assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); + store.swap_to_highest_pow_chain().unwrap(); + // should now be on B2 + + assert_eq!(store.fetch_tip_header().unwrap().header().height, 2); + assert_eq!(store.fetch_tip_header().unwrap().hash(), orphan_blocks[2].hash()); + assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); + + store.swap_to_highest_pow_chain().unwrap(); + // Chain should not have swapped + assert_eq!(store.fetch_tip_header().unwrap().hash(), orphan_blocks[2].hash()); + assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); + + // let's reset to A1 again + store.rewind_to_height(1).unwrap(); + assert_eq!(store.fetch_tip_header().unwrap().header().height, 1); + store.cleanup_all_orphans().unwrap(); + store.swap_to_highest_pow_chain().unwrap(); + // the current main chain should be the highest; verify it is still the tip + assert_eq!(store.fetch_tip_header().unwrap().header().height, 1); + assert_eq!(store.fetch_tip_header().unwrap().hash(), blocks[1].hash()); +} + #[test] #[allow(clippy::identity_op)] #[allow(clippy::too_many_lines)]