Skip to content

Commit

Permalink
Minor core code cleanup and improved logging (#2000)
Browse files Browse the repository at this point in the history
Merge pull request #2000

Minor core code cleanup and improved logging
  • Loading branch information
CjS77 committed Jun 18, 2020
2 parents 29bc53e + 6d4c35b commit 8fac55d
Show file tree
Hide file tree
Showing 14 changed files with 389 additions and 335 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -316,11 +316,12 @@ where T: BlockchainBackend + 'static
) -> Result<(), CommsInterfaceError>
{
let (block, broadcast) = block_context;
let block_hash = block.hash();
debug!(
target: LOG_TARGET,
"Block #{} ({}) received from {}",
block.header.height,
block.hash().to_hex(),
block_hash.to_hex(),
source_peer
.as_ref()
.map(|p| format!("remote peer: {}", p))
Expand All @@ -336,7 +337,13 @@ where T: BlockchainBackend + 'static
BlockEvent::Verified((Box::new(block.clone()), block_add_result, *broadcast))
},
Err(e) => {
warn!(target: LOG_TARGET, "Block validation failed: {:?}", e);
warn!(
target: LOG_TARGET,
"Block #{} ({}) validation failed: {:?}",
block.header.height,
block_hash.to_hex(),
e
);
result = Err(CommsInterfaceError::ChainStorageError(e.clone()));
BlockEvent::Invalid((Box::new(block.clone()), e, *broadcast))
},
Expand Down
3 changes: 2 additions & 1 deletion base_layer/core/src/blocks/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,8 @@ impl Block {
if coinbase_counter != 1 {
warn!(
target: LOG_TARGET,
"More then one coinbase found in block {}",
"{} coinbases found in block {}. Only a single coinbase is permitted.",
coinbase_counter,
self.hash().to_hex()
);
return Err(BlockValidationError::InvalidCoinbase);
Expand Down
115 changes: 69 additions & 46 deletions base_layer/core/src/chain_storage/blockchain_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -732,8 +732,8 @@ fn store_new_block<T: BlockchainBackend>(db: &mut RwLockWriteGuard<T>, block: Bl
// Insert block
txn.insert_header(header);
txn.spend_inputs(&inputs);
outputs.iter().for_each(|utxo| txn.insert_utxo(utxo.clone(), true));
kernels.iter().for_each(|k| txn.insert_kernel(k.clone(), true));
outputs.iter().for_each(|utxo| txn.insert_utxo(utxo.clone()));
kernels.iter().for_each(|k| txn.insert_kernel(k.clone()));
txn.commit_block();
commit(db, txn)?;
Ok(())
Expand Down Expand Up @@ -976,7 +976,7 @@ fn handle_possible_reorg<T: BlockchainBackend>(
// with the newly un-orphaned blocks from the reorg chain.
fn handle_reorg<T: BlockchainBackend>(
db: &mut RwLockWriteGuard<T>,
block_validator: &Arc<Validator<Block, T>>,
block_validator: &Validator<Block, T>,
accum_difficulty_validator: &Arc<Validator<Difficulty, T>>,
new_block: Block,
) -> Result<BlockAddResult, ChainStorageError>
Expand Down Expand Up @@ -1029,64 +1029,87 @@ fn handle_reorg<T: BlockchainBackend>(
);
}

if accum_difficulty_validator.validate(&fork_accum_difficulty, db).is_ok() {
// We've built the strongest orphan chain we can by going backwards and forwards from the new orphan block
// that is linked with the main chain.
let fork_tip_block = fetch_orphan(&**db, fork_tip_hash.clone())?;
let fork_tip_header = fork_tip_block.header.clone();
if fork_tip_hash != new_block_hash {
// New block is not the tip, find complete chain from tip to main chain.
reorg_chain = try_construct_fork(db, fork_tip_block)?;
}
let added_blocks: Vec<Block> = reorg_chain.iter().map(Clone::clone).collect();
let fork_height = reorg_chain
.front()
.expect("The new orphan block should be in the queue")
.header
.height -
1;
let removed_blocks = reorganize_chain(db, block_validator, fork_height, reorg_chain)?;
if removed_blocks.is_empty() {
return Ok(BlockAddResult::Ok);
} else {
match accum_difficulty_validator.validate(&fork_accum_difficulty, db) {
Ok(_) => {
debug!(
target: LOG_TARGET,
"Chain reorg processed from (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{})",
tip_header.pow,
tip_header.hash().to_hex(),
fork_tip_header.pow,
fork_tip_hash.to_hex()
"Accumulated difficulty validation PASSED for block #{} ({})",
new_block.header.height,
new_block_hash.to_hex()
);
info!(
},
Err(ValidationError::WeakerAccumulatedDifficulty) => {
debug!(
target: LOG_TARGET,
"Reorg from ({}) to ({})", tip_header, fork_tip_header
"Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker accumulated difficulty.",
fork_accum_difficulty,
fork_tip_hash.to_hex(),
new_block.header.height,
new_block_hash.to_hex(),
);
return Ok(BlockAddResult::ChainReorg((
Box::new(removed_blocks),
Box::new(added_blocks),
)));
}
debug!(
target: LOG_TARGET,
"Orphan block received: #{}", new_block.header.height
);
return Ok(BlockAddResult::OrphanBlock);
},
Err(err) => {
error!(
target: LOG_TARGET,
"Failed to validate accumulated difficulty on forked chain (accum_diff:{}, hash:{}) with block {} \
({}): {:?}.",
fork_accum_difficulty,
fork_tip_hash.to_hex(),
new_block.header.height,
new_block_hash.to_hex(),
err
);
return Err(err.into());
},
}

// We've built the strongest orphan chain we can by going backwards and forwards from the new orphan block
// that is linked with the main chain.
let fork_tip_block = fetch_orphan(&**db, fork_tip_hash.clone())?;
let fork_tip_header = fork_tip_block.header.clone();
if fork_tip_hash != new_block_hash {
// New block is not the tip, find complete chain from tip to main chain.
reorg_chain = try_construct_fork(db, fork_tip_block)?;
}
let added_blocks: Vec<Block> = reorg_chain.iter().cloned().collect();
let fork_height = reorg_chain
.front()
.expect("The new orphan block should be in the queue")
.header
.height -
1;
let removed_blocks = reorganize_chain(db, block_validator, fork_height, reorg_chain)?;
if removed_blocks.is_empty() {
Ok(BlockAddResult::Ok)
} else {
debug!(
target: LOG_TARGET,
"Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker accumulated difficulty.",
fork_accum_difficulty,
fork_tip_hash.to_hex(),
new_block.header.height,
new_block_hash.to_hex(),
"Chain reorg processed from (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{})",
tip_header.pow,
tip_header.hash().to_hex(),
fork_tip_header.pow,
fork_tip_hash.to_hex()
);
info!(
target: LOG_TARGET,
"Reorg from ({}) to ({})", tip_header, fork_tip_header
);
Ok(BlockAddResult::ChainReorg((
Box::new(removed_blocks),
Box::new(added_blocks),
)))
}
debug!(
target: LOG_TARGET,
"Orphan block received: #{}", new_block.header.height
);
Ok(BlockAddResult::OrphanBlock)
}

// Reorganize the main chain with the provided fork chain, starting at the specified height.
fn reorganize_chain<T: BlockchainBackend>(
db: &mut RwLockWriteGuard<T>,
block_validator: &Arc<Validator<Block, T>>,
block_validator: &Validator<Block, T>,
height: u64,
chain: VecDeque<Block>,
) -> Result<Vec<Block>, ChainStorageError>
Expand Down
16 changes: 8 additions & 8 deletions base_layer/core/src/chain_storage/db_transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,9 +77,9 @@ impl DbTransaction {
}

/// Inserts a transaction kernel into the current transaction.
pub fn insert_kernel(&mut self, kernel: TransactionKernel, update_mmr: bool) {
pub fn insert_kernel(&mut self, kernel: TransactionKernel) {
let hash = kernel.hash();
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel), update_mmr));
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel)));
}

/// Inserts a block header into the current transaction.
Expand All @@ -89,15 +89,15 @@ impl DbTransaction {
}

/// Adds a UTXO into the current transaction and updates the TXO MMR.
pub fn insert_utxo(&mut self, utxo: TransactionOutput, update_mmr: bool) {
pub fn insert_utxo(&mut self, utxo: TransactionOutput) {
let hash = utxo.hash();
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo)));
}

/// Adds a UTXO into the current transaction and updates the TXO MMR. This is a test-only function used to ensure we
/// block duplicate entries. This function does not calculate the hash itself but accepts one as a parameter.
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput, update_mmr: bool) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo)));
}

/// Stores an orphan block. No checks are made as to whether this is actually an orphan. That responsibility lies
Expand Down Expand Up @@ -198,8 +198,8 @@ pub enum WriteOperation {
pub enum DbKeyValuePair {
Metadata(MetadataKey, MetadataValue),
BlockHeader(u64, Box<BlockHeader>),
UnspentOutput(HashOutput, Box<TransactionOutput>, bool),
TransactionKernel(HashOutput, Box<TransactionKernel>, bool),
UnspentOutput(HashOutput, Box<TransactionOutput>),
TransactionKernel(HashOutput, Box<TransactionKernel>),
OrphanBlock(HashOutput, Box<Block>),
}

Expand Down
Loading

0 comments on commit 8fac55d

Please sign in to comment.