Skip to content

Commit

Permalink
review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
hansieodendaal committed Oct 26, 2023
1 parent 7121b61 commit 7d8aebb
Show file tree
Hide file tree
Showing 10 changed files with 108 additions and 83 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -522,7 +522,7 @@ where B: BlockchainBackend + 'static
}

async fn check_exists_and_not_bad_block(&self, block: FixedHash) -> Result<bool, CommsInterfaceError> {
if self.blockchain_db.block_exists(block).await? {
if self.blockchain_db.chain_block_or_orphan_block_exists(block).await? {
debug!(
target: LOG_TARGET,
"Block with hash `{}` already stored",
Expand Down
4 changes: 0 additions & 4 deletions base_layer/core/src/base_node/sync/rpc/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -148,10 +148,6 @@ impl<B: BlockchainBackend + 'static> BaseNodeSyncService for BaseNodeSyncRpcServ
return Err(RpcStatus::not_found("Requested end block sync hash was not found"));
}

if start_height > metadata.height_of_longest_chain() {
return Ok(Streaming::empty());
}

let end_header = db
.fetch_header_by_block_hash(hash)
.await
Expand Down
5 changes: 2 additions & 3 deletions base_layer/core/src/base_node/sync/rpc/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ mod sync_blocks {
}

#[tokio::test]
async fn it_sends_an_empty_response() {
async fn it_does_not_send_empty_responses_with_bad_requests() {
let (service, db, rpc_request_mock, _tmp) = setup();

let (_, chain) = create_main_chain(&db, block_specs!(["A->GB"])).await;
Expand All @@ -86,8 +86,7 @@ mod sync_blocks {
end_hash: block.hash().to_vec(),
};
let req = rpc_request_mock.request_with_context(Default::default(), msg);
let mut streaming = service.sync_blocks(req).await.unwrap();
assert!(streaming.next().await.is_none());
assert!(service.sync_blocks(req).await.is_err());
}

#[tokio::test]
Expand Down
2 changes: 1 addition & 1 deletion base_layer/core/src/chain_storage/async_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ impl<B: BlockchainBackend + 'static> AsyncBlockchainDb<B> {

make_async_fn!(cleanup_all_orphans() -> (), "cleanup_all_orphans");

make_async_fn!(block_exists(block_hash: BlockHash) -> bool, "block_exists");
make_async_fn!(chain_block_or_orphan_block_exists(block_hash: BlockHash) -> bool, "block_exists");

make_async_fn!(bad_block_exists(block_hash: BlockHash) -> bool, "bad_block_exists");

Expand Down
34 changes: 17 additions & 17 deletions base_layer/core/src/chain_storage/blockchain_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ where B: BlockchainBackend
txn.set_horizon_data(kernel_sum, utxo_sum);
blockchain_db.write(txn)?;
blockchain_db.store_pruning_horizon(config.pruning_horizon)?;
} else if !blockchain_db.block_exists(genesis_block.accumulated_data().hash)? {
} else if !blockchain_db.chain_block_or_orphan_block_exists(genesis_block.accumulated_data().hash)? {
// Check the genesis block in the DB.
error!(
target: LOG_TARGET,
Expand Down Expand Up @@ -917,7 +917,7 @@ where B: BlockchainBackend
after_lock - before_lock,
);

if db.contains(&DbKey::BlockHash(block_hash))? {
if db.contains(&DbKey::HeaderHash(block_hash))? {
return Ok(BlockAddResult::BlockExists);
}
if db.bad_block_exists(block_hash)? {
Expand Down Expand Up @@ -1083,9 +1083,9 @@ where B: BlockchainBackend
}

/// Returns true if this block exists in the chain, or is orphaned.
pub fn block_exists(&self, hash: BlockHash) -> Result<bool, ChainStorageError> {
pub fn chain_block_or_orphan_block_exists(&self, hash: BlockHash) -> Result<bool, ChainStorageError> {
let db = self.db_read_access()?;
Ok(db.contains(&DbKey::BlockHash(hash))? || db.contains(&DbKey::OrphanBlock(hash))?)
Ok(db.fetch_block_accumulated_data(&hash)?.is_some() || db.contains(&DbKey::OrphanBlock(hash))?)
}

/// Returns true if this block exists in the chain, or is orphaned.
Expand Down Expand Up @@ -1404,7 +1404,7 @@ pub fn calculate_validator_node_mr(validator_nodes: &[(PublicKey, [u8; 32])]) ->
}

pub fn fetch_header<T: BlockchainBackend>(db: &T, block_num: u64) -> Result<BlockHeader, ChainStorageError> {
fetch!(db, block_num, BlockHeader)
fetch!(db, block_num, HeaderHeight)
}

pub fn fetch_headers<T: BlockchainBackend>(
Expand All @@ -1422,8 +1422,8 @@ pub fn fetch_headers<T: BlockchainBackend>(
#[allow(clippy::cast_possible_truncation)]
let mut headers = Vec::with_capacity((end_inclusive - start) as usize);
for h in start..=end_inclusive {
match db.fetch(&DbKey::BlockHeader(h))? {
Some(DbValue::BlockHeader(header)) => {
match db.fetch(&DbKey::HeaderHeight(h))? {
Some(DbValue::HeaderHeight(header)) => {
headers.push(*header);
},
Some(_) => unreachable!(),
Expand Down Expand Up @@ -1476,7 +1476,7 @@ fn fetch_header_by_block_hash<T: BlockchainBackend>(
db: &T,
hash: BlockHash,
) -> Result<Option<BlockHeader>, ChainStorageError> {
try_fetch!(db, hash, BlockHash)
try_fetch!(db, hash, HeaderHash)
}

fn fetch_orphan<T: BlockchainBackend>(db: &T, hash: BlockHash) -> Result<Block, ChainStorageError> {
Expand Down Expand Up @@ -2367,7 +2367,7 @@ fn get_orphan_link_main_chain<T: BlockchainBackend>(

// If this hash is part of the main chain, we're done - since curr_hash has already been set to the previous
// hash, the chain Vec does not include the fork block in common with both chains
if db.contains(&DbKey::BlockHash(curr_hash))? {
if db.contains(&DbKey::HeaderHash(curr_hash))? {
break;
}
}
Expand Down Expand Up @@ -2893,7 +2893,7 @@ mod test {
// Check 2b was added
let access = test.db_write_access();
let block = orphan_chain_b.get("2b").unwrap().clone();
assert!(access.contains(&DbKey::BlockHash(*block.hash())).unwrap());
assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap());

// Check 7d is the tip
let block = orphan_chain_d.get("7d").unwrap().clone();
Expand All @@ -2902,7 +2902,7 @@ mod test {
let metadata = access.fetch_chain_metadata().unwrap();
assert_eq!(metadata.best_block(), block.hash());
assert_eq!(metadata.height_of_longest_chain(), block.height());
assert!(access.contains(&DbKey::BlockHash(*block.hash())).unwrap());
assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap());

let mut all_blocks = main_chain
.into_iter()
Expand All @@ -2916,8 +2916,8 @@ mod test {
for (height, name) in expected_chain.iter().enumerate() {
let expected_block = all_blocks.get(*name).unwrap();
unpack_enum!(
DbValue::BlockHeader(found_block) =
access.fetch(&DbKey::BlockHeader(height as u64)).unwrap().unwrap()
DbValue::HeaderHeight(found_block) =
access.fetch(&DbKey::HeaderHeight(height as u64)).unwrap().unwrap()
);
assert_eq!(*found_block, *expected_block.header());
}
Expand Down Expand Up @@ -2988,7 +2988,7 @@ mod test {
// Check 2b was added
let access = test.db_write_access();
let block = orphan_chain_b.get("2b").unwrap().clone();
assert!(access.contains(&DbKey::BlockHash(*block.hash())).unwrap());
assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap());

// Check 12b is the tip
let block = orphan_chain_b.get("12b").unwrap().clone();
Expand All @@ -2997,7 +2997,7 @@ mod test {
let metadata = access.fetch_chain_metadata().unwrap();
assert_eq!(metadata.best_block(), block.hash());
assert_eq!(metadata.height_of_longest_chain(), block.height());
assert!(access.contains(&DbKey::BlockHash(*block.hash())).unwrap());
assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap());

let mut all_blocks = main_chain.into_iter().chain(orphan_chain_b).collect::<HashMap<_, _>>();
all_blocks.insert("GB".to_string(), genesis);
Expand All @@ -3008,8 +3008,8 @@ mod test {
for (height, name) in expected_chain.iter().enumerate() {
let expected_block = all_blocks.get(*name).unwrap();
unpack_enum!(
DbValue::BlockHeader(found_block) =
access.fetch(&DbKey::BlockHeader(height as u64)).unwrap().unwrap()
DbValue::HeaderHeight(found_block) =
access.fetch(&DbKey::HeaderHeight(height as u64)).unwrap().unwrap()
);
assert_eq!(*found_block, *expected_block.header());
}
Expand Down
20 changes: 10 additions & 10 deletions base_layer/core/src/chain_storage/db_transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -464,16 +464,16 @@ impl fmt::Display for WriteOperation {

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DbKey {
BlockHeader(u64),
BlockHash(BlockHash),
HeaderHeight(u64),
HeaderHash(BlockHash),
OrphanBlock(HashOutput),
}

impl DbKey {
pub fn to_value_not_found_error(&self) -> ChainStorageError {
let (entity, field, value) = match self {
DbKey::BlockHeader(v) => ("BlockHeader", "Height", v.to_string()),
DbKey::BlockHash(v) => ("Block", "Hash", v.to_hex()),
DbKey::HeaderHeight(v) => ("BlockHeader", "Height", v.to_string()),
DbKey::HeaderHash(v) => ("Header", "Hash", v.to_hex()),
DbKey::OrphanBlock(v) => ("Orphan", "Hash", v.to_hex()),
};
ChainStorageError::ValueNotFound { entity, field, value }
Expand All @@ -482,16 +482,16 @@ impl DbKey {

#[derive(Debug)]
pub enum DbValue {
BlockHeader(Box<BlockHeader>),
BlockHash(Box<BlockHeader>),
HeaderHeight(Box<BlockHeader>),
HeaderHash(Box<BlockHeader>),
OrphanBlock(Box<Block>),
}

impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbValue::BlockHeader(_) => f.write_str("Block header"),
DbValue::BlockHash(_) => f.write_str("Block hash"),
DbValue::HeaderHeight(_) => f.write_str("Header by height"),
DbValue::HeaderHash(_) => f.write_str("Header by hash"),
DbValue::OrphanBlock(_) => f.write_str("Orphan block"),
}
}
Expand All @@ -500,8 +500,8 @@ impl Display for DbValue {
impl Display for DbKey {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbKey::BlockHeader(v) => f.write_str(&format!("Block header (#{})", v)),
DbKey::BlockHash(v) => f.write_str(&format!("Block hash (#{})", v.to_hex())),
DbKey::HeaderHeight(v) => f.write_str(&format!("Header height (#{})", v)),
DbKey::HeaderHash(v) => f.write_str(&format!("Header hash (#{})", v.to_hex())),
DbKey::OrphanBlock(v) => f.write_str(&format!("Orphan block hash ({})", v.to_hex())),
}
}
Expand Down
12 changes: 6 additions & 6 deletions base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1806,11 +1806,11 @@ impl BlockchainBackend for LMDBDatabase {
fn fetch(&self, key: &DbKey) -> Result<Option<DbValue>, ChainStorageError> {
let txn = self.read_transaction()?;
let res = match key {
DbKey::BlockHeader(k) => {
DbKey::HeaderHeight(k) => {
let val: Option<BlockHeader> = lmdb_get(&txn, &self.headers_db, k)?;
val.map(|val| DbValue::BlockHeader(Box::new(val)))
val.map(|val| DbValue::HeaderHeight(Box::new(val)))
},
DbKey::BlockHash(hash) => {
DbKey::HeaderHash(hash) => {
let k: Option<u64> = self.fetch_height_from_hash(&txn, hash)?;
match k {
Some(k) => {
Expand All @@ -1821,7 +1821,7 @@ impl BlockchainBackend for LMDBDatabase {
k
);
let val: Option<BlockHeader> = lmdb_get(&txn, &self.headers_db, &k)?;
val.map(|val| DbValue::BlockHash(Box::new(val)))
val.map(|val| DbValue::HeaderHash(Box::new(val)))
},
None => {
trace!(
Expand All @@ -1843,8 +1843,8 @@ impl BlockchainBackend for LMDBDatabase {
fn contains(&self, key: &DbKey) -> Result<bool, ChainStorageError> {
let txn = self.read_transaction()?;
Ok(match key {
DbKey::BlockHeader(k) => lmdb_exists(&txn, &self.headers_db, k)?,
DbKey::BlockHash(h) => lmdb_exists(&txn, &self.block_hashes_db, h.deref())?,
DbKey::HeaderHeight(k) => lmdb_exists(&txn, &self.headers_db, k)?,
DbKey::HeaderHash(h) => lmdb_exists(&txn, &self.block_hashes_db, h.deref())?,
DbKey::OrphanBlock(k) => lmdb_exists(&txn, &self.orphans_db, k.deref())?,
})
}
Expand Down
93 changes: 56 additions & 37 deletions base_layer/core/tests/helpers/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,20 @@ pub enum WhatToDelete {
Headers,
}

// Private helper: queues operations on `txn` to delete the block at `blocks[index]` and
// roll the best-block pointer back to `blocks[index + 1]`.
//
// The block is removed both from the main chain (`delete_block`) and from the orphan pool
// (`delete_orphan`) so it cannot be resurrected from either store. The best-block metadata
// is then re-pointed at the next entry in `blocks` — callers pass `blocks` in reverse
// (tip-first) order, so `index + 1` is the new, lower tip after the deletion.
//
// NOTE(review): the `best_block` hash argument is read from the node's *current* chain
// metadata rather than from `blocks[index + 1]` — presumably intentional for the test
// helper, but verify against `set_best_block`'s parameter contract.
//
// Panics if `index + 1` is out of bounds — bounds checking is the caller's responsibility.
fn delete_block(txn: &mut DbTransaction, node: &NodeInterfaces, blocks: &[ChainBlock], index: usize) {
    txn.delete_block(*blocks[index].hash());
    txn.delete_orphan(*blocks[index].hash());
    txn.set_best_block(
        blocks[index + 1].height(),
        blocks[index + 1].accumulated_data().hash,
        blocks[index + 1].accumulated_data().total_accumulated_difficulty,
        *node.blockchain_db.get_chain_metadata().unwrap().best_block(),
        blocks[index + 1].to_chain_header().timestamp(),
    );
}

// Delete blocks and headers in reverse order; the first block in the slice will not be deleted
pub fn delete_some_blocks_and_headers(
blocks_with_anchor: &[ChainBlock],
Expand All @@ -170,43 +184,48 @@ pub fn delete_some_blocks_and_headers(
panic!("blocks must have at least 2 elements");
}
let mut blocks: Vec<_> = blocks_with_anchor.to_vec();
match instruction {
WhatToDelete::BlocksAndHeaders => {
node.blockchain_db.rewind_to_height(blocks[0].height()).unwrap();
for block in &blocks[1..] {
if node.blockchain_db.block_exists(*block.hash()).unwrap() {
let mut txn = DbTransaction::new();
txn.delete_orphan(*block.hash());
node.blockchain_db.write(txn).unwrap();
}
assert!(!node.blockchain_db.block_exists(*block.hash()).unwrap());
}
},
WhatToDelete::Blocks => {
let headers = blocks.iter().map(|b| b.to_chain_header()).collect::<Vec<_>>();
node.blockchain_db.rewind_to_height(blocks[0].height()).unwrap();
for block in &blocks[1..] {
if node.blockchain_db.block_exists(*block.hash()).unwrap() {
let mut txn = DbTransaction::new();
txn.delete_orphan(*block.hash());
node.blockchain_db.write(txn).unwrap();
}
assert!(!node.blockchain_db.block_exists(*block.hash()).unwrap());
}
node.blockchain_db.insert_valid_headers(headers[1..].to_vec()).unwrap();
// Note: This seems funny, as inserting the headers back into the db will cause this test to fail
// for block in &blocks[1..] {
// assert!(!node.blockchain_db.block_exists(*block.hash()).unwrap());
// }
},
WhatToDelete::Headers => {
blocks.reverse();
for block in blocks.iter().take(blocks.len() - 1) {
let mut txn = DbTransaction::new();
txn.delete_header(block.height());
node.blockchain_db.write(txn).unwrap();
}
},
blocks.reverse();
for i in 0..blocks.len() - 1 {
let mut txn = DbTransaction::new();
match instruction {
WhatToDelete::BlocksAndHeaders => {
delete_block(&mut txn, node, &blocks, i);
txn.delete_header(blocks[i].height());
},
WhatToDelete::Blocks => {
delete_block(&mut txn, node, &blocks, i);
},
WhatToDelete::Headers => {
txn.delete_header(blocks[i].height());
},
}
node.blockchain_db.write(txn).unwrap();
match instruction {
WhatToDelete::BlocksAndHeaders => {
assert!(!node
.blockchain_db
.chain_block_or_orphan_block_exists(*blocks[i].hash())
.unwrap());
assert!(node
.blockchain_db
.fetch_header_by_block_hash(*blocks[i].hash())
.unwrap()
.is_none());
},
WhatToDelete::Blocks => {
assert!(!node
.blockchain_db
.chain_block_or_orphan_block_exists(*blocks[i].hash())
.unwrap());
},
WhatToDelete::Headers => {
assert!(node
.blockchain_db
.fetch_header_by_block_hash(*blocks[i].hash())
.unwrap()
.is_none());
},
}
}
}

Expand Down
4 changes: 3 additions & 1 deletion base_layer/core/tests/tests/header_sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,8 @@ async fn test_header_sync_uneven_headers_and_blocks_peer_lies_about_pow_no_ban()
let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node);
    // Remove blocks from Bob's chain so his claimed metadata is better than what it actually is
sync::delete_some_blocks_and_headers(&blocks[4..=5], WhatToDelete::Blocks, &bob_node);
assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 4);
assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 7);
assert!(
header_sync.clone().into_sync_peers()[0]
.claimed_chain_metadata()
Expand All @@ -267,7 +269,7 @@ async fn test_header_sync_uneven_headers_and_blocks_peer_lies_about_pow_no_ban()
match event {
StateEvent::HeadersSynchronized(_val, sync_result) => {
assert_eq!(sync_result.headers_returned, 0);
assert_eq!(sync_result.peer_fork_hash_index, 5);
assert_eq!(sync_result.peer_fork_hash_index, 3);
if let HeaderSyncStatus::InSyncOrAhead = sync_result.header_sync_status {
// Note: This behaviour is undetected! Bob cannot be banned here, and should be banned by block sync.
} else {
Expand Down
Loading

0 comments on commit 7d8aebb

Please sign in to comment.