Skip to content

Commit

Permalink
test: allow producing block on top of arbitrary prev block (#10093)
Browse files Browse the repository at this point in the history
For stateless validation, chunk execution will be delayed until next
block is processed: #9982. This impacts several tests assuming that
block processing includes chunk processing as well. For these tests, we
need to produce and process one more block to get execution results like
ChunkExtra and ExecutionOutcome.

To allow production of longer forks, I want to extend the client API with
`produce_block_on` which can produce a block not just on top of head,
but on top of any existing block. As this block isn't immediately saved
or processed, it doesn't even break any guarantees.

## Testing

Impacted tests should still pass. Later stateless validation PRs will
rely on it.

---------

Co-authored-by: Longarithm <[email protected]>
  • Loading branch information
Longarithm and Looogarithm authored Nov 6, 2023
1 parent 2dd724e commit a26635a
Show file tree
Hide file tree
Showing 2 changed files with 90 additions and 67 deletions.
111 changes: 57 additions & 54 deletions chain/client/src/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -477,20 +477,19 @@ impl Client {

fn should_reschedule_block(
&self,
head: &Tip,
prev_hash: &CryptoHash,
prev_prev_hash: &CryptoHash,
next_height: BlockHeight,
height: BlockHeight,
known_height: BlockHeight,
account_id: &AccountId,
next_block_proposer: &AccountId,
) -> Result<bool, Error> {
if self.known_block_height(next_height, known_height) {
if self.known_block_height(height, known_height) {
return Ok(true);
}

if !self.is_me_block_producer(account_id, next_block_proposer) {
info!(target: "client", "Produce block: chain at {}, not block producer for next block.", next_height);
info!(target: "client", "Produce block: chain at {}, not block producer for next block.", height);
return Ok(true);
}

Expand All @@ -501,7 +500,7 @@ impl Client {
}
}

if self.epoch_manager.is_next_block_epoch_start(&head.last_block_hash)? {
if self.epoch_manager.is_next_block_epoch_start(&prev_hash)? {
if !self.chain.prev_block_is_caught_up(prev_prev_hash, prev_hash)? {
// Currently state for the chunks we are interested in this epoch
// are not yet caught up (e.g. still state syncing).
Expand Down Expand Up @@ -565,31 +564,40 @@ impl Client {
.count()
}

/// Produce block if we are block producer for given `next_height` block height.
/// Produce block if we are block producer for given block `height`.
/// Either returns produced block (not applied) or error.
pub fn produce_block(&mut self, next_height: BlockHeight) -> Result<Option<Block>, Error> {
let _span = tracing::debug_span!(target: "client", "produce_block", next_height).entered();
let known_height = self.chain.store().get_latest_known()?.height;
pub fn produce_block(&mut self, height: BlockHeight) -> Result<Option<Block>, Error> {
let _span = tracing::debug_span!(target: "client", "produce_block", height).entered();

let validator_signer = self
.validator_signer
.as_ref()
.ok_or_else(|| Error::BlockProducer("Called without block producer info.".to_string()))?
.clone();
let head = self.chain.head()?;
assert_eq!(
head.epoch_id,
self.epoch_manager.get_epoch_id_from_prev_block(&head.prev_block_hash).unwrap()
);

self.produce_block_on(height, head.last_block_hash)
}

/// Produce block for given `height` on top of block `prev_hash`.
/// Should be called either from `produce_block` or in tests.
pub fn produce_block_on(
&mut self,
height: BlockHeight,
prev_hash: CryptoHash,
) -> Result<Option<Block>, Error> {
let known_height = self.chain.store().get_latest_known()?.height;
let validator_signer = self
.validator_signer
.as_ref()
.ok_or_else(|| Error::BlockProducer("Called without block producer info.".to_string()))?
.clone();

// Check that we were called at the block that we are producer for.
let epoch_id =
self.epoch_manager.get_epoch_id_from_prev_block(&head.last_block_hash).unwrap();
let next_block_proposer = self.epoch_manager.get_block_producer(&epoch_id, next_height)?;
let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(&prev_hash).unwrap();
let next_block_proposer = self.epoch_manager.get_block_producer(&epoch_id, height)?;

let prev = self.chain.get_block_header(&head.last_block_hash)?;
let prev_hash = head.last_block_hash;
let prev_height = head.height;
let prev = self.chain.get_block_header(&prev_hash)?;
let prev_height = prev.height();
let prev_prev_hash = *prev.prev_hash();
let prev_epoch_id = prev.epoch_id().clone();
let prev_next_bp_hash = *prev.next_bp_hash();
Expand All @@ -599,10 +607,9 @@ impl Client {
let _ = self.check_and_update_doomslug_tip()?;

if self.should_reschedule_block(
&head,
&prev_hash,
&prev_prev_hash,
next_height,
height,
known_height,
validator_signer.validator_id(),
&next_block_proposer,
Expand All @@ -611,7 +618,7 @@ impl Client {
}
let (validator_stake, _) = self.epoch_manager.get_validator_by_account_id(
&epoch_id,
&head.last_block_hash,
&prev_hash,
&next_block_proposer,
)?;

Expand All @@ -630,9 +637,9 @@ impl Client {
debug!(
target: "client",
validator=?validator_signer.validator_id(),
next_height=next_height,
height=height,
prev_height=prev.height(),
prev_hash=format_hash(head.last_block_hash),
prev_hash=format_hash(prev_hash),
new_chunks_count=new_chunks.len(),
new_chunks=?new_chunks.keys().sorted().collect_vec(),
"Producing block",
Expand All @@ -644,12 +651,12 @@ impl Client {
return Ok(None);
}

let mut approvals_map = self.doomslug.get_witness(&prev_hash, prev_height, next_height);
let mut approvals_map = self.doomslug.get_witness(&prev_hash, prev_height, height);

// At this point, the previous epoch hash must be available
let epoch_id = self
.epoch_manager
.get_epoch_id_from_prev_block(&head.last_block_hash)
.get_epoch_id_from_prev_block(&prev_hash)
.expect("Epoch hash should exist at this point");
let protocol_version = self
.epoch_manager
Expand All @@ -676,7 +683,7 @@ impl Client {

let next_epoch_id = self
.epoch_manager
.get_next_epoch_id_from_prev_block(&head.last_block_hash)
.get_next_epoch_id_from_prev_block(&prev_hash)
.expect("Epoch hash should exist at this point");

let protocol_version = self.epoch_manager.get_epoch_protocol_version(&epoch_id)?;
Expand Down Expand Up @@ -715,9 +722,9 @@ impl Client {

// Add debug information about the block production (and info on when did the chunks arrive).
self.block_production_info.record_block_production(
next_height,
height,
BlockProductionTracker::construct_chunk_collection_info(
next_height,
height,
&epoch_id,
chunks.len() as ShardId,
&new_chunks,
Expand All @@ -727,32 +734,29 @@ impl Client {

// Collect new chunks.
for (shard_id, (mut chunk_header, _, _)) in new_chunks {
*chunk_header.height_included_mut() = next_height;
*chunk_header.height_included_mut() = height;
chunks[shard_id as usize] = chunk_header;
}

let prev_header = &prev_block.header();

let next_epoch_id =
self.epoch_manager.get_next_epoch_id_from_prev_block(&head.last_block_hash)?;
let next_epoch_id = self.epoch_manager.get_next_epoch_id_from_prev_block(&prev_hash)?;

let minted_amount =
if self.epoch_manager.is_next_block_epoch_start(&head.last_block_hash)? {
Some(self.epoch_manager.get_epoch_minted_amount(&next_epoch_id)?)
} else {
None
};
let minted_amount = if self.epoch_manager.is_next_block_epoch_start(&prev_hash)? {
Some(self.epoch_manager.get_epoch_minted_amount(&next_epoch_id)?)
} else {
None
};

let epoch_sync_data_hash =
if self.epoch_manager.is_next_block_epoch_start(&head.last_block_hash)? {
Some(self.epoch_manager.get_epoch_sync_data_hash(
prev_block.hash(),
&epoch_id,
&next_epoch_id,
)?)
} else {
None
};
let epoch_sync_data_hash = if self.epoch_manager.is_next_block_epoch_start(&prev_hash)? {
Some(self.epoch_manager.get_epoch_sync_data_hash(
prev_block.hash(),
&epoch_id,
&next_epoch_id,
)?)
} else {
None
};

// Get all the current challenges.
// TODO(2445): Enable challenges when they are working correctly.
Expand All @@ -766,7 +770,7 @@ impl Client {
this_epoch_protocol_version,
next_epoch_protocol_version,
prev_header,
next_height,
height,
block_ordinal,
chunks,
epoch_id,
Expand All @@ -786,10 +790,9 @@ impl Client {
);

// Update latest known even before returning block out, to prevent race conditions.
self.chain.mut_store().save_latest_known(LatestKnown {
height: next_height,
seen: block.header().raw_timestamp(),
})?;
self.chain
.mut_store()
.save_latest_known(LatestKnown { height, seen: block.header().raw_timestamp() })?;

metrics::BLOCK_PRODUCED_TOTAL.inc();

Expand Down
46 changes: 33 additions & 13 deletions integration-tests/src/tests/client/process_blocks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2370,7 +2370,10 @@ fn test_validate_chunk_extra() {
let accepted_blocks = env.clients[0].finish_block_in_processing(block2.hash());
assert_eq!(accepted_blocks.len(), 1);

// About to produce a block on top of block1. Validate that this chunk is legit.
// Produce a block on top of block1.
// Validate that the result of chunk execution in `block1` is legit.
let b = env.clients[0].produce_block_on(next_height + 2, *block1.hash()).unwrap().unwrap();
env.clients[0].process_block_test(b.into(), Provenance::PRODUCED).unwrap();
let chunks = env.clients[0]
.get_chunk_headers_ready_for_inclusion(block1.header().epoch_id(), &block1.hash());
let chunk_extra =
Expand Down Expand Up @@ -3011,6 +3014,8 @@ fn test_query_final_state() {
assert!(account_state1.amount < TESTING_INIT_BALANCE - TESTING_INIT_STAKE);
}

// Check that if the same receipt is executed twice in a forked chain, both outcomes are recorded
// but the child receipt ids are different.
#[test]
fn test_fork_receipt_ids() {
let (mut env, tx_hash) = prepare_env_with_transaction();
Expand All @@ -3020,15 +3025,15 @@ fn test_fork_receipt_ids() {

// Construct two blocks that contain the same chunk and make the chunk unavailable.
let validator_signer = create_test_signer("test0");
let next_height = produced_block.header().height() + 1;
let (encoded_chunk, _, _) = create_chunk_on_height(&mut env.clients[0], next_height);
let mut block1 = env.clients[0].produce_block(next_height).unwrap().unwrap();
let mut block2 = env.clients[0].produce_block(next_height + 1).unwrap().unwrap();
let last_height = produced_block.header().height();
let (encoded_chunk, _, _) = create_chunk_on_height(&mut env.clients[0], last_height + 1);
let mut block1 = env.clients[0].produce_block(last_height + 1).unwrap().unwrap();
let mut block2 = env.clients[0].produce_block(last_height + 2).unwrap().unwrap();

// Process two blocks on two different forks that contain the same chunk.
for (i, block) in vec![&mut block2, &mut block1].into_iter().enumerate() {
for block in vec![&mut block2, &mut block1].into_iter() {
let mut chunk_header = encoded_chunk.cloned_header();
*chunk_header.height_included_mut() = next_height - i as BlockHeight + 1;
*chunk_header.height_included_mut() = block.header().height();
let chunk_headers = vec![chunk_header];
block.set_chunks(chunk_headers.clone());
block.mut_header().get_mut().inner_rest.chunk_headers_root =
Expand All @@ -3044,6 +3049,12 @@ fn test_fork_receipt_ids() {
env.clients[0].process_block_test(block.clone().into(), Provenance::NONE).unwrap();
}

// Ensure that in the stateless validation protocol, receipts in fork blocks are executed.
let block3 = env.clients[0].produce_block_on(last_height + 3, *block1.hash()).unwrap().unwrap();
let block4 = env.clients[0].produce_block_on(last_height + 4, *block2.hash()).unwrap().unwrap();
env.clients[0].process_block_test(block3.into(), Provenance::NONE).unwrap();
env.clients[0].process_block_test(block4.into(), Provenance::NONE).unwrap();

let transaction_execution_outcome =
env.clients[0].chain.mut_store().get_outcomes_by_id(&tx_hash).unwrap();
assert_eq!(transaction_execution_outcome.len(), 2);
Expand All @@ -3052,6 +3063,9 @@ fn test_fork_receipt_ids() {
assert_ne!(receipt_id0, receipt_id1);
}

// Check that if a receipt is executed twice in different forks, two execution
// outcomes are recorded, the canonical chain outcome is correct, and GC cleans up
// all outcomes.
#[test]
fn test_fork_execution_outcome() {
init_test_logger();
Expand All @@ -3069,13 +3083,13 @@ fn test_fork_execution_outcome() {
let validator_signer = create_test_signer("test0");
let next_height = last_height + 1;
let (encoded_chunk, _, _) = create_chunk_on_height(&mut env.clients[0], next_height);
let mut block1 = env.clients[0].produce_block(next_height).unwrap().unwrap();
let mut block2 = env.clients[0].produce_block(next_height + 1).unwrap().unwrap();
let mut block1 = env.clients[0].produce_block(last_height + 1).unwrap().unwrap();
let mut block2 = env.clients[0].produce_block(last_height + 2).unwrap().unwrap();

// Process two blocks on two different forks that contain the same chunk.
for (i, block) in vec![&mut block2, &mut block1].into_iter().enumerate() {
for block in vec![&mut block2, &mut block1].into_iter() {
let mut chunk_header = encoded_chunk.cloned_header();
*chunk_header.height_included_mut() = next_height - i as BlockHeight + 1;
*chunk_header.height_included_mut() = block.header().height();
let chunk_headers = vec![chunk_header];
block.set_chunks(chunk_headers.clone());
block.mut_header().get_mut().inner_rest.chunk_headers_root =
Expand All @@ -3091,6 +3105,11 @@ fn test_fork_execution_outcome() {
env.clients[0].process_block_test(block.clone().into(), Provenance::NONE).unwrap();
}

let block3 = env.clients[0].produce_block_on(last_height + 3, *block1.hash()).unwrap().unwrap();
let block4 = env.clients[0].produce_block_on(last_height + 4, *block2.hash()).unwrap().unwrap();
env.clients[0].process_block_test(block3.into(), Provenance::NONE).unwrap();
env.clients[0].process_block_test(block4.into(), Provenance::NONE).unwrap();

let transaction_execution_outcome =
env.clients[0].chain.mut_store().get_outcomes_by_id(&tx_hash).unwrap();
assert_eq!(transaction_execution_outcome.len(), 1);
Expand All @@ -3101,8 +3120,9 @@ fn test_fork_execution_outcome() {
let canonical_chain_outcome = env.clients[0].chain.get_execution_outcome(&receipt_id).unwrap();
assert_eq!(canonical_chain_outcome.block_hash, *block2.hash());

// make sure gc works properly
for i in 5..32 {
// Make sure that GC cleans up execution outcomes.
let epoch_length = env.clients[0].config.epoch_length;
for i in last_height + 5..last_height + 5 + epoch_length * 6 {
env.produce_block(0, i);
}
let transaction_execution_outcome =
Expand Down

0 comments on commit a26635a

Please sign in to comment.