diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 2218c765558a37..698a17946ff69c 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1354,19 +1354,29 @@ pub fn reconcile_blockstore_roots_with_external_source( Ordering::Greater => true, Ordering::Equal => false, Ordering::Less => panic!( - "last_blockstore_root({}) is skipped while traversing blockstore (currently at {}) from external root ({})!?", - last_blockstore_root, - current, - external_root + "last_blockstore_root({}) is skipped while traversing \ + blockstore (currently at {}) from external root ({:?})!?", + last_blockstore_root, current, external_source, ), }) + .map(|root| (root, None)) .collect(); if !new_roots.is_empty() { info!( "Reconciling slots as root based on external root: {:?} (external: {:?}, blockstore: {})", new_roots, external_source, last_blockstore_root ); - blockstore.set_roots(new_roots.iter())?; + + // Unfortunately, we can't supply duplicate-confirmed hashes, + // because it can't be guaranteed to be able to replay these slots + // under this code-path's limited condition (i.e. those shreds + // might not be available, etc...) also correctly overcoming this + // limitation is hard... + blockstore.mark_slots_as_if_rooted_normally(new_roots, false)?; + + // Update the caller-managed state of last root in blockstore. + // Repeated calls of this function should result in a no-op for + // the range of `new_roots`. *last_blockstore_root = blockstore.last_root(); } else { // This indicates we're in bad state; but still don't panic here. @@ -2848,9 +2858,9 @@ pub mod test { } #[test] - #[should_panic( - expected = "last_blockstore_root(3) is skipped while traversing blockstore (currently at 1) from external root (4)!?" 
- )] + #[should_panic(expected = "last_blockstore_root(3) is skipped while \ traversing blockstore (currently at 1) from \ external root (Tower(4))!?")] fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() { solana_logger::setup(); let blockstore_path = get_tmp_ledger_path!(); diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 497b21995024de..71f4ef662f76e5 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3019,6 +3019,22 @@ impl Blockstore { Ok(()) } + pub fn mark_slots_as_if_rooted_normally( + &self, + slots: Vec<(Slot, Option<Hash>)>, + with_hash: bool, + ) -> Result<()> { + self.set_roots(slots.iter().map(|(slot, _hash)| slot))?; + if with_hash { + self.set_duplicate_confirmed_slots_and_hashes( + slots + .into_iter() + .map(|(slot, maybe_hash)| (slot, maybe_hash.unwrap())), + )?; + } + Ok(()) + } + pub fn is_dead(&self, slot: Slot) -> bool { matches!( self.db diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 84a2a900849485..e50a01602a528a 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1284,17 +1284,16 @@ fn load_frozen_forks( if new_root_bank.slot() == root { break; } // Found the last root in the chain, yay! 
assert!(new_root_bank.slot() > root); - rooted_slots.push((new_root_bank.slot(), new_root_bank.hash())); + rooted_slots.push((new_root_bank.slot(), Some(new_root_bank.hash()))); // As noted, the cluster confirmed root should be descended from // our last root; therefore parent should be set new_root_bank = new_root_bank.parent().unwrap(); } inc_new_counter_info!("load_frozen_forks-cluster-confirmed-root", rooted_slots.len()); if blockstore.is_primary_access() { - blockstore.set_roots(rooted_slots.iter().map(|(slot, _hash)| slot)) - .expect("Blockstore::set_roots should succeed"); - blockstore.set_duplicate_confirmed_slots_and_hashes(rooted_slots.into_iter()) - .expect("Blockstore::set_duplicate_confirmed should succeed"); + blockstore + .mark_slots_as_if_rooted_normally(rooted_slots, true) + .expect("Blockstore::mark_slots_as_if_rooted_normally() should succeed"); } Some(cluster_root_bank) } else { diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index d278df34e1fb9f..130f6dec972f14 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2261,10 +2261,16 @@ fn test_hard_fork_with_gap_in_roots() { } } + // stop all nodes of the cluster let mut validator_a_info = cluster.lock().unwrap().exit_node(&validator_a_pubkey); let mut validator_b_info = cluster.lock().unwrap().exit_node(&validator_b_pubkey); + // hard fork slot is effectively a (possibly skipping) new root. + // assert that the precondition of validator a to test gap between + // blockstore and hard fork... 
let hard_fork_slot = min_last_vote - 5; + assert!(hard_fork_slot > root_in_tower(&val_a_ledger_path, &validator_a_pubkey).unwrap()); + let hard_fork_slots = Some(vec![hard_fork_slot]); let mut hard_forks = HardForks::default(); hard_forks.register(hard_fork_slot); @@ -2342,7 +2348,7 @@ fn test_hard_fork_with_gap_in_roots() { .unwrap() .collect::<Vec<_>>(); // artifically restore the forcibly purged genesis only for the validator A just for the sake of - following the final assertion. + the final assertions. slots_a.push(genesis_slot); roots_a.push(genesis_slot); @@ -2353,7 +2359,9 @@ fn test_hard_fork_with_gap_in_roots() { .collect::<Vec<_>>(); // compare them all! - assert_eq!((slots_a, roots_a), (slots_b, roots_b)); + assert_eq!((&slots_a, &roots_a), (&slots_b, &roots_b)); + assert_eq!(&slots_a[slots_a.len() - roots_a.len()..].to_vec(), &roots_a); + assert_eq!(&slots_b[slots_b.len() - roots_b.len()..].to_vec(), &roots_b); } #[test]