blockstore: only consume duplicate proofs from root_slot + 1 on startup
AshwinSekar committed Jul 11, 2024
1 parent 6d30530 commit f46d2be
Showing 2 changed files with 145 additions and 4 deletions.
145 changes: 143 additions & 2 deletions core/src/replay_stage.rs
@@ -1390,15 +1390,21 @@ impl ReplayStage {
) -> (ProgressMap, HeaviestSubtreeForkChoice) {
let (root_bank, frozen_banks, duplicate_slot_hashes) = {
let bank_forks = bank_forks.read().unwrap();
let root_bank = bank_forks.root_bank();
let duplicate_slots = blockstore
.duplicate_slots_iterator(bank_forks.root_bank().slot())
// It is important that the root bank is not marked as duplicate on initialization.
// Although this bank could contain a duplicate proof, the fact that it was rooted
// either during a previous run or artificially means that we should ignore any
// duplicate proofs for the root slot; thus we start consuming duplicate proofs
// from the root slot + 1.
.duplicate_slots_iterator(root_bank.slot().saturating_add(1))
.unwrap();
let duplicate_slot_hashes = duplicate_slots.filter_map(|slot| {
let bank = bank_forks.get(slot)?;
Some((slot, bank.hash()))
});
(
bank_forks.root_bank(),
root_bank,
bank_forks.frozen_banks().values().cloned().collect(),
duplicate_slot_hashes.collect::<Vec<(Slot, Hash)>>(),
)
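
The hunk above moves the start of duplicate-proof consumption from the root slot to root slot + 1. Below is a minimal, self-contained sketch of that startup behavior, using simplified stand-in types (not the real agave Blockstore/BankForks API); it only illustrates that proofs at or below the root are skipped:

```rust
use std::collections::{BTreeSet, HashMap};

type Slot = u64;
type Hash = [u8; 32];

// Hypothetical stand-in: the real Blockstore keeps duplicate proofs in a column family.
struct Blockstore {
    duplicate_slots: BTreeSet<Slot>,
}

impl Blockstore {
    // Yields duplicate slots at or above `start_slot`, in ascending order,
    // mirroring the shape of duplicate_slots_iterator(start_slot).
    fn duplicate_slots_iterator(&self, start_slot: Slot) -> impl Iterator<Item = Slot> + '_ {
        self.duplicate_slots.range(start_slot..).copied()
    }
}

// Hypothetical stand-in for BankForks: just a root slot and frozen bank hashes.
struct BankForks {
    root: Slot,
    bank_hashes: HashMap<Slot, Hash>,
}

// Collect the (slot, hash) pairs to mark duplicate at startup. Starting at
// root + 1 ensures the already-finalized root slot is never fed back into
// fork choice as a duplicate, even if a proof for it exists in the blockstore.
fn startup_duplicate_slot_hashes(
    blockstore: &Blockstore,
    bank_forks: &BankForks,
) -> Vec<(Slot, Hash)> {
    blockstore
        .duplicate_slots_iterator(bank_forks.root.saturating_add(1))
        .filter_map(|slot| bank_forks.bank_hashes.get(&slot).map(|hash| (slot, *hash)))
        .collect()
}

fn main() {
    // Proofs were recorded for slots 3, 4, and 5, but 4 is the (artificial) root.
    let blockstore = Blockstore {
        duplicate_slots: BTreeSet::from([3, 4, 5]),
    };
    let bank_forks = BankForks {
        root: 4,
        bank_hashes: HashMap::from([(4, [0u8; 32]), (5, [1u8; 32]), (6, [2u8; 32])]),
    };

    // Only slot 5 is reported; 3 and 4 are at or below the root and are skipped.
    let expected: Vec<(Slot, Hash)> = vec![(5, [1u8; 32])];
    assert_eq!(startup_duplicate_slot_hashes(&blockstore, &bank_forks), expected);
}
```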
@@ -4521,6 +4527,9 @@ pub(crate) mod tests {
replay_stage::ReplayStage,
vote_simulator::{self, VoteSimulator},
},
blockstore_processor::{
confirm_full_slot, fill_blockstore_slot_with_ticks, process_bank_0, ProcessOptions,
},
crossbeam_channel::unbounded,
itertools::Itertools,
solana_entry::entry::{self, Entry},
@@ -9032,4 +9041,136 @@ pub(crate) mod tests {
assert_eq!(tower.vote_state, expected_tower.vote_state);
assert_eq!(tower.node_pubkey, expected_tower.node_pubkey);
}

#[test]
fn test_initialize_progress_and_fork_choice_with_duplicates() {
solana_logger::setup();
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(123);

let ticks_per_slot = 1;
genesis_config.ticks_per_slot = ticks_per_slot;
let (ledger_path, blockhash) =
solana_ledger::create_new_tmp_ledger_auto_delete!(&genesis_config);
let blockstore = Blockstore::open(ledger_path.path()).unwrap();

/*
Bank forks with:
slot 0
|
slot 1 -> Duplicate before restart, the restart slot
|
slot 2
|
slot 3 -> Duplicate before restart, artificially rooted
|
slot 4 -> Duplicate before restart, artificially rooted
|
slot 5 -> Duplicate before restart
|
slot 6
*/

let mut last_hash = blockhash;
for i in 0..6 {
last_hash =
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
}
// Artificially root slots 3 and 4
blockstore.set_roots([3, 4].iter()).unwrap();

// Set up bank0
let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
let bank0 = bank_forks.read().unwrap().get_with_scheduler(0).unwrap();
let recyclers = VerifyRecyclers::default();
let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new()
.num_threads(1)
.thread_name(|i| format!("solReplayTx{i:02}"))
.build()
.expect("new rayon threadpool");

process_bank_0(
&bank0,
&blockstore,
&replay_tx_thread_pool,
&ProcessOptions::default(),
&recyclers,
None,
None,
);

// Mark blocks 1, 3, 4, and 5 as duplicate
blockstore.store_duplicate_slot(1, vec![], vec![]).unwrap();
blockstore.store_duplicate_slot(3, vec![], vec![]).unwrap();
blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap();
blockstore.store_duplicate_slot(5, vec![], vec![]).unwrap();

let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent(
bank0.clone_without_scheduler(),
&Pubkey::default(),
1,
));
confirm_full_slot(
&blockstore,
&bank1,
&replay_tx_thread_pool,
&ProcessOptions::default(),
&recyclers,
&mut ConfirmationProgress::new(bank0.last_blockhash()),
None,
None,
None,
&mut ExecuteTimings::default(),
)
.unwrap();

bank_forks
.write()
.unwrap()
.set_root(
1,
&solana_runtime::accounts_background_service::AbsRequestSender::default(),
None,
)
.unwrap();

let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1);

// process_blockstore_from_root() from slot 1 onwards
blockstore_processor::process_blockstore_from_root(
&blockstore,
&bank_forks,
&leader_schedule_cache,
&ProcessOptions::default(),
None,
None,
None,
&AbsRequestSender::default(),
)
.unwrap();

assert_eq!(bank_forks.read().unwrap().root(), 4);

// Verify that fork choice can be initialized and that the root is not marked duplicate
let (_progress, fork_choice) =
ReplayStage::initialize_progress_and_fork_choice_with_locked_bank_forks(
&bank_forks,
&Pubkey::new_unique(),
&Pubkey::new_unique(),
&blockstore,
);

let bank_forks = bank_forks.read().unwrap();
// 4 (the artificial root) is the tree root and no longer duplicate
assert_eq!(fork_choice.tree_root().0, 4);
assert!(fork_choice
.is_candidate(&(4, bank_forks.bank_hash(4).unwrap()))
.unwrap());

// 5 is still duplicate
assert!(!fork_choice
.is_candidate(&(5, bank_forks.bank_hash(5).unwrap()))
.unwrap());
}
}
4 changes: 2 additions & 2 deletions ledger/src/blockstore_processor.rs
@@ -1078,7 +1078,7 @@ fn verify_ticks(
}

#[allow(clippy::too_many_arguments)]
fn confirm_full_slot(
pub fn confirm_full_slot(
blockstore: &Blockstore,
bank: &BankWithScheduler,
replay_tx_thread_pool: &ThreadPool,
@@ -1684,7 +1684,7 @@ fn confirm_slot_entries(
}

// Special handling required for processing the entries in slot 0
fn process_bank_0(
pub fn process_bank_0(
bank0: &BankWithScheduler,
blockstore: &Blockstore,
replay_tx_thread_pool: &ThreadPool,
