-
Notifications
You must be signed in to change notification settings - Fork 256
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
blockstore: only consume duplicate proofs from root_slot + 1 on startup #1971
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1390,15 +1390,21 @@ impl ReplayStage { | |
) -> (ProgressMap, HeaviestSubtreeForkChoice) { | ||
let (root_bank, frozen_banks, duplicate_slot_hashes) = { | ||
let bank_forks = bank_forks.read().unwrap(); | ||
let root_bank = bank_forks.root_bank(); | ||
let duplicate_slots = blockstore | ||
.duplicate_slots_iterator(bank_forks.root_bank().slot()) | ||
// It is important that the root bank is not marked as duplicate on initialization. | ||
// Although this bank could contain a duplicate proof, the fact that it was rooted | ||
// either during a previous run or artificially means that we should ignore any | ||
// duplicate proofs for the root slot, thus we start consuming duplicate proofs | ||
// from the root slot + 1 | ||
.duplicate_slots_iterator(root_bank.slot().saturating_add(1)) | ||
.unwrap(); | ||
let duplicate_slot_hashes = duplicate_slots.filter_map(|slot| { | ||
let bank = bank_forks.get(slot)?; | ||
Some((slot, bank.hash())) | ||
}); | ||
( | ||
bank_forks.root_bank(), | ||
root_bank, | ||
bank_forks.frozen_banks().values().cloned().collect(), | ||
duplicate_slot_hashes.collect::<Vec<(Slot, Hash)>>(), | ||
) | ||
|
@@ -4521,6 +4527,9 @@ pub(crate) mod tests { | |
replay_stage::ReplayStage, | ||
vote_simulator::{self, VoteSimulator}, | ||
}, | ||
blockstore_processor::{ | ||
confirm_full_slot, fill_blockstore_slot_with_ticks, process_bank_0, ProcessOptions, | ||
}, | ||
crossbeam_channel::unbounded, | ||
itertools::Itertools, | ||
solana_entry::entry::{self, Entry}, | ||
|
@@ -9032,4 +9041,136 @@ pub(crate) mod tests { | |
assert_eq!(tower.vote_state, expected_tower.vote_state); | ||
assert_eq!(tower.node_pubkey, expected_tower.node_pubkey); | ||
} | ||
|
||
#[test] | ||
fn test_initialize_progress_and_fork_choice_with_duplicates() { | ||
solana_logger::setup(); | ||
let GenesisConfigInfo { | ||
mut genesis_config, .. | ||
} = create_genesis_config(123); | ||
|
||
let ticks_per_slot = 1; | ||
genesis_config.ticks_per_slot = ticks_per_slot; | ||
let (ledger_path, blockhash) = | ||
solana_ledger::create_new_tmp_ledger_auto_delete!(&genesis_config); | ||
let blockstore = Blockstore::open(ledger_path.path()).unwrap(); | ||
|
||
/* | ||
Bank forks with: | ||
slot 0 | ||
| | ||
slot 1 -> Duplicate before restart, the restart slot | ||
| | ||
slot 2 | ||
| | ||
slot 3 -> Duplicate before restart, artificially rooted | ||
| | ||
slot 4 -> Duplicate before restart, artificially rooted | ||
| | ||
slot 5 -> Duplicate before restart | ||
| | ||
slot 6 | ||
*/ | ||
|
||
let mut last_hash = blockhash; | ||
for i in 0..6 { | ||
last_hash = | ||
fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash); | ||
} | ||
// Artificially root 3 and 4 | ||
blockstore.set_roots([3, 4].iter()).unwrap(); | ||
|
||
// Set up bank0 | ||
let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); | ||
let bank0 = bank_forks.read().unwrap().get_with_scheduler(0).unwrap(); | ||
let recyclers = VerifyRecyclers::default(); | ||
let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() | ||
.num_threads(1) | ||
.thread_name(|i| format!("solReplayTx{i:02}")) | ||
.build() | ||
.expect("new rayon threadpool"); | ||
|
||
process_bank_0( | ||
steviez marked this conversation as resolved.
Show resolved
Hide resolved
|
||
&bank0, | ||
&blockstore, | ||
&replay_tx_thread_pool, | ||
&ProcessOptions::default(), | ||
&recyclers, | ||
None, | ||
None, | ||
); | ||
|
||
// Mark block 1, 3, 4, 5 as duplicate | ||
blockstore.store_duplicate_slot(1, vec![], vec![]).unwrap(); | ||
blockstore.store_duplicate_slot(3, vec![], vec![]).unwrap(); | ||
blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap(); | ||
blockstore.store_duplicate_slot(5, vec![], vec![]).unwrap(); | ||
|
||
let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent( | ||
bank0.clone_without_scheduler(), | ||
&Pubkey::default(), | ||
1, | ||
)); | ||
confirm_full_slot( | ||
&blockstore, | ||
&bank1, | ||
&replay_tx_thread_pool, | ||
&ProcessOptions::default(), | ||
&recyclers, | ||
&mut ConfirmationProgress::new(bank0.last_blockhash()), | ||
None, | ||
None, | ||
None, | ||
&mut ExecuteTimings::default(), | ||
) | ||
.unwrap(); | ||
|
||
bank_forks | ||
.write() | ||
.unwrap() | ||
.set_root( | ||
1, | ||
&solana_runtime::accounts_background_service::AbsRequestSender::default(), | ||
None, | ||
) | ||
.unwrap(); | ||
steviez marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1); | ||
|
||
// process_blockstore_from_root() from slot 1 onwards | ||
blockstore_processor::process_blockstore_from_root( | ||
&blockstore, | ||
&bank_forks, | ||
&leader_schedule_cache, | ||
&ProcessOptions::default(), | ||
None, | ||
None, | ||
None, | ||
&AbsRequestSender::default(), | ||
) | ||
.unwrap(); | ||
|
||
assert_eq!(bank_forks.read().unwrap().root(), 4); | ||
|
||
// Verify that fork choice can be initialized and that the root is not marked duplicate | ||
let (_progress, fork_choice) = | ||
ReplayStage::initialize_progress_and_fork_choice_with_locked_bank_forks( | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Confirmed that this test panics in the exact same spot as the devnet outage without the change. |
||
&bank_forks, | ||
&Pubkey::new_unique(), | ||
&Pubkey::new_unique(), | ||
&blockstore, | ||
); | ||
|
||
let bank_forks = bank_forks.read().unwrap(); | ||
// 4 (the artificial root) is the tree root and no longer duplicate | ||
assert_eq!(fork_choice.tree_root().0, 4); | ||
assert!(fork_choice | ||
.is_candidate(&(4, bank_forks.bank_hash(4).unwrap())) | ||
.unwrap()); | ||
|
||
// 5 is still considered duplicate, so it is not a valid fork choice candidate | ||
assert!(!fork_choice | ||
.is_candidate(&(5, bank_forks.bank_hash(5).unwrap())) | ||
.unwrap()); | ||
} | ||
} |
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Wonder if we should be filtering out duplicate confirmed blocks here, or will replay naturally take care of them.
We only count landed votes toward duplicate confirmed so sounds like replay should cover this
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
replay should take care of any duplicate confirmed past the root bank. although they will be marked as duplicate initially on restart, once we replay the votes they will receive the same duplicate confirmed status they had before.
It seems like the duplicate confirmed column is only used for ancestor hashes repair? If so, there might be a temporary mismatch where we serve ancestor hashes repair while our local fork choice still has the slot marked as duplicate.