v2.0: blockstore: only consume duplicate proofs from root_slot + 1 on startup (backport of #1971) #2114

Merged 1 commit on Jul 17, 2024
1 change: 1 addition & 0 deletions Cargo.lock


1 change: 1 addition & 0 deletions core/Cargo.toml
@@ -92,6 +92,7 @@ serde_json = { workspace = true }
serial_test = { workspace = true }
# See order-crates-for-publishing.py for using this unusual `path = "."`
solana-core = { path = ".", features = ["dev-context-only-utils"] }
solana-ledger = { workspace = true, features = ["dev-context-only-utils"] }
solana-logger = { workspace = true }
solana-poh = { workspace = true, features = ["dev-context-only-utils"] }
solana-program-runtime = { workspace = true }
145 changes: 143 additions & 2 deletions core/src/replay_stage.rs
@@ -1390,15 +1390,21 @@ impl ReplayStage {
     ) -> (ProgressMap, HeaviestSubtreeForkChoice) {
         let (root_bank, frozen_banks, duplicate_slot_hashes) = {
             let bank_forks = bank_forks.read().unwrap();
+            let root_bank = bank_forks.root_bank();
             let duplicate_slots = blockstore
-                .duplicate_slots_iterator(bank_forks.root_bank().slot())
+                // It is important that the root bank is not marked as duplicate on initialization.
+                // Although this bank could contain a duplicate proof, the fact that it was rooted
+                // either during a previous run or artificially means that we should ignore any
+                // duplicate proofs for the root slot, thus we start consuming duplicate proofs
+                // from the root slot + 1
+                .duplicate_slots_iterator(root_bank.slot().saturating_add(1))

Review comment:

This is the only functional change; the rest is test code.

This backport ensures that nodes are able to restart cleanly in the presence of duplicate blocks. The root chosen upon restart could previously have been a duplicate block that was later resolved, or it could have been deliberately chosen as a restart slot regardless. It is important that we ignore the duplicate column for the restart root slot; otherwise we will panic on replay initialization.

                 .unwrap();
             let duplicate_slot_hashes = duplicate_slots.filter_map(|slot| {
                 let bank = bank_forks.get(slot)?;
                 Some((slot, bank.hash()))
             });
             (
-                bank_forks.root_bank(),
+                root_bank,
                 bank_forks.frozen_banks().values().cloned().collect(),
                 duplicate_slot_hashes.collect::<Vec<(Slot, Hash)>>(),
             )
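
In short, the functional change shifts the start of the duplicate-proof scan from the root slot to the slot after it. A minimal standalone sketch of that behavior (hypothetical helper name, not the actual Blockstore API):

fn duplicate_slots_to_consume(recorded: &[u64], root_slot: u64) -> Vec<u64> {
    recorded
        .iter()
        .copied()
        // Equivalent of `duplicate_slots_iterator(root_slot.saturating_add(1))`;
        // `saturating_add` also avoids overflow should the root ever be `u64::MAX`.
        .filter(|&slot| slot > root_slot)
        .collect()
}

fn main() {
    // Mirrors the test below: duplicate proofs recorded for slots 1, 3, 4, 5
    // with restart root 4 leave only slot 5 to be consumed as a duplicate.
    assert_eq!(duplicate_slots_to_consume(&[1, 3, 4, 5], 4), vec![5]);
}
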
@@ -4521,6 +4527,9 @@ pub(crate) mod tests {
            replay_stage::ReplayStage,
            vote_simulator::{self, VoteSimulator},
        },
        blockstore_processor::{
            confirm_full_slot, fill_blockstore_slot_with_ticks, process_bank_0, ProcessOptions,
        },
        crossbeam_channel::unbounded,
        itertools::Itertools,
        solana_entry::entry::{self, Entry},
@@ -9032,4 +9041,136 @@ pub(crate) mod tests {
        assert_eq!(tower.vote_state, expected_tower.vote_state);
        assert_eq!(tower.node_pubkey, expected_tower.node_pubkey);
    }

    #[test]
    fn test_initialize_progress_and_fork_choice_with_duplicates() {
        solana_logger::setup();
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(123);

        let ticks_per_slot = 1;
        genesis_config.ticks_per_slot = ticks_per_slot;
        let (ledger_path, blockhash) =
            solana_ledger::create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        /*
            Bank forks with:
                slot 0
                  |
                slot 1 -> Duplicate before restart, the restart slot
                  |
                slot 2
                  |
                slot 3 -> Duplicate before restart, artificially rooted
                  |
                slot 4 -> Duplicate before restart, artificially rooted
                  |
                slot 5 -> Duplicate before restart
                  |
                slot 6
        */

        let mut last_hash = blockhash;
        for i in 0..6 {
            last_hash =
                fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
        }
        // Artificially root 3 and 4
        blockstore.set_roots([3, 4].iter()).unwrap();

        // Set up bank0
        let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
        let bank0 = bank_forks.read().unwrap().get_with_scheduler(0).unwrap();
        let recyclers = VerifyRecyclers::default();
        let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new()
            .num_threads(1)
            .thread_name(|i| format!("solReplayTx{i:02}"))
            .build()
            .expect("new rayon threadpool");

        process_bank_0(
            &bank0,
            &blockstore,
            &replay_tx_thread_pool,
            &ProcessOptions::default(),
            &recyclers,
            None,
            None,
        );

        // Mark blocks 1, 3, 4, and 5 as duplicate
        blockstore.store_duplicate_slot(1, vec![], vec![]).unwrap();
        blockstore.store_duplicate_slot(3, vec![], vec![]).unwrap();
        blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap();
        blockstore.store_duplicate_slot(5, vec![], vec![]).unwrap();

        let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent(
            bank0.clone_without_scheduler(),
            &Pubkey::default(),
            1,
        ));
        confirm_full_slot(
            &blockstore,
            &bank1,
            &replay_tx_thread_pool,
            &ProcessOptions::default(),
            &recyclers,
            &mut ConfirmationProgress::new(bank0.last_blockhash()),
            None,
            None,
            None,
            &mut ExecuteTimings::default(),
        )
        .unwrap();

        bank_forks
            .write()
            .unwrap()
            .set_root(
                1,
                &solana_runtime::accounts_background_service::AbsRequestSender::default(),
                None,
            )
            .unwrap();

        let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1);

        // process_blockstore_from_root() from slot 1 onwards
        blockstore_processor::process_blockstore_from_root(
            &blockstore,
            &bank_forks,
            &leader_schedule_cache,
            &ProcessOptions::default(),
            None,
            None,
            None,
            &AbsRequestSender::default(),
        )
        .unwrap();

        assert_eq!(bank_forks.read().unwrap().root(), 4);

        // Verify that fork choice can be initialized and that the root is not marked duplicate
        let (_progress, fork_choice) =
            ReplayStage::initialize_progress_and_fork_choice_with_locked_bank_forks(
                &bank_forks,
                &Pubkey::new_unique(),
                &Pubkey::new_unique(),
                &blockstore,
            );

        let bank_forks = bank_forks.read().unwrap();
        // 4 (the artificial root) is the tree root and no longer duplicate
        assert_eq!(fork_choice.tree_root().0, 4);
        assert!(fork_choice
            .is_candidate(&(4, bank_forks.bank_hash(4).unwrap()))
            .unwrap());

        // 5 is still considered duplicate, so it is not a valid fork choice candidate
        assert!(!fork_choice
            .is_candidate(&(5, bank_forks.bank_hash(5).unwrap()))
            .unwrap());
    }
}
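
For context on the panic mentioned in the review comment above: fork choice initialization requires the tree root to be a valid (non-duplicate) candidate. A simplified sketch of that invariant (an assumed stand-in, not the real HeaviestSubtreeForkChoice internals):

use std::collections::HashSet;

// If the restart root itself sits in the duplicate set, there is no valid
// candidate to anchor the fork-choice tree on, and initialization must fail.
fn init_fork_choice_root(root: u64, duplicate_slots: &HashSet<u64>) {
    assert!(
        !duplicate_slots.contains(&root),
        "root slot {root} must not be marked duplicate at initialization"
    );
    // ... build the fork-choice tree rooted at `root` ...
}

fn main() {
    // After the fix, proofs at or below the root (1, 3, 4) are skipped when
    // the duplicate column is consumed, so only slot 5 remains.
    let duplicates: HashSet<u64> = [5].into_iter().collect();
    init_fork_choice_root(4, &duplicates); // ok: the root is not in the set
}
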
1 change: 1 addition & 0 deletions ledger/Cargo.toml
@@ -31,6 +31,7 @@ mockall = { workspace = true }
num_cpus = { workspace = true }
num_enum = { workspace = true }
prost = { workspace = true }
qualifier_attr = { workspace = true }
rand = { workspace = true }
rand_chacha = { workspace = true }
rayon = { workspace = true }
4 changes: 4 additions & 0 deletions ledger/src/blockstore_processor.rs
@@ -1,3 +1,5 @@
#[cfg(feature = "dev-context-only-utils")]
use qualifier_attr::qualifiers;
use {
    crate::{
        block_error::BlockError,
@@ -1077,6 +1079,7 @@ fn verify_ticks(
}

#[allow(clippy::too_many_arguments)]
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn confirm_full_slot(
    blockstore: &Blockstore,
    bank: &BankWithScheduler,
@@ -1683,6 +1686,7 @@ fn confirm_slot_entries(
}

// Special handling required for processing the entries in slot 0
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn process_bank_0(
    bank0: &BankWithScheduler,
    blockstore: &Blockstore,
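
The `qualifier_attr` changes above follow a visibility-widening pattern: the functions stay private in normal builds but are compiled as `pub` when the `dev-context-only-utils` feature is on, which is how core's new test can call `confirm_full_slot` and `process_bank_0`. A minimal sketch of the pattern (assumes a crate that depends on `qualifier_attr` and declares the feature):

#[cfg(feature = "dev-context-only-utils")]
use qualifier_attr::qualifiers;

// Private by default; becomes `pub fn helper()` only in builds with the
// `dev-context-only-utils` feature enabled.
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn helper() -> u64 {
    42
}
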
1 change: 1 addition & 0 deletions programs/sbf/Cargo.lock
