Skip to content

Commit

Permalink
local-cluster: fix flaky optimistic_confirmation tests
Browse files Browse the repository at this point in the history
  • Loading branch information
AshwinSekar committed Feb 28, 2024
1 parent 1408182 commit e667cfb
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 8 deletions.
2 changes: 1 addition & 1 deletion ledger/src/leader_schedule.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ pub struct FixedSchedule {
}

/// Stake-weighted leader schedule for one epoch.
#[derive(Debug, Default, PartialEq, Eq)]
#[derive(Debug, Default, PartialEq, Eq, Clone)]
pub struct LeaderSchedule {
slot_leaders: Vec<Pubkey>,
// Inverted index from pubkeys to indices where they are the leader.
Expand Down
41 changes: 34 additions & 7 deletions local-cluster/tests/local_cluster.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3122,7 +3122,7 @@ fn test_optimistic_confirmation_violation_without_tower() {
// `A` should not be able to generate a switching proof.
//
fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: bool) {
solana_logger::setup_with("debug");
solana_logger::setup_with("info");

// First set up the cluster with 4 nodes
let slots_per_epoch = 2048;
Expand Down Expand Up @@ -3172,29 +3172,36 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
// below only for slots <= `next_slot_on_a`, validator A will not know how it's last vote chains
// to the other forks, and may violate switching proofs on restart.
let mut default_config = ValidatorConfig::default_for_test();
// Split leader schedule 50-50 between validators B and C, don't give validator A any slots because
// Ensure B can make leader blocks up until the fork slot, and give the remaining slots to C. Don't give validator A any slots because
// it's going to be deleting its ledger, so may create versions of slots it's already created, but
// on a different fork.
let validator_to_slots = vec![
// Ensure validator b is leader for slots <= `next_slot_on_a`
(validator_b_pubkey, next_slot_on_a as usize + 1),
(validator_c_pubkey, next_slot_on_a as usize + 1),
(validator_c_pubkey, DEFAULT_SLOTS_PER_EPOCH as usize),
];
// Trick C into not producing any blocks, in case it takes too long to kill it
let c_validator_to_slots = vec![(validator_b_pubkey, DEFAULT_SLOTS_PER_EPOCH as usize)];

let c_leader_schedule = create_custom_leader_schedule(c_validator_to_slots.into_iter());
let leader_schedule = create_custom_leader_schedule(validator_to_slots.into_iter());
for slot in 0..=next_slot_on_a {
assert_eq!(leader_schedule[slot], validator_b_pubkey);
}

default_config.fixed_leader_schedule = Some(FixedSchedule {
leader_schedule: Arc::new(leader_schedule),
leader_schedule: Arc::new(leader_schedule.clone()),
});
let mut validator_configs =
make_identical_validator_configs(&default_config, node_stakes.len());

// Disable voting on validators C, and D
validator_configs[2].voting_disabled = true;
validator_configs[3].voting_disabled = true;
// C should not produce any blocks at this time
validator_configs[2].fixed_leader_schedule = Some(FixedSchedule {
leader_schedule: Arc::new(c_leader_schedule),
});

let mut config = ClusterConfig {
cluster_lamports: DEFAULT_CLUSTER_LAMPORTS + node_stakes.iter().sum::<u64>(),
Expand Down Expand Up @@ -3336,17 +3343,36 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
// Run validator C only to make it produce and vote on its own fork.
info!("Restart validator C again!!!");
validator_c_info.config.voting_disabled = false;
// C should now produce blocks
validator_c_info.config.fixed_leader_schedule = Some(FixedSchedule {
leader_schedule: Arc::new(leader_schedule),
});
cluster.restart_node(
&validator_c_pubkey,
validator_c_info,
SocketAddrSpace::Unspecified,
);

let mut votes_on_c_fork = std::collections::BTreeSet::new(); // S4 and S5
for _ in 0..100 {
let mut last_vote = 0;
let now = Instant::now();
loop {
let elapsed = now.elapsed();
assert!(
elapsed <= Duration::from_secs(30),
"C failed to create a fork past {} in {} seconds,
last_vote {},
votes_on_c_fork: {:?}",
base_slot,
elapsed.as_secs(),
last_vote,
votes_on_c_fork,
);
sleep(Duration::from_millis(100));

if let Some((last_vote, _)) = last_vote_in_tower(&val_c_ledger_path, &validator_c_pubkey) {
if let Some((latest_vote, _)) = last_vote_in_tower(&val_c_ledger_path, &validator_c_pubkey)
{
last_vote = latest_vote;
if last_vote != base_slot {
votes_on_c_fork.insert(last_vote);
// Collect 4 votes
Expand All @@ -3357,7 +3383,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
}
}
assert!(!votes_on_c_fork.is_empty());
info!("collected validator C's votes: {:?}", votes_on_c_fork);
info!("Collected validator C's votes: {:?}", votes_on_c_fork);

// Step 4:
// verify whether there was violation or not
Expand All @@ -3375,6 +3401,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
sleep(Duration::from_millis(100));

if let Some((last_vote, _)) = last_vote_in_tower(&val_a_ledger_path, &validator_a_pubkey) {
info!(">>>A's LAST VOTE {}", last_vote);
a_votes.push(last_vote);
let blockstore = open_blockstore(&val_a_ledger_path);
let mut ancestors = AncestorIterator::new(last_vote, &blockstore);
Expand Down

0 comments on commit e667cfb

Please sign in to comment.