
Configurable hashes per tick #29659

Merged · 1 commit · Jan 12, 2023

4 changes: 4 additions & 0 deletions entry/src/poh.rs
@@ -45,6 +45,10 @@ impl Poh {
         *self = Poh::new_with_slot_info(hash, hashes_per_tick, tick_number);
     }

+    pub fn hashes_per_tick(&self) -> u64 {
+        self.hashes_per_tick
+    }
+
     pub fn target_poh_time(&self, target_ns_per_tick: u64) -> Instant {
         assert!(self.hashes_per_tick > 0);
         let offset_tick_ns = target_ns_per_tick * self.tick_number;
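
The new hashes_per_tick() getter lets code that drives a Poh instance (the PohRecorder changes below) ask what hash rate PoH is currently configured for and compare it against what a bank expects. A minimal, self-contained sketch of that comparison, using a simplified stand-in type and illustrative numbers rather than the crate's real ones:

    // Simplified stand-in for Poh; the values are illustrative.
    struct Poh {
        hashes_per_tick: u64,
    }

    impl Poh {
        // Mirrors the getter added above.
        fn hashes_per_tick(&self) -> u64 {
            self.hashes_per_tick
        }
    }

    fn main() {
        let poh = Poh { hashes_per_tick: 12_500 };

        // A bank exposes Option<u64>; callers act only when a value is set.
        let bank_hashes_per_tick: Option<u64> = Some(62_500);

        if let Some(expected) = bank_hashes_per_tick {
            if poh.hashes_per_tick() != expected {
                println!(
                    "hashes per tick changed: {} -> {}",
                    poh.hashes_per_tick(),
                    expected
                );
            }
        }
    }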
31 changes: 24 additions & 7 deletions poh/src/poh_recorder.rs
@@ -225,7 +225,6 @@ pub struct PohRecorder {
     id: Pubkey,
     blockstore: Arc<Blockstore>,
     leader_schedule_cache: Arc<LeaderScheduleCache>,
-    poh_config: PohConfig,
     ticks_per_slot: u64,
     target_ns_per_tick: u64,
     record_lock_contention_us: u64,
@@ -460,13 +459,11 @@ impl PohRecorder {
         ))
     }

-    // synchronize PoH with a bank
-    pub fn reset(&mut self, reset_bank: Arc<Bank>, next_leader_slot: Option<(Slot, Slot)>) {
-        self.clear_bank();
+    fn reset_poh(&mut self, reset_bank: Arc<Bank>, reset_start_bank: bool) {
         let blockhash = reset_bank.last_blockhash();
         let poh_hash = {
             let mut poh = self.poh.lock().unwrap();
-            poh.reset(blockhash, self.poh_config.hashes_per_tick);
+            poh.reset(blockhash, *reset_bank.hashes_per_tick());
             poh.hash
         };
         info!(
@@ -479,9 +476,17 @@
         );

         self.tick_cache = vec![];
-        self.start_bank = reset_bank;
+        if reset_start_bank {
+            self.start_bank = reset_bank;
+        }
         self.tick_height = (self.start_slot() + 1) * self.ticks_per_slot;
         self.start_tick_height = self.tick_height + 1;
+    }
+
+    // synchronize PoH with a bank
+    pub fn reset(&mut self, reset_bank: Arc<Bank>, next_leader_slot: Option<(Slot, Slot)>) {
+        self.clear_bank();
+        self.reset_poh(reset_bank, true);

         if let Some(ref sender) = self.poh_timing_point_sender {
             // start_slot() is the parent slot. current slot is start_slot() + 1.
@@ -513,6 +518,19 @@
         };
         trace!("new working bank");
         assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot());
+        if let Some(hashes_per_tick) = *working_bank.bank.hashes_per_tick() {
+            if self.poh.lock().unwrap().hashes_per_tick() != hashes_per_tick {
+                // We must clear/reset poh when changing hashes per tick because it's
+                // possible there are ticks in the cache created with the old hashes per
+                // tick value that would get flushed later. This would corrupt the leader's
+                // block and it would be disregarded by the network.
+                info!(
+                    "resetting poh due to hashes per tick change detected at {}",
+                    working_bank.bank.slot()
+                );
+                self.reset_poh(working_bank.clone().bank, false);
+            }
+        }
         self.working_bank = Some(working_bank);

         // send poh slot start timing point
@@ -865,7 +883,6 @@ impl PohRecorder {
             leader_schedule_cache: leader_schedule_cache.clone(),
             ticks_per_slot,
             target_ns_per_tick,
-            poh_config: poh_config.clone(),
             record_lock_contention_us: 0,
             flush_cache_tick_us: 0,
             flush_cache_no_tick_us: 0,
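
The guard added to set_working_bank above is the core of this change: when the incoming bank's hashes_per_tick differs from the value the running Poh was configured with, the recorder resets PoH (leaving start_bank untouched) so that no ticks produced under the old setting get flushed into the new block. A hypothetical, self-contained sketch of the hazard the in-code comment describes; the numbers are illustrative, not real cluster parameters:

    // If cached ticks produced under the old hashes-per-tick value were
    // flushed into a slot that now expects the new value, the slot's total
    // hash count would no longer match what replaying validators compute.
    fn main() {
        let old_hashes_per_tick: u64 = 12_500;
        let new_hashes_per_tick: u64 = 62_500;
        let ticks_per_slot: u64 = 64;

        // Suppose two ticks were cached before the change took effect.
        let cached_ticks = 2;
        let mixed_total = cached_ticks * old_hashes_per_tick
            + (ticks_per_slot - cached_ticks) * new_hashes_per_tick;
        let expected_total = ticks_per_slot * new_hashes_per_tick;

        // The mismatch is exactly why the recorder resets PoH first.
        assert_ne!(mixed_total, expected_total);
        println!("mixed: {mixed_total}, expected: {expected_total}");
    }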
52 changes: 45 additions & 7 deletions runtime/src/bank.rs
@@ -109,9 +109,10 @@ use {
         account_utils::StateMut,
         bpf_loader_upgradeable::{self, UpgradeableLoaderState},
         clock::{
-            BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_TICKS_PER_SECOND,
-            INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
-            MAX_TRANSACTION_FORWARDING_DELAY_GPU, SECONDS_PER_DAY,
+            BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK,
+            DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE,
+            MAX_TRANSACTION_FORWARDING_DELAY, MAX_TRANSACTION_FORWARDING_DELAY_GPU,
+            SECONDS_PER_DAY,
         },
         ed25519_program,
         epoch_info::EpochInfo,
@@ -1921,10 +1922,6 @@ impl Bank {
             "Bank snapshot genesis creation time does not match genesis.bin creation time.\
              The snapshot and genesis.bin might pertain to different clusters"
         );
-        assert_eq!(
-            bank.hashes_per_tick,
-            genesis_config.poh_config.hashes_per_tick
-        );
         assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot);
         assert_eq!(
             bank.ns_per_slot,
@@ -7483,6 +7480,19 @@ impl Bank {
             const ACCOUNTS_DATA_LEN: u64 = 50_000_000_000;
             self.accounts_data_size_initial = ACCOUNTS_DATA_LEN;
         }
+
+        if new_feature_activations.contains(&feature_set::update_hashes_per_tick::id()) {
+            self.apply_updated_hashes_per_tick(DEFAULT_HASHES_PER_TICK);
+        }
     }
+
+    fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) {
+        info!(
+            "Activating update_hashes_per_tick {} at slot {}",
+            hashes_per_tick,
+            self.slot(),
+        );
+        self.hashes_per_tick = Some(hashes_per_tick);
+    }

     fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) {
@@ -20272,4 +20282,32 @@ pub(crate) mod tests {
             )),
         );
     }
+
+    #[test]
+    fn test_feature_activation_idempotent() {
+        let mut genesis_config = GenesisConfig::default();
+        const HASHES_PER_TICK_START: u64 = 3;
+        genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK_START);
+
+        let mut bank = Bank::new_for_tests(&genesis_config);
+        assert_eq!(bank.hashes_per_tick, Some(HASHES_PER_TICK_START));
+
+        // Don't activate feature
+        bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false);
+        assert_eq!(bank.hashes_per_tick, Some(HASHES_PER_TICK_START));
+
+        // Activate feature
+        let feature_account_balance =
+            std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1);
+        bank.store_account(
+            &feature_set::update_hashes_per_tick::id(),
+            &feature::create_account(&Feature { activated_at: None }, feature_account_balance),
+        );
+        bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false);
+        assert_eq!(bank.hashes_per_tick, Some(DEFAULT_HASHES_PER_TICK));
+
+        // Activate feature "again"
+        bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false);
+        assert_eq!(bank.hashes_per_tick, Some(DEFAULT_HASHES_PER_TICK));
+    }
 }
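
Note that apply_feature_activations only applies the update when update_hashes_per_tick appears in the set of features newly activated by this bank; later calls see an empty "newly activated" set and leave hashes_per_tick alone, which is the idempotence the test above checks. A simplified, self-contained sketch of that pattern (stand-in types; only DEFAULT_HASHES_PER_TICK = 12_500 matches the SDK constant):

    use std::collections::HashSet;

    const DEFAULT_HASHES_PER_TICK: u64 = 12_500;

    struct Bank {
        hashes_per_tick: Option<u64>,
    }

    impl Bank {
        // Only features activating right now are applied; re-running is a no-op.
        fn apply_feature_activations(&mut self, newly_activated: &HashSet<&str>) {
            if newly_activated.contains("update_hashes_per_tick") {
                self.hashes_per_tick = Some(DEFAULT_HASHES_PER_TICK);
            }
        }
    }

    fn main() {
        let mut bank = Bank { hashes_per_tick: Some(3) };

        // Feature not yet activated: nothing changes.
        bank.apply_feature_activations(&HashSet::new());
        assert_eq!(bank.hashes_per_tick, Some(3));

        // The bank where the feature first activates updates the value once.
        let newly: HashSet<&str> = HashSet::from(["update_hashes_per_tick"]);
        bank.apply_feature_activations(&newly);
        assert_eq!(bank.hashes_per_tick, Some(DEFAULT_HASHES_PER_TICK));

        // Subsequent banks see an empty set again: a no-op.
        bank.apply_feature_activations(&HashSet::new());
        assert_eq!(bank.hashes_per_tick, Some(DEFAULT_HASHES_PER_TICK));
    }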
5 changes: 5 additions & 0 deletions sdk/src/feature_set.rs
@@ -578,6 +578,10 @@ pub mod move_serialized_len_ptr_in_cpi {
     solana_sdk::declare_id!("74CoWuBmt3rUVUrCb2JiSTvh6nXyBWUsK4SaMj3CtE3T");
 }

+pub mod update_hashes_per_tick {
+    solana_sdk::declare_id!("3uFHb9oKdGfgZGJK9EHaAXN4USvnQtAFC13Fh5gGFS5B");
+}
+
 lazy_static! {
     /// Map of feature identifiers to user-visible description
     pub static ref FEATURE_NAMES: HashMap<Pubkey, &'static str> = [
@@ -717,6 +721,7 @@ lazy_static! {
         (enable_turbine_fanout_experiments::id(), "enable turbine fanout experiments #29393"),
         (disable_turbine_fanout_experiments::id(), "disable turbine fanout experiments #29393"),
         (move_serialized_len_ptr_in_cpi::id(), "cpi ignore serialized_len_ptr #29592"),
+        (update_hashes_per_tick::id(), "Update desired hashes per tick on epoch boundary"),
         /*************** ADD NEW FEATURES HERE ***************/
     ]
     .iter()
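
For completeness, a sketch of how runtime code can consult this registry once the gate ships, assuming a dependency on the solana-sdk crate and its FeatureSet API (where FeatureSet::default() starts with no features active and FeatureSet::all_enabled() with all of them):

    use solana_sdk::feature_set::{self, FeatureSet};

    fn main() {
        // No features active, as on a freshly created test bank.
        let inactive = FeatureSet::default();
        assert!(!inactive.is_active(&feature_set::update_hashes_per_tick::id()));

        // Every known feature active.
        let active = FeatureSet::all_enabled();
        assert!(active.is_active(&feature_set::update_hashes_per_tick::id()));
    }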