Merge remote-tracking branch 'upstream/master'
lijunwangs committed Apr 6, 2024
2 parents d0cb618 + 01460ef commit cf0336f
Showing 26 changed files with 656 additions and 370 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -19,6 +19,8 @@ Release channels have their own copy of this changelog:
* `solana-rpc-client-api`: `RpcFilterError` depends on `base64` version 0.22, so users may need to upgrade to `base64` version 0.22
* Changed default value for `--health-check-slot-distance` from 150 to 128
* CLI: Can specify `--with-compute-unit-price` and `--max-sign-attempts` during program deployment
* RPC's `simulateTransaction` now returns an extra `replacementBlockhash` field in the response
when the `replaceRecentBlockhash` config param is `true` (#380)

## [1.18.0]
* Changes
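A minimal client-side sketch of the `replacementBlockhash` entry above, using the blocking `RpcClient` from `solana_client`. Only the JSON behavior is stated in the changelog; the Rust-side names `replacement_blockhash` and its `blockhash` field are assumptions for illustration.

```rust
use solana_client::{rpc_client::RpcClient, rpc_config::RpcSimulateTransactionConfig};
use solana_sdk::transaction::Transaction;

fn simulate_with_replacement(
    client: &RpcClient,
    tx: &Transaction,
) -> Result<(), Box<dyn std::error::Error>> {
    let config = RpcSimulateTransactionConfig {
        // Ask the node to substitute a valid recent blockhash before simulating.
        replace_recent_blockhash: true,
        ..RpcSimulateTransactionConfig::default()
    };
    let response = client.simulate_transaction_with_config(tx, config)?;
    // Assumed field name: the blockhash the node actually simulated with,
    // surfaced by the RPC change described in the changelog entry.
    if let Some(replacement) = response.value.replacement_blockhash {
        println!("simulated with blockhash {}", replacement.blockhash);
    }
    Ok(())
}
```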
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -173,7 +173,7 @@ bzip2 = "0.4.4"
caps = "0.5.5"
cargo_metadata = "0.15.4"
cc = "1.0.83"
chrono = { version = "0.4.34", default-features = false }
chrono = { version = "0.4.37", default-features = false }
chrono-humanize = "0.2.3"
clap = "2.33.1"
console = "0.15.8"
62 changes: 41 additions & 21 deletions accounts-db/src/accounts_db.rs
@@ -7874,38 +7874,46 @@ impl AccountsDb {
.entry(*slot)
.or_default()
.insert(account_info.offset());
if let Some(expected_slot) = expected_slot {
assert_eq!(*slot, expected_slot);
}
}
if let Some(expected_slot) = expected_slot {
assert_eq!(reclaimed_offsets.len(), 1);
assert!(reclaimed_offsets.contains_key(&expected_slot));
}

reclaimed_offsets.iter().for_each(|(slot, offsets)| {
let mut check_for_shrink = true;
if let Some(store) = self
.storage
.get_account_storage_entry(*slot, account_info.store_id())
.get_slot_storage_entry(*slot)
{
assert_eq!(
*slot, store.slot(),
"AccountsDB::accounts_index corrupted. Storage pointed to: {}, expected: {}, should only point to one slot",
store.slot(), *slot
);
let offset = account_info.offset();
let account = store.accounts.get_account(offset).unwrap();
let stored_size = account.0.stored_size();
let count = store.remove_account(stored_size, reset_accounts);
if count == 0 {
self.dirty_stores.insert(*slot, store.clone());
dead_slots.insert(*slot);
} else if Self::is_shrinking_productive(*slot, &store)
&& self.is_candidate_for_shrink(&store, false)
{
// Checking that this single storage entry is ready for shrinking,
// should be a sufficient indication that the slot is ready to be shrunk
// because slots should only have one storage entry, namely the one that was
// created by `flush_slot_cache()`.
let mut offsets = offsets.iter().cloned().collect::<Vec<_>>();
// sort so offsets are in order. This improves efficiency of loading the accounts.
offsets.sort_unstable();
offsets.iter().for_each(|offset| {
let account = store.accounts.get_account(*offset).unwrap();
let stored_size = account.0.stored_size();
let count = store.remove_account(stored_size, reset_accounts);
if count == 0 {
self.dirty_stores.insert(*slot, store.clone());
dead_slots.insert(*slot);
} else if check_for_shrink && Self::is_shrinking_productive(*slot, &store)
&& self.is_candidate_for_shrink(&store, false)
{
new_shrink_candidates.insert(*slot);
// Checking that this single storage entry is ready for shrinking,
// should be a sufficient indication that the slot is ready to be shrunk
// because slots should only have one storage entry, namely the one that was
// created by `flush_slot_cache()`.
new_shrink_candidates.insert(*slot);
check_for_shrink = false;
}
}
});
}
}
});
measure.stop();
self.clean_accounts_stats
.remove_dead_accounts_remove_us
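The hunk above stops resolving a storage entry per reclaimed `account_info` and instead groups reclaimed offsets by slot, fetches each slot's single storage once, and removes accounts in ascending offset order. A standalone sketch of that pattern follows; `remove_at` is a hypothetical stand-in for `store.remove_account(...)`, and the dead-slot and shrink bookkeeping is omitted.

```rust
use std::collections::{HashMap, HashSet};

type Slot = u64;

/// Minimal sketch of the "group by slot, then sweep each storage once" pattern.
fn remove_dead_accounts(reclaims: &[(Slot, usize)], mut remove_at: impl FnMut(Slot, usize)) {
    // 1) Group reclaimed offsets by slot so each slot's storage is visited once.
    let mut reclaimed_offsets: HashMap<Slot, HashSet<usize>> = HashMap::new();
    for &(slot, offset) in reclaims {
        reclaimed_offsets.entry(slot).or_default().insert(offset);
    }

    // 2) Sort each slot's offsets before removal; walking the storage in
    //    offset order improves the efficiency of loading the accounts.
    for (slot, offsets) in &reclaimed_offsets {
        let mut offsets: Vec<_> = offsets.iter().copied().collect();
        offsets.sort_unstable();
        for offset in offsets {
            remove_at(*slot, offset);
        }
    }
}
```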
@@ -8221,6 +8229,8 @@ impl AccountsDb {
read_only_cache_misses,
read_only_cache_evicts,
read_only_cache_load_us,
read_only_cache_store_us,
read_only_cache_evict_us,
) = self.read_only_accounts_cache.get_and_reset_stats();
datapoint_info!(
"accounts_db_store_timings",
@@ -8292,6 +8302,16 @@
read_only_cache_load_us,
i64
),
(
"read_only_accounts_cache_store_us",
read_only_cache_store_us,
i64
),
(
"read_only_accounts_cache_evict_us",
read_only_cache_evict_us,
i64
),
(
"calc_stored_meta_us",
self.stats.calc_stored_meta.swap(0, Ordering::Relaxed),
34 changes: 23 additions & 11 deletions accounts-db/src/read_only_accounts_cache.rs
@@ -3,7 +3,7 @@
use {
dashmap::{mapref::entry::Entry, DashMap},
index_list::{Index, IndexList},
solana_measure::measure_us,
solana_measure::{measure::Measure, measure_us},
solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::Slot,
@@ -36,6 +36,8 @@ struct ReadOnlyCacheStats {
misses: AtomicU64,
evicts: AtomicU64,
load_us: AtomicU64,
store_us: AtomicU64,
evict_us: AtomicU64,
}

impl ReadOnlyCacheStats {
@@ -44,15 +46,19 @@ impl ReadOnlyCacheStats {
self.misses.store(0, Ordering::Relaxed);
self.evicts.store(0, Ordering::Relaxed);
self.load_us.store(0, Ordering::Relaxed);
self.store_us.store(0, Ordering::Relaxed);
self.evict_us.store(0, Ordering::Relaxed);
}

fn get_and_reset_stats(&self) -> (u64, u64, u64, u64) {
fn get_and_reset_stats(&self) -> (u64, u64, u64, u64, u64, u64) {
let hits = self.hits.swap(0, Ordering::Relaxed);
let misses = self.misses.swap(0, Ordering::Relaxed);
let evicts = self.evicts.swap(0, Ordering::Relaxed);
let load_us = self.load_us.swap(0, Ordering::Relaxed);
let store_us = self.store_us.swap(0, Ordering::Relaxed);
let evict_us = self.evict_us.swap(0, Ordering::Relaxed);

(hits, misses, evicts, load_us)
(hits, misses, evicts, load_us, store_us, evict_us)
}
}

@@ -136,6 +142,7 @@ impl ReadOnlyAccountsCache {
}

pub(crate) fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) {
let measure_store = Measure::start("");
self.highest_slot_stored.fetch_max(slot, Ordering::Release);
let key = (pubkey, slot);
let account_size = self.account_size(&account);
@@ -162,14 +169,19 @@
};
// Evict entries from the front of the queue.
let mut num_evicts = 0;
while self.data_size.load(Ordering::Relaxed) > self.max_data_size {
let Some(&(pubkey, slot)) = self.queue.lock().unwrap().get_first() else {
break;
};
num_evicts += 1;
self.remove(pubkey, slot);
}
let (_, evict_us) = measure_us!({
while self.data_size.load(Ordering::Relaxed) > self.max_data_size {
let Some(&(pubkey, slot)) = self.queue.lock().unwrap().get_first() else {
break;
};
num_evicts += 1;
self.remove(pubkey, slot);
}
});
let store_us = measure_store.end_as_us();
self.stats.evicts.fetch_add(num_evicts, Ordering::Relaxed);
self.stats.evict_us.fetch_add(evict_us, Ordering::Relaxed);
self.stats.store_us.fetch_add(store_us, Ordering::Relaxed);
}

/// true if any pubkeys could have ever been stored into the cache at `slot`
@@ -208,7 +220,7 @@ impl ReadOnlyAccountsCache {
self.data_size.load(Ordering::Relaxed)
}

pub(crate) fn get_and_reset_stats(&self) -> (u64, u64, u64, u64) {
pub(crate) fn get_and_reset_stats(&self) -> (u64, u64, u64, u64, u64, u64) {
self.stats.get_and_reset_stats()
}
}
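The cache diff above adds store and evict timing using the two idioms from `solana_measure`: `Measure::start(..)` paired with `end_as_us()` around the whole store path, and the `measure_us!` macro, which yields `(result, elapsed_us)`, around just the eviction loop. A minimal sketch of that instrumentation pattern, with a hypothetical `Stats` struct standing in for `ReadOnlyCacheStats`:

```rust
use {
    solana_measure::{measure::Measure, measure_us},
    std::sync::atomic::{AtomicU64, Ordering},
};

#[derive(Default)]
struct Stats {
    store_us: AtomicU64,
    evict_us: AtomicU64,
}

/// Time the full store path and the eviction loop separately, then fold both
/// into relaxed atomic counters that a reporter later swaps out and resets.
fn store_with_timing(stats: &Stats, do_store: impl FnOnce(), do_evict: impl FnOnce()) {
    let measure_store = Measure::start("store");
    do_store();
    let (_, evict_us) = measure_us!(do_evict());
    let store_us = measure_store.end_as_us();
    stats.evict_us.fetch_add(evict_us, Ordering::Relaxed);
    stats.store_us.fetch_add(store_us, Ordering::Relaxed);
}
```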
13 changes: 8 additions & 5 deletions cli-output/src/display.rs
@@ -1,7 +1,7 @@
use {
crate::cli_output::CliSignatureVerificationStatus,
base64::{prelude::BASE64_STANDARD, Engine},
chrono::{Local, NaiveDateTime, SecondsFormat, TimeZone, Utc},
chrono::{DateTime, Local, SecondsFormat, TimeZone, Utc},
console::style,
indicatif::{ProgressBar, ProgressStyle},
solana_cli_config::SettingType,
@@ -715,10 +715,8 @@ pub fn new_spinner_progress_bar() -> ProgressBar {
}

pub fn unix_timestamp_to_string(unix_timestamp: UnixTimestamp) -> String {
match NaiveDateTime::from_timestamp_opt(unix_timestamp, 0) {
Some(ndt) => Utc
.from_utc_datetime(&ndt)
.to_rfc3339_opts(SecondsFormat::Secs, true),
match DateTime::from_timestamp(unix_timestamp, 0) {
Some(ndt) => ndt.to_rfc3339_opts(SecondsFormat::Secs, true),
None => format!("UnixTimestamp {unix_timestamp}"),
}
}
@@ -976,4 +974,9 @@ Rewards:
"abcdefghijklmnopqrstuvwxyz12345 (1111..1111)"
);
}

#[test]
fn test_unix_timestamp_to_string() {
assert_eq!(unix_timestamp_to_string(1628633791), "2021-08-10T22:16:31Z");
}
}
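The `unix_timestamp_to_string` rewrite above tracks newer chrono releases (the workspace now pins 0.4.37), which deprecate `NaiveDateTime::from_timestamp_opt` in favor of `DateTime::from_timestamp`. The replacement returns a `DateTime<Utc>` directly, so the `from_utc_datetime` hop disappears. A minimal standalone sketch, reusing the timestamp from the test added above:

```rust
use chrono::{DateTime, SecondsFormat};

fn main() {
    // DateTime::from_timestamp(secs, nsecs) returns Option<DateTime<Utc>>;
    // it is None only for out-of-range timestamps.
    let ts = DateTime::from_timestamp(1628633791, 0).expect("timestamp in range");
    assert_eq!(ts.to_rfc3339_opts(SecondsFormat::Secs, true), "2021-08-10T22:16:31Z");
    println!("{}", ts.to_rfc3339_opts(SecondsFormat::Secs, true));
}
```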
6 changes: 5 additions & 1 deletion ledger-tool/src/program.rs
@@ -522,7 +522,11 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) {
let mut loaded_programs =
bank.new_program_cache_for_tx_batch_for_slot(bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET);
for key in cached_account_keys {
loaded_programs.replenish(key, bank.load_program(&key, false, bank.epoch()));
loaded_programs.replenish(
key,
bank.load_program(&key, false, bank.epoch())
.expect("Couldn't find program account"),
);
debug!("Loaded program {}", key);
}
invoke_context.programs_loaded_for_tx_batch = &loaded_programs;
4 changes: 2 additions & 2 deletions local-cluster/src/cluster.rs
@@ -59,10 +59,10 @@ pub trait Cluster {
&mut self,
pubkey: &Pubkey,
cluster_validator_info: &mut ClusterValidatorInfo,
) -> (Node, Option<ContactInfo>);
) -> (Node, Vec<ContactInfo>);
fn restart_node_with_context(
cluster_validator_info: ClusterValidatorInfo,
restart_context: (Node, Option<ContactInfo>),
restart_context: (Node, Vec<ContactInfo>),
socket_addr_space: SocketAddrSpace,
) -> ClusterValidatorInfo;
fn add_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
46 changes: 29 additions & 17 deletions local-cluster/src/local_cluster.rs
@@ -586,7 +586,7 @@ impl LocalCluster {
let alive_node_contact_infos = self.discover_nodes(socket_addr_space, test_name);
info!(
"{} looking minimum root {} on all nodes",
min_root, test_name
test_name, min_root
);
cluster_tests::check_min_slot_is_rooted(
min_root,
@@ -907,23 +907,36 @@ impl Cluster for LocalCluster {
&mut self,
pubkey: &Pubkey,
cluster_validator_info: &mut ClusterValidatorInfo,
) -> (Node, Option<ContactInfo>) {
) -> (Node, Vec<ContactInfo>) {
// Update the stored ContactInfo for this node
let node = Node::new_localhost_with_pubkey(pubkey);
cluster_validator_info.info.contact_info = node.info.clone();
cluster_validator_info.config.rpc_addrs =
Some((node.info.rpc().unwrap(), node.info.rpc_pubsub().unwrap()));

let entry_point_info = {
if pubkey == self.entry_point_info.pubkey() {
self.entry_point_info = node.info.clone();
None
} else {
Some(self.entry_point_info.clone())
}
};
if pubkey == self.entry_point_info.pubkey() {
self.entry_point_info = node.info.clone();
}

let mut is_entrypoint_alive = false;
let mut entry_point_infos: Vec<ContactInfo> = self
.validators
.values()
.map(|validator| {
// Should not be restarting a validator that is still alive
assert!(validator.info.contact_info.pubkey() != pubkey);
if validator.info.contact_info.pubkey() == self.entry_point_info.pubkey() {
is_entrypoint_alive = true;
}
validator.info.contact_info.clone()
})
.collect();

(node, entry_point_info)
if !is_entrypoint_alive {
entry_point_infos.push(self.entry_point_info.clone());
}

(node, entry_point_infos)
}

fn set_entry_point(&mut self, entry_point_info: ContactInfo) {
@@ -951,7 +964,7 @@ impl Cluster for LocalCluster {

fn restart_node_with_context(
mut cluster_validator_info: ClusterValidatorInfo,
(node, entry_point_info): (Node, Option<ContactInfo>),
(node, entry_point_infos): (Node, Vec<ContactInfo>),
socket_addr_space: SocketAddrSpace,
) -> ClusterValidatorInfo {
// Restart the node
@@ -966,11 +979,10 @@
&validator_info.ledger_path,
&validator_info.voting_keypair.pubkey(),
Arc::new(RwLock::new(vec![validator_info.voting_keypair.clone()])),
entry_point_info
.map(|entry_point_info| {
vec![LegacyContactInfo::try_from(&entry_point_info).unwrap()]
})
.unwrap_or_default(),
entry_point_infos
.into_iter()
.map(|entry_point_info| LegacyContactInfo::try_from(&entry_point_info).unwrap())
.collect(),
&safe_clone_config(&cluster_validator_info.config),
true, // should_check_duplicate_instance
None, // rpc_to_plugin_manager_receiver
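With the trait change above, a restarted validator now receives the contact info of every still-running validator as a gossip entrypoint, plus the original cluster entry point if no live validator already serves that role, instead of at most one entrypoint. A simplified sketch of that selection logic, using hypothetical stand-in types for `Pubkey` and `ContactInfo`:

```rust
use std::collections::HashMap;

// Hypothetical, simplified stand-ins for the real Pubkey/ContactInfo types.
type Pubkey = u64;

#[derive(Clone)]
struct ContactInfo {
    pubkey: Pubkey,
}

/// Collect gossip entrypoints for a node being restarted: every currently
/// known validator, plus the cluster entry point if it is not among them.
fn entry_points_for_restart(
    restarting: Pubkey,
    validators: &HashMap<Pubkey, ContactInfo>,
    entry_point: &ContactInfo,
) -> Vec<ContactInfo> {
    let mut is_entrypoint_alive = false;
    let mut infos: Vec<ContactInfo> = validators
        .values()
        .map(|info| {
            // Should not be restarting a validator that is still in the live set.
            assert!(info.pubkey != restarting);
            if info.pubkey == entry_point.pubkey {
                is_entrypoint_alive = true;
            }
            info.clone()
        })
        .collect();
    if !is_entrypoint_alive {
        infos.push(entry_point.clone());
    }
    infos
}
```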
(Diffs for the remaining changed files are not shown.)
