Skip to content

Commit

Permalink
Hash stored accounts in bg (#16157)
Browse files Browse the repository at this point in the history
* lazy calculate account hash

* push to bg thread

* remove deadlock

* logs

* format

* some cleanup on aisle 9

* format, fix up some metrics

* fix test, remove legacy function only there for tests

* cleanup

* remove unused store_hasher

* Switch to crossbeam

* clippy

* format

* use iter()

* rework from feedback

* hash_slot -> slot

* hash(cluster_type)

Co-authored-by: Carl Lin <[email protected]>
  • Loading branch information
jeffwashington and carllin authored Mar 31, 2021
1 parent 6f3926b commit f374b35
Show file tree
Hide file tree
Showing 4 changed files with 233 additions and 87 deletions.
62 changes: 53 additions & 9 deletions runtime/src/accounts_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ use dashmap::DashMap;
use solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::Slot,
genesis_config::ClusterType,
hash::Hash,
pubkey::Pubkey,
};
Expand Down Expand Up @@ -47,7 +48,13 @@ impl SlotCacheInner {
);
}

pub fn insert(&self, pubkey: &Pubkey, account: AccountSharedData, hash: Hash) {
pub fn insert(
&self,
pubkey: &Pubkey,
account: AccountSharedData,
hash: Option<Hash>,
slot: Slot,
) -> CachedAccount {
if self.cache.contains_key(pubkey) {
self.same_account_writes.fetch_add(1, Ordering::Relaxed);
self.same_account_writes_size
Expand All @@ -56,7 +63,14 @@ impl SlotCacheInner {
self.unique_account_writes_size
.fetch_add(account.data().len() as u64, Ordering::Relaxed);
}
self.cache.insert(*pubkey, CachedAccount { account, hash });
let item = Arc::new(CachedAccountInner {
account,
hash: RwLock::new(hash),
slot,
pubkey: *pubkey,
});
self.cache.insert(*pubkey, item.clone());
item
}

pub fn get_cloned(&self, pubkey: &Pubkey) -> Option<CachedAccount> {
Expand Down Expand Up @@ -89,10 +103,34 @@ impl Deref for SlotCacheInner {
}
}

#[derive(Debug, Clone)]
pub struct CachedAccount {
pub type CachedAccount = Arc<CachedAccountInner>;

#[derive(Debug)]
pub struct CachedAccountInner {
pub account: AccountSharedData,
pub hash: Hash,
hash: RwLock<Option<Hash>>,
slot: Slot,
pubkey: Pubkey,
}

impl CachedAccountInner {
    /// Returns this account's hash, computing and memoizing it on first use.
    ///
    /// Uses double-checked locking: a cheap read-lock fast path, then a
    /// re-check after acquiring the write lock. Without the re-check, two
    /// callers racing past the read lock would both recompute the hash
    /// (correct, since hashing is deterministic, but wasted work).
    pub fn hash(&self, cluster_type: ClusterType) -> Hash {
        // Fast path: hash already computed; hold only the read lock.
        if let Some(hash) = *self.hash.read().unwrap() {
            return hash;
        }
        let mut hash_guard = self.hash.write().unwrap();
        // Re-check under the write lock: another thread may have filled
        // the hash between our read-lock release and write-lock acquire.
        if let Some(hash) = *hash_guard {
            return hash;
        }
        let hash = crate::accounts_db::AccountsDb::hash_account(
            self.slot,
            &self.account,
            &self.pubkey,
            &cluster_type,
        );
        *hash_guard = Some(hash);
        hash
    }
}

#[derive(Debug, Default)]
Expand Down Expand Up @@ -128,7 +166,13 @@ impl AccountsCache {
);
}

pub fn store(&self, slot: Slot, pubkey: &Pubkey, account: AccountSharedData, hash: Hash) {
pub fn store(
&self,
slot: Slot,
pubkey: &Pubkey,
account: AccountSharedData,
hash: Option<Hash>,
) -> CachedAccount {
let slot_cache = self.slot_cache(slot).unwrap_or_else(||
// DashMap entry.or_insert() returns a RefMut, essentially a write lock,
// which is dropped after this block ends, minimizing time held by the lock.
Expand All @@ -140,7 +184,7 @@ impl AccountsCache {
.or_insert(Arc::new(SlotCacheInner::default()))
.clone());

slot_cache.insert(pubkey, account, hash);
slot_cache.insert(pubkey, account, hash, slot)
}

pub fn load(&self, slot: Slot, pubkey: &Pubkey) -> Option<CachedAccount> {
Expand Down Expand Up @@ -241,7 +285,7 @@ pub mod tests {
inserted_slot,
&Pubkey::new_unique(),
AccountSharedData::new(1, 0, &Pubkey::default()),
Hash::default(),
Some(Hash::default()),
);
// If the cache is told the size limit is 0, it should return the one slot
let removed = cache.remove_slots_le(0);
Expand All @@ -259,7 +303,7 @@ pub mod tests {
inserted_slot,
&Pubkey::new_unique(),
AccountSharedData::new(1, 0, &Pubkey::default()),
Hash::default(),
Some(Hash::default()),
);

// If the cache is told the size limit is 0, it should return nothing because there's only
Expand Down
Loading

0 comments on commit f374b35

Please sign in to comment.