Skip to content

Commit

Permalink
Shares accounts hash cache data between full and incremental
Browse files Browse the repository at this point in the history
  • Loading branch information
brooksprumo committed Sep 6, 2023
1 parent a3dc3eb commit a7c7f8a
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 21 deletions.
27 changes: 13 additions & 14 deletions accounts-db/src/accounts_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1487,8 +1487,7 @@ pub struct AccountsDb {
#[allow(dead_code)]
base_working_temp_dir: Option<TempDir>,

full_accounts_hash_cache_path: PathBuf,
incremental_accounts_hash_cache_path: PathBuf,
accounts_hash_cache_path: PathBuf,
transient_accounts_hash_cache_path: PathBuf,

pub shrink_paths: RwLock<Option<Vec<PathBuf>>>,
Expand Down Expand Up @@ -2487,9 +2486,8 @@ impl AccountsDb {
paths: vec![],
base_working_path,
base_working_temp_dir,
full_accounts_hash_cache_path: accounts_hash_cache_path.join("full"),
incremental_accounts_hash_cache_path: accounts_hash_cache_path.join("incremental"),
transient_accounts_hash_cache_path: accounts_hash_cache_path.join("transient"),
accounts_hash_cache_path,
shrink_paths: RwLock::new(None),
temp_paths: None,
file_size: DEFAULT_FILE_SIZE,
Expand Down Expand Up @@ -7626,18 +7624,20 @@ impl AccountsDb {
/// Returns the `CacheHashData` to use for an accounts hash calculation.
///
/// Normally the cache lives directly in `accounts_hash_cache_path`. When
/// `config.store_detailed_debug_info_on_failure` is set — i.e. we are
/// re-running the calculation to debug a hash mismatch — the cache is
/// redirected into a per-slot `failed_calculate_accounts_hash_cache`
/// subdirectory, which is cleared first so the debug run starts clean.
///
/// Old cache files are deleted on drop only for *full* accounts hash
/// calculations (`kind == CalcAccountsHashKind::Full`); incremental
/// calculations leave them in place so cache data can be shared between
/// full and incremental runs.
fn get_cache_hash_data(
    accounts_hash_cache_path: PathBuf,
    config: &CalcAccountsHashConfig<'_>,
    kind: CalcAccountsHashKind,
    slot: Slot,
) -> CacheHashData {
    let accounts_hash_cache_path = if !config.store_detailed_debug_info_on_failure {
        accounts_hash_cache_path
    } else {
        // this path executes when we are failing with a hash mismatch
        let failed_dir = accounts_hash_cache_path
            .join("failed_calculate_accounts_hash_cache")
            .join(slot.to_string());
        // best-effort cleanup: ignore errors (e.g. the dir may not exist yet)
        _ = std::fs::remove_dir_all(&failed_dir);
        failed_dir
    };
    CacheHashData::new(accounts_hash_cache_path, kind == CalcAccountsHashKind::Full)
}

// modeled after calculate_accounts_delta_hash
Expand All @@ -7653,7 +7653,6 @@ impl AccountsDb {
storages,
stats,
CalcAccountsHashKind::Full,
self.full_accounts_hash_cache_path.clone(),
)?;
let AccountsHashKind::Full(accounts_hash) = accounts_hash else {
panic!("calculate_accounts_hash_from_storages must return a FullAccountsHash");
Expand Down Expand Up @@ -7681,7 +7680,6 @@ impl AccountsDb {
storages,
stats,
CalcAccountsHashKind::Incremental,
self.incremental_accounts_hash_cache_path.clone(),
)?;
let AccountsHashKind::Incremental(incremental_accounts_hash) = accounts_hash else {
panic!("calculate_incremental_accounts_hash must return an IncrementalAccountsHash");
Expand All @@ -7695,7 +7693,6 @@ impl AccountsDb {
storages: &SortedStorages<'_>,
mut stats: HashStats,
kind: CalcAccountsHashKind,
accounts_hash_cache_path: PathBuf,
) -> Result<(AccountsHashKind, u64), AccountsHashVerificationError> {
let total_time = Measure::start("");
let _guard = self.active_stats.activate(ActiveStatItem::Hash);
Expand All @@ -7705,10 +7702,12 @@ impl AccountsDb {

let slot = storages.max_slot_inclusive();
let use_bg_thread_pool = config.use_bg_thread_pool;
let accounts_hash_cache_path = self.accounts_hash_cache_path.clone();
let scan_and_hash = || {
let (cache_hash_data, cache_hash_data_us) = measure_us!(Self::get_cache_hash_data(
accounts_hash_cache_path,
config,
kind,
slot
));
stats.cache_hash_data_us += cache_hash_data_us;
Expand Down Expand Up @@ -9971,7 +9970,7 @@ pub mod tests {
let temp_dir = TempDir::new().unwrap();
let accounts_hash_cache_path = temp_dir.path().to_path_buf();
self.scan_snapshot_stores_with_cache(
&CacheHashData::new(accounts_hash_cache_path),
&CacheHashData::new(accounts_hash_cache_path, true),
storage,
stats,
bins,
Expand Down Expand Up @@ -11011,7 +11010,7 @@ pub mod tests {
};

let result = accounts_db.scan_account_storage_no_bank(
&CacheHashData::new(accounts_hash_cache_path),
&CacheHashData::new(accounts_hash_cache_path, true),
&CalcAccountsHashConfig::default(),
&get_storage_refs(&[storage]),
test_scan,
Expand Down
17 changes: 10 additions & 7 deletions accounts-db/src/cache_hash_data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -196,29 +196,32 @@ impl CacheHashDataFile {
}
}

pub type PreExistingCacheFiles = HashSet<PathBuf>;
pub struct CacheHashData {
cache_dir: PathBuf,
pre_existing_cache_files: Arc<Mutex<PreExistingCacheFiles>>,
pre_existing_cache_files: Arc<Mutex<HashSet<PathBuf>>>,
should_delete_old_cache_files_on_drop: bool,
pub stats: Arc<CacheHashDataStats>,
}

impl Drop for CacheHashData {
    /// Reports stats and, when configured to do so, deletes the unused
    /// pre-existing cache files.
    ///
    /// Only full accounts hash calculations clean up old cache files;
    /// incremental calculations skip the deletion so the cache data remains
    /// available to be shared with subsequent runs.
    fn drop(&mut self) {
        if self.should_delete_old_cache_files_on_drop {
            self.delete_old_cache_files();
        }
        self.stats.report();
    }
}

impl CacheHashData {
pub fn new(cache_dir: PathBuf) -> CacheHashData {
pub fn new(cache_dir: PathBuf, should_delete_old_cache_files_on_drop: bool) -> CacheHashData {
std::fs::create_dir_all(&cache_dir).unwrap_or_else(|err| {
panic!("error creating cache dir {}: {err}", cache_dir.display())
});

let result = CacheHashData {
cache_dir,
pre_existing_cache_files: Arc::new(Mutex::new(PreExistingCacheFiles::default())),
pre_existing_cache_files: Arc::new(Mutex::new(HashSet::default())),
should_delete_old_cache_files_on_drop,
stats: Arc::default(),
};

Expand Down Expand Up @@ -305,7 +308,7 @@ impl CacheHashData {
reference.map()
}

pub(crate) fn pre_existing_cache_file_will_be_used(&self, file_name: impl AsRef<Path>) {
fn pre_existing_cache_file_will_be_used(&self, file_name: impl AsRef<Path>) {
self.pre_existing_cache_files
.lock()
.unwrap()
Expand Down Expand Up @@ -421,7 +424,7 @@ pub mod tests {
data_this_pass.push(this_bin_data);
}
}
let cache = CacheHashData::new(cache_dir.clone());
let cache = CacheHashData::new(cache_dir.clone(), true);
let file_name = PathBuf::from("test");
cache.save(&file_name, &data_this_pass).unwrap();
cache.get_cache_files();
Expand Down

0 comments on commit a7c7f8a

Please sign in to comment.