Skip to content

Commit

Permalink
fix(storage-proofs-porep): use file locking for cache generation (#1179
Browse files Browse the repository at this point in the history
)
  • Loading branch information
dignifiedquire authored Jun 19, 2020
1 parent 96755e8 commit fa103a9
Show file tree
Hide file tree
Showing 3 changed files with 57 additions and 51 deletions.
21 changes: 17 additions & 4 deletions storage-proofs/core/src/parameter_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ pub const PARAMETER_METADATA_EXT: &str = "meta";
pub const VERIFYING_KEY_EXT: &str = "vk";

// A file handle whose OS-level advisory lock is tied to the handle's lifetime.
// NOTE(review): this span is a rendered diff — both the old and new
// declaration lines appear below; the commit changes visibility only.
#[derive(Debug)]
// old (pre-commit) declaration, crate-private:
struct LockedFile(File);
// new declaration: made `pub` so the locking helpers can be reused elsewhere
pub struct LockedFile(File);

// TODO: use in memory lock as well, as file locks do not guarantee exclusive access across OSes.

Expand All @@ -47,6 +47,19 @@ impl LockedFile {

Ok(LockedFile(f))
}

pub fn open_shared_read<P: AsRef<Path>>(p: P) -> io::Result<Self> {
let f = fs::OpenOptions::new().read(true).open(p)?;
f.lock_shared()?;

Ok(LockedFile(f))
}
}

impl AsRef<File> for LockedFile {
fn as_ref(&self) -> &File {
&self.0
}
}

impl io::Write for LockedFile {
Expand Down Expand Up @@ -328,21 +341,21 @@ fn write_cached_params(
})
}

fn with_exclusive_lock<T>(
/// Runs `f` while holding an exclusive (write) lock on `file_path`,
/// obtained via `LockedFile::open_exclusive`.
///
/// Returns whatever `f` returns. The lock is presumably released when the
/// `LockedFile` is dropped after `f` completes — confirm in `LockedFile`'s
/// `Drop` impl (not shown in this view).
pub fn with_exclusive_lock<T>(
file_path: &PathBuf,
f: impl FnOnce(&mut LockedFile) -> Result<T>,
) -> Result<T> {
with_open_file(file_path, LockedFile::open_exclusive, f)
}

fn with_exclusive_read_lock<T>(
/// Runs `f` while holding a lock on `file_path` obtained via
/// `LockedFile::open_exclusive_read` (read-only open with an exclusive lock,
/// going by the helper's name — its body is not shown in this view).
///
/// Returns whatever `f` returns.
pub fn with_exclusive_read_lock<T>(
file_path: &PathBuf,
f: impl FnOnce(&mut LockedFile) -> Result<T>,
) -> Result<T> {
with_open_file(file_path, LockedFile::open_exclusive_read, f)
}

fn with_open_file<'a, T>(
pub fn with_open_file<'a, T>(
file_path: &'a PathBuf,
open_file: impl FnOnce(&'a PathBuf) -> io::Result<LockedFile>,
f: impl FnOnce(&mut LockedFile) -> Result<T>,
Expand Down
4 changes: 2 additions & 2 deletions storage-proofs/core/src/settings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,15 +28,15 @@ pub struct Settings {
/// Default configuration values for `Settings`.
/// NOTE(review): this span is a rendered diff — for `maximize_caching` and
/// `sdr_parents_cache_size` both the removed (old) and added (new) lines
/// appear below.
impl Default for Settings {
fn default() -> Self {
Settings {
// old value (removed by this commit):
maximize_caching: false,
// new value: caching enabled by default
maximize_caching: true,
pedersen_hash_exp_window_size: 16,
use_gpu_column_builder: false,
max_gpu_column_batch_size: 400_000,
column_write_batch_size: 262_144,
use_gpu_tree_builder: false,
max_gpu_tree_batch_size: 700_000,
rows_to_discard: 2,
// old spelling (removed by this commit):
sdr_parents_cache_size: 2048,
// new spelling: same value, with a digit separator for readability
sdr_parents_cache_size: 2_048,
}
}
}
Expand Down
83 changes: 38 additions & 45 deletions storage-proofs/porep/src/stacked/vanilla/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ use storage_proofs_core::{
drgraph::BASE_DEGREE,
error::Result,
hasher::Hasher,
parameter_cache::{ParameterSetMetadata, VERSION},
parameter_cache::{with_exclusive_lock, LockedFile, ParameterSetMetadata, VERSION},
};

use super::graph::{StackedGraph, DEGREE};
Expand Down Expand Up @@ -41,7 +41,7 @@ struct CacheData {
/// Len in nodes.
len: u32,
/// The underlyling file.
file: std::fs::File,
file: LockedFile,
}

impl CacheData {
Expand All @@ -61,7 +61,7 @@ impl CacheData {
memmap::MmapOptions::new()
.offset(offset as u64)
.len(len)
.map(&self.file)
.map(self.file.as_ref())
.context("could not shift mmap}")?
};
self.offset = new_offset;
Expand Down Expand Up @@ -98,12 +98,10 @@ impl CacheData {
fn open(offset: u32, len: u32, path: &PathBuf) -> Result<Self> {
let min_cache_size = (offset + len) as usize * DEGREE * NODE_BYTES;

let file = std::fs::OpenOptions::new()
.read(true)
.open(&path)
let file = LockedFile::open_shared_read(path)
.with_context(|| format!("could not open path={}", path.display()))?;

let actual_len = file.metadata()?.len();
let actual_len = file.as_ref().metadata()?.len();
if actual_len < min_cache_size as u64 {
bail!(
"corrupted cache: {}, expected at least {}, got {} bytes",
Expand All @@ -117,7 +115,7 @@ impl CacheData {
memmap::MmapOptions::new()
.offset((offset as usize * DEGREE * NODE_BYTES) as u64)
.len(len as usize * DEGREE * NODE_BYTES)
.map(&file)
.map(file.as_ref())
.with_context(|| format!("could not mmap path={}", path.display()))?
};

Expand Down Expand Up @@ -171,43 +169,38 @@ impl ParentCache {
{
info!("parent cache: generating {}", path.display());

std::fs::create_dir_all(PARENT_CACHE_DIR).context("unable to crate parent cache dir")?;

let file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.with_context(|| format!("could not open path={}", path.display()))?;

let cache_size = cache_entries as usize * NODE_BYTES * DEGREE;
file.set_len(cache_size as u64)
.with_context(|| format!("failed to set length: {}", cache_size))?;

let mut data = unsafe {
memmap::MmapOptions::new()
.map_mut(&file)
.with_context(|| format!("could not mmap path={}", path.display()))?
};

data.par_chunks_mut(DEGREE * NODE_BYTES)
.enumerate()
.try_for_each(|(node, entry)| -> Result<()> {
let mut parents = [0u32; DEGREE];
graph
.base_graph()
.parents(node, &mut parents[..BASE_DEGREE])?;
graph.generate_expanded_parents(node, &mut parents[BASE_DEGREE..]);

LittleEndian::write_u32_into(&parents, entry);
Ok(())
})?;

info!("parent cache: generated");
data.flush().context("failed to flush parent cache")?;
drop(data);

info!("parent cache: written to disk");
with_exclusive_lock(&path.clone(), |file| {
let cache_size = cache_entries as usize * NODE_BYTES * DEGREE;
file.as_ref()
.set_len(cache_size as u64)
.with_context(|| format!("failed to set length: {}", cache_size))?;

let mut data = unsafe {
memmap::MmapOptions::new()
.map_mut(file.as_ref())
.with_context(|| format!("could not mmap path={}", path.display()))?
};

data.par_chunks_mut(DEGREE * NODE_BYTES)
.enumerate()
.try_for_each(|(node, entry)| -> Result<()> {
let mut parents = [0u32; DEGREE];
graph
.base_graph()
.parents(node, &mut parents[..BASE_DEGREE])?;
graph.generate_expanded_parents(node, &mut parents[BASE_DEGREE..]);

LittleEndian::write_u32_into(&parents, entry);
Ok(())
})?;

info!("parent cache: generated");
data.flush().context("failed to flush parent cache")?;
drop(data);

info!("parent cache: written to disk");
Ok(())
})?;

Ok(ParentCache {
cache: CacheData::open(0, len, &path)?,
Expand Down

0 comments on commit fa103a9

Please sign in to comment.