diff --git a/storage-proofs/core/src/parameter_cache.rs b/storage-proofs/core/src/parameter_cache.rs
index 518989297..0538c643c 100644
--- a/storage-proofs/core/src/parameter_cache.rs
+++ b/storage-proofs/core/src/parameter_cache.rs
@@ -25,7 +25,7 @@ pub const PARAMETER_METADATA_EXT: &str = "meta";
 pub const VERIFYING_KEY_EXT: &str = "vk";
 
 #[derive(Debug)]
-struct LockedFile(File);
+pub struct LockedFile(File);
 
 // TODO: use in memory lock as well, as file locks do not guarantee exclusive access across OSes.
 
@@ -47,6 +47,19 @@ impl LockedFile {
 
         Ok(LockedFile(f))
     }
+
+    pub fn open_shared_read<P: AsRef<Path>>(p: P) -> io::Result<Self> {
+        let f = fs::OpenOptions::new().read(true).open(p)?;
+        f.lock_shared()?;
+
+        Ok(LockedFile(f))
+    }
+}
+
+impl AsRef<File> for LockedFile {
+    fn as_ref(&self) -> &File {
+        &self.0
+    }
 }
 
 impl io::Write for LockedFile {
@@ -328,21 +341,21 @@ fn write_cached_params(
     })
 }
 
-fn with_exclusive_lock<T>(
+pub fn with_exclusive_lock<T>(
     file_path: &PathBuf,
     f: impl FnOnce(&mut LockedFile) -> Result<T>,
 ) -> Result<T> {
     with_open_file(file_path, LockedFile::open_exclusive, f)
 }
 
-fn with_exclusive_read_lock<T>(
+pub fn with_exclusive_read_lock<T>(
     file_path: &PathBuf,
     f: impl FnOnce(&mut LockedFile) -> Result<T>,
 ) -> Result<T> {
     with_open_file(file_path, LockedFile::open_exclusive_read, f)
 }
 
-fn with_open_file<'a, T>(
+pub fn with_open_file<'a, T>(
     file_path: &'a PathBuf,
     open_file: impl FnOnce(&'a PathBuf) -> io::Result<LockedFile>,
     f: impl FnOnce(&mut LockedFile) -> Result<T>,
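The hunks above make `LockedFile` and the `with_*_lock` helpers public so that other crates in the workspace (such as the stacked PoRep parent cache further down) can reuse the same file-locking discipline. A minimal sketch of how an outside caller might use the newly public API follows; the function names and the `cache_path` parameter are hypothetical, and it assumes the caller depends on `storage_proofs_core` and uses its `anyhow`-based `error::Result` alias, so `?` can convert `io::Error` values.

```rust
use std::io::Write;
use std::path::PathBuf;

use storage_proofs_core::error::Result;
use storage_proofs_core::parameter_cache::{with_exclusive_lock, LockedFile};

/// Hypothetical writer: the exclusive lock is held for the whole closure,
/// so concurrent writers and readers of the same path are serialized.
fn update_cache_file(cache_path: &PathBuf, bytes: &[u8]) -> Result<()> {
    with_exclusive_lock(cache_path, |file| {
        file.write_all(bytes)?;
        file.flush()?;
        Ok(())
    })
}

/// Hypothetical reader: a shared lock suffices, and the new `AsRef<File>`
/// impl exposes the raw handle for metadata (or mmap) calls without
/// releasing the lock.
fn cache_file_len(cache_path: &PathBuf) -> Result<u64> {
    let file = LockedFile::open_shared_read(cache_path)?;
    Ok(file.as_ref().metadata()?.len())
}
```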
diff --git a/storage-proofs/core/src/settings.rs b/storage-proofs/core/src/settings.rs
index 89a31edb8..98369dfb4 100644
--- a/storage-proofs/core/src/settings.rs
+++ b/storage-proofs/core/src/settings.rs
@@ -28,7 +28,7 @@ pub struct Settings {
 impl Default for Settings {
     fn default() -> Self {
         Settings {
-            maximize_caching: false,
+            maximize_caching: true,
             pedersen_hash_exp_window_size: 16,
             use_gpu_column_builder: false,
             max_gpu_column_batch_size: 400_000,
@@ -36,7 +36,7 @@ impl Default for Settings {
             use_gpu_tree_builder: false,
             max_gpu_tree_batch_size: 700_000,
             rows_to_discard: 2,
-            sdr_parents_cache_size: 2048,
+            sdr_parents_cache_size: 2_048,
         }
     }
 }
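The flipped `maximize_caching` default is observable directly on `Settings::default()`. A tiny sketch, assuming the `settings` module and the struct fields are public as in the rest of the crate:

```rust
use storage_proofs_core::settings::Settings;

fn main() {
    let s = Settings::default();
    // Caching is now opt-out rather than opt-in: code that consults this flag
    // takes the cache-friendly path unless the setting is overridden.
    assert!(s.maximize_caching);
    assert_eq!(s.sdr_parents_cache_size, 2_048);
}
```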
diff --git a/storage-proofs/porep/src/stacked/vanilla/cache.rs b/storage-proofs/porep/src/stacked/vanilla/cache.rs
index acede8be2..c91f1a6b6 100644
--- a/storage-proofs/porep/src/stacked/vanilla/cache.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/cache.rs
@@ -11,7 +11,7 @@ use storage_proofs_core::{
     drgraph::BASE_DEGREE,
     error::Result,
     hasher::Hasher,
-    parameter_cache::{ParameterSetMetadata, VERSION},
+    parameter_cache::{with_exclusive_lock, LockedFile, ParameterSetMetadata, VERSION},
 };
 
 use super::graph::{StackedGraph, DEGREE};
@@ -41,7 +41,7 @@ struct CacheData {
     /// Len in nodes.
     len: u32,
     /// The underlying file.
-    file: std::fs::File,
+    file: LockedFile,
 }
 
 impl CacheData {
@@ -61,7 +61,7 @@ impl CacheData {
             memmap::MmapOptions::new()
                 .offset(offset as u64)
                 .len(len)
-                .map(&self.file)
+                .map(self.file.as_ref())
                 .context("could not shift mmap}")?
         };
         self.offset = new_offset;
@@ -98,12 +98,10 @@ impl CacheData {
     fn open(offset: u32, len: u32, path: &PathBuf) -> Result<Self> {
         let min_cache_size = (offset + len) as usize * DEGREE * NODE_BYTES;
 
-        let file = std::fs::OpenOptions::new()
-            .read(true)
-            .open(&path)
+        let file = LockedFile::open_shared_read(path)
             .with_context(|| format!("could not open path={}", path.display()))?;
 
-        let actual_len = file.metadata()?.len();
+        let actual_len = file.as_ref().metadata()?.len();
         if actual_len < min_cache_size as u64 {
             bail!(
                 "corrupted cache: {}, expected at least {}, got {} bytes",
@@ -117,7 +115,7 @@ impl CacheData {
             memmap::MmapOptions::new()
                 .offset((offset as usize * DEGREE * NODE_BYTES) as u64)
                 .len(len as usize * DEGREE * NODE_BYTES)
-                .map(&file)
+                .map(file.as_ref())
                 .with_context(|| format!("could not mmap path={}", path.display()))?
         };
 
@@ -171,43 +169,38 @@ impl ParentCache {
     {
         info!("parent cache: generating {}", path.display());
 
-        std::fs::create_dir_all(PARENT_CACHE_DIR).context("unable to crate parent cache dir")?;
-
-        let file = std::fs::OpenOptions::new()
-            .read(true)
-            .write(true)
-            .create(true)
-            .open(&path)
-            .with_context(|| format!("could not open path={}", path.display()))?;
-
-        let cache_size = cache_entries as usize * NODE_BYTES * DEGREE;
-        file.set_len(cache_size as u64)
-            .with_context(|| format!("failed to set length: {}", cache_size))?;
-
-        let mut data = unsafe {
-            memmap::MmapOptions::new()
-                .map_mut(&file)
-                .with_context(|| format!("could not mmap path={}", path.display()))?
-        };
-
-        data.par_chunks_mut(DEGREE * NODE_BYTES)
-            .enumerate()
-            .try_for_each(|(node, entry)| -> Result<()> {
-                let mut parents = [0u32; DEGREE];
-                graph
-                    .base_graph()
-                    .parents(node, &mut parents[..BASE_DEGREE])?;
-                graph.generate_expanded_parents(node, &mut parents[BASE_DEGREE..]);
-
-                LittleEndian::write_u32_into(&parents, entry);
-                Ok(())
-            })?;
-
-        info!("parent cache: generated");
-        data.flush().context("failed to flush parent cache")?;
-        drop(data);
-
-        info!("parent cache: written to disk");
+        with_exclusive_lock(&path.clone(), |file| {
+            let cache_size = cache_entries as usize * NODE_BYTES * DEGREE;
+            file.as_ref()
+                .set_len(cache_size as u64)
+                .with_context(|| format!("failed to set length: {}", cache_size))?;
+
+            let mut data = unsafe {
+                memmap::MmapOptions::new()
+                    .map_mut(file.as_ref())
+                    .with_context(|| format!("could not mmap path={}", path.display()))?
+            };
+
+            data.par_chunks_mut(DEGREE * NODE_BYTES)
+                .enumerate()
+                .try_for_each(|(node, entry)| -> Result<()> {
+                    let mut parents = [0u32; DEGREE];
+                    graph
+                        .base_graph()
+                        .parents(node, &mut parents[..BASE_DEGREE])?;
+                    graph.generate_expanded_parents(node, &mut parents[BASE_DEGREE..]);
+
+                    LittleEndian::write_u32_into(&parents, entry);
+                    Ok(())
+                })?;
+
+            info!("parent cache: generated");
+            data.flush().context("failed to flush parent cache")?;
+            drop(data);
+
+            info!("parent cache: written to disk");
+            Ok(())
+        })?;
 
         Ok(ParentCache {
             cache: CacheData::open(0, len, &path)?,
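Taken together, the parent-cache changes follow a writer/reader locking pattern: generation holds the exclusive lock while it sizes and fills the file, while `CacheData::open` only ever maps the file under a shared read lock and keeps the locked handle alive next to the mmap, so a half-written cache cannot be mapped by a concurrent process. A condensed sketch of that pattern, with hypothetical helper names and the same assumptions as above about `storage_proofs_core` and the `memmap` crate:

```rust
use std::path::PathBuf;

use storage_proofs_core::error::Result;
use storage_proofs_core::parameter_cache::{with_exclusive_lock, LockedFile};

/// Hypothetical writer: size and fill the file under the exclusive lock,
/// mmapping through the raw handle exposed by `AsRef<File>`.
fn generate_cache(path: &PathBuf, len: u64) -> Result<()> {
    with_exclusive_lock(path, |file| {
        file.as_ref().set_len(len)?;
        let mut data = unsafe { memmap::MmapOptions::new().map_mut(file.as_ref())? };
        data.iter_mut().for_each(|b| *b = 0); // stand-in for the real generation loop
        data.flush()?;
        Ok(())
    })
}

/// Hypothetical reader: a shared lock is enough for a read-only mapping; the
/// locked handle is returned alongside the map so the lock is not released.
fn open_cache(path: &PathBuf) -> Result<(memmap::Mmap, LockedFile)> {
    let file = LockedFile::open_shared_read(path)?;
    let map = unsafe { memmap::MmapOptions::new().map(file.as_ref())? };
    Ok((map, file))
}
```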