Skip to content

Commit

Permalink
fix: cleanups after rebase
Browse files Browse the repository at this point in the history
  • Loading branch information
cryptonemo committed Dec 5, 2023
1 parent 33a1247 commit 311d566
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 180 deletions.
115 changes: 0 additions & 115 deletions filecoin-proofs/src/api/update.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,121 +54,6 @@ pub struct SectorUpdateProofInputs {
pub comm_d_new: Commitment,
}

impl SectorUpdateProofInputs {
    /// Serializes the three 32-byte commitments into `dest` as
    /// `comm_r_old || comm_r_new || comm_d_new` (96 bytes total).
    ///
    /// Panics if `dest` is shorter than 96 bytes.
    pub fn write_bytes(&self, dest: &mut [u8]) {
        // Fix: the ranges must be contiguous 32-byte windows. The previous
        // `33..64` / `65..96` ranges were 31 bytes each, which makes
        // `copy_from_slice` panic (length mismatch) and would have left
        // bytes 32 and 64 of `dest` unwritten.
        dest[0..32].copy_from_slice(&self.comm_r_old[..]);
        dest[32..64].copy_from_slice(&self.comm_r_new[..]);
        dest[64..96].copy_from_slice(&self.comm_d_new[..]);
    }

    /// Feeds the commitments into `hasher` in the same order as
    /// `write_bytes`: comm_r_old, comm_r_new, comm_d_new.
    pub fn update_commitment(&self, hasher: &mut Sha256) {
        hasher.update(self.comm_r_old);
        hasher.update(self.comm_r_new);
        hasher.update(self.comm_d_new);
    }
}

/// Pads `sector_update_inputs` out to exactly `target_len` entries by
/// repeating the inputs of the final proof, `num_inputs_per_proof` at a
/// time. Returns the (possibly extended) copy of the inputs.
fn pad_inputs_to_target(
    sector_update_inputs: &[Vec<Fr>],
    num_inputs_per_proof: usize,
    target_len: usize,
) -> Result<Vec<Vec<Fr>>> {
    ensure!(
        !sector_update_inputs.is_empty(),
        "cannot aggregate with empty public inputs"
    );

    let mut padded = sector_update_inputs.to_owned();
    let mut current_len = sector_update_inputs.len();

    trace!(
        "pad_inputs_to_target target_len {}, inputs len {}",
        target_len,
        current_len
    );

    if current_len != target_len {
        ensure!(
            target_len > current_len,
            "target len must be greater than actual num inputs"
        );

        // The inputs belonging to the last proof in the list; these are the
        // ones duplicated until the target length is reached.
        let tail = &sector_update_inputs[current_len - num_inputs_per_proof..current_len];

        trace!("padding inputs from {} to {}", current_len, target_len);
        loop {
            if current_len == target_len {
                break;
            }
            padded.extend_from_slice(tail);
            current_len += num_inputs_per_proof;
            ensure!(
                current_len <= target_len,
                "num_inputs extended beyond target"
            );
        }
    }

    Ok(padded)
}

// Reads and deserializes the persistent aux (p_aux) file — which holds
// comm_c and comm_r_last — from the given cache directory.
fn get_p_aux<Tree: 'static + MerkleTreeTrait<Hasher = TreeRHasher>>(
    cache_path: &Path,
) -> Result<PersistentAux<<Tree::Hasher as Hasher>::Domain>> {
    let p_aux_path = cache_path.join(CacheKey::PAux.to_string());
    let bytes = fs::read(&p_aux_path)
        .with_context(|| format!("could not read file p_aux={:?}", p_aux_path))?;

    Ok(deserialize(&bytes)?)
}

// Serializes `p_aux` and writes it to its well-known file name inside
// `cache_path`, overwriting any existing file.
fn persist_p_aux<Tree: 'static + MerkleTreeTrait<Hasher = TreeRHasher>>(
    p_aux: &PersistentAux<<Tree::Hasher as Hasher>::Domain>,
    cache_path: &Path,
) -> Result<()> {
    let p_aux_path = cache_path.join(CacheKey::PAux.to_string());
    let bytes = serialize(&p_aux)?;

    File::create(&p_aux_path)
        .with_context(|| format!("could not create file p_aux={:?}", p_aux_path))?
        .write_all(&bytes)
        .with_context(|| format!("could not write to file p_aux={:?}", p_aux_path))?;

    Ok(())
}

// Reads and deserializes the temporary aux (t_aux) file from `cache_path`,
// giving access to the labels plus the tree_d/tree_c/tree_r_last store
// configs, then re-points its cache path at `cache_path`.
fn get_t_aux<Tree: 'static + MerkleTreeTrait<Hasher = TreeRHasher>>(
    cache_path: &Path,
) -> Result<TemporaryAux<Tree, DefaultPieceHasher>> {
    let t_aux_path = cache_path.join(CacheKey::TAux.to_string());
    trace!("Instantiating TemporaryAux from {:?}", cache_path);

    let bytes = fs::read(&t_aux_path)
        .with_context(|| format!("could not read file t_aux={:?}", t_aux_path))?;
    let mut t_aux: TemporaryAux<Tree, DefaultPieceHasher> = deserialize(&bytes)?;

    // The serialized copy may carry a stale path; always point it at the
    // directory we actually loaded from.
    t_aux.set_cache_path(cache_path);
    trace!("Set TemporaryAux cache_path to {:?}", cache_path);

    Ok(t_aux)
}

// Serializes `t_aux` and writes it to its well-known file name inside
// `cache_path`, overwriting any existing file.
fn persist_t_aux<Tree: 'static + MerkleTreeTrait<Hasher = TreeRHasher>>(
    t_aux: &TemporaryAux<Tree, DefaultPieceHasher>,
    cache_path: &Path,
) -> Result<()> {
    let t_aux_path = cache_path.join(CacheKey::TAux.to_string());
    let bytes = serialize(&t_aux)?;

    File::create(&t_aux_path)
        .with_context(|| format!("could not create file t_aux={:?}", t_aux_path))?
        .write_all(&bytes)
        .with_context(|| format!("could not write to file t_aux={:?}", t_aux_path))?;

    Ok(())
}

// Re-instantiate a t_aux with the new cache path, then use the tree_d
// and tree_r_last configs from it. This is done to preserve the
// original tree configuration info (in particular, the
Expand Down
65 changes: 0 additions & 65 deletions filecoin-proofs/tests/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -885,71 +885,6 @@ fn aggregate_sector_update_proofs<Tree: 'static + MerkleTreeTrait<Hasher = TreeR
Ok(())
}

#[test]
#[ignore]
fn test_sector_update_proof_aggregation_1011_2kib() -> Result<()> {
    // 1011 is not a supported aggregation size, so auto-padding is exercised.
    let proofs_to_aggregate = 1011;

    let api_version = ApiVersion::V1_2_0;
    let porep_id = ARBITRARY_POREP_ID_V1_2_0;
    assert!(!is_legacy_porep_id(porep_id));

    aggregate_sector_update_proofs::<SectorShape2KiB>(
        &porep_config(SECTOR_SIZE_2_KIB, porep_id, api_version),
        proofs_to_aggregate,
    )
}

#[test]
#[ignore]
fn test_sector_update_proof_aggregation_33_4kib() -> Result<()> {
    // 33 is not a supported aggregation size, so auto-padding is exercised.
    let proofs_to_aggregate = 33;

    let api_version = ApiVersion::V1_2_0;
    let porep_id = ARBITRARY_POREP_ID_V1_2_0;
    assert!(!is_legacy_porep_id(porep_id));

    aggregate_sector_update_proofs::<SectorShape4KiB>(
        &porep_config(SECTOR_SIZE_4_KIB, porep_id, api_version),
        proofs_to_aggregate,
    )
}

#[test]
#[ignore]
fn test_sector_update_proof_aggregation_508_16kib() -> Result<()> {
    // 508 is not a supported aggregation size, so auto-padding is exercised.
    let proofs_to_aggregate = 508;

    let api_version = ApiVersion::V1_2_0;
    let porep_id = ARBITRARY_POREP_ID_V1_2_0;
    assert!(!is_legacy_porep_id(porep_id));

    aggregate_sector_update_proofs::<SectorShape16KiB>(
        &porep_config(SECTOR_SIZE_16_KIB, porep_id, api_version),
        proofs_to_aggregate,
    )
}

#[test]
#[ignore]
fn test_sector_update_proof_aggregation_64_8mib() -> Result<()> {
    // 64 proofs: an exact aggregation size, so no padding is required.
    let proofs_to_aggregate = 64;

    let api_version = ApiVersion::V1_2_0;
    let porep_id = ARBITRARY_POREP_ID_V1_2_0;
    assert!(!is_legacy_porep_id(porep_id));

    aggregate_sector_update_proofs::<SectorShape8MiB>(
        &porep_config(SECTOR_SIZE_8_MIB, porep_id, api_version),
        proofs_to_aggregate,
    )
}

#[test]
#[ignore]
fn test_sector_update_proof_aggregation_818_32kib() -> Result<()> {
    // 818 is not a supported aggregation size, so auto-padding is exercised.
    let proofs_to_aggregate = 818;

    let api_version = ApiVersion::V1_2_0;
    let porep_id = ARBITRARY_POREP_ID_V1_2_0;
    assert!(!is_legacy_porep_id(porep_id));

    aggregate_sector_update_proofs::<SectorShape32KiB>(
        &porep_config(SECTOR_SIZE_32_KIB, porep_id, api_version),
        proofs_to_aggregate,
    )
}

fn get_layer_file_paths(cache_dir: &tempfile::TempDir) -> Vec<PathBuf> {
let mut list: Vec<_> = read_dir(cache_dir)
.unwrap_or_else(|_| panic!("failed to read directory {:?}", cache_dir))
Expand Down

0 comments on commit 311d566

Please sign in to comment.