diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a3d40eaa..e5390b63f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ and follow [semantic versioning](https://semver.org/) for our releases. - [#343](https://github.com/EspressoSystems/jellyfish/pull/343) Rescue parameter for `ark_bn254::Fq` - [#362](https://github.com/EspressoSystems/jellyfish/pull/362) Derive Eq, Hash at a bunch of places - [#381](https://github.com/EspressoSystems/jellyfish/pull/381) VID take iterator instead of slice +- [#389](https://github.com/EspressoSystems/jellyfish/pull/389) Hello-world namespace support for ADVZ VID scheme ### Changed diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index 1959fb5b5..2c4aaa09d 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -117,6 +117,9 @@ std = [ "jf-relation/std", ] print-trace = ["ark-std/print-trace"] +kzg-print-trace = [ + "print-trace", +] # leave disabled to reduce pollution in downstream users of KZG (such as VID) parallel = [ "ark-ff/parallel", "ark-ec/parallel", diff --git a/primitives/benches/advz.rs b/primitives/benches/advz.rs index 7017e7a17..d700d3b84 100644 --- a/primitives/benches/advz.rs +++ b/primitives/benches/advz.rs @@ -61,8 +61,11 @@ mod feature_gated { // run all benches for each payload_byte_lens for len in payload_byte_lens { // random payload data - let mut payload_bytes = vec![0u8; len]; - rng.fill_bytes(&mut payload_bytes); + let payload_bytes = { + let mut payload_bytes = vec![0u8; len]; + rng.fill_bytes(&mut payload_bytes); + payload_bytes + }; let benchmark_group_name = |op_name| format!("advz_{}_{}_{}KB", pairing_name, op_name, len / KB); @@ -103,13 +106,17 @@ mod feature_gated { for (poly_degree, num_storage_nodes) in vid_sizes_iter.clone() { let advz = Advz::::new(poly_degree, num_storage_nodes, &srs).unwrap(); let disperse = advz.disperse(&payload_bytes).unwrap(); - let (shares, common) = (disperse.shares, disperse.common); + let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit); grp.bench_with_input( BenchmarkId::from_parameter(num_storage_nodes), &num_storage_nodes, |b, _| { // verify only the 0th share - b.iter(|| advz.verify_share(&shares[0], &common).unwrap().unwrap()); + b.iter(|| { + advz.verify_share(&shares[0], &common, &commit) + .unwrap() + .unwrap() + }); }, ); } diff --git a/primitives/src/pcs/univariate_kzg/mod.rs b/primitives/src/pcs/univariate_kzg/mod.rs index 3a0a1cadc..8ab7ff7ec 100644 --- a/primitives/src/pcs/univariate_kzg/mod.rs +++ b/primitives/src/pcs/univariate_kzg/mod.rs @@ -92,6 +92,8 @@ impl PolynomialCommitmentScheme for UnivariateKzgPCS { poly: &Self::Polynomial, ) -> Result { let prover_param = prover_param.borrow(); + + #[cfg(feature = "kzg-print-trace")] let commit_time = start_timer!(|| format!("Committing to polynomial of degree {} ", poly.degree())); @@ -105,14 +107,20 @@ impl PolynomialCommitmentScheme for UnivariateKzgPCS { let (num_leading_zeros, plain_coeffs) = skip_leading_zeros_and_convert_to_bigints(poly); - let msm_time = start_timer!(|| "MSM to compute commitment to plaintext poly"); + #[cfg(feature = "kzg-print-trace")] + let msm_time = start_timer!(|| "MSM to compute commitment to plaintext + poly"); + let commitment = E::G1::msm_bigint( &prover_param.powers_of_g[num_leading_zeros..], &plain_coeffs, ) .into_affine(); + + #[cfg(feature = "kzg-print-trace")] end_timer!(msm_time); + #[cfg(feature = "kzg-print-trace")] end_timer!(commit_time); Ok(Commitment(commitment)) } @@ -405,9 +413,15 @@ fn 
skip_leading_zeros_and_convert_to_bigints(p: &[F]) -> Vec { - let to_bigint_time = start_timer!(|| "Converting polynomial coeffs to bigints"); + #[cfg(feature = "kzg-print-trace")] + let to_bigint_time = start_timer!(|| "Converting polynomial coeffs to + bigints"); + let coeffs = p.iter().map(|s| s.into_bigint()).collect::>(); + + #[cfg(feature = "kzg-print-trace")] end_timer!(to_bigint_time); + coeffs } diff --git a/primitives/src/vid/mod.rs b/primitives/src/vid.rs similarity index 76% rename from primitives/src/vid/mod.rs rename to primitives/src/vid.rs index 00d1a7a40..d3e7750b8 100644 --- a/primitives/src/vid/mod.rs +++ b/primitives/src/vid.rs @@ -7,63 +7,37 @@ //! Trait and implementation for a Verifiable Information Retrieval (VID). /// See section 1.3--1.4 for intro to VID semantics. use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::{borrow::Borrow, error::Error, fmt::Debug, hash::Hash, string::String, vec::Vec}; +use ark_std::{error::Error, fmt::Debug, hash::Hash, string::String, vec::Vec}; use displaydoc::Display; use serde::{Deserialize, Serialize}; -pub mod advz; - -/// The error type for `VidScheme` methods. -#[derive(Display, Debug)] -pub enum VidError { - /// invalid args: {0} - Argument(String), - /// internal error: {0} - Internal(anyhow::Error), -} - -impl Error for VidError {} - -/// Convenience wrapper to convert any error into a [`VidError`]. -/// -/// Private fn so as not to expose error conversion API outside this crate -/// as per [stackoverflow](https://stackoverflow.com/a/70057677). -/// -/// # No-std support -/// `no_std` mode requires `.map_err(vid)` to convert from a non-`anyhow` error -/// as per [`anyhow` docs](https://docs.rs/anyhow/latest/anyhow/index.html#no-std-support), -fn vid(e: E) -> VidError -where - E: ark_std::fmt::Display + Debug + Send + Sync + 'static, -{ - VidError::Internal(anyhow::anyhow!(e)) -} - -/// Convenience [`Result`] wrapper for [`VidError`]. -pub type VidResult = Result; - /// VID: Verifiable Information Dispersal pub trait VidScheme { /// Payload commitment. - type Commit: Clone + Debug + Eq + PartialEq + Sync; // TODO https://github.com/EspressoSystems/jellyfish/issues/253 + type Commit: Clone + Debug + Eq + PartialEq + Hash + Sync; // TODO https://github.com/EspressoSystems/jellyfish/issues/253 /// Share-specific data sent to a storage node. - type Share: Clone + Debug + Eq + Sync; // TODO https://github.com/EspressoSystems/jellyfish/issues/253 + type Share: Clone + Debug + Eq + PartialEq + Hash + Sync; // TODO https://github.com/EspressoSystems/jellyfish/issues/253 /// Common data sent to all storage nodes. - type Common: CanonicalSerialize + CanonicalDeserialize + Clone + Eq + PartialEq + Sync; // TODO https://github.com/EspressoSystems/jellyfish/issues/253 + type Common: CanonicalSerialize + + CanonicalDeserialize + + Clone + + Debug + + Eq + + PartialEq + + Hash + + Sync; // TODO https://github.com/EspressoSystems/jellyfish/issues/253 /// Compute a payload commitment - fn commit_only(&self, payload: I) -> VidResult + fn commit_only(&self, payload: B) -> VidResult where - I: IntoIterator, - I::Item: Borrow; + B: AsRef<[u8]>; /// Compute shares to send to the storage nodes - fn disperse(&self, payload: I) -> VidResult> + fn disperse(&self, payload: B) -> VidResult> where - I: IntoIterator, - I::Item: Borrow; + B: AsRef<[u8]>; /// Verify a share. Used by both storage node and retrieval client. /// Why is return type a nested `Result`? 
See @@ -71,8 +45,12 @@ pub trait VidScheme { /// - VidResult::Err in case of actual error /// - VidResult::Ok(Result::Err) if verification fails /// - VidResult::Ok(Result::Ok) if verification succeeds - fn verify_share(&self, share: &Self::Share, common: &Self::Common) - -> VidResult>; + fn verify_share( + &self, + share: &Self::Share, + common: &Self::Common, + commit: &Self::Commit, + ) -> VidResult>; /// Recover payload from shares. /// Do not verify shares or check recovered payload against anything. @@ -105,3 +83,38 @@ pub struct VidDisperse { /// VID payload commitment. pub commit: V::Commit, } + +pub mod payload_prover; + +pub mod advz; // instantiation of `VidScheme` + +// BOILERPLATE: error handling + +/// The error type for `VidScheme` methods. +#[derive(Display, Debug)] +pub enum VidError { + /// invalid args: {0} + Argument(String), + /// internal error: {0} + Internal(anyhow::Error), +} + +impl Error for VidError {} + +/// Convenience wrapper to convert any error into a [`VidError`]. +/// +/// Private fn so as not to expose error conversion API outside this crate +/// as per [stackoverflow](https://stackoverflow.com/a/70057677). +/// +/// # No-std support +/// `no_std` mode requires `.map_err(vid)` to convert from a non-`anyhow` error +/// as per [`anyhow` docs](https://docs.rs/anyhow/latest/anyhow/index.html#no-std-support), +fn vid(e: E) -> VidError +where + E: ark_std::fmt::Display + Debug + Send + Sync + 'static, +{ + VidError::Internal(anyhow::anyhow!(e)) +} + +/// Convenience [`Result`] wrapper for [`VidError`]. +pub type VidResult = Result; diff --git a/primitives/src/vid/advz.rs b/primitives/src/vid/advz.rs index acc14b855..e9a3151c7 100644 --- a/primitives/src/vid/advz.rs +++ b/primitives/src/vid/advz.rs @@ -10,6 +10,7 @@ use super::{vid, VidDisperse, VidError, VidResult, VidScheme}; use crate::{ + alloc::string::ToString, merkle_tree::{hasher::HasherMerkleTree, MerkleCommitment, MerkleTreeScheme}, pcs::{ prelude::UnivariateKzgPCS, PolynomialCommitmentScheme, StructuredReferenceString, @@ -27,20 +28,25 @@ use ark_poly::{DenseUVPolynomial, EvaluationDomain, Radix2EvaluationDomain}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Write}; use ark_std::{ borrow::Borrow, + end_timer, fmt::Debug, format, marker::PhantomData, ops::{Add, Mul}, - vec, + start_timer, vec, vec::Vec, Zero, }; +use bytes_to_field::{bytes_to_field, field_to_bytes}; use derivative::Derivative; use digest::{crypto_common::Output, Digest, DynDigest}; use itertools::Itertools; -use jf_utils::{bytes_to_field, canonical, field_to_bytes}; +use jf_utils::canonical; use serde::{Deserialize, Serialize}; +mod bytes_to_field; +pub mod payload_prover; + /// The [ADVZ VID scheme](https://eprint.iacr.org/2021/1500), a concrete impl for [`VidScheme`]. /// /// - `H` is any [`Digest`]-compatible hash function @@ -168,7 +174,7 @@ where #[serde(with = "canonical")] poly_commits: Vec, all_evals_digest: V::NodeValue, - elems_len: usize, + bytes_len: usize, } // We take great pains to maintain abstraction by relying only on traits and not @@ -195,21 +201,20 @@ where type Share = Share; type Common = Common; - fn commit_only(&self, payload: I) -> VidResult + fn commit_only(&self, payload: B) -> VidResult where - I: IntoIterator, - I::Item: Borrow, + B: AsRef<[u8]>, { + let payload = payload.as_ref(); + + // Can't use `Self::poly_commits_hash()`` here because `P::commit()`` returns + // `Result`` instead of `P::Commitment`. + // There's probably an idiomatic way to do this using eg. 
+ // itertools::process_results() but the code is unreadable. let mut hasher = H::new(); let elems_iter = bytes_to_field::<_, P::Evaluation>(payload); - for coeffs_iter in elems_iter.chunks(self.payload_chunk_size).into_iter() { - // TODO TEMPORARY: use FFT to encode polynomials in eval form - // Remove these FFTs after we get KZG in eval form - // https://github.com/EspressoSystems/jellyfish/issues/339 - let mut coeffs: Vec<_> = coeffs_iter.collect(); - self.eval_domain.fft_in_place(&mut coeffs); - - let poly = DenseUVPolynomial::from_coefficients_vec(coeffs); + for evals_iter in elems_iter.chunks(self.payload_chunk_size).into_iter() { + let poly = self.polynomial(evals_iter); let commitment = P::commit(&self.ck, &poly).map_err(vid)?; commitment .serialize_uncompressed(&mut hasher) @@ -218,127 +223,33 @@ where Ok(hasher.finalize()) } - fn disperse(&self, payload: I) -> VidResult> + fn disperse(&self, payload: B) -> VidResult> where - I: IntoIterator, - I::Item: Borrow, + B: AsRef<[u8]>, { - self.disperse_from_elems(bytes_to_field::<_, P::Evaluation>(payload)) - } + let payload = payload.as_ref(); + let payload_len = payload.len(); + let disperse_time = start_timer!(|| format!( + "VID disperse {} payload bytes to {} nodes", + payload_len, self.num_storage_nodes + )); - fn verify_share( - &self, - share: &Self::Share, - common: &Self::Common, - ) -> VidResult> { - // check arguments - if share.evals.len() != common.poly_commits.len() { - return Err(VidError::Argument(format!( - "(share eval, common poly commit) lengths differ ({},{})", - share.evals.len(), - common.poly_commits.len() - ))); - } - if share.index >= self.num_storage_nodes { - return Ok(Err(())); // not an arg error - } - - // verify eval proof - if V::verify( - common.all_evals_digest, - &V::Index::from(share.index as u64), - &share.evals_proof, - ) - .map_err(vid)? - .is_err() - { - return Ok(Err(())); - } - - let pseudorandom_scalar = Self::pseudorandom_scalar(common)?; - - // Compute aggregate polynomial [commitment|evaluation] - // as a pseudorandom linear combo of [commitments|evaluations] - // via evaluation of the polynomial whose coefficients are - // [commitments|evaluations] and whose input point is the pseudorandom - // scalar. - let aggregate_poly_commit = P::Commitment::from( - polynomial_eval( - common - .poly_commits - .iter() - .map(|x| CurveMultiplier(x.as_ref())), - pseudorandom_scalar, - ) - .into(), - ); - let aggregate_eval = - polynomial_eval(share.evals.iter().map(FieldMultiplier), pseudorandom_scalar); - - // verify aggregate proof - Ok(P::verify( - &self.vk, - &aggregate_poly_commit, - &self.multi_open_domain.element(share.index), - &aggregate_eval, - &share.aggregate_proof, - ) - .map_err(vid)? - .then_some(()) - .ok_or(())) - } - - fn recover_payload(&self, shares: &[Self::Share], common: &Self::Common) -> VidResult> { - // TODO can we avoid collect() here? - Ok(field_to_bytes(self.recover_elems(shares, common)?).collect()) - } -} - -impl GenericAdvz -where - P: UnivariatePCS::Evaluation>, - P::Evaluation: PrimeField, - P::Polynomial: DenseUVPolynomial, - P::Commitment: From + AsRef, - T: AffineRepr, - H: Digest + DynDigest + Default + Clone + Write, - V: MerkleTreeScheme>, - V::MembershipProof: Sync + Debug, /* TODO https://github.com/EspressoSystems/jellyfish/issues/253 */ - V::Index: From, -{ - /// Same as [`VidScheme::disperse`] except `payload` iterates over - /// field elements. 
- pub fn disperse_from_elems(&self, payload: I) -> VidResult> - where - I: IntoIterator, - I::Item: Borrow, - { // partition payload into polynomial coefficients // and count `elems_len` for later - let elems_iter = payload.into_iter().map(|elem| *elem.borrow()); - let mut elems_len = 0; + let bytes_to_polys_time = start_timer!(|| "encode payload bytes into polynomials"); + let elems_iter = bytes_to_field::<_, P::Evaluation>(payload); let mut polys = Vec::new(); - for coeffs_iter in elems_iter.chunks(self.payload_chunk_size).into_iter() { - // TODO TEMPORARY: use FFT to encode polynomials in eval form - // Remove these FFTs after we get KZG in eval form - // https://github.com/EspressoSystems/jellyfish/issues/339 - let mut coeffs: Vec<_> = coeffs_iter.collect(); - let pre_fft_len = coeffs.len(); - self.eval_domain.fft_in_place(&mut coeffs); - - // sanity check: the fft did not resize coeffs. - // If pre_fft_len != self.payload_chunk_size then we must be in the final chunk. - // In that case coeffs.len() could be anything, so there's nothing to sanity - // check. - if pre_fft_len == self.payload_chunk_size { - assert_eq!(coeffs.len(), pre_fft_len); - } - - elems_len += pre_fft_len; - polys.push(DenseUVPolynomial::from_coefficients_vec(coeffs)); + for evals_iter in elems_iter.chunks(self.payload_chunk_size).into_iter() { + polys.push(self.polynomial(evals_iter)); } + end_timer!(bytes_to_polys_time); // evaluate polynomials + let all_storage_node_evals_timer = start_timer!(|| format!( + "compute all storage node evals for {} polynomials of degree {}", + polys.len(), + self.payload_chunk_size + )); let all_storage_node_evals = { let mut all_storage_node_evals = vec![Vec::with_capacity(polys.len()); self.num_storage_nodes]; @@ -363,9 +274,12 @@ where all_storage_node_evals }; + end_timer!(all_storage_node_evals_timer); // vector commitment to polynomial evaluations // TODO why do I need to compute the height of the merkle tree? + let all_evals_commit_timer = + start_timer!(|| "compute merkle root of all storage node evals"); let height: usize = all_storage_node_evals .len() .checked_ilog(V::ARITY) @@ -380,7 +294,9 @@ where .expect("num_storage_nodes log base arity should fit into usize"); let height = height + 1; // avoid fully qualified syntax for try_into() let all_evals_commit = V::from_elems(height, &all_storage_node_evals).map_err(vid)?; + end_timer!(all_evals_commit_timer); + let common_timer = start_timer!(|| format!("compute {} KZG commitments", polys.len())); let common = Common { poly_commits: polys .iter() @@ -388,21 +304,12 @@ where .collect::>() .map_err(vid)?, all_evals_digest: all_evals_commit.commitment().digest(), - elems_len, - }; - - let commit = { - let mut hasher = H::new(); - for poly_commit in common.poly_commits.iter() { - // TODO compiler bug? `as` should not be needed here! 
- (poly_commit as &P::Commitment) - .serialize_uncompressed(&mut hasher) - .map_err(vid)?; - } - hasher.finalize() + bytes_len: payload_len, }; + end_timer!(common_timer); - let pseudorandom_scalar = Self::pseudorandom_scalar(&common)?; + let commit = Self::poly_commits_hash(common.poly_commits.iter())?; + let pseudorandom_scalar = Self::pseudorandom_scalar(&common, &commit)?; // Compute aggregate polynomial // as a pseudorandom linear combo of polynomials @@ -411,6 +318,10 @@ where let aggregate_poly = polynomial_eval(polys.iter().map(PolynomialMultiplier), pseudorandom_scalar); + let agg_proofs_timer = start_timer!(|| format!( + "compute aggregate proofs for {} storage nodes", + self.num_storage_nodes + )); let aggregate_proofs = P::multi_open_rou_proofs( &self.ck, &aggregate_poly, @@ -418,7 +329,9 @@ where &self.multi_open_domain, ) .map_err(vid)?; + end_timer!(agg_proofs_timer); + let assemblage_timer = start_timer!(|| "assemble shares for dispersal"); let shares = all_storage_node_evals .into_iter() .zip(aggregate_proofs) @@ -436,7 +349,9 @@ where }) }) .collect::>()?; + end_timer!(assemblage_timer); + end_timer!(disperse_time); Ok(VidDisperse { shares, common, @@ -444,13 +359,78 @@ where }) } - /// Same as [`VidScheme::recover_payload`] except returns a [`Vec`] of field - /// elements. - pub fn recover_elems( + fn verify_share( &self, - shares: &[::Share], - common: &::Common, - ) -> VidResult> { + share: &Self::Share, + common: &Self::Common, + commit: &Self::Commit, + ) -> VidResult> { + // check arguments + if share.evals.len() != common.poly_commits.len() { + return Err(VidError::Argument(format!( + "(share eval, common poly commit) lengths differ ({},{})", + share.evals.len(), + common.poly_commits.len() + ))); + } + if share.index >= self.num_storage_nodes { + return Ok(Err(())); // not an arg error + } + + // check `common` against `commit` + let commit_rebuilt = Self::poly_commits_hash(common.poly_commits.iter())?; + if commit_rebuilt != *commit { + return Err(VidError::Argument( + "commit inconsistent with common".to_string(), + )); + } + + // verify eval proof + if V::verify( + common.all_evals_digest, + &V::Index::from(share.index as u64), + &share.evals_proof, + ) + .map_err(vid)? + .is_err() + { + return Ok(Err(())); + } + + let pseudorandom_scalar = Self::pseudorandom_scalar(common, commit)?; + + // Compute aggregate polynomial [commitment|evaluation] + // as a pseudorandom linear combo of [commitments|evaluations] + // via evaluation of the polynomial whose coefficients are + // [commitments|evaluations] and whose input point is the pseudorandom + // scalar. + let aggregate_poly_commit = P::Commitment::from( + polynomial_eval( + common + .poly_commits + .iter() + .map(|x| CurveMultiplier(x.as_ref())), + pseudorandom_scalar, + ) + .into(), + ); + let aggregate_eval = + polynomial_eval(share.evals.iter().map(FieldMultiplier), pseudorandom_scalar); + + // verify aggregate proof + Ok(P::verify( + &self.vk, + &aggregate_poly_commit, + &self.multi_open_domain.element(share.index), + &aggregate_eval, + &share.aggregate_proof, + ) + .map_err(vid)? 
+ .then_some(()) + .ok_or(())) + } + + fn recover_payload(&self, shares: &[Self::Share], common: &Self::Common) -> VidResult> { if shares.len() < self.payload_chunk_size { return Err(VidError::Argument(format!( "not enough shares {}, expected at least {}", @@ -479,8 +459,8 @@ where ))); } - let result_len = num_polys * self.payload_chunk_size; - let mut result = Vec::with_capacity(result_len); + let elems_capacity = num_polys * self.payload_chunk_size; + let mut elems = Vec::with_capacity(elems_capacity); for i in 0..num_polys { let mut coeffs = reed_solomon_erasure_decode_rou( shares.iter().map(|s| (s.index, s.evals[i])), @@ -492,22 +472,36 @@ where // TODO TEMPORARY: use FFT to encode polynomials in eval form // Remove these FFTs after we get KZG in eval form // https://github.com/EspressoSystems/jellyfish/issues/339 - self.eval_domain.ifft_in_place(&mut coeffs); + self.eval_domain.fft_in_place(&mut coeffs); - result.append(&mut coeffs); + elems.append(&mut coeffs); } - assert_eq!(result.len(), result_len); - result.truncate(common.elems_len); - Ok(result) + assert_eq!(elems.len(), elems_capacity); + + let mut payload: Vec<_> = field_to_bytes(elems).collect(); + payload.truncate(common.bytes_len); + Ok(payload) } +} - fn pseudorandom_scalar(common: &::Common) -> VidResult { +impl GenericAdvz +where + P: UnivariatePCS::Evaluation>, + P::Evaluation: PrimeField, + P::Polynomial: DenseUVPolynomial, + P::Commitment: From + AsRef, + T: AffineRepr, + H: Digest + DynDigest + Default + Clone + Write, + V: MerkleTreeScheme>, + V::MembershipProof: Sync + Debug, /* TODO https://github.com/EspressoSystems/jellyfish/issues/253 */ + V::Index: From, +{ + fn pseudorandom_scalar( + common: &::Common, + commit: &::Commit, + ) -> VidResult { let mut hasher = H::new(); - for poly_commit in common.poly_commits.iter() { - poly_commit - .serialize_uncompressed(&mut hasher) - .map_err(vid)?; - } + commit.serialize_uncompressed(&mut hasher).map_err(vid)?; common .all_evals_digest .serialize_uncompressed(&mut hasher) @@ -528,6 +522,44 @@ where .ok_or_else(|| anyhow!("hash_to_field output is empty")) .map_err(vid)?) } + + fn polynomial(&self, coeffs: I) -> P::Polynomial + where + I: Iterator, + I::Item: Borrow, + { + // TODO TEMPORARY: use FFT to encode polynomials in eval form + // Remove these FFTs after we get KZG in eval form + // https://github.com/EspressoSystems/jellyfish/issues/339 + let mut coeffs_vec: Vec<_> = coeffs.map(|c| *c.borrow()).collect(); + let pre_fft_len = coeffs_vec.len(); + self.eval_domain.ifft_in_place(&mut coeffs_vec); + + // sanity check: the fft did not resize coeffs. + // If pre_fft_len != self.payload_chunk_size then we were not given the correct + // number of coeffs. In that case coeffs.len() could be anything, so + // there's nothing to sanity check. 
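+ // Note: this helper consumes payload *evaluations*: the `ifft_in_place` above converts them to coefficient form for KZG, and `recover_payload` applies the matching forward `fft_in_place` to the erasure-decoded coefficients before handing the elements to `field_to_bytes`.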
+ if pre_fft_len == self.payload_chunk_size { + assert_eq!(coeffs_vec.len(), pre_fft_len); + } + + DenseUVPolynomial::from_coefficients_vec(coeffs_vec) + } + + fn poly_commits_hash(poly_commits: I) -> VidResult<::Commit> + where + I: Iterator, + I::Item: Borrow, + { + let mut hasher = H::new(); + for poly_commit in poly_commits { + poly_commit + .borrow() + .serialize_uncompressed(&mut hasher) + .map_err(vid)?; + } + Ok(hasher.finalize()) + } } // `From` impls for `VidError` @@ -633,14 +665,26 @@ mod tests { rand::{CryptoRng, RngCore}, vec, }; - use digest::{generic_array::ArrayLength, OutputSizeUser}; use sha2::Sha256; + #[test] + fn disperse_timer() { + // run with 'print-trace' feature to see timer output + let (payload_chunk_size, num_storage_nodes) = (256, 512); + let mut rng = jf_utils::test_rng(); + let srs = init_srs(payload_chunk_size, &mut rng); + let advz = + Advz::::new(payload_chunk_size, num_storage_nodes, srs).unwrap(); + let payload_random = init_random_payload(1 << 20, &mut rng); + + let _ = advz.disperse(&payload_random); + } + #[test] fn sad_path_verify_share_corrupt_share() { let (advz, bytes_random) = avdz_init(); let disperse = advz.disperse(&bytes_random).unwrap(); - let (shares, common) = (disperse.shares, disperse.common); + let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit); for (i, share) in shares.iter().enumerate() { // missing share eval @@ -650,7 +694,7 @@ mod tests { ..share.clone() }; assert_arg_err( - advz.verify_share(&share_missing_eval, &common), + advz.verify_share(&share_missing_eval, &common, &commit), "1 missing share should be arg error", ); } @@ -659,7 +703,7 @@ mod tests { { let mut share_bad_eval = share.clone(); share_bad_eval.evals[0].double_in_place(); - advz.verify_share(&share_bad_eval, &common) + advz.verify_share(&share_bad_eval, &common, &commit) .unwrap() .expect_err("bad share value should fail verification"); } @@ -670,7 +714,7 @@ mod tests { index: (share.index + 1) % advz.num_storage_nodes, ..share.clone() }; - advz.verify_share(&share_bad_index, &common) + advz.verify_share(&share_bad_index, &common, &commit) .unwrap() .expect_err("bad share index should fail verification"); } @@ -681,7 +725,7 @@ mod tests { index: share.index + advz.num_storage_nodes, ..share.clone() }; - advz.verify_share(&share_bad_index, &common) + advz.verify_share(&share_bad_index, &common, &commit) .unwrap() .expect_err("bad share index should fail verification"); } @@ -695,7 +739,7 @@ mod tests { evals_proof: shares[(i + 1) % shares.len()].evals_proof.clone(), ..share.clone() }; - advz.verify_share(&share_bad_evals_proof, &common) + advz.verify_share(&share_bad_evals_proof, &common, &commit) .unwrap() .expect_err("bad share evals proof should fail verification"); } @@ -706,7 +750,7 @@ mod tests { fn sad_path_verify_share_corrupt_commit() { let (advz, bytes_random) = avdz_init(); let disperse = advz.disperse(&bytes_random).unwrap(); - let (shares, common) = (disperse.shares, disperse.common); + let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit); // missing commit let common_missing_item = Common { @@ -714,7 +758,7 @@ mod tests { ..common.clone() }; assert_arg_err( - advz.verify_share(&shares[0], &common_missing_item), + advz.verify_share(&shares[0], &common_missing_item, &commit), "1 missing commit should be arg error", ); @@ -724,9 +768,10 @@ mod tests { corrupted.poly_commits[0] = ::G1Affine::zero().into(); corrupted }; - advz.verify_share(&shares[0], &common_1_poly_corruption) - .unwrap() - 
.expect_err("1 corrupt poly_commit should fail verification"); + assert_arg_err( + advz.verify_share(&shares[0], &common_1_poly_corruption, &commit), + "corrupted commit should be arg error", + ); // 1 corrupt commit, all_evals_digest let common_1_digest_corruption = { @@ -742,7 +787,7 @@ mod tests { .expect("digest deserialization should succeed"); corrupted }; - advz.verify_share(&shares[0], &common_1_digest_corruption) + advz.verify_share(&shares[0], &common_1_digest_corruption, &commit) .unwrap() .expect_err("1 corrupt all_evals_digest should fail verification"); } @@ -751,19 +796,25 @@ mod tests { fn sad_path_verify_share_corrupt_share_and_commit() { let (advz, bytes_random) = avdz_init(); let disperse = advz.disperse(&bytes_random).unwrap(); - let (mut shares, mut common) = (disperse.shares, disperse.common); + let (mut shares, mut common, commit) = (disperse.shares, disperse.common, disperse.commit); common.poly_commits.pop(); shares[0].evals.pop(); // equal nonzero lengths for common, share - advz.verify_share(&shares[0], &common).unwrap().unwrap_err(); + assert_arg_err( + advz.verify_share(&shares[0], &common, &commit), + "common inconsistent with commit should be arg error", + ); common.poly_commits.clear(); shares[0].evals.clear(); // zero length for common, share - advz.verify_share(&shares[0], &common).unwrap().unwrap_err(); + assert_arg_err( + advz.verify_share(&shares[0], &common, &commit), + "expect arg error for common inconsistent with commit", + ); } #[test] @@ -817,85 +868,26 @@ mod tests { } } - fn prove_namespace_generic() - where - E: Pairing, - H: Digest + DynDigest + Default + Clone + Write, - <::OutputSize as ArrayLength>::ArrayType: Copy, - { - // play with these items - let (payload_chunk_size, num_storage_nodes) = (4, 6); - let num_polys = 4; - - // more items as a function of the above - let payload_elems_len = num_polys * payload_chunk_size; - let payload_bytes_len = payload_elems_len * modulus_byte_len::(); - let mut rng = jf_utils::test_rng(); - let payload_bytes = init_random_bytes(payload_bytes_len, &mut rng); - let srs = init_srs(payload_elems_len, &mut rng); - - let advz = Advz::::new(payload_chunk_size, num_storage_nodes, srs).unwrap(); - let d = advz.disperse(&payload_bytes).unwrap(); - - // TEST: verify "namespaces" (each namespace is a polynomial) - // This test is currently trivial: we simply repeat the commit computation. - // In the future there will be a proper API that can be tested meaningfully. - - // encode payload as field elements, partition into polynomials, compute - // commitments, compare against VID common data - let elems_iter = bytes_to_field::<_, E::ScalarField>(payload_bytes); - for (coeffs_iter, poly_commit) in elems_iter - .chunks(payload_chunk_size) - .into_iter() - .zip(d.common.poly_commits.iter()) - { - let mut coeffs: Vec<_> = coeffs_iter.collect(); - advz.eval_domain.fft_in_place(&mut coeffs); - - let poly = as PolynomialCommitmentScheme>::Polynomial::from_coefficients_vec(coeffs); - let my_poly_commit = UnivariateKzgPCS::::commit(&advz.ck, &poly).unwrap(); - assert_eq!(my_poly_commit, *poly_commit); - } - - // compute payload commitment and verify - let commit = { - let mut hasher = H::new(); - for poly_commit in d.common.poly_commits.iter() { - // TODO compiler bug? `as` should not be needed here! 
- (poly_commit as & as PolynomialCommitmentScheme>::Commitment) - .serialize_uncompressed(&mut hasher) - .unwrap(); - } - hasher.finalize() - }; - assert_eq!(commit, d.commit); - } - - #[test] - fn prove_namespace() { - prove_namespace_generic::(); - } - /// Routine initialization tasks. /// /// Returns the following tuple: /// 1. An initialized [`Advz`] instance. /// 2. A `Vec` filled with random bytes. - fn avdz_init() -> (Advz, Vec) { + pub(super) fn avdz_init() -> (Advz, Vec) { let (payload_chunk_size, num_storage_nodes) = (4, 6); let mut rng = jf_utils::test_rng(); let srs = init_srs(payload_chunk_size, &mut rng); let advz = Advz::new(payload_chunk_size, num_storage_nodes, srs).unwrap(); - let bytes_random = init_random_bytes(4000, &mut rng); + let bytes_random = init_random_payload(4000, &mut rng); (advz, bytes_random) } /// Convenience wrapper to assert [`VidError::Argument`] return value. - fn assert_arg_err(res: VidResult, msg: &str) { + pub(super) fn assert_arg_err(res: VidResult, msg: &str) { assert!(matches!(res, Err(Argument(_))), "{}", msg); } - fn init_random_bytes(len: usize, rng: &mut R) -> Vec + pub(super) fn init_random_payload(len: usize, rng: &mut R) -> Vec where R: RngCore + CryptoRng, { @@ -904,7 +896,7 @@ mod tests { bytes_random } - fn init_srs(num_coeffs: usize, rng: &mut R) -> UnivariateUniversalParams + pub(super) fn init_srs(num_coeffs: usize, rng: &mut R) -> UnivariateUniversalParams where E: Pairing, R: RngCore + CryptoRng, @@ -912,12 +904,4 @@ mod tests { UnivariateKzgPCS::gen_srs_for_testing(rng, checked_fft_size(num_coeffs - 1).unwrap()) .unwrap() } - - fn modulus_byte_len() -> usize - where - E: Pairing, - { - usize::try_from((< as PolynomialCommitmentScheme>::Evaluation as Field>::BasePrimeField - ::MODULUS_BIT_SIZE - 7)/8 + 1).unwrap() - } } diff --git a/primitives/src/vid/advz/bytes_to_field.rs b/primitives/src/vid/advz/bytes_to_field.rs new file mode 100644 index 000000000..f12e0d7af --- /dev/null +++ b/primitives/src/vid/advz/bytes_to_field.rs @@ -0,0 +1,235 @@ +use ark_ff::{BigInteger, PrimeField}; +use ark_std::{ + borrow::Borrow, + iter::Take, + marker::PhantomData, + vec::{IntoIter, Vec}, +}; + +/// Deterministic, infallible, invertible iterator adaptor to convert from +/// arbitrary bytes to field elements. +/// +/// The final field element is padded with zero bytes as needed. +/// +/// # Example +/// +/// [doctest ignored because it's a private module.] +/// ```ignore +/// # use jf_primitives::vid::advz::{bytes_to_field}; +/// # use ark_ed_on_bn254::Fr as Fr254; +/// let bytes = [1, 2, 3]; +/// let mut elems_iter = bytes_to_field::<_, Fr254>(bytes); +/// assert_eq!(elems_iter.next(), Some(Fr254::from(197121u64))); +/// assert_eq!(elems_iter.next(), None); +/// ``` +/// +/// # Panics +/// +/// Panics only under conditions that should be checkable at compile time: +/// +/// - The [`PrimeField`] modulus bit length is too small to hold a `u64`. +/// - The [`PrimeField`] byte length is too large to fit inside a `usize`. +/// +/// If any of the above conditions holds then this function *always* panics. +pub fn bytes_to_field(bytes: I) -> impl Iterator +where + F: PrimeField, + I: IntoIterator, + I::Item: Borrow, +{ + BytesToField::new(bytes.into_iter()) +} + +/// Deterministic, infallible inverse of [`bytes_to_field`]. +/// +/// The composition of [`field_to_bytes`] with [`bytes_to_field`] might contain +/// extra zero bytes. +/// +/// # Example +/// +/// [doctest ignored because it's a private module.] 
+/// ```ignore +/// # use jf_primitives::vid::advz::{bytes_to_field, field_to_bytes}; +/// # use ark_ed_on_bn254::Fr as Fr254; +/// let bytes = [1, 2, 3]; +/// let mut bytes_iter = field_to_bytes(bytes_to_field::<_, Fr254>(bytes)); +/// assert_eq!(bytes_iter.next(), Some(1)); +/// assert_eq!(bytes_iter.next(), Some(2)); +/// assert_eq!(bytes_iter.next(), Some(3)); +/// for _ in 0..28 { +/// assert_eq!(bytes_iter.next(), Some(0)); +/// } +/// assert_eq!(bytes_iter.next(), None); +/// ``` +/// +/// ## Panics +/// +/// Panics under the conditions listed at [`bytes_to_field`]. +pub fn field_to_bytes<I, F>(elems: I) -> impl Iterator<Item = u8> +where + F: PrimeField, + I: IntoIterator, + I::Item: Borrow<F>, +{ + FieldToBytes::new(elems.into_iter()) +} +
+struct BytesToField<I, F> { + bytes_iter: I, + elem_byte_capacity: usize, + _phantom: PhantomData<F>, +} +
+impl<I, F> BytesToField<I, F> +where + F: PrimeField, +{ + fn new(bytes_iter: I) -> Self { + Self { + bytes_iter, + elem_byte_capacity: elem_byte_capacity::<F>(), + _phantom: PhantomData, + } + } +} +
+impl<I, F> Iterator for BytesToField<I, F> +where + I: Iterator, + I::Item: Borrow<u8>, + F: PrimeField, +{ + type Item = F; + + fn next(&mut self) -> Option<Self::Item> { + let mut elem_bytes = Vec::with_capacity(self.elem_byte_capacity); + for _ in 0..elem_bytes.capacity() { + if let Some(byte) = self.bytes_iter.next() { + elem_bytes.push(*byte.borrow()); + } else { + break; + } + } + if elem_bytes.is_empty() { + None + } else { + Some(F::from_le_bytes_mod_order(&elem_bytes)) + } + } +} +
+struct FieldToBytes<I, F> { + elems_iter: I, + bytes_iter: Take<IntoIter<u8>>, + elem_byte_capacity: usize, + _phantom: PhantomData<F>, +} +
+impl<I, F> FieldToBytes<I, F> +where + F: PrimeField, +{ + fn new(elems_iter: I) -> Self { + Self { + elems_iter, + bytes_iter: Vec::new().into_iter().take(0), + elem_byte_capacity: elem_byte_capacity::<F>(), + _phantom: PhantomData, + } + } +} +
+impl<I, F> Iterator for FieldToBytes<I, F> +where + I: Iterator, + I::Item: Borrow<F>, + F: PrimeField, +{ + type Item = u8; + + fn next(&mut self) -> Option<Self::Item> { + if let Some(byte) = self.bytes_iter.next() { + return Some(byte); + } + if let Some(elem) = self.elems_iter.next() { + self.bytes_iter = elem + .borrow() + .into_bigint() + .to_bytes_le() + .into_iter() + .take(self.elem_byte_capacity); + return self.bytes_iter.next(); + } + None + } +} +
+/// Return the number of bytes that can be encoded into a generic [`PrimeField`] +/// parameter. +/// +/// Returns the byte length of the [`PrimeField`] modulus minus 1. +/// +/// It should be possible to do all this at compile time but I don't know how. +/// Want to panic on overflow, so use checked arithmetic and type conversion.
+pub fn elem_byte_capacity() -> usize { + usize::try_from((F::MODULUS_BIT_SIZE - 1) / 8) + .expect("prime field modulus byte len should fit into usize") +} + +#[cfg(test)] +mod tests { + use super::{bytes_to_field, field_to_bytes, PrimeField, Vec}; + use ark_ed_on_bls12_377::Fr as Fr377; + use ark_ed_on_bls12_381::Fr as Fr381; + use ark_ed_on_bn254::Fr as Fr254; + use ark_std::rand::RngCore; + + fn bytes_to_field_iter() { + let byte_lens = [0, 1, 2, 16, 31, 32, 33, 48, 65, 100, 200, 5000]; + + let max_len = *byte_lens.iter().max().unwrap(); + let mut bytes = Vec::with_capacity(max_len); + // TODO pre-allocate space for elems, owned, borrowed + let mut rng = jf_utils::test_rng(); + + for len in byte_lens { + // fill bytes with random bytes and trailing zeros + bytes.resize(len, 0); + rng.fill_bytes(&mut bytes); + + // round trip, owned: + // bytes as Iterator, elems as Iterator + let owned: Vec<_> = field_to_bytes(bytes_to_field::<_, F>(bytes.clone())) + .take(bytes.len()) + .collect(); + assert_eq!(owned, bytes); + + // round trip, borrowed: + // bytes as Iterator, elems as Iterator + let elems: Vec<_> = bytes_to_field::<_, F>(bytes.iter()).collect(); + let borrowed: Vec<_> = field_to_bytes::<_, F>(elems.iter()) + .take(bytes.len()) + .collect(); + assert_eq!(borrowed, bytes); + } + + // empty input -> empty output + let bytes = Vec::new(); + assert!(bytes.iter().next().is_none()); + let mut elems_iter = bytes_to_field::<_, F>(bytes.iter()); + assert!(elems_iter.next().is_none()); + + // 1-item input -> 1-item output + let bytes = [42u8; 1]; + let mut elems_iter = bytes_to_field::<_, F>(bytes.iter()); + assert_eq!(elems_iter.next().unwrap(), F::from(42u64)); + assert!(elems_iter.next().is_none()); + } + + #[test] + fn test_bytes_field_elems_iter() { + bytes_to_field_iter::(); + bytes_to_field_iter::(); + bytes_to_field_iter::(); + } +} diff --git a/primitives/src/vid/advz/payload_prover.rs b/primitives/src/vid/advz/payload_prover.rs new file mode 100644 index 000000000..18e98de23 --- /dev/null +++ b/primitives/src/vid/advz/payload_prover.rs @@ -0,0 +1,519 @@ +// Copyright (c) 2023 Espresso Systems (espressosys.com) +// This file is part of the Jellyfish library. + +// You should have received a copy of the MIT License +// along with the Jellyfish library. If not, see . + +//! Implementations of [`PayloadProver`] for `Advz`. +//! +//! Two implementations: +//! 1. `PROOF = `[`SmallRangeProof`]: Useful for small sub-slices of `payload` +//! such as an individual transaction within a block. Not snark-friendly +//! because it requires a pairing. Consists of metadata required to verify a +//! KZG batch proof. +//! 2. `PROOF = `[`LargeRangeProof`]: Useful for large sub-slices of `payload` +//! such as a complete namespace. Snark-friendly because it does not require +//! a pairing. Consists of metadata required to rebuild a KZG commitment. + +use ark_poly::EvaluationDomain; + +use super::{ + bytes_to_field, bytes_to_field::elem_byte_capacity, AffineRepr, Debug, DenseUVPolynomial, + Digest, DynDigest, GenericAdvz, MerkleTreeScheme, PolynomialCommitmentScheme, PrimeField, + UnivariatePCS, Vec, VidResult, Write, +}; +use crate::{ + alloc::string::ToString, + vid::{ + payload_prover::{PayloadProver, Statement}, + vid, VidError, VidScheme, + }, +}; +use ark_std::{format, ops::Range}; + +/// A proof intended for use on small payload subslices. +/// +/// KZG batch proofs and accompanying metadata. +/// +/// TODO use batch proof instead of `Vec
<P>` +pub struct SmallRangeProof<P> { + proofs: Vec<P>
, + prefix_bytes: Vec, + suffix_bytes: Vec, + chunk_range: Range, +} + +/// A proof intended for use on large payload subslices. +/// +/// Metadata needed to recover a KZG commitment. +pub struct LargeRangeProof { + prefix_elems: Vec, + suffix_elems: Vec, + prefix_bytes: Vec, + suffix_bytes: Vec, + chunk_range: Range, +} + +impl PayloadProver> for GenericAdvz +where + // TODO ugly trait bounds https://github.com/EspressoSystems/jellyfish/issues/253 + P: UnivariatePCS::Evaluation>, + P::Evaluation: PrimeField, + P::Polynomial: DenseUVPolynomial, + P::Commitment: From + AsRef, + T: AffineRepr, + H: Digest + DynDigest + Default + Clone + Write, + V: MerkleTreeScheme>, + V::MembershipProof: Sync + Debug, + V::Index: From, +{ + fn payload_proof( + &self, + payload: B, + range: Range, + ) -> VidResult> + where + B: AsRef<[u8]>, + { + let payload = payload.as_ref(); + check_range_nonempty_and_inside_payload(payload, &range)?; + + // index conversion + let range_elem = self.range_byte_to_elem(&range); + let range_poly = self.range_elem_to_poly(&range_elem); + let start_namespace_byte = self.index_poly_to_byte(range_poly.start); + let offset_elem = range_elem.start - self.index_byte_to_elem(start_namespace_byte); + let range_elem_byte = self.range_elem_to_byte(&range_elem); + + check_range_poly(&range_poly)?; + + // grab the polynomial that contains `range` + // TODO allow precomputation: https://github.com/EspressoSystems/jellyfish/issues/397 + let polynomial = self.polynomial( + bytes_to_field::<_, P::Evaluation>(payload[start_namespace_byte..].iter()) + .take(self.payload_chunk_size), + ); + + // prepare list of input points + // perf: can't avoid use of `skip` + let points: Vec<_> = { + self.eval_domain + .elements() + .skip(offset_elem) + .take(range_elem.len()) + .collect() + }; + + let (proofs, _evals) = P::multi_open(&self.ck, &polynomial, &points).map_err(vid)?; + + Ok(SmallRangeProof { + proofs, + prefix_bytes: payload[range_elem_byte.start..range.start].to_vec(), + suffix_bytes: payload[range.end..range_elem_byte.end].to_vec(), + chunk_range: range, + }) + } + + fn payload_verify( + &self, + stmt: Statement, + proof: &SmallRangeProof, + ) -> VidResult> { + Self::check_stmt_proof_consistency(&stmt, &proof.chunk_range)?; + + // index conversion + let range_elem = self.range_byte_to_elem(&proof.chunk_range); + let range_poly = self.range_elem_to_poly(&range_elem); + let start_namespace_byte = self.index_poly_to_byte(range_poly.start); + let offset_elem = range_elem.start - self.index_byte_to_elem(start_namespace_byte); + + check_range_poly(&range_poly)?; + Self::check_common_commit_consistency(stmt.common, stmt.commit)?; + + // prepare list of data elems + let data_elems: Vec<_> = bytes_to_field::<_, P::Evaluation>( + proof + .prefix_bytes + .iter() + .chain(stmt.payload_subslice) + .chain(proof.suffix_bytes.iter()), + ) + .collect(); + + // prepare list of input points + // perf: can't avoid use of `skip` + let points: Vec<_> = { + self.eval_domain + .elements() + .skip(offset_elem) + .take(range_elem.len()) + .collect() + }; + + // verify proof + // TODO naive verify for multi_open https://github.com/EspressoSystems/jellyfish/issues/387 + if data_elems.len() != proof.proofs.len() { + return Err(VidError::Argument(format!( + "data len {} differs from proof len {}", + data_elems.len(), + proof.proofs.len() + ))); + } + assert_eq!(data_elems.len(), points.len()); // sanity + let poly_commit = &stmt.common.poly_commits[range_poly.start]; + for (point, (elem, pf)) in points + .iter() + 
.zip(data_elems.iter().zip(proof.proofs.iter())) + { + if !P::verify(&self.vk, poly_commit, point, elem, pf).map_err(vid)? { + return Ok(Err(())); + } + } + Ok(Ok(())) + } +} + +impl PayloadProver> for GenericAdvz +where + // TODO ugly trait bounds https://github.com/EspressoSystems/jellyfish/issues/253 + P: UnivariatePCS::Evaluation>, + P::Evaluation: PrimeField, + P::Polynomial: DenseUVPolynomial, + P::Commitment: From + AsRef, + T: AffineRepr, + H: Digest + DynDigest + Default + Clone + Write, + V: MerkleTreeScheme>, + V::MembershipProof: Sync + Debug, + V::Index: From, +{ + fn payload_proof( + &self, + payload: B, + range: Range, + ) -> VidResult> + where + B: AsRef<[u8]>, + { + let payload = payload.as_ref(); + check_range_nonempty_and_inside_payload(payload, &range)?; + + // index conversion + let range_elem = self.range_byte_to_elem(&range); + let range_poly = self.range_elem_to_poly(&range_elem); + let start_namespace_byte = self.index_poly_to_byte(range_poly.start); + let offset_elem = range_elem.start - self.index_byte_to_elem(start_namespace_byte); + let range_elem_byte = self.range_elem_to_byte(&range_elem); + + check_range_poly(&range_poly)?; + + // compute the prefix and suffix elems + let mut elems_iter = + bytes_to_field::<_, P::Evaluation>(payload[start_namespace_byte..].iter()) + .take(self.payload_chunk_size); + let prefix: Vec<_> = elems_iter.by_ref().take(offset_elem).collect(); + let suffix: Vec<_> = elems_iter.skip(range_elem.len()).collect(); + + Ok(LargeRangeProof { + prefix_elems: prefix, + suffix_elems: suffix, + prefix_bytes: payload[range_elem_byte.start..range.start].to_vec(), + suffix_bytes: payload[range.end..range_elem_byte.end].to_vec(), + chunk_range: range, + }) + } + + fn payload_verify( + &self, + stmt: Statement, + proof: &LargeRangeProof, + ) -> VidResult> { + Self::check_stmt_proof_consistency(&stmt, &proof.chunk_range)?; + + // index conversion + let range_poly = self.range_byte_to_poly(&proof.chunk_range); + + check_range_poly(&range_poly)?; + Self::check_common_commit_consistency(stmt.common, stmt.commit)?; + + // rebuild the poly commit, check against `common` + let poly_commit = { + let poly = self.polynomial( + proof + .prefix_elems + .iter() + .cloned() + .chain(bytes_to_field::<_, P::Evaluation>( + proof + .prefix_bytes + .iter() + .chain(stmt.payload_subslice) + .chain(proof.suffix_bytes.iter()), + )) + .chain(proof.suffix_elems.iter().cloned()), + ); + P::commit(&self.ck, &poly).map_err(vid)? 
+ }; + if poly_commit != stmt.common.poly_commits[range_poly.start] { + return Ok(Err(())); + } + + Ok(Ok(())) + } +} + +impl GenericAdvz +where + // TODO ugly trait bounds https://github.com/EspressoSystems/jellyfish/issues/253 + P: UnivariatePCS::Evaluation>, + P::Evaluation: PrimeField, + P::Polynomial: DenseUVPolynomial, + P::Commitment: From + AsRef, + T: AffineRepr, + H: Digest + DynDigest + Default + Clone + Write, + V: MerkleTreeScheme>, + V::MembershipProof: Sync + Debug, + V::Index: From, +{ + // lots of index manipulation + fn index_byte_to_elem(&self, index: usize) -> usize { + index_coarsen(index, elem_byte_capacity::()) + } + fn index_poly_to_byte(&self, index: usize) -> usize { + index_refine( + index, + self.payload_chunk_size * elem_byte_capacity::(), + ) + } + fn range_byte_to_elem(&self, range: &Range) -> Range { + range_coarsen(range, elem_byte_capacity::()) + } + fn range_elem_to_byte(&self, range: &Range) -> Range { + range_refine(range, elem_byte_capacity::()) + } + fn range_elem_to_poly(&self, range: &Range) -> Range { + range_coarsen(range, self.payload_chunk_size) + } + fn range_byte_to_poly(&self, range: &Range) -> Range { + range_coarsen( + range, + self.payload_chunk_size * elem_byte_capacity::(), + ) + } + + fn check_common_commit_consistency( + common: &::Common, + commit: &::Commit, + ) -> VidResult<()> { + if *commit != Self::poly_commits_hash(common.poly_commits.iter())? { + return Err(VidError::Argument( + "common inconsistent with commit".to_string(), + )); + } + Ok(()) + } + + fn check_stmt_proof_consistency( + stmt: &Statement, + proof_range: &Range, + ) -> VidResult<()> { + if stmt.range.is_empty() { + return Err(VidError::Argument(format!( + "empty range ({},{})", + stmt.range.start, stmt.range.end + ))); + } + if stmt.payload_subslice.len() != stmt.range.len() { + return Err(VidError::Argument(format!( + "payload_subslice length {} inconsistent with range length {}", + stmt.payload_subslice.len(), + stmt.range.len() + ))); + } + if stmt.range != *proof_range { + return Err(VidError::Argument(format!( + "statement range ({},{}) differs from proof range ({},{})", + stmt.range.start, stmt.range.end, proof_range.start, proof_range.end, + ))); + } + Ok(()) + } +} + +fn range_coarsen(range: &Range, denominator: usize) -> Range { + assert!(!range.is_empty(), "{:?}", range); + Range { + start: index_coarsen(range.start, denominator), + end: index_coarsen(range.end - 1, denominator) + 1, + } +} + +fn range_refine(range: &Range, multiplier: usize) -> Range { + assert!(!range.is_empty(), "{:?}", range); + Range { + start: index_refine(range.start, multiplier), + end: index_refine(range.end, multiplier), + } +} + +fn index_coarsen(index: usize, denominator: usize) -> usize { + index / denominator +} + +fn index_refine(index: usize, multiplier: usize) -> usize { + index * multiplier +} + +fn check_range_nonempty_and_inside_payload(payload: &[u8], range: &Range) -> VidResult<()> { + if range.is_empty() { + return Err(VidError::Argument(format!( + "empty range ({}..{})", + range.start, range.end + ))); + } + if range.end > payload.len() { + return Err(VidError::Argument(format!( + "range ({}..{}) out of bounds for payload len {}", + range.start, + range.end, + payload.len() + ))); + } + Ok(()) +} + +fn check_range_poly(range_poly: &Range) -> VidResult<()> { + // TODO TEMPORARY: forbid requests that span multiple polynomials + if range_poly.len() != 1 { + return Err(VidError::Argument(format!( + "request spans {} polynomials, expect 1", + range_poly.len() + ))); + 
} + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::vid::{ + advz::{ + bytes_to_field::elem_byte_capacity, + payload_prover::{LargeRangeProof, SmallRangeProof, Statement}, + tests::*, + *, + }, + payload_prover::PayloadProver, + }; + use ark_bls12_381::Bls12_381; + use ark_std::{ops::Range, println, rand::Rng}; + use digest::{generic_array::ArrayLength, OutputSizeUser}; + use sha2::Sha256; +
+ fn correctness_generic<E, H>() + where + E: Pairing, + H: Digest + DynDigest + Default + Clone + Write, + <<H as OutputSizeUser>::OutputSize as ArrayLength<u8>>::ArrayType: Copy, + { + // play with these items + let (payload_chunk_size, num_storage_nodes) = (4, 6); + let num_polys = 4; + + // more items as a function of the above + let payload_elems_len = num_polys * payload_chunk_size; + let payload_bytes_len = payload_elems_len * elem_byte_capacity::<E::ScalarField>(); + let poly_bytes_len = payload_chunk_size * elem_byte_capacity::<E::ScalarField>(); + let mut rng = jf_utils::test_rng(); + let payload = init_random_payload(payload_bytes_len, &mut rng); + let srs = init_srs(payload_elems_len, &mut rng); +
+ let advz = Advz::<E, H>::new(payload_chunk_size, num_storage_nodes, srs).unwrap(); + let d = advz.disperse(&payload).unwrap(); +
+ // TEST: prove data ranges for this payload + // it takes too long to test all combos of (polynomial, start, len) + // so do some edge cases and random cases + let edge_cases = vec![ + Range { start: 0, end: 1 }, + Range { start: 0, end: 2 }, + Range { + start: 0, + end: poly_bytes_len - 1, + }, + Range { + start: 0, + end: poly_bytes_len, + }, + Range { start: 1, end: 2 }, + Range { start: 1, end: 3 }, + Range { + start: 1, + end: poly_bytes_len - 1, + }, + Range { + start: 1, + end: poly_bytes_len, + }, + Range { + start: poly_bytes_len - 2, + end: poly_bytes_len - 1, + }, + Range { + start: poly_bytes_len - 2, + end: poly_bytes_len, + }, + Range { + start: poly_bytes_len - 1, + end: poly_bytes_len, + }, + ]; + let random_cases = { + let num_cases = edge_cases.len(); + let mut random_cases = Vec::with_capacity(num_cases); + for _ in 0..num_cases { + let start = rng.gen_range(0..poly_bytes_len - 1); + let end = rng.gen_range(start + 1..poly_bytes_len); + random_cases.push(Range { start, end }); + } + random_cases + }; + let all_cases = [(edge_cases, "edge"), (random_cases, "rand")]; +
+ for poly in 0..num_polys { + let poly_offset = poly * poly_bytes_len; + + for cases in all_cases.iter() { + for range in cases.0.iter() { + let range = Range { + start: range.start + poly_offset, + end: range.end + poly_offset, + }; + println!("poly {} {} case: {:?}", poly, cases.1, range); + + let stmt = Statement { + payload_subslice: &payload[range.clone()], + range: range.clone(), + commit: &d.commit, + common: &d.common, + }; + + let small_range_proof: SmallRangeProof<_> = + advz.payload_proof(&payload, range.clone()).unwrap(); + advz.payload_verify(stmt.clone(), &small_range_proof) + .unwrap() + .unwrap(); + + let large_range_proof: LargeRangeProof<_> = + advz.payload_proof(&payload, range.clone()).unwrap(); + advz.payload_verify(stmt, &large_range_proof) + .unwrap() + .unwrap(); + } + } + } + } +
+ #[test] + fn correctness() { + correctness_generic::<Bls12_381, Sha256>(); + } +} diff --git a/primitives/src/vid/payload_prover.rs b/primitives/src/vid/payload_prover.rs new file mode 100644 index 000000000..d6218d4aa --- /dev/null +++ b/primitives/src/vid/payload_prover.rs @@ -0,0 +1,73 @@ +// Copyright (c) 2023 Espresso Systems (espressosys.com) +// This file is part of the Jellyfish library.
+ +// You should have received a copy of the MIT License +// along with the Jellyfish library. If not, see . + +//! Trait for additional functionality in Verifiable Information Retrieval (VID) +//! to make and verify a proof of correctness of an arbitrary sub-slice of data +//! from a payload. + +use super::{VidResult, VidScheme}; +use ark_std::ops::Range; + +/// Payload proof functionality for [`VidScheme`]. +pub trait PayloadProver: VidScheme { + /// Compute a proof for a sub-slice of payload data. + fn payload_proof(&self, payload: B, range: Range) -> VidResult + where + B: AsRef<[u8]>; + + /// Verify a proof made by `payload_proof`. + /// + /// `chunk` is the payload sub-slice for which a proof was generated via + /// `payload_proof` using `range`. In other words, `chunk` should equal + /// `payload[range.start..range.end]`. + fn payload_verify(&self, stmt: Statement, proof: &PROOF) -> VidResult>; +} + +/// A convenience struct to reduce the list of arguments to +/// [`PayloadProver::payload_verify`]. It's the statement proved by +/// [`PayloadProver::payload_proof`]. +/// +/// # Why the `?Sized` bound? +/// Rust hates you: +// TODO: figure out how to derive basic things like Clone, Debug, etc. +// Nothing works with the combo of both type parameter `V` and lifetime 'a. +// #[derive(Derivative)] +// #[derivative( +// Clone(bound = "V::Common: Clone, V::Commit:Clone"), +// // Debug(bound = "for<'b> &'b V::Common: ark_std::fmt::Debug, for<'b> &'b +// V::Commit: ark_std::fmt::Debug"), +// // Eq(bound = ""), +// // Hash(bound = ""), +// // PartialEq(bound = "") +// )] +pub struct Statement<'a, V> +where + V: VidScheme + ?Sized, +{ + /// The subslice `payload[range.start..range.end]` from a call to + /// [`PayloadProver::payload_proof`]. + pub payload_subslice: &'a [u8], + /// The range used to make [`Self::payload_subslice`]. + pub range: Range, + /// VID commitment against which the proof will be checked. + pub commit: &'a V::Commit, + /// VID data against which the proof will be checked. 
+ pub common: &'a V::Common, +} + +impl<'a, V> Clone for Statement<'a, V> +where + V: VidScheme, +{ + fn clone(&self) -> Self { + Self { + payload_subslice: self.payload_subslice, + range: self.range.clone(), + commit: self.commit, + common: self.common, + } + } +} diff --git a/primitives/tests/advz.rs b/primitives/tests/advz.rs index 88db3a096..d442e09e0 100644 --- a/primitives/tests/advz.rs +++ b/primitives/tests/advz.rs @@ -13,7 +13,7 @@ mod vid; fn round_trip() { // play with these items let vid_sizes = [(2, 3), (8, 11)]; - let byte_lens = [0, 1, 2, 16, 32, 47, 48, 49, 64, 100, 400]; + let payload_byte_lens = [0, 1, 2, 16, 32, 47, 48, 49, 64, 100, 400]; // more items as a function of the above let supported_degree = vid_sizes.iter().max_by_key(|v| v.0).unwrap().0 - 1; @@ -35,7 +35,7 @@ fn round_trip() { Advz::::new(payload_chunk_size, num_storage_nodes, &srs).unwrap() }, &vid_sizes, - &byte_lens, + &payload_byte_lens, &mut rng, ); } diff --git a/primitives/tests/vid/mod.rs b/primitives/tests/vid/mod.rs index 2152a3d84..2fc0df056 100644 --- a/primitives/tests/vid/mod.rs +++ b/primitives/tests/vid/mod.rs @@ -29,8 +29,11 @@ pub fn round_trip( payload_chunk_size, num_storage_nodes, len ); - let mut bytes_random = vec![0u8; len]; - rng.fill_bytes(&mut bytes_random); + let bytes_random = { + let mut bytes_random = vec![0u8; len]; + rng.fill_bytes(&mut bytes_random); + bytes_random + }; let disperse = vid.disperse(&bytes_random).unwrap(); let (mut shares, common, commit) = (disperse.shares, disperse.common, disperse.commit); @@ -38,7 +41,7 @@ pub fn round_trip( assert_eq!(commit, vid.commit_only(&bytes_random).unwrap()); for share in shares.iter() { - vid.verify_share(share, &common).unwrap().unwrap(); + vid.verify_share(share, &common, &commit).unwrap().unwrap(); } // sample a random subset of shares with size payload_chunk_size diff --git a/relation/src/gadgets/emulated.rs b/relation/src/gadgets/emulated.rs index 95cb9c433..ec77bedc0 100644 --- a/relation/src/gadgets/emulated.rs +++ b/relation/src/gadgets/emulated.rs @@ -19,7 +19,7 @@ use core::marker::PhantomData; use itertools::izip; use num_bigint::BigUint; -/// Parameters needed for emulating field operations over [`F`]. +/// Parameters needed for emulating field operations over [`PrimeField`]. pub trait EmulationConfig: PrimeField { /// Log2 of the other CRT modulus is 2^T. const T: usize; diff --git a/utilities/src/conversion.rs b/utilities/src/conversion.rs index 377901059..194149871 100644 --- a/utilities/src/conversion.rs +++ b/utilities/src/conversion.rs @@ -9,10 +9,9 @@ use ark_ff::{BigInteger, Field, PrimeField}; use ark_std::{ borrow::Borrow, cmp::min, - iter::{once, repeat, Peekable, Take}, - marker::PhantomData, - mem, vec, - vec::{IntoIter, Vec}, + iter::{once, repeat}, + mem, + vec::Vec, }; use sha2::{Digest, Sha512}; @@ -109,20 +108,22 @@ where /// `u64`. /// - Partition `bytes` into chunks of length P, where P is the field /// characteristic byte length minus 1. -/// - Convert each chunk into [`BasePrimeField`] via -/// [`from_le_bytes_mod_order`]. Reduction modulo the field characteristic is -/// guaranteed not to occur because chunk byte length is sufficiently small. -/// - Collect [`BasePrimeField`] elements into [`Field`] elements and append to -/// result. +/// - Convert each chunk into [`Field::BasePrimeField`] via +/// [`PrimeField::from_le_bytes_mod_order`]. Reduction modulo the field +/// characteristic is guaranteed not to occur because chunk byte length is +/// sufficiently small. 
 ///
 /// # Panics
 ///
 /// Panics only under conditions that should be checkable at compile time:
 ///
-/// - The [`BasePrimeField`] modulus bit length is too small to hold a `u64`.
-/// - The byte length of a single [`BasePrimeField`] element fails to fit inside
-///   a `usize`.
+/// - The [`Field::BasePrimeField`] modulus bit length is too small to hold a
+///   `u64`.
+/// - The byte length of a single [`Field::BasePrimeField`] element fails to fit
+///   inside a `usize`.
 /// - The extension degree of the [`Field`] fails to fit inside a `usize`.
 /// - The byte length of a [`Field`] element fails to fit inside a `usize`.
 ///
@@ -290,262 +291,6 @@ fn compile_time_checks() -> (usize, usize, usize)
     (primefield_bytes_len, extension_degree, field_bytes_len)
 }
 
-/// Deterministic, infallible, invertible iterator adaptor to convert from
-/// arbitrary bytes to field elements.
-///
-/// # TODO doc test
-///
-/// # How it works
-///
-/// Returns an iterator over [`PrimeField`] items defined as follows:
-/// - For each call to `next()`:
-///   - Consume P-1 items from `bytes` where P is the field characteristic byte
-///     length. (Consume all remaining B items from `bytes` if B < P-1.)
-///   - Convert the consumed bytes into a [`PrimeField`] via
-///     [`from_le_bytes_mod_order`]. Reduction modulo the field characteristic
-///     is guaranteed not to occur because we consumed at most P-1 bytes.
-///   - Return the resulting [`PrimeField`] item.
-/// - The returned iterator has an additional item that encodes the number of
-///   input items consumed in order to produce the final output item.
-/// - If `bytes` is empty then result is empty.
-///
-/// # Panics
-///
-/// Panics only under conditions that should be checkable at compile time:
-///
-/// - The [`PrimeField`] modulus bit length is too small to hold a `u64`.
-/// - The [`PrimeField`] byte length is too large to fit inside a `usize`.
-///
-/// If any of the above conditions holds then this function *always* panics.
-pub fn bytes_to_field<F, I>(bytes: I) -> impl Iterator<Item = F>
-where
-    F: PrimeField,
-    I: IntoIterator,
-    I::Item: Borrow<u8>,
-{
-    BytesToField::new(bytes.into_iter())
-}
-
-/// Deterministic, infallible inverse of [`bytes_to_field`].
-///
-/// This function is not invertible because [`bytes_to_field`] is not onto.
-///
-/// ## Panics
-///
-/// Panics under the conditions listed at [`bytes_to_field`].
-pub fn field_to_bytes<F, I>(elems: I) -> impl Iterator<Item = u8>
-where
-    F: PrimeField,
-    I: IntoIterator,
-    I::Item: Borrow<F>,
-{
-    FieldToBytes::new(elems.into_iter())
-}
-
-struct BytesToField<I, F>
-where
-    I: Iterator,
-{
-    bytes_iter: Peekable<I>,
-    final_byte_len: Option<usize>,
-    done: bool,
-    new: bool,
-    _phantom: PhantomData<F>,
-    primefield_bytes_len: usize,
-}
-
-impl<I, F> BytesToField<I, F>
-where
-    I: Iterator,
-    F: PrimeField,
-{
-    fn new(iter: I) -> Self {
-        let (primefield_bytes_len, ..) = compile_time_checks::<F>();
-        Self {
-            bytes_iter: iter.peekable(),
-            final_byte_len: None,
-            done: false,
-            new: true,
-            _phantom: PhantomData,
-            primefield_bytes_len,
-        }
-    }
-}
-
-impl<I, F> Iterator for BytesToField<I, F>
-where
-    I: Iterator,
-    I::Item: Borrow<u8>,
-    F: PrimeField,
-{
-    type Item = F;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        if self.done {
-            // we don't support iterators that return `Some` after returning `None`
-            return None;
-        }
-
-        if let Some(len) = self.final_byte_len {
-            // iterator is done. final field elem encodes length.
-            self.done = true;
-            return Some(F::from(len as u64));
-        }
-
-        if self.new && self.bytes_iter.peek().is_none() {
-            // zero-length iterator
-            self.done = true;
-            return None;
-        }
-
-        // TODO const generics: use [u8; primefield_bytes_len]
-        let mut field_elem_bytes = vec![0u8; self.primefield_bytes_len];
-        for (i, b) in field_elem_bytes.iter_mut().enumerate() {
-            if let Some(byte) = self.bytes_iter.next() {
-                *b = *byte.borrow();
-            } else {
-                self.final_byte_len = Some(i);
-                break;
-            }
-        }
-        Some(F::from_le_bytes_mod_order(&field_elem_bytes))
-    }
-}
-
-struct FieldToBytes<I, F> {
-    elems_iter: I,
-    state: FieldToBytesState<F>,
-    primefield_bytes_len: usize,
-}
-
-enum FieldToBytesState<F> {
-    New,
-    Typical {
-        bytes_iter: Take<IntoIter<u8>>,
-        next_elem: F,
-        next_next_elem: F,
-    },
-    Final {
-        bytes_iter: Take<IntoIter<u8>>,
-    },
-}
-
-impl<I, F: PrimeField> FieldToBytes<I, F> {
-    fn new(elems_iter: I) -> Self {
-        let (primefield_bytes_len, ..) = compile_time_checks::<F>();
-        Self {
-            elems_iter,
-            state: FieldToBytesState::New,
-            primefield_bytes_len,
-        }
-    }
-
-    fn elem_to_usize(elem: F) -> usize {
-        usize::try_from(u64::from_le_bytes(
-            elem.into_bigint().to_bytes_le()[..mem::size_of::<u64>()]
-                .try_into()
-                .expect("conversion from [u8] to u64 should succeed"),
-        ))
-        .expect("result len conversion from u64 to usize should succeed")
-    }
-
-    fn elem_to_bytes_iter(elem: F) -> IntoIter<u8> {
-        elem.into_bigint().to_bytes_le().into_iter()
-    }
-}
-
-impl<I, F> Iterator for FieldToBytes<I, F>
-where
-    I: Iterator,
-    I::Item: Borrow<F>,
-    F: PrimeField,
-{
-    type Item = u8;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        use FieldToBytesState::{Final, New, Typical};
-        match &mut self.state {
-            New => {
-                let cur_elem = if let Some(elem) = self.elems_iter.next() {
-                    *elem.borrow()
-                } else {
-                    // length-0 iterator
-                    // move to `Final` state with an empty iterator
-                    self.state = Final {
-                        bytes_iter: Vec::new().into_iter().take(0),
-                    };
-                    return None;
-                };
-
-                let bytes_iter = Self::elem_to_bytes_iter(cur_elem);
-
-                let next_elem = if let Some(elem) = self.elems_iter.next() {
-                    *elem.borrow()
-                } else {
-                    // length-1 iterator: we never produced this
-                    // move to `Final` state with primefield_bytes_len bytes from the sole elem
-                    let mut bytes_iter = bytes_iter.take(self.primefield_bytes_len);
-                    let ret = bytes_iter.next();
-                    self.state = Final { bytes_iter };
-                    return ret;
-                };
-
-                let next_next_elem = if let Some(elem) = self.elems_iter.next() {
-                    *elem.borrow()
-                } else {
-                    // length-2 iterator
-                    let final_byte_len = Self::elem_to_usize(next_elem);
-                    let mut bytes_iter = bytes_iter.take(final_byte_len);
-                    let ret = bytes_iter.next();
-                    self.state = Final { bytes_iter };
-                    return ret;
-                };
-
-                // length >2 iterator
-                let mut bytes_iter = bytes_iter.take(self.primefield_bytes_len);
-                let ret = bytes_iter.next();
-                self.state = Typical {
-                    bytes_iter,
-                    next_elem,
-                    next_next_elem,
-                };
-                ret
-            },
-            Typical {
-                bytes_iter,
-                next_elem,
-                next_next_elem,
-            } => {
-                let ret = bytes_iter.next();
-                if ret.is_some() {
-                    return ret;
-                }
-
-                let bytes_iter = Self::elem_to_bytes_iter(*next_elem);
-
-                if let Some(elem) = self.elems_iter.next() {
-                    // advance to the next field element
-                    let mut bytes_iter = bytes_iter.take(self.primefield_bytes_len);
-                    let ret = bytes_iter.next();
-                    self.state = Typical {
-                        bytes_iter,
-                        next_elem: *next_next_elem,
-                        next_next_elem: *elem.borrow(),
-                    };
-                    return ret;
-                }
-
-                // done
-                let final_byte_len = Self::elem_to_usize(*next_next_elem);
-                let mut bytes_iter = bytes_iter.take(final_byte_len);
-                let ret = bytes_iter.next();
-                self.state = Final { bytes_iter };
-                ret
-            },
-            Final { bytes_iter } => bytes_iter.next(),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use crate::test_rng;
@@ -621,65 +366,6 @@ mod tests {
         }
     }
 
-    fn bytes_field_elems_iter<F: PrimeField>() {
-        // copied from bytes_field_elems()
-
-        let lengths = [0, 1, 2, 16, 31, 32, 33, 48, 65, 100, 200, 5000];
-        let trailing_zeros_lengths = [0, 1, 2, 5, 50];
-
-        let max_len = *lengths.iter().max().unwrap();
-        let max_trailing_zeros_len = *trailing_zeros_lengths.iter().max().unwrap();
-        let mut bytes = Vec::with_capacity(max_len + max_trailing_zeros_len);
-        let mut elems: Vec<F> = Vec::with_capacity(max_len);
-        let mut rng = test_rng();
-
-        for len in lengths {
-            for trailing_zeros_len in trailing_zeros_lengths {
-                // fill bytes with random bytes and trailing zeros
-                bytes.resize(len + trailing_zeros_len, 0);
-                rng.fill_bytes(&mut bytes[..len]);
-                bytes[len..].fill(0);
-
-                // debug
-                // println!("byte_len: {}, trailing_zeros: {}", len, trailing_zeros_len);
-                // println!("bytes: {:?}", bytes);
-                // let encoded: Vec<F> = bytes_to_field(bytes.iter()).collect();
-                // println!("encoded: {:?}", encoded);
-                // let result: Vec<_> = bytes_from_field(encoded).collect();
-                // println!("result: {:?}", result);
-
-                // round trip: bytes as Iterator<Item = u8>, elems as Iterator<Item = F>
-                let result_clone: Vec<_> =
-                    field_to_bytes(bytes_to_field::<_, F>(bytes.clone())).collect();
-                assert_eq!(result_clone, bytes);
-
-                // round trip: bytes as Iterator<Item = &u8>, elems as Iterator<Item = &F>
-                let encoded: Vec<_> = bytes_to_field::<_, F>(bytes.iter()).collect();
-                let result_borrow: Vec<_> = field_to_bytes::<_, F>(encoded.iter()).collect();
-                assert_eq!(result_borrow, bytes);
-            }
-
-            // test infallibility of bytes_from_field
-            // with random field elements
-            elems.resize(len, F::zero());
-            elems.iter_mut().for_each(|e| *e = F::rand(&mut rng));
-            let _: Vec<u8> = field_to_bytes::<_, F>(elems.iter()).collect();
-        }
-
-        // empty input -> empty output
-        let bytes = Vec::new();
-        assert!(bytes.iter().next().is_none());
-        let mut elems_iter = bytes_to_field::<_, F>(bytes.iter());
-        assert!(elems_iter.next().is_none());
-
-        // smallest non-empty input -> 2-item output
-        let bytes = [42u8; 1];
-        let mut elems_iter = bytes_to_field::<_, F>(bytes.iter());
-        assert_eq!(elems_iter.next().unwrap(), F::from(42u64));
-        assert_eq!(elems_iter.next().unwrap(), F::from(1u64));
-        assert!(elems_iter.next().is_none());
-    }
-
     #[test]
     fn test_bytes_field_elems() {
         bytes_field_elems::();
@@ -689,11 +375,4 @@ mod tests {
         bytes_field_elems::();
         bytes_field_elems::();
     }
-
-    #[test]
-    fn test_bytes_field_elems_iter() {
-        bytes_field_elems_iter::();
-        bytes_field_elems_iter::();
-        bytes_field_elems_iter::();
-    }
 }