diff --git a/benches/dalek_benchmarks.rs b/benches/dalek_benchmarks.rs
index bb47634f5..451844688 100644
--- a/benches/dalek_benchmarks.rs
+++ b/benches/dalek_benchmarks.rs
@@ -20,7 +20,7 @@ static MULTISCALAR_SIZES: [usize; 13] = [1, 2, 4, 8, 16, 32, 64, 128, 256, 384,
 mod edwards_benches {
     use super::*;
 
-    use curve25519_dalek::edwards;
+    use curve25519_dalek::edwards::EdwardsPoint;
 
     fn compress(c: &mut Criterion) {
@@ -47,7 +47,7 @@ mod edwards_benches {
         let B = &constants::ED25519_BASEPOINT_POINT;
         let s = Scalar::from(897987897u64).invert();
         c.bench_function("Constant-time variable-base scalar mul", move |b| {
-            b.iter(|| B * &s)
+            b.iter(|| B * s)
         });
     }
 
@@ -77,7 +77,7 @@ mod edwards_benches {
 mod multiscalar_benches {
     use super::*;
 
-    use curve25519_dalek::edwards;
+    use curve25519_dalek::edwards::EdwardsPoint;
     use curve25519_dalek::edwards::VartimeEdwardsPrecomputation;
     use curve25519_dalek::traits::MultiscalarMul;
diff --git a/src/backend/serial/scalar_mul/pippenger.rs b/src/backend/serial/scalar_mul/pippenger.rs
index 0cae2a1bb..bffe1402f 100644
--- a/src/backend/serial/scalar_mul/pippenger.rs
+++ b/src/backend/serial/scalar_mul/pippenger.rs
@@ -94,19 +94,16 @@ impl VartimeMultiscalarMul for Pippenger {
         // Collect optimized scalars and points in buffers for repeated access
         // (scanning the whole set per digit position).
         let scalars = scalars
-            .into_iter()
             .map(|s| s.borrow().to_radix_2w(w));
 
         let points = points
             .into_iter()
             .map(|p| p.map(|P| P.to_projective_niels()));
 
-        let scalars_points = scalars.zip(points).map(|(s,maybe_p)| maybe_p.map(|p| (s,p) ) )
-            .collect::<Option<Vec<_>>>();
-        let scalars_points = match scalars_points {
-            Some(sp) => sp,
-            None => return None,
-        };
+        let scalars_points = scalars
+            .zip(points)
+            .map(|(s, maybe_p)| maybe_p.map(|p| (s, p)))
+            .collect::<Option<Vec<_>>>()?;
 
         // Prepare 2^w/2 buckets.
         // buckets[i] corresponds to a multiplication factor (i+1).
@@ -160,8 +157,7 @@ impl VartimeMultiscalarMul for Pippenger {
 
         Some(
             columns
-                .fold(hi_column, |total, p| total.mul_by_pow_2(w as u32) + p)
-                .into(),
+                .fold(hi_column, |total, p| total.mul_by_pow_2(w as u32) + p),
         )
     }
 }
diff --git a/src/backend/serial/scalar_mul/precomputed_straus.rs b/src/backend/serial/scalar_mul/precomputed_straus.rs
index 9c66c9aaf..97f5e860b 100644
--- a/src/backend/serial/scalar_mul/precomputed_straus.rs
+++ b/src/backend/serial/scalar_mul/precomputed_straus.rs
@@ -67,14 +67,10 @@ impl VartimePrecomputedMultiscalarMul for VartimePrecomputedStraus {
             .map(|c| c.borrow().non_adjacent_form(5))
             .collect::<Vec<_>>();
 
-        let dynamic_lookup_tables = match dynamic_points
-            .into_iter()
-            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<ProjectiveNielsPoint>::from(&P)))
-            .collect::<Option<Vec<_>>>()
-        {
-            Some(x) => x,
-            None => return None,
-        };
+        let dynamic_lookup_tables = dynamic_points
+            .into_iter()
+            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<ProjectiveNielsPoint>::from(&P)))
+            .collect::<Option<Vec<_>>>()?;
 
         let sp = self.static_lookup_tables.len();
         let dp = dynamic_lookup_tables.len();
diff --git a/src/backend/serial/scalar_mul/straus.rs b/src/backend/serial/scalar_mul/straus.rs
index 862cf2573..6f0786ef0 100644
--- a/src/backend/serial/scalar_mul/straus.rs
+++ b/src/backend/serial/scalar_mul/straus.rs
@@ -168,14 +168,10 @@ impl VartimeMultiscalarMul for Straus {
             .map(|c| c.borrow().non_adjacent_form(5))
             .collect();
 
-        let lookup_tables = match points
-            .into_iter()
-            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<ProjectiveNielsPoint>::from(&P)))
-            .collect::<Option<Vec<_>>>()
-        {
-            Some(x) => x,
-            None => return None,
-        };
+        let lookup_tables = points
+            .into_iter()
+            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<ProjectiveNielsPoint>::from(&P)))
+            .collect::<Option<Vec<_>>>()?;
 
         let mut r = ProjectivePoint::identity();
diff --git a/src/backend/vector/scalar_mul/pippenger.rs b/src/backend/vector/scalar_mul/pippenger.rs
index f834a66a5..7f9e24156 100644
--- a/src/backend/vector/scalar_mul/pippenger.rs
+++ b/src/backend/vector/scalar_mul/pippenger.rs
@@ -58,12 +58,10 @@ impl VartimeMultiscalarMul for Pippenger {
             .into_iter()
             .map(|p| p.map(|P| CachedPoint::from(ExtendedPoint::from(P))));
 
-        let scalars_points = scalars.zip(points).map(|(s,maybe_p)| maybe_p.map(|p| (s,p) ) )
-            .collect::<Option<Vec<_>>>();
-        let scalars_points = match scalars_points {
-            Some(sp) => sp,
-            None => return None,
-        };
+        let scalars_points = scalars
+            .zip(points)
+            .map(|(s, maybe_p)| maybe_p.map(|p| (s, p)))
+            .collect::<Option<Vec<_>>>()?;
 
         // Prepare 2^w/2 buckets.
         // buckets[i] corresponds to a multiplication factor (i+1).
diff --git a/src/backend/vector/scalar_mul/precomputed_straus.rs b/src/backend/vector/scalar_mul/precomputed_straus.rs
index cc1404aac..2c6fdf5ed 100644
--- a/src/backend/vector/scalar_mul/precomputed_straus.rs
+++ b/src/backend/vector/scalar_mul/precomputed_straus.rs
@@ -66,14 +66,10 @@ impl VartimePrecomputedMultiscalarMul for VartimePrecomputedStraus {
             .map(|c| c.borrow().non_adjacent_form(5))
             .collect::<Vec<_>>();
 
-        let dynamic_lookup_tables = match dynamic_points
-            .into_iter()
-            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<CachedPoint>::from(&P)))
-            .collect::<Option<Vec<_>>>()
-        {
-            Some(x) => x,
-            None => return None,
-        };
+        let dynamic_lookup_tables = dynamic_points
+            .into_iter()
+            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<CachedPoint>::from(&P)))
+            .collect::<Option<Vec<_>>>()?;
 
         let sp = self.static_lookup_tables.len();
         let dp = dynamic_lookup_tables.len();
diff --git a/src/backend/vector/scalar_mul/straus.rs b/src/backend/vector/scalar_mul/straus.rs
index 285a5fd43..7a1167b36 100644
--- a/src/backend/vector/scalar_mul/straus.rs
+++ b/src/backend/vector/scalar_mul/straus.rs
@@ -83,14 +83,10 @@ impl VartimeMultiscalarMul for Straus {
             .into_iter()
             .map(|c| c.borrow().non_adjacent_form(5))
             .collect();
 
-        let lookup_tables: Vec<_> = match points
-            .into_iter()
-            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<CachedPoint>::from(&P)))
-            .collect::<Option<Vec<_>>>()
-        {
-            Some(x) => x,
-            None => return None,
-        };
+        let lookup_tables: Vec<_> = points
+            .into_iter()
+            .map(|P_opt| P_opt.map(|P| NafLookupTable5::<CachedPoint>::from(&P)))
+            .collect::<Option<Vec<_>>>()?;
 
         let mut Q = ExtendedPoint::identity();
diff --git a/src/edwards.rs b/src/edwards.rs
index e03230b4f..33520744b 100644
--- a/src/edwards.rs
+++ b/src/edwards.rs
@@ -195,7 +195,7 @@ impl CompressedEdwardsY {
         let compressed_sign_bit = Choice::from(self.as_bytes()[31] >> 7);
         X.conditional_negate(compressed_sign_bit);
 
-        Some(EdwardsPoint{ X: X, Y: Y, Z: Z, T: &X * &Y })
+        Some(EdwardsPoint{ X, Y, Z, T: &X * &Y })
     }
 }
@@ -449,7 +449,7 @@ impl EdwardsPoint {
         AffineNielsPoint{
             y_plus_x:  &y + &x,
             y_minus_x: &y - &x,
-            xy2d:      xy2d
+            xy2d
         }
     }
@@ -810,7 +810,7 @@ impl<'a, 'b> Mul<&'a EdwardsBasepointTable> for &'b Scalar {
     /// Construct an `EdwardsPoint` from a `Scalar` \\(a\\) by
     /// computing the multiple \\(aB\\) of this basepoint \\(B\\).
     fn mul(self, basepoint_table: &'a EdwardsBasepointTable) -> EdwardsPoint {
-        basepoint_table * &self
+        basepoint_table * self
     }
 }
@@ -908,7 +908,7 @@ impl EdwardsPoint {
     /// assert_eq!((P+Q).is_torsion_free(), false);
     /// ```
     pub fn is_torsion_free(&self) -> bool {
-        (self * &constants::BASEPOINT_ORDER).is_identity()
+        (self * constants::BASEPOINT_ORDER).is_identity()
    }
 }
@@ -1181,7 +1181,7 @@ mod test {
         // Test that sum works on owning iterators
         let s = Scalar::from(2u64);
-        let mapped = vec.iter().map(|x| x * &s);
+        let mapped = vec.iter().map(|x| x * s);
         let sum: EdwardsPoint = mapped.sum();
 
         assert_eq!(sum, &P1 * &s + &P2 * &s);
@@ -1204,10 +1204,10 @@
     #[test]
     fn is_small_order() {
         // The basepoint has large prime order
-        assert!(constants::ED25519_BASEPOINT_POINT.is_small_order() == false);
+        assert!(!constants::ED25519_BASEPOINT_POINT.is_small_order());
         // constants::EIGHT_TORSION has all points of small order.
         for torsion_point in &constants::EIGHT_TORSION {
-            assert!(torsion_point.is_small_order() == true);
+            assert!(torsion_point.is_small_order());
         }
     }
 
@@ -1219,8 +1219,8 @@
     #[test]
     fn is_identity() {
-        assert!( EdwardsPoint::identity().is_identity() == true);
-        assert!(constants::ED25519_BASEPOINT_POINT.is_identity() == false);
+        assert!( EdwardsPoint::identity().is_identity());
+        assert!(!constants::ED25519_BASEPOINT_POINT.is_identity());
     }
 
     /// Rust's debug builds have overflow and underflow trapping,
diff --git a/src/ristretto.rs b/src/ristretto.rs
index 3774c993b..a14cac62e 100644
--- a/src/ristretto.rs
+++ b/src/ristretto.rs
@@ -297,9 +297,9 @@ impl CompressedRistretto {
         let t = &x * &y;
 
         if ok.unwrap_u8() == 0u8 || t.is_negative().unwrap_u8() == 1u8 || y.is_zero().unwrap_u8() == 1u8 {
-            return None;
+            None
         } else {
-            return Some(RistrettoPoint(EdwardsPoint{X: x, Y: y, Z: one, T: t}));
+            Some(RistrettoPoint(EdwardsPoint{X: x, Y: y, Z: one, T: t}))
         }
     }
 }
@@ -529,11 +529,11 @@ impl RistrettoPoint {
                 let eg = &e * &g;
                 let fh = &f * &h;
 
-                BatchCompressState{ e: e, f: f, g: g, h: h, eg: eg, fh: fh }
+                BatchCompressState{ e, f, g, h, eg, fh }
             }
         }
 
-        let states: Vec<BatchCompressState> = points.into_iter().map(|P| BatchCompressState::from(P)).collect();
+        let states: Vec<BatchCompressState> = points.into_iter().map(BatchCompressState::from).collect();
 
         let mut invs: Vec<FieldElement> = states.iter().map(|state| state.efgh()).collect();
@@ -847,7 +847,7 @@ impl<'a, 'b> Mul<&'b Scalar> for &'a RistrettoPoint {
     type Output = RistrettoPoint;
 
     /// Scalar multiplication: compute `scalar * self`.
     fn mul(self, scalar: &'b Scalar) -> RistrettoPoint {
-        RistrettoPoint(&self.0 * scalar)
+        RistrettoPoint(self.0 * scalar)
     }
 }
@@ -856,7 +856,7 @@
     /// Scalar multiplication: compute `self * scalar`.
     fn mul(self, point: &'b RistrettoPoint) -> RistrettoPoint {
-        RistrettoPoint(self * &point.0)
+        RistrettoPoint(self * point.0)
     }
 }
@@ -902,7 +902,7 @@ impl VartimeMultiscalarMul for RistrettoPoint {
     {
         let extended_points = points.into_iter().map(|opt_P| opt_P.map(|P| P.borrow().0));
 
-        EdwardsPoint::optional_multiscalar_mul(scalars, extended_points).map(|P| RistrettoPoint(P))
+        EdwardsPoint::optional_multiscalar_mul(scalars, extended_points).map(RistrettoPoint)
     }
 }
@@ -948,7 +948,7 @@ impl VartimePrecomputedMultiscalarMul for VartimeRistrettoPrecomputation {
             dynamic_scalars,
             dynamic_points.into_iter().map(|P_opt| P_opt.map(|P| P.0)),
         )
-        .map(|P_ed| RistrettoPoint(P_ed))
+        .map(RistrettoPoint)
     }
 }
@@ -1081,7 +1081,7 @@ mod test {
     use scalar::Scalar;
     use constants;
     use edwards::CompressedEdwardsY;
-    use traits::{Identity, ValidityCheck};
+    use traits::{Identity};
     use super::*;
 
     #[test]
@@ -1136,7 +1136,7 @@
         // Test that sum works on owning iterators
         let s = Scalar::from(2u64);
-        let mapped = vec.iter().map(|x| x * &s);
+        let mapped = vec.iter().map(|x| x * s);
         let sum: RistrettoPoint = mapped.sum();
 
         assert_eq!(sum, &P1 * &s + &P2 * &s);
diff --git a/src/scalar.rs b/src/scalar.rs
index b5253acf0..c2c700027 100644
--- a/src/scalar.rs
+++ b/src/scalar.rs
@@ -202,7 +202,7 @@ impl Scalar {
     /// modulo the group order \\( \ell \\).
     pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
         // Temporarily allow s_unreduced.bytes > 2^255 ...
-        let s_unreduced = Scalar{bytes: bytes};
+        let s_unreduced = Scalar{bytes};
 
         // Then reduce mod the group order and return the reduced representative.
         let s = s_unreduced.reduce();
@@ -242,7 +242,7 @@ impl Scalar {
     /// require specific bit-patterns when performing scalar
     /// multiplication.
     pub fn from_bits(bytes: [u8; 32]) -> Scalar {
-        let mut s = Scalar{bytes: bytes};
+        let mut s = Scalar{bytes};
         // Ensure that s < 2^255 by masking the high bit
         s.bytes[31] &= 0b0111_1111;
@@ -799,7 +799,7 @@ impl Scalar {
         // Pass through the vector backwards to compute the inverses
         // in place
-        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.into_iter().rev()) {
+        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter().rev()) {
            let tmp = UnpackedScalar::montgomery_mul(&acc, &input.unpack());
            *input = UnpackedScalar::montgomery_mul(&acc, &scratch).pack();
            acc = tmp;
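
Reviewer note (not part of the patch): the recurring change in the straus/pippenger backends above replaces an explicit `match ... { Some(x) => x, None => return None }` on `collect::<Option<Vec<_>>>()` with the `?` operator. A minimal, self-contained sketch of that pattern follows; the function and names are made up for illustration and are not curve25519-dalek API.

    /// Doubles every element, or returns None if any element is missing.
    fn doubled_or_none(xs: &[Option<u32>]) -> Option<Vec<u32>> {
        // Collecting an iterator of Option<T> into Option<Vec<T>> yields None
        // as soon as any item is None; `?` then propagates that None to the
        // caller, replacing the old `match { Some(v) => v, None => return None }`.
        let doubled = xs
            .iter()
            .copied()
            .map(|x| x.map(|v| v * 2))
            .collect::<Option<Vec<u32>>>()?;
        Some(doubled)
    }

    fn main() {
        assert_eq!(doubled_or_none(&[Some(1), Some(2)]), Some(vec![2, 4]));
        assert_eq!(doubled_or_none(&[Some(1), None]), None);
    }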