From bf8640e273ab0f0ea80db00d478769c78bcf0dd8 Mon Sep 17 00:00:00 2001
From: Josh Stone
Date: Thu, 19 Mar 2020 18:02:04 -0700
Subject: [PATCH 1/4] Make the shift algorithms generic on PrimInt

---
 src/algorithms.rs | 74 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 50 insertions(+), 24 deletions(-)

diff --git a/src/algorithms.rs b/src/algorithms.rs
index 7d978cc8..d6a5625a 100644
--- a/src/algorithms.rs
+++ b/src/algorithms.rs
@@ -3,7 +3,7 @@ use core::cmp;
 use core::cmp::Ordering::{self, Equal, Greater, Less};
 use core::iter::repeat;
 use core::mem;
-use num_traits::{One, Zero};
+use num_traits::{One, PrimInt, Zero};
 
 use crate::biguint::biguint_from_vec;
 use crate::biguint::BigUint;
@@ -720,34 +720,46 @@ fn div_rem_core(mut a: BigUint, b: &BigUint) -> (BigUint, BigUint) {
 
 /// Find last set bit
 /// fls(0) == 0, fls(u32::MAX) == 32
-pub(crate) fn fls<T: num_traits::PrimInt>(v: T) -> usize {
+pub(crate) fn fls<T: PrimInt>(v: T) -> usize {
     mem::size_of::<T>() * 8 - v.leading_zeros() as usize
 }
 
-pub(crate) fn ilog2<T: num_traits::PrimInt>(v: T) -> usize {
+pub(crate) fn ilog2<T: PrimInt>(v: T) -> usize {
     fls(v) - 1
 }
 
 #[inline]
-pub(crate) fn biguint_shl(n: Cow<'_, BigUint>, bits: usize) -> BigUint {
-    let n_unit = bits / big_digit::BITS;
-    let mut data = match n_unit {
+pub(crate) fn biguint_shl<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+    if shift < T::zero() {
+        panic!("attempt to shift left with negative");
+    }
+    if n.is_zero() {
+        return n.into_owned();
+    }
+    let bits = T::from(big_digit::BITS).unwrap();
+    let digits = (shift / bits).to_usize().expect("capacity overflow");
+    let shift = (shift % bits).to_u8().unwrap();
+    biguint_shl2(n, digits, shift)
+}
+
+fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+    let mut data = match digits {
         0 => n.into_owned().data,
         _ => {
-            let len = n_unit + n.data.len() + 1;
+            let len = digits.saturating_add(n.data.len() + 1);
             let mut data = Vec::with_capacity(len);
-            data.extend(repeat(0).take(n_unit));
-            data.extend(n.data.iter().cloned());
+            data.extend(repeat(0).take(digits));
+            data.extend(n.data.iter());
             data
         }
     };
 
-    let n_bits = bits % big_digit::BITS;
-    if n_bits > 0 {
+    if shift > 0 {
         let mut carry = 0;
-        for elem in data[n_unit..].iter_mut() {
-            let new_carry = *elem >> (big_digit::BITS - n_bits);
-            *elem = (*elem << n_bits) | carry;
+        let carry_shift = big_digit::BITS as u8 - shift;
+        for elem in data[digits..].iter_mut() {
+            let new_carry = *elem >> carry_shift;
+            *elem = (*elem << shift) | carry;
             carry = new_carry;
         }
         if carry != 0 {
@@ -759,25 +771,39 @@ pub(crate) fn biguint_shl(n: Cow<'_, BigUint>, bits: usize) -> BigUint {
 }
 
 #[inline]
-pub(crate) fn biguint_shr(n: Cow<'_, BigUint>, bits: usize) -> BigUint {
-    let n_unit = bits / big_digit::BITS;
-    if n_unit >= n.data.len() {
-        return Zero::zero();
+pub(crate) fn biguint_shr<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+    if shift < T::zero() {
+        panic!("attempt to shift right with negative");
+    }
+    if n.is_zero() {
+        return n.into_owned();
+    }
+    let bits = T::from(big_digit::BITS).unwrap();
+    let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX);
+    let shift = (shift % bits).to_u8().unwrap();
+    biguint_shr2(n, digits, shift)
+}
+
+fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+    if digits >= n.data.len() {
+        let mut n = n.into_owned();
+        n.set_zero();
+        return n;
     }
     let mut data = match n {
-        Cow::Borrowed(n) => n.data[n_unit..].to_vec(),
+        Cow::Borrowed(n) => n.data[digits..].to_vec(),
        Cow::Owned(mut n) => {
-            n.data.drain(..n_unit);
+            n.data.drain(..digits);
             n.data
         }
     };
 
-    let n_bits = bits % big_digit::BITS;
-    if n_bits > 0 {
+    if shift > 0 {
         let mut borrow = 0;
+        let borrow_shift = big_digit::BITS as u8 - shift;
         for elem in data.iter_mut().rev() {
-            let new_borrow = *elem << (big_digit::BITS - n_bits);
-            *elem = (*elem >> n_bits) | borrow;
+            let new_borrow = *elem << borrow_shift;
+            *elem = (*elem >> shift) | borrow;
             borrow = new_borrow;
         }
     }
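
Series note (illustration only, not part of any patch): the new biguint_shl/biguint_shr above split a generic shift amount into whole digits (shift / BITS) plus a sub-digit bit count (shift % BITS), then carry bits across digit boundaries. The sketch below restates that decomposition as standalone Rust with invented names, assuming 32-bit digits stored least significant first; it is not code from this repository.

    // Left shift of a little-endian digit vector: prepend zero digits,
    // then shift the remaining bits and propagate carries upward.
    fn shl_digits_then_bits(digits_in: &[u32], shift: usize) -> Vec<u32> {
        let whole = shift / 32;         // whole digits to prepend as zeros
        let bits = (shift % 32) as u32; // remaining sub-digit shift
        let mut out = vec![0u32; whole];
        out.extend_from_slice(digits_in);
        if bits > 0 {
            let mut carry = 0u32;
            for d in &mut out[whole..] {
                let new_carry = *d >> (32 - bits);
                *d = (*d << bits) | carry;
                carry = new_carry;
            }
            if carry != 0 {
                out.push(carry);
            }
        }
        out
    }

    // e.g. shl_digits_then_bits(&[1], 33) == vec![0, 2], i.e. 1 << 33.

The right shift in the patch is the mirror image: whole digits are dropped, and the sub-digit shift borrows bits downward from the more significant digits.
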
From cb1c2ddb0d91f1c086e6102760e75c6010c1b2e5 Mon Sep 17 00:00:00 2001
From: Josh Stone
Date: Thu, 19 Mar 2020 20:51:23 -0700
Subject: [PATCH 2/4] Implement more shift RHS for BigUint

---
 benches/bigint.rs |   4 +-
 src/biguint.rs    | 113 +++++++++++++++++++++++++++++-----------------
 tests/biguint.rs  |  22 ++++-----
 3 files changed, 85 insertions(+), 54 deletions(-)

diff --git a/benches/bigint.rs b/benches/bigint.rs
index 4e168602..57ea228e 100644
--- a/benches/bigint.rs
+++ b/benches/bigint.rs
@@ -293,7 +293,7 @@ fn rand_131072(b: &mut Bencher) {
 
 #[bench]
 fn shl(b: &mut Bencher) {
-    let n = BigUint::one() << 1000;
+    let n = BigUint::one() << 1000u32;
     b.iter(|| {
         let mut m = n.clone();
         for i in 0..50 {
@@ -304,7 +304,7 @@ fn shr(b: &mut Bencher) {
-    let n = BigUint::one() << 2000;
+    let n = BigUint::one() << 2000u32;
     b.iter(|| {
         let mut m = n.clone();
         for i in 0..50 {
diff --git a/src/biguint.rs b/src/biguint.rs
index 7b90e912..53684185 100644
--- a/src/biguint.rs
+++ b/src/biguint.rs
@@ -464,56 +464,87 @@ impl<'a> BitXorAssign<&'a BigUint> for BigUint {
     }
 }
 
-impl Shl<usize> for BigUint {
-    type Output = BigUint;
+macro_rules! impl_shift {
+    (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
+        impl<'b> $Shx<&'b $rhs> for BigUint {
+            type Output = BigUint;
 
-    #[inline]
-    fn shl(self, rhs: usize) -> BigUint {
-        biguint_shl(Cow::Owned(self), rhs)
-    }
-}
-impl<'a> Shl<usize> for &'a BigUint {
-    type Output = BigUint;
+            #[inline]
+            fn $shx(self, rhs: &'b $rhs) -> BigUint {
+                $Shx::$shx(self, *rhs)
+            }
+        }
+        impl<'a, 'b> $Shx<&'b $rhs> for &'a BigUint {
+            type Output = BigUint;
 
-    #[inline]
-    fn shl(self, rhs: usize) -> BigUint {
-        biguint_shl(Cow::Borrowed(self), rhs)
-    }
-}
+            #[inline]
+            fn $shx(self, rhs: &'b $rhs) -> BigUint {
+                $Shx::$shx(self, *rhs)
+            }
+        }
+        impl<'b> $ShxAssign<&'b $rhs> for BigUint {
+            #[inline]
+            fn $shx_assign(&mut self, rhs: &'b $rhs) {
+                $ShxAssign::$shx_assign(self, *rhs);
+            }
+        }
+    };
+    ($($rhs:ty),+) => {$(
+        impl Shl<$rhs> for BigUint {
+            type Output = BigUint;
 
-impl ShlAssign<usize> for BigUint {
-    #[inline]
-    fn shl_assign(&mut self, rhs: usize) {
-        let n = mem::replace(self, BigUint::zero());
-        *self = n << rhs;
-    }
-}
+            #[inline]
+            fn shl(self, rhs: $rhs) -> BigUint {
+                biguint_shl(Cow::Owned(self), rhs)
+            }
+        }
+        impl<'a> Shl<$rhs> for &'a BigUint {
+            type Output = BigUint;
 
-impl Shr<usize> for BigUint {
-    type Output = BigUint;
+            #[inline]
+            fn shl(self, rhs: $rhs) -> BigUint {
+                biguint_shl(Cow::Borrowed(self), rhs)
+            }
+        }
+        impl ShlAssign<$rhs> for BigUint {
+            #[inline]
+            fn shl_assign(&mut self, rhs: $rhs) {
+                let n = mem::replace(self, BigUint::zero());
+                *self = n << rhs;
+            }
+        }
+        impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
 
-    #[inline]
-    fn shr(self, rhs: usize) -> BigUint {
-        biguint_shr(Cow::Owned(self), rhs)
-    }
-}
-impl<'a> Shr<usize> for &'a BigUint {
-    type Output = BigUint;
+        impl Shr<$rhs> for BigUint {
+            type Output = BigUint;
 
-    #[inline]
-    fn shr(self, rhs: usize) -> BigUint {
-        biguint_shr(Cow::Borrowed(self), rhs)
-    }
-}
+            #[inline]
+            fn shr(self, rhs: $rhs) -> BigUint {
+                biguint_shr(Cow::Owned(self), rhs)
+            }
+        }
+        impl<'a> Shr<$rhs> for &'a BigUint {
+            type Output = BigUint;
 
-impl ShrAssign<usize> for BigUint {
-    #[inline]
-    fn shr_assign(&mut self, rhs: usize) {
-        let n = mem::replace(self, BigUint::zero());
-        *self = n >> rhs;
-    }
+            #[inline]
+            fn shr(self, rhs: $rhs) -> BigUint {
+                biguint_shr(Cow::Borrowed(self), rhs)
+            }
+        }
+        impl ShrAssign<$rhs> for BigUint {
+            #[inline]
+            fn shr_assign(&mut self, rhs: $rhs) {
+                let n = mem::replace(self, BigUint::zero());
+                *self = n >> rhs;
+            }
+        }
+        impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
+    )*};
 }
 
+impl_shift! { u8, u16, u32, u64, u128, usize }
+impl_shift! { i8, i16, i32, i64, i128, isize }
+
 impl Zero for BigUint {
     #[inline]
     fn zero() -> BigUint {
diff --git a/tests/biguint.rs b/tests/biguint.rs
index 97b75d29..3e455e77 100644
--- a/tests/biguint.rs
+++ b/tests/biguint.rs
@@ -671,12 +671,12 @@ fn test_convert_f32() {
     assert_eq!(BigUint::from_f32(f32::MIN), None);
 
     // largest BigUint that will round to a finite f32 value
-    let big_num = (BigUint::one() << 128) - BigUint::one() - (BigUint::one() << (128 - 25));
+    let big_num = (BigUint::one() << 128u8) - 1u8 - (BigUint::one() << (128u8 - 25));
     assert_eq!(big_num.to_f32(), Some(f32::MAX));
-    assert_eq!((big_num + BigUint::one()).to_f32(), None);
+    assert_eq!((big_num + 1u8).to_f32(), None);
 
-    assert_eq!(((BigUint::one() << 128) - BigUint::one()).to_f32(), None);
-    assert_eq!((BigUint::one() << 128).to_f32(), None);
+    assert_eq!(((BigUint::one() << 128u8) - 1u8).to_f32(), None);
+    assert_eq!((BigUint::one() << 128u8).to_f32(), None);
 }
 
 #[test]
@@ -744,12 +744,12 @@ fn test_convert_f64() {
     assert_eq!(BigUint::from_f64(f64::MIN), None);
 
     // largest BigUint that will round to a finite f64 value
-    let big_num = (BigUint::one() << 1024) - BigUint::one() - (BigUint::one() << (1024 - 54));
+    let big_num = (BigUint::one() << 1024u16) - 1u8 - (BigUint::one() << (1024u16 - 54));
     assert_eq!(big_num.to_f64(), Some(f64::MAX));
-    assert_eq!((big_num + BigUint::one()).to_f64(), None);
+    assert_eq!((big_num + 1u8).to_f64(), None);
 
-    assert_eq!(((BigInt::one() << 1024) - BigInt::one()).to_f64(), None);
-    assert_eq!((BigUint::one() << 1024).to_f64(), None);
+    assert_eq!(((BigUint::one() << 1024u16) - 1u8).to_f64(), None);
+    assert_eq!((BigUint::one() << 1024u16).to_f64(), None);
 }
 
 #[test]
@@ -1087,8 +1087,8 @@ fn test_is_even() {
     assert!(thousand.is_even());
     assert!(big.is_even());
     assert!(bigger.is_odd());
-    assert!((&one << 64).is_even());
-    assert!(((&one << 64) + one).is_odd());
+    assert!((&one << 64u8).is_even());
+    assert!(((&one << 64u8) + one).is_odd());
 }
 
 fn to_str_pairs() -> Vec<(BigUint, Vec<(u32, String)>)> {
@@ -1668,7 +1668,7 @@ fn test_bits() {
     let n: BigUint = BigUint::from_str_radix("4000000000", 16).unwrap();
     assert_eq!(n.bits(), 39);
     let one: BigUint = One::one();
-    assert_eq!((one << 426).bits(), 427);
+    assert_eq!((one << 426u16).bits(), 427);
 }
 
 #[test]
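
Series note (illustration only, not part of any patch): with the impl_shift! macro above, BigUint shifts accept any primitive integer on the right-hand side, by value or by reference. A usage sketch, assuming these patches (or a release containing them) are applied; the function name is invented:

    use num_bigint::BigUint;
    use num_traits::One;

    fn shift_rhs_demo() {
        let one: BigUint = One::one();
        let a = &one << 64u8;     // unsigned RHS of any width
        let b = &one << 64i32;    // signed RHS works too, when non-negative
        let c = &one << &64usize; // references to the RHS are also accepted
        assert_eq!(a, b);
        assert_eq!(b, c);
        // A negative amount panics, per the checks added in patch 1:
        // let _ = &one << -1i32; // "attempt to shift left with negative"
    }
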
From 82b9d5cec3526bb3e6ee922d53cc8cab76fe41f8 Mon Sep 17 00:00:00 2001
From: Josh Stone
Date: Fri, 20 Mar 2020 10:17:34 -0700
Subject: [PATCH 3/4] Implement more shift RHS for BigInt

---
 src/bigint.rs   | 144 ++++++++++++++++++++++++++++++------------------
 tests/bigint.rs |  28 +++++-----
 tests/modpow.rs |   2 +-
 3 files changed, 105 insertions(+), 69 deletions(-)

diff --git a/src/bigint.rs b/src/bigint.rs
index 3fa8dce3..3a5d226f 100644
--- a/src/bigint.rs
+++ b/src/bigint.rs
@@ -25,7 +25,7 @@ use serde;
 use num_integer::{Integer, Roots};
 use num_traits::{
-    CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, Signed,
+    CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, PrimInt, Signed,
     ToPrimitive, Zero,
 };
 
@@ -779,69 +779,105 @@ impl Num for BigInt {
     }
 }
 
-impl Shl<usize> for BigInt {
-    type Output = BigInt;
+macro_rules! impl_shift {
+    (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
+        impl<'b> $Shx<&'b $rhs> for BigInt {
+            type Output = BigInt;
 
-    #[inline]
-    fn shl(mut self, rhs: usize) -> BigInt {
-        self <<= rhs;
-        self
-    }
-}
+            #[inline]
+            fn $shx(self, rhs: &'b $rhs) -> BigInt {
+                $Shx::$shx(self, *rhs)
+            }
+        }
+        impl<'a, 'b> $Shx<&'b $rhs> for &'a BigInt {
+            type Output = BigInt;
 
-impl<'a> Shl<usize> for &'a BigInt {
-    type Output = BigInt;
+            #[inline]
+            fn $shx(self, rhs: &'b $rhs) -> BigInt {
+                $Shx::$shx(self, *rhs)
+            }
+        }
+        impl<'b> $ShxAssign<&'b $rhs> for BigInt {
+            #[inline]
+            fn $shx_assign(&mut self, rhs: &'b $rhs) {
+                $ShxAssign::$shx_assign(self, *rhs);
+            }
+        }
+    };
+    ($($rhs:ty),+) => {$(
+        impl Shl<$rhs> for BigInt {
+            type Output = BigInt;
 
-    #[inline]
-    fn shl(self, rhs: usize) -> BigInt {
-        BigInt::from_biguint(self.sign, &self.data << rhs)
-    }
-}
+            #[inline]
+            fn shl(self, rhs: $rhs) -> BigInt {
+                BigInt::from_biguint(self.sign, self.data << rhs)
+            }
+        }
+        impl<'a> Shl<$rhs> for &'a BigInt {
+            type Output = BigInt;
 
-impl ShlAssign<usize> for BigInt {
-    #[inline]
-    fn shl_assign(&mut self, rhs: usize) {
-        self.data <<= rhs;
-    }
-}
+            #[inline]
+            fn shl(self, rhs: $rhs) -> BigInt {
+                BigInt::from_biguint(self.sign, &self.data << rhs)
+            }
+        }
+        impl ShlAssign<$rhs> for BigInt {
+            #[inline]
+            fn shl_assign(&mut self, rhs: $rhs) {
+                self.data <<= rhs
+            }
+        }
+        impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
 
-// Negative values need a rounding adjustment if there are any ones in the
-// bits that are getting shifted out.
-fn shr_round_down(i: &BigInt, rhs: usize) -> bool {
-    i.is_negative() && i.trailing_zeros().map(|n| n < rhs).unwrap_or(false)
-}
+        impl Shr<$rhs> for BigInt {
+            type Output = BigInt;
 
-impl Shr<usize> for BigInt {
-    type Output = BigInt;
+            #[inline]
+            fn shr(self, rhs: $rhs) -> BigInt {
+                let round_down = shr_round_down(&self, rhs);
+                let data = self.data >> rhs;
+                let data = if round_down { data + 1u8 } else { data };
+                BigInt::from_biguint(self.sign, data)
+            }
+        }
+        impl<'a> Shr<$rhs> for &'a BigInt {
+            type Output = BigInt;
 
-    #[inline]
-    fn shr(mut self, rhs: usize) -> BigInt {
-        self >>= rhs;
-        self
-    }
+            #[inline]
+            fn shr(self, rhs: $rhs) -> BigInt {
+                let round_down = shr_round_down(self, rhs);
+                let data = &self.data >> rhs;
+                let data = if round_down { data + 1u8 } else { data };
+                BigInt::from_biguint(self.sign, data)
+            }
+        }
+        impl ShrAssign<$rhs> for BigInt {
+            #[inline]
+            fn shr_assign(&mut self, rhs: $rhs) {
+                let round_down = shr_round_down(self, rhs);
+                self.data >>= rhs;
+                if round_down {
+                    self.data += 1u8;
+                } else if self.data.is_zero() {
+                    self.sign = NoSign;
+                }
+            }
+        }
+        impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
+    )*};
 }
 
-impl<'a> Shr<usize> for &'a BigInt {
-    type Output = BigInt;
-
-    #[inline]
-    fn shr(self, rhs: usize) -> BigInt {
-        let round_down = shr_round_down(self, rhs);
-        let data = &self.data >> rhs;
-        BigInt::from_biguint(self.sign, if round_down { data + 1u8 } else { data })
-    }
-}
+impl_shift! { u8, u16, u32, u64, u128, usize }
+impl_shift! { i8, i16, i32, i64, i128, isize }
 
-impl ShrAssign<usize> for BigInt {
-    #[inline]
-    fn shr_assign(&mut self, rhs: usize) {
-        let round_down = shr_round_down(self, rhs);
-        self.data >>= rhs;
-        if round_down {
-            self.data += 1u8;
-        } else if self.data.is_zero() {
-            self.sign = NoSign;
-        }
+// Negative values need a rounding adjustment if there are any ones in the
+// bits that are getting shifted out.
+fn shr_round_down<T: PrimInt>(i: &BigInt, shift: T) -> bool {
+    if i.is_negative() {
+        let zeros = i.trailing_zeros().expect("negative values are non-zero");
+        shift > T::zero() && shift.to_usize().map(|shift| zeros < shift).unwrap_or(true)
+    } else {
+        false
     }
 }
diff --git a/tests/bigint.rs b/tests/bigint.rs
index b3648bf7..9b3dd18e 100644
--- a/tests/bigint.rs
+++ b/tests/bigint.rs
@@ -448,16 +448,16 @@ fn test_convert_f32() {
     assert_eq!(BigInt::from_f32(f32::NEG_INFINITY), None);
 
     // largest BigInt that will round to a finite f32 value
-    let big_num = (BigInt::one() << 128) - BigInt::one() - (BigInt::one() << (128 - 25));
+    let big_num = (BigInt::one() << 128u8) - 1u8 - (BigInt::one() << (128u8 - 25));
     assert_eq!(big_num.to_f32(), Some(f32::MAX));
-    assert_eq!((&big_num + BigInt::one()).to_f32(), None);
+    assert_eq!((&big_num + 1u8).to_f32(), None);
     assert_eq!((-&big_num).to_f32(), Some(f32::MIN));
-    assert_eq!(((-&big_num) - BigInt::one()).to_f32(), None);
+    assert_eq!(((-&big_num) - 1u8).to_f32(), None);
 
-    assert_eq!(((BigInt::one() << 128) - BigInt::one()).to_f32(), None);
-    assert_eq!((BigInt::one() << 128).to_f32(), None);
-    assert_eq!((-((BigInt::one() << 128) - BigInt::one())).to_f32(), None);
-    assert_eq!((-(BigInt::one() << 128)).to_f32(), None);
+    assert_eq!(((BigInt::one() << 128u8) - 1u8).to_f32(), None);
+    assert_eq!((BigInt::one() << 128u8).to_f32(), None);
+    assert_eq!((-((BigInt::one() << 128u8) - 1u8)).to_f32(), None);
+    assert_eq!((-(BigInt::one() << 128u8)).to_f32(), None);
 }
 
 #[test]
@@ -529,16 +529,16 @@ fn test_convert_f64() {
     assert_eq!(BigInt::from_f64(f64::NEG_INFINITY), None);
 
     // largest BigInt that will round to a finite f64 value
-    let big_num = (BigInt::one() << 1024) - BigInt::one() - (BigInt::one() << (1024 - 54));
+    let big_num = (BigInt::one() << 1024u16) - 1u8 - (BigInt::one() << (1024u16 - 54));
     assert_eq!(big_num.to_f64(), Some(f64::MAX));
-    assert_eq!((&big_num + BigInt::one()).to_f64(), None);
+    assert_eq!((&big_num + 1u8).to_f64(), None);
     assert_eq!((-&big_num).to_f64(), Some(f64::MIN));
-    assert_eq!(((-&big_num) - BigInt::one()).to_f64(), None);
+    assert_eq!(((-&big_num) - 1u8).to_f64(), None);
 
-    assert_eq!(((BigInt::one() << 1024) - BigInt::one()).to_f64(), None);
-    assert_eq!((BigInt::one() << 1024).to_f64(), None);
-    assert_eq!((-((BigInt::one() << 1024) - BigInt::one())).to_f64(), None);
-    assert_eq!((-(BigInt::one() << 1024)).to_f64(), None);
+    assert_eq!(((BigInt::one() << 1024u16) - 1u8).to_f64(), None);
+    assert_eq!((BigInt::one() << 1024u16).to_f64(), None);
+    assert_eq!((-((BigInt::one() << 1024u16) - 1u8)).to_f64(), None);
+    assert_eq!((-(BigInt::one() << 1024u16)).to_f64(), None);
 }
 
 #[test]
diff --git a/tests/modpow.rs b/tests/modpow.rs
index d80d8262..276f066e 100644
--- a/tests/modpow.rs
+++ b/tests/modpow.rs
@@ -117,7 +117,7 @@ mod bigint {
     fn check(b: &BigInt, e: &BigInt, m: &BigInt, r: &BigInt) {
         assert_eq!(&b.modpow(e, m), r, "{} ** {} (mod {}) != {}", b, e, m, r);
 
-        let even_m = m << 1;
+        let even_m = m << 1u8;
         let even_modpow = b.modpow(e, m);
         assert!(even_modpow.abs() < even_m.abs());
         assert_eq!(&even_modpow.mod_floor(&m), r);
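
Series note (illustration only, not part of any patch): shr_round_down above makes BigInt's right shift round toward negative infinity, matching primitive arithmetic shift, by adding one to the shifted magnitude whenever a negative value loses set bits. A small check, assuming the patch is applied; the function name is invented:

    use num_bigint::BigInt;

    fn floor_shift_demo() {
        assert_eq!(BigInt::from(-5) >> 1u8, BigInt::from(-3)); // like -5i32 >> 1
        assert_eq!(BigInt::from(-4) >> 1u8, BigInt::from(-2)); // no set bits shifted out
        assert_eq!(BigInt::from(5) >> 1u8, BigInt::from(2));
    }
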
From e827fd5f459c4ee6c430d671f0adb2cbe7d8ddab Mon Sep 17 00:00:00 2001
From: Josh Stone
Date: Fri, 20 Mar 2020 13:27:55 -0700
Subject: [PATCH 4/4] Reduce allocations in shift benchmarks

---
 benches/bigint.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/benches/bigint.rs b/benches/bigint.rs
index 57ea228e..e1ec45e5 100644
--- a/benches/bigint.rs
+++ b/benches/bigint.rs
@@ -294,8 +294,9 @@ fn rand_131072(b: &mut Bencher) {
 #[bench]
 fn shl(b: &mut Bencher) {
     let n = BigUint::one() << 1000u32;
+    let mut m = n.clone();
     b.iter(|| {
-        let mut m = n.clone();
+        m.clone_from(&n);
         for i in 0..50 {
             m <<= i;
         }
@@ -305,8 +306,9 @@ fn shr(b: &mut Bencher) {
     let n = BigUint::one() << 2000u32;
+    let mut m = n.clone();
     b.iter(|| {
-        let mut m = n.clone();
+        m.clone_from(&n);
         for i in 0..50 {
             m >>= i;
        }