1 #[allow(deprecated, unused_imports)]
2 use alloc::borrow::Cow;
3 use alloc::string::String;
4 use alloc::vec::Vec;
5 use core::cmp::Ordering::{self, Equal, Greater, Less};
6 use core::default::Default;
7 use core::hash::{Hash, Hasher};
8 use core::iter::{Product, Sum};
9 use core::ops::{
10 Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
11 Mul, MulAssign, Neg, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
12 };
13 use core::str::{self, FromStr};
14 use core::{cmp, fmt, mem};
15 use core::{f32, f64};
16 use core::{u32, u64, u8};
17
18 #[cfg(feature = "serde")]
19 use serde;
20
21 #[cfg(feature = "zeroize")]
22 use zeroize::Zeroize;
23
// Scalar float helpers: use the `std` inherent methods when available,
// falling back to the `libm` crate in `no_std` builds.

#[cfg(feature = "std")]
fn sqrt(a: f64) -> f64 {
    a.sqrt()
}

#[cfg(not(feature = "std"))]
fn sqrt(a: f64) -> f64 {
    libm::sqrt(a)
}

#[cfg(feature = "std")]
fn ln(a: f64) -> f64 {
    a.ln()
}

#[cfg(not(feature = "std"))]
fn ln(a: f64) -> f64 {
    libm::log(a)
}

#[cfg(feature = "std")]
fn cbrt(a: f64) -> f64 {
    a.cbrt()
}

#[cfg(not(feature = "std"))]
fn cbrt(a: f64) -> f64 {
    libm::cbrt(a)
}

#[cfg(feature = "std")]
fn exp(a: f64) -> f64 {
    a.exp()
}

#[cfg(not(feature = "std"))]
fn exp(a: f64) -> f64 {
    libm::exp(a)
}
63
64 use integer::{Integer, Roots};
65 use num_traits::float::FloatCore;
66 use num_traits::{
67 CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, ToPrimitive,
68 Unsigned, Zero,
69 };
70
71 use BigInt;
72
73 use big_digit::{self, BigDigit};
74
75 use smallvec::SmallVec;
76
77 #[path = "monty.rs"]
78 mod monty;
79
80 use self::monty::monty_modpow;
81 use super::VEC_SIZE;
82 use crate::algorithms::{__add2, __sub2rev, add2, sub2, sub2rev};
83 use crate::algorithms::{biguint_shl, biguint_shr};
84 use crate::algorithms::{cmp_slice, fls, idiv_ceil, ilog2};
85 use crate::algorithms::{div_rem, div_rem_digit, mac_with_carry, mul3, scalar_mul};
86 use crate::algorithms::{extended_gcd, mod_inverse};
87 use crate::traits::{ExtendedGcd, ModInverse};
88
89 use ParseBigIntError;
90 use UsizePromotion;
91
/// A big unsigned integer type.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "zeroize", derive(Zeroize))]
pub struct BigUint {
    // Little-endian digit vector, kept normalized: no trailing zero
    // digits, so zero is the empty vector. `SmallVec` keeps small
    // values inline (up to `VEC_SIZE` digits) without heap allocation.
    pub(crate) data: SmallVec<[BigDigit; VEC_SIZE]>,
}
98
99 impl PartialEq for BigUint {
100 #[inline]
eq(&self, other: &BigUint) -> bool101 fn eq(&self, other: &BigUint) -> bool {
102 match self.cmp(other) {
103 Equal => true,
104 _ => false,
105 }
106 }
107 }
108 impl Eq for BigUint {}
109
impl Hash for BigUint {
    // Hash only the digit vector; it is kept normalized (no trailing
    // zeros), so equal values produce equal hashes.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.data.hash(state);
    }
}

impl PartialOrd for BigUint {
    #[inline]
    fn partial_cmp(&self, other: &BigUint) -> Option<Ordering> {
        // Total order exists; delegate to `Ord`.
        Some(self.cmp(other))
    }
}

impl Ord for BigUint {
    #[inline]
    fn cmp(&self, other: &BigUint) -> Ordering {
        // Magnitude comparison over the little-endian digit slices.
        cmp_slice(&self.data[..], &other.data[..])
    }
}

impl Default for BigUint {
    #[inline]
    fn default() -> BigUint {
        // The default value is zero.
        Zero::zero()
    }
}
136
impl fmt::Display for BigUint {
    // Decimal formatting; `pad_integral` honors width/fill/alignment flags.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad_integral(true, "", &self.to_str_radix(10))
    }
}

impl fmt::LowerHex for BigUint {
    // Lowercase hex; the `0x` prefix is emitted when `#` is requested.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad_integral(true, "0x", &self.to_str_radix(16))
    }
}

impl fmt::UpperHex for BigUint {
    // Uppercase hex: uppercase the lowercase digit string in place.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut s = self.to_str_radix(16);
        s.make_ascii_uppercase();
        f.pad_integral(true, "0x", &s)
    }
}

impl fmt::Binary for BigUint {
    // Binary; the `0b` prefix is emitted when `#` is requested.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad_integral(true, "0b", &self.to_str_radix(2))
    }
}

impl fmt::Octal for BigUint {
    // Octal; the `0o` prefix is emitted when `#` is requested.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad_integral(true, "0o", &self.to_str_radix(8))
    }
}
168
impl FromStr for BigUint {
    type Err = ParseBigIntError;

    /// Parses a decimal string; see `from_str_radix` for the accepted
    /// syntax (optional leading `+`, `_` separators).
    #[inline]
    fn from_str(s: &str) -> Result<BigUint, ParseBigIntError> {
        BigUint::from_str_radix(s, 10)
    }
}
177
178 // Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides
179 // BigDigit::BITS
from_bitwise_digits_le(v: &[u8], bits: usize) -> BigUint180 fn from_bitwise_digits_le(v: &[u8], bits: usize) -> BigUint {
181 debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0);
182 debug_assert!(v.iter().all(|&c| (c as BigDigit) < (1 << bits)));
183
184 let digits_per_big_digit = big_digit::BITS / bits;
185
186 let data = v
187 .chunks(digits_per_big_digit)
188 .map(|chunk| {
189 chunk
190 .iter()
191 .rev()
192 .fold(0, |acc, &c| (acc << bits) | c as BigDigit)
193 })
194 .collect();
195
196 BigUint::new_native(data)
197 }
198
// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide
// BigDigit::BITS. `v` holds little-endian radix digits, each < 2^bits.
fn from_inexact_bitwise_digits_le(v: &[u8], bits: usize) -> BigUint {
    debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0);
    debug_assert!(v.iter().all(|&c| (c as BigDigit) < (1 << bits)));

    // Upper bound on the number of big digits in the result.
    let big_digits = (v.len() * bits + big_digit::BITS - 1) / big_digit::BITS;
    let mut data = SmallVec::with_capacity(big_digits);

    let mut d = 0;
    let mut dbits = 0; // number of bits we currently have in d

    // walk v accumulating bits in d; whenever we accumulate big_digit::BITS in d, spit out a
    // big_digit:
    for &c in v {
        d |= (c as BigDigit) << dbits;
        dbits += bits;

        if dbits >= big_digit::BITS {
            data.push(d);
            dbits -= big_digit::BITS;
            // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit
            // in d) - grab the bits we lost here:
            d = (c as BigDigit) >> (bits - dbits);
        }
    }

    // Flush any partially filled final digit.
    if dbits > 0 {
        debug_assert!(dbits < big_digit::BITS);
        data.push(d as BigDigit);
    }

    BigUint::new_native(data)
}
233
// Read big-endian radix digits (most significant first)
fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint {
    debug_assert!(!v.is_empty() && !radix.is_power_of_two());
    debug_assert!(v.iter().all(|&c| (c as u32) < radix));

    // Estimate how big the result will be, so we can pre-allocate it.
    let bits = ilog2(radix) * v.len();
    let big_digits = idiv_ceil(bits, big_digit::BITS);
    let mut data = SmallVec::with_capacity(big_digits);

    // presumably base == radix^power, the largest power of `radix` that
    // fits in one BigDigit — TODO confirm against `get_radix_base`.
    let (base, power) = get_radix_base(radix);
    let radix = radix as BigDigit;

    // Split off the leading (most significant) digits so that the
    // remaining length is an exact multiple of `power`.
    let r = v.len() % power;
    let i = if r == 0 { power } else { r };
    let (head, tail) = v.split_at(i);

    let first = head.iter().fold(0, |acc, &d| acc * radix + d as BigDigit);
    data.push(first);

    debug_assert!(tail.len() % power == 0);
    for chunk in tail.chunks(power) {
        if data.last() != Some(&0) {
            data.push(0);
        }

        // Multiply the accumulator by `base`...
        let mut carry = 0;
        for d in data.iter_mut() {
            *d = mac_with_carry(0, *d, base, &mut carry);
        }
        debug_assert!(carry == 0);

        // ...then add in the next group of radix digits.
        let n = chunk.iter().fold(0, |acc, &d| acc * radix + d as BigDigit);
        add2(&mut data, &[n]);
    }

    BigUint::new_native(data)
}
272
impl Num for BigUint {
    type FromStrRadixErr = ParseBigIntError;

    /// Creates and initializes a `BigUint`.
    ///
    /// Parses `s` as an unsigned integer in `radix` (2..=36). A single
    /// leading `+` is accepted, and `_` separators are skipped as long
    /// as they don't lead the number.
    fn from_str_radix(s: &str, radix: u32) -> Result<BigUint, ParseBigIntError> {
        assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
        let mut s = s;
        if s.starts_with('+') {
            let tail = &s[1..];
            // Strip only one `+`; a second `+` falls through and is
            // rejected below as an invalid digit.
            if !tail.starts_with('+') {
                s = tail
            }
        }

        if s.is_empty() {
            return Err(ParseBigIntError::empty());
        }

        if s.starts_with('_') {
            // Must lead with a real digit!
            return Err(ParseBigIntError::invalid());
        }

        // First normalize all characters to plain digit values
        let mut v = Vec::with_capacity(s.len());
        for b in s.bytes() {
            let d = match b {
                b'0'..=b'9' => b - b'0',
                b'a'..=b'z' => b - b'a' + 10,
                b'A'..=b'Z' => b - b'A' + 10,
                b'_' => continue,
                // Any other byte maps to a value no radix (<= 36) accepts.
                _ => u8::MAX,
            };
            if d < radix as u8 {
                v.push(d);
            } else {
                return Err(ParseBigIntError::invalid());
            }
        }

        let res = if radix.is_power_of_two() {
            // Powers of two can use bitwise masks and shifting instead of multiplication
            let bits = ilog2(radix);
            v.reverse();
            if big_digit::BITS % bits == 0 {
                from_bitwise_digits_le(&v, bits)
            } else {
                from_inexact_bitwise_digits_le(&v, bits)
            }
        } else {
            from_radix_digits_be(&v, radix)
        };
        Ok(res)
    }
}
328
forward_val_val_binop!(impl BitAnd for BigUint, bitand);
forward_ref_val_binop!(impl BitAnd for BigUint, bitand);

// do not use forward_ref_ref_binop_commutative! for bitand so that we can
// clone the smaller value rather than the larger, avoiding over-allocation
impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint {
    type Output = BigUint;

    #[inline]
    fn bitand(self, other: &BigUint) -> BigUint {
        // forward to val-ref, choosing the smaller to clone; `&`
        // truncates to the shorter operand anyway.
        if self.data.len() <= other.data.len() {
            self.clone() & other
        } else {
            other.clone() & self
        }
    }
}
347
forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign);

impl<'a> BitAnd<&'a BigUint> for BigUint {
    type Output = BigUint;

    // Reuses `self`'s storage via the assign operator.
    #[inline]
    fn bitand(mut self, other: &BigUint) -> BigUint {
        self &= other;
        self
    }
}
impl<'a> BitAndAssign<&'a BigUint> for BigUint {
    #[inline]
    fn bitand_assign(&mut self, other: &BigUint) {
        for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
            *ai &= bi;
        }
        // Digits beyond `other`'s length are and-ed with zero, so drop
        // them, then strip any zero top digits.
        self.data.truncate(other.data.len());
        self.normalize();
    }
}
369
forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor);
forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign);

impl<'a> BitOr<&'a BigUint> for BigUint {
    type Output = BigUint;

    // Reuses `self`'s storage via the assign operator.
    fn bitor(mut self, other: &BigUint) -> BigUint {
        self |= other;
        self
    }
}
381 impl<'a> BitOrAssign<&'a BigUint> for BigUint {
382 #[inline]
bitor_assign(&mut self, other: &BigUint)383 fn bitor_assign(&mut self, other: &BigUint) {
384 for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
385 *ai |= bi;
386 }
387 if other.data.len() > self.data.len() {
388 let extra = &other.data[self.data.len()..];
389 self.data.extend(extra.iter().cloned());
390 }
391 }
392 }
393
forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor);
forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign);

impl<'a> BitXor<&'a BigUint> for BigUint {
    type Output = BigUint;

    // Reuses `self`'s storage via the assign operator.
    fn bitxor(mut self, other: &BigUint) -> BigUint {
        self ^= other;
        self
    }
}
405 impl<'a> BitXorAssign<&'a BigUint> for BigUint {
406 #[inline]
bitxor_assign(&mut self, other: &BigUint)407 fn bitxor_assign(&mut self, other: &BigUint) {
408 for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
409 *ai ^= bi;
410 }
411 if other.data.len() > self.data.len() {
412 let extra = &other.data[self.data.len()..];
413 self.data.extend(extra.iter().cloned());
414 }
415 self.normalize();
416 }
417 }
418
impl Shl<usize> for BigUint {
    type Output = BigUint;

    // Left shift by `rhs` bits, consuming `self`.
    #[inline]
    fn shl(self, rhs: usize) -> BigUint {
        biguint_shl(Cow::Owned(self), rhs)
    }
}
impl<'a> Shl<usize> for &'a BigUint {
    type Output = BigUint;

    // Left shift by `rhs` bits, borrowing `self`.
    #[inline]
    fn shl(self, rhs: usize) -> BigUint {
        biguint_shl(Cow::Borrowed(self), rhs)
    }
}

impl ShlAssign<usize> for BigUint {
    #[inline]
    fn shl_assign(&mut self, rhs: usize) {
        // Swap in zero so the value can be moved through the by-value
        // shift, then store the result back.
        let n = mem::replace(self, BigUint::zero());
        *self = n << rhs;
    }
}

impl Shr<usize> for BigUint {
    type Output = BigUint;

    // Right shift by `rhs` bits, consuming `self`.
    #[inline]
    fn shr(self, rhs: usize) -> BigUint {
        biguint_shr(Cow::Owned(self), rhs)
    }
}
impl<'a> Shr<usize> for &'a BigUint {
    type Output = BigUint;

    // Right shift by `rhs` bits, borrowing `self`.
    #[inline]
    fn shr(self, rhs: usize) -> BigUint {
        biguint_shr(Cow::Borrowed(self), rhs)
    }
}

impl ShrAssign<usize> for BigUint {
    #[inline]
    fn shr_assign(&mut self, rhs: usize) {
        // Same move-through-zero trick as `shl_assign`.
        let n = mem::replace(self, BigUint::zero());
        *self = n >> rhs;
    }
}
468
impl Zero for BigUint {
    #[inline]
    fn zero() -> BigUint {
        BigUint::new(Vec::new())
    }

    #[inline]
    fn is_zero(&self) -> bool {
        // Zero is canonically the empty digit vector.
        self.data.is_empty()
    }
}

impl One for BigUint {
    #[inline]
    fn one() -> BigUint {
        BigUint::new(vec![1])
    }

    #[inline]
    fn is_one(&self) -> bool {
        // Normalized form makes this a simple slice comparison.
        self.data[..] == [1]
    }
}

impl Unsigned for BigUint {}
494
// Generates `Pow<$T>` for `&BigUint` with a primitive unsigned exponent.
macro_rules! pow_impl {
    ($T:ty) => {
        impl<'a> Pow<$T> for &'a BigUint {
            type Output = BigUint;

            /// Exponentiation by squaring.
            #[inline]
            fn pow(self, mut exp: $T) -> Self::Output {
                if exp == 0 {
                    return BigUint::one();
                }
                let mut base = self.clone();

                // Fold trailing zero exponent bits into the base, so
                // `exp` ends in a 1 bit.
                while exp & 1 == 0 {
                    base = &base * &base;
                    exp >>= 1;
                }

                if exp == 1 {
                    return base;
                }

                // Square-and-multiply over the remaining exponent bits.
                let mut acc = base.clone();
                while exp > 1 {
                    exp >>= 1;
                    base = &base * &base;
                    if exp & 1 == 1 {
                        acc = &acc * &base;
                    }
                }
                acc
            }
        }

        impl<'a, 'b> Pow<&'b $T> for &'a BigUint {
            type Output = BigUint;

            // Forwards to the by-value exponent implementation.
            #[inline]
            fn pow(self, exp: &$T) -> Self::Output {
                self.pow(*exp)
            }
        }
    };
}
538
// Instantiate `Pow` for every primitive unsigned exponent type.
pow_impl!(u8);
pow_impl!(u16);
pow_impl!(u32);
pow_impl!(u64);
pow_impl!(usize);
#[cfg(has_i128)]
pow_impl!(u128);
546
forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add);
forward_val_assign!(impl AddAssign for BigUint, add_assign);

impl<'a> Add<&'a BigUint> for BigUint {
    type Output = BigUint;

    fn add(mut self, other: &BigUint) -> BigUint {
        self += other;
        self
    }
}
impl<'a> AddAssign<&'a BigUint> for BigUint {
    #[inline]
    fn add_assign(&mut self, other: &BigUint) {
        let self_len = self.data.len();
        let carry = if self_len < other.data.len() {
            // Add the overlapping low digits, append `other`'s high
            // digits, then propagate the low carry into them.
            let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
            self.data.extend_from_slice(&other.data[self_len..]);
            __add2(&mut self.data[self_len..], &[lo_carry])
        } else {
            __add2(&mut self.data[..], &other.data[..])
        };
        if carry != 0 {
            self.data.push(carry);
        }
    }
}
574
promote_unsigned_scalars!(impl Add for BigUint, add);
promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigUint, add);
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigUint, add);
#[cfg(has_i128)]
forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigUint, add);

impl Add<u32> for BigUint {
    type Output = BigUint;

    // Scalar addition reuses the in-place assign operator.
    #[inline]
    fn add(mut self, other: u32) -> BigUint {
        self += other;
        self
    }
}
591
592 impl AddAssign<u32> for BigUint {
593 #[inline]
add_assign(&mut self, other: u32)594 fn add_assign(&mut self, other: u32) {
595 if other != 0 {
596 if self.data.len() == 0 {
597 self.data.push(0);
598 }
599
600 let carry = __add2(&mut self.data, &[other as BigDigit]);
601 if carry != 0 {
602 self.data.push(carry);
603 }
604 }
605 }
606 }
607
impl Add<u64> for BigUint {
    type Output = BigUint;

    // Scalar addition reuses the in-place assign operator.
    #[inline]
    fn add(mut self, other: u64) -> BigUint {
        self += other;
        self
    }
}
617
618 impl AddAssign<u64> for BigUint {
619 #[cfg(not(feature = "u64_digit"))]
620 #[inline]
add_assign(&mut self, other: u64)621 fn add_assign(&mut self, other: u64) {
622 let (hi, lo) = big_digit::from_doublebigdigit(other);
623 if hi == 0 {
624 *self += lo;
625 } else {
626 while self.data.len() < 2 {
627 self.data.push(0);
628 }
629
630 let carry = __add2(&mut self.data, &[lo, hi]);
631 if carry != 0 {
632 self.data.push(carry);
633 }
634 }
635 }
636
637 #[cfg(feature = "u64_digit")]
638 #[inline]
add_assign(&mut self, other: u64)639 fn add_assign(&mut self, other: u64) {
640 if other != 0 {
641 if self.data.len() == 0 {
642 self.data.push(0);
643 }
644
645 let carry = __add2(&mut self.data, &[other as BigDigit]);
646 if carry != 0 {
647 self.data.push(carry);
648 }
649 }
650 }
651 }
652
#[cfg(has_i128)]
impl Add<u128> for BigUint {
    type Output = BigUint;

    // Scalar addition reuses the in-place assign operator.
    #[inline]
    fn add(mut self, other: u128) -> BigUint {
        self += other;
        self
    }
}
663
#[cfg(has_i128)]
impl AddAssign<u128> for BigUint {
    // 32-bit digits: split `other` into four digit-sized words
    // (a = most significant, d = least significant).
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn add_assign(&mut self, other: u128) {
        if other <= u64::max_value() as u128 {
            *self += other as u64
        } else {
            let (a, b, c, d) = u32_from_u128(other);
            let carry = if a > 0 {
                while self.data.len() < 4 {
                    self.data.push(0);
                }
                __add2(&mut self.data, &[d, c, b, a])
            } else {
                debug_assert!(b > 0);
                while self.data.len() < 3 {
                    self.data.push(0);
                }
                __add2(&mut self.data, &[d, c, b])
            };

            if carry != 0 {
                self.data.push(carry);
            }
        }
    }

    // 64-bit digits: split `other` into two digit-sized words.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn add_assign(&mut self, other: u128) {
        let (hi, lo) = big_digit::from_doublebigdigit(other);
        if hi == 0 {
            *self += lo;
        } else {
            while self.data.len() < 2 {
                self.data.push(0);
            }

            let carry = __add2(&mut self.data, &[lo, hi]);
            if carry != 0 {
                self.data.push(carry);
            }
        }
    }
}
710
forward_val_val_binop!(impl Sub for BigUint, sub);
forward_ref_ref_binop!(impl Sub for BigUint, sub);
forward_val_assign!(impl SubAssign for BigUint, sub_assign);

impl<'a> Sub<&'a BigUint> for BigUint {
    type Output = BigUint;

    fn sub(mut self, other: &BigUint) -> BigUint {
        self -= other;
        self
    }
}
impl<'a> SubAssign<&'a BigUint> for BigUint {
    // NOTE(review): unsigned subtraction assumes `self >= other`;
    // `sub2` is presumably what enforces that — confirm its contract.
    fn sub_assign(&mut self, other: &'a BigUint) {
        sub2(&mut self.data[..], &other.data[..]);
        self.normalize();
    }
}
729
impl<'a> Sub<BigUint> for &'a BigUint {
    type Output = BigUint;

    // Computes `self - other`, reusing `other`'s storage for the result.
    fn sub(self, mut other: BigUint) -> BigUint {
        let other_len = other.data.len();
        if other_len < self.data.len() {
            // Subtract the overlapping low digits in place, copy over
            // `self`'s high digits, then propagate any borrow into them.
            let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
            other.data.extend_from_slice(&self.data[other_len..]);
            if lo_borrow != 0 {
                sub2(&mut other.data[other_len..], &[1])
            }
        } else {
            sub2rev(&self.data[..], &mut other.data[..]);
        }
        other.normalized()
    }
}
747
promote_unsigned_scalars!(impl Sub for BigUint, sub);
promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
#[cfg(has_i128)]
forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);

impl Sub<u32> for BigUint {
    type Output = BigUint;

    #[inline]
    fn sub(mut self, other: u32) -> BigUint {
        self -= other;
        self
    }
}
impl SubAssign<u32> for BigUint {
    // Subtracts a single digit in place.
    fn sub_assign(&mut self, other: u32) {
        sub2(&mut self.data[..], &[other as BigDigit]);
        self.normalize();
    }
}
770
771 impl Sub<BigUint> for u32 {
772 type Output = BigUint;
773
774 #[cfg(not(feature = "u64_digit"))]
775 #[inline]
sub(self, mut other: BigUint) -> BigUint776 fn sub(self, mut other: BigUint) -> BigUint {
777 if other.data.len() == 0 {
778 other.data.push(self);
779 } else {
780 sub2rev(&[self], &mut other.data[..]);
781 }
782 other.normalized()
783 }
784
785 #[cfg(feature = "u64_digit")]
786 #[inline]
sub(self, mut other: BigUint) -> BigUint787 fn sub(self, mut other: BigUint) -> BigUint {
788 if other.data.len() == 0 {
789 other.data.push(self as BigDigit);
790 } else {
791 sub2rev(&[self as BigDigit], &mut other.data[..]);
792 }
793 other.normalized()
794 }
795 }
796
impl Sub<u64> for BigUint {
    type Output = BigUint;

    #[inline]
    fn sub(mut self, other: u64) -> BigUint {
        self -= other;
        self
    }
}

impl SubAssign<u64> for BigUint {
    // 32-bit digits: subtract `other` as a two-digit value.
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn sub_assign(&mut self, other: u64) {
        let (hi, lo) = big_digit::from_doublebigdigit(other);
        sub2(&mut self.data[..], &[lo, hi]);
        self.normalize();
    }

    // 64-bit digits: `other` is a single digit.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn sub_assign(&mut self, other: u64) {
        sub2(&mut self.data[..], &[other as BigDigit]);
        self.normalize();
    }
}
823
824 impl Sub<BigUint> for u64 {
825 type Output = BigUint;
826
827 #[cfg(not(feature = "u64_digit"))]
828 #[inline]
sub(self, mut other: BigUint) -> BigUint829 fn sub(self, mut other: BigUint) -> BigUint {
830 while other.data.len() < 2 {
831 other.data.push(0);
832 }
833
834 let (hi, lo) = big_digit::from_doublebigdigit(self);
835 sub2rev(&[lo, hi], &mut other.data[..]);
836 other.normalized()
837 }
838
839 #[cfg(feature = "u64_digit")]
840 #[inline]
sub(self, mut other: BigUint) -> BigUint841 fn sub(self, mut other: BigUint) -> BigUint {
842 if other.data.len() == 0 {
843 other.data.push(self);
844 } else {
845 sub2rev(&[self], &mut other.data[..]);
846 }
847 other.normalized()
848 }
849 }
850
#[cfg(has_i128)]
impl Sub<u128> for BigUint {
    type Output = BigUint;

    #[inline]
    fn sub(mut self, other: u128) -> BigUint {
        self -= other;
        self
    }
}
#[cfg(has_i128)]
impl SubAssign<u128> for BigUint {
    // 32-bit digits: subtract `other` as a four-digit value.
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn sub_assign(&mut self, other: u128) {
        let (a, b, c, d) = u32_from_u128(other);
        sub2(&mut self.data[..], &[d, c, b, a]);
        self.normalize();
    }

    // 64-bit digits: subtract `other` as a two-digit value.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn sub_assign(&mut self, other: u128) {
        let (hi, lo) = big_digit::from_doublebigdigit(other);
        sub2(&mut self.data[..], &[lo, hi]);
        self.normalize();
    }
}
879
#[cfg(has_i128)]
impl Sub<BigUint> for u128 {
    type Output = BigUint;

    // 32-bit digits: pad `other` to four digits, then subtract in reverse.
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn sub(self, mut other: BigUint) -> BigUint {
        while other.data.len() < 4 {
            other.data.push(0);
        }

        let (a, b, c, d) = u32_from_u128(self);
        sub2rev(&[d, c, b, a], &mut other.data[..]);
        other.normalized()
    }

    // 64-bit digits: pad `other` to two digits, then subtract in reverse.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn sub(self, mut other: BigUint) -> BigUint {
        while other.data.len() < 2 {
            other.data.push(0);
        }

        let (hi, lo) = big_digit::from_doublebigdigit(self);
        sub2rev(&[lo, hi], &mut other.data[..]);
        other.normalized()
    }
}
908
forward_all_binop_to_ref_ref!(impl Mul for BigUint, mul);
forward_val_assign!(impl MulAssign for BigUint, mul_assign);

impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint {
    type Output = BigUint;

    #[inline]
    fn mul(self, other: &BigUint) -> BigUint {
        mul3(&self.data[..], &other.data[..])
    }
}

impl<'a, 'b> Mul<&'a BigInt> for &'b BigUint {
    type Output = BigInt;

    // Mixed multiply: magnitude from `mul3`, sign copied from `other`.
    // NOTE(review): a zero product keeps `other.sign` — verify `BigInt`
    // tolerates a possibly non-normalized sign here.
    #[inline]
    fn mul(self, other: &BigInt) -> BigInt {
        BigInt {
            data: mul3(&self.data[..], &other.digits()[..]),
            sign: other.sign,
        }
    }
}
932
impl<'a> MulAssign<&'a BigUint> for BigUint {
    // Full multiply cannot be done in place; build a new value.
    #[inline]
    fn mul_assign(&mut self, other: &'a BigUint) {
        *self = &*self * other
    }
}

promote_unsigned_scalars!(impl Mul for BigUint, mul);
promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigUint, mul);
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigUint, mul);
#[cfg(has_i128)]
forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigUint, mul);
946
impl Mul<u32> for BigUint {
    type Output = BigUint;

    // Scalar multiply reuses the in-place assign operator.
    #[inline]
    fn mul(mut self, other: u32) -> BigUint {
        self *= other;
        self
    }
}
956 impl MulAssign<u32> for BigUint {
957 #[inline]
mul_assign(&mut self, other: u32)958 fn mul_assign(&mut self, other: u32) {
959 if other == 0 {
960 self.data.clear();
961 } else {
962 let carry = scalar_mul(&mut self.data[..], other as BigDigit);
963 if carry != 0 {
964 self.data.push(carry);
965 }
966 }
967 }
968 }
969
impl Mul<u64> for BigUint {
    type Output = BigUint;

    #[inline]
    fn mul(mut self, other: u64) -> BigUint {
        self *= other;
        self
    }
}
impl MulAssign<u64> for BigUint {
    // 32-bit digits: fall back to a full multiply when `other` needs
    // two digits.
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn mul_assign(&mut self, other: u64) {
        if other == 0 {
            self.data.clear();
        } else if other <= BigDigit::max_value() as u64 {
            *self *= other as BigDigit
        } else {
            let (hi, lo) = big_digit::from_doublebigdigit(other);
            *self = mul3(&self.data[..], &[lo, hi])
        }
    }

    // 64-bit digits: `other` always fits in one digit.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn mul_assign(&mut self, other: u64) {
        if other == 0 {
            self.data.clear();
        } else {
            let carry = scalar_mul(&mut self.data[..], other as BigDigit);
            if carry != 0 {
                self.data.push(carry);
            }
        }
    }
}
1006
#[cfg(has_i128)]
impl Mul<u128> for BigUint {
    type Output = BigUint;

    #[inline]
    fn mul(mut self, other: u128) -> BigUint {
        self *= other;
        self
    }
}
#[cfg(has_i128)]
impl MulAssign<u128> for BigUint {
    // 32-bit digits: full multiply by a four-digit value when `other`
    // exceeds one digit.
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn mul_assign(&mut self, other: u128) {
        if other == 0 {
            self.data.clear();
        } else if other <= BigDigit::max_value() as u128 {
            *self *= other as BigDigit
        } else {
            let (a, b, c, d) = u32_from_u128(other);
            *self = mul3(&self.data[..], &[d, c, b, a])
        }
    }

    // 64-bit digits: full multiply by a two-digit value when `other`
    // exceeds one digit.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn mul_assign(&mut self, other: u128) {
        if other == 0 {
            self.data.clear();
        } else if other <= BigDigit::max_value() as u128 {
            *self *= other as BigDigit
        } else {
            let (hi, lo) = big_digit::from_doublebigdigit(other);
            *self = mul3(&self.data[..], &[lo, hi])
        }
    }
}
1045
forward_all_binop_to_ref_ref!(impl Div for BigUint, div);
forward_val_assign!(impl DivAssign for BigUint, div_assign);

impl<'a, 'b> Div<&'b BigUint> for &'a BigUint {
    type Output = BigUint;

    // Truncating division: quotient only, remainder discarded.
    #[inline]
    fn div(self, other: &BigUint) -> BigUint {
        let (q, _) = self.div_rem(other);
        q
    }
}
impl<'a> DivAssign<&'a BigUint> for BigUint {
    #[inline]
    fn div_assign(&mut self, other: &'a BigUint) {
        *self = &*self / other;
    }
}
1064
promote_unsigned_scalars!(impl Div for BigUint, div);
promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign);
forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigUint, div);
forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigUint, div);
#[cfg(has_i128)]
forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigUint, div);

impl Div<u32> for BigUint {
    type Output = BigUint;

    // Single-digit divisor: use the fast digit division.
    #[inline]
    fn div(self, other: u32) -> BigUint {
        let (q, _) = div_rem_digit(self, other as BigDigit);
        q
    }
}
impl DivAssign<u32> for BigUint {
    #[inline]
    fn div_assign(&mut self, other: u32) {
        *self = &*self / other;
    }
}
1087
1088 impl Div<BigUint> for u32 {
1089 type Output = BigUint;
1090
1091 #[inline]
div(self, other: BigUint) -> BigUint1092 fn div(self, other: BigUint) -> BigUint {
1093 match other.data.len() {
1094 0 => panic!(),
1095 1 => From::from(self as BigDigit / other.data[0]),
1096 _ => Zero::zero(),
1097 }
1098 }
1099 }
1100
impl Div<u64> for BigUint {
    type Output = BigUint;

    // Promote the divisor to a `BigUint` and take the quotient.
    #[inline]
    fn div(self, other: u64) -> BigUint {
        let (q, _) = self.div_rem(&From::from(other));
        q
    }
}
impl DivAssign<u64> for BigUint {
    #[inline]
    fn div_assign(&mut self, other: u64) {
        *self = &*self / other;
    }
}
1116
impl Div<BigUint> for u64 {
    type Output = BigUint;

    // 32-bit digits: a divisor of up to two digits still fits in a
    // `u64`; anything longer exceeds `self`, so the quotient is zero.
    // Panics when `other` is zero (empty digit vector).
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn div(self, other: BigUint) -> BigUint {
        match other.data.len() {
            0 => panic!(),
            1 => From::from(self / other.data[0] as u64),
            2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
            _ => Zero::zero(),
        }
    }

    // 64-bit digits: only a single-digit divisor can be <= `self`.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn div(self, other: BigUint) -> BigUint {
        match other.data.len() {
            0 => panic!(),
            1 => From::from(self / other.data[0]),
            _ => Zero::zero(),
        }
    }
}
1141
#[cfg(has_i128)]
impl Div<u128> for BigUint {
    type Output = BigUint;

    // Promote the divisor to a `BigUint` and take the quotient.
    #[inline]
    fn div(self, other: u128) -> BigUint {
        let (q, _) = self.div_rem(&From::from(other));
        q
    }
}

#[cfg(has_i128)]
impl DivAssign<u128> for BigUint {
    #[inline]
    fn div_assign(&mut self, other: u128) {
        *self = &*self / other;
    }
}
1160
#[cfg(has_i128)]
impl Div<BigUint> for u128 {
    type Output = BigUint;

    // 32-bit digits: divisors of up to four digits fit in a `u128`;
    // longer divisors exceed `self`, so the quotient is zero.
    // Panics when `other` is zero (empty digit vector).
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    fn div(self, other: BigUint) -> BigUint {
        match other.data.len() {
            0 => panic!(),
            1 => From::from(self / other.data[0] as u128),
            2 => From::from(
                self / big_digit::to_doublebigdigit(other.data[1], other.data[0]) as u128,
            ),
            3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])),
            4 => From::from(
                self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]),
            ),
            _ => Zero::zero(),
        }
    }

    // 64-bit digits: divisors of up to two digits fit in a `u128`.
    #[cfg(feature = "u64_digit")]
    #[inline]
    fn div(self, other: BigUint) -> BigUint {
        match other.data.len() {
            0 => panic!(),
            1 => From::from(self / other.data[0] as u128),
            2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
            _ => Zero::zero(),
        }
    }
}
1193
// Derive the by-value / mixed-reference `Rem` combinations and the by-value
// `RemAssign` from the by-ref implementations below.
forward_all_binop_to_ref_ref!(impl Rem for BigUint, rem);
forward_val_assign!(impl RemAssign for BigUint, rem_assign);
1196
1197 impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint {
1198 type Output = BigUint;
1199
1200 #[inline]
rem(self, other: &BigUint) -> BigUint1201 fn rem(self, other: &BigUint) -> BigUint {
1202 let (_, r) = self.div_rem(other);
1203 r
1204 }
1205 }
1206 impl<'a> RemAssign<&'a BigUint> for BigUint {
1207 #[inline]
rem_assign(&mut self, other: &BigUint)1208 fn rem_assign(&mut self, other: &BigUint) {
1209 *self = &*self % other;
1210 }
1211 }
1212
// Lift the u32/u64/u128 scalar implementations below to the remaining
// unsigned scalar widths and to all reference/value combinations.
promote_unsigned_scalars!(impl Rem for BigUint, rem);
promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign);
forward_all_scalar_binop_to_val_val!(impl Rem<u32> for BigUint, rem);
forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigUint, rem);
#[cfg(has_i128)]
forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigUint, rem);
1219
1220 impl Rem<u32> for BigUint {
1221 type Output = BigUint;
1222
1223 #[inline]
rem(self, other: u32) -> BigUint1224 fn rem(self, other: u32) -> BigUint {
1225 let (_, r) = div_rem_digit(self, other as BigDigit);
1226 From::from(r)
1227 }
1228 }
1229 impl RemAssign<u32> for BigUint {
1230 #[inline]
rem_assign(&mut self, other: u32)1231 fn rem_assign(&mut self, other: u32) {
1232 *self = &*self % other;
1233 }
1234 }
1235
1236 impl Rem<BigUint> for u32 {
1237 type Output = BigUint;
1238
1239 #[inline]
rem(mut self, other: BigUint) -> BigUint1240 fn rem(mut self, other: BigUint) -> BigUint {
1241 self %= other;
1242 From::from(self)
1243 }
1244 }
1245
// Implements `scalar %= &BigUint` (and, via `forward_val_assign_scalar`,
// `scalar %= BigUint`) for a primitive integer type. The BigUint is first
// narrowed to the scalar type with `$to_scalar`; if it does not fit, the
// scalar is left unchanged, since `x % m == x` whenever `m > x`.
macro_rules! impl_rem_assign_scalar {
    ($scalar:ty, $to_scalar:ident) => {
        forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign);
        impl<'a> RemAssign<&'a BigUint> for $scalar {
            #[inline]
            fn rem_assign(&mut self, other: &BigUint) {
                *self = match other.$to_scalar() {
                    // Modulus larger than the scalar type: value unchanged.
                    None => *self,
                    // Remainder by zero is undefined.
                    Some(0) => panic!(),
                    Some(v) => *self % v
                };
            }
        }
    }
}
// we can scalar %= BigUint for any scalar, including signed types
// (the BigUint is narrowed to the scalar type first, then `%` follows the
// scalar's own semantics, so signed values keep their sign).
#[cfg(has_i128)]
impl_rem_assign_scalar!(u128, to_u128);
impl_rem_assign_scalar!(usize, to_usize);
impl_rem_assign_scalar!(u64, to_u64);
impl_rem_assign_scalar!(u32, to_u32);
impl_rem_assign_scalar!(u16, to_u16);
impl_rem_assign_scalar!(u8, to_u8);
#[cfg(has_i128)]
impl_rem_assign_scalar!(i128, to_i128);
impl_rem_assign_scalar!(isize, to_isize);
impl_rem_assign_scalar!(i64, to_i64);
impl_rem_assign_scalar!(i32, to_i32);
impl_rem_assign_scalar!(i16, to_i16);
impl_rem_assign_scalar!(i8, to_i8);
1276
1277 impl Rem<u64> for BigUint {
1278 type Output = BigUint;
1279
1280 #[inline]
rem(self, other: u64) -> BigUint1281 fn rem(self, other: u64) -> BigUint {
1282 let (_, r) = self.div_rem(&From::from(other));
1283 r
1284 }
1285 }
1286 impl RemAssign<u64> for BigUint {
1287 #[inline]
rem_assign(&mut self, other: u64)1288 fn rem_assign(&mut self, other: u64) {
1289 *self = &*self % other;
1290 }
1291 }
1292
1293 impl Rem<BigUint> for u64 {
1294 type Output = BigUint;
1295
1296 #[inline]
rem(mut self, other: BigUint) -> BigUint1297 fn rem(mut self, other: BigUint) -> BigUint {
1298 self %= other;
1299 From::from(self)
1300 }
1301 }
1302
#[cfg(has_i128)]
impl Rem<u128> for BigUint {
    type Output = BigUint;

    #[inline]
    fn rem(self, other: u128) -> BigUint {
        // Widen the scalar and run the general division; keep the remainder.
        let divisor: BigUint = From::from(other);
        self.div_rem(&divisor).1
    }
}
#[cfg(has_i128)]
impl RemAssign<u128> for BigUint {
    #[inline]
    fn rem_assign(&mut self, other: u128) {
        let remainder = &*self % other;
        *self = remainder;
    }
}

#[cfg(has_i128)]
impl Rem<BigUint> for u128 {
    type Output = BigUint;

    #[inline]
    fn rem(mut self, other: BigUint) -> BigUint {
        // Reduce the scalar in place, then widen the result.
        self %= other;
        From::from(self)
    }
}
1331
1332 impl Neg for BigUint {
1333 type Output = BigUint;
1334
1335 #[inline]
neg(self) -> BigUint1336 fn neg(self) -> BigUint {
1337 panic!()
1338 }
1339 }
1340
1341 impl<'a> Neg for &'a BigUint {
1342 type Output = BigUint;
1343
1344 #[inline]
neg(self) -> BigUint1345 fn neg(self) -> BigUint {
1346 panic!()
1347 }
1348 }
1349
1350 impl CheckedAdd for BigUint {
1351 #[inline]
checked_add(&self, v: &BigUint) -> Option<BigUint>1352 fn checked_add(&self, v: &BigUint) -> Option<BigUint> {
1353 Some(self.add(v))
1354 }
1355 }
1356
1357 impl CheckedSub for BigUint {
1358 #[inline]
checked_sub(&self, v: &BigUint) -> Option<BigUint>1359 fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
1360 match self.cmp(v) {
1361 Less => None,
1362 Equal => Some(Zero::zero()),
1363 Greater => Some(self.sub(v)),
1364 }
1365 }
1366 }
1367
1368 impl CheckedMul for BigUint {
1369 #[inline]
checked_mul(&self, v: &BigUint) -> Option<BigUint>1370 fn checked_mul(&self, v: &BigUint) -> Option<BigUint> {
1371 Some(self.mul(v))
1372 }
1373 }
1374
1375 impl CheckedDiv for BigUint {
1376 #[inline]
checked_div(&self, v: &BigUint) -> Option<BigUint>1377 fn checked_div(&self, v: &BigUint) -> Option<BigUint> {
1378 if v.is_zero() {
1379 None
1380 } else {
1381 Some(self.div(v))
1382 }
1383 }
1384 }
1385
1386 impl Integer for BigUint {
1387 #[inline]
div_rem(&self, other: &BigUint) -> (BigUint, BigUint)1388 fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) {
1389 div_rem(self, other)
1390 }
1391
1392 #[inline]
div_floor(&self, other: &BigUint) -> BigUint1393 fn div_floor(&self, other: &BigUint) -> BigUint {
1394 let (d, _) = div_rem(self, other);
1395 d
1396 }
1397
1398 #[inline]
mod_floor(&self, other: &BigUint) -> BigUint1399 fn mod_floor(&self, other: &BigUint) -> BigUint {
1400 let (_, m) = div_rem(self, other);
1401 m
1402 }
1403
1404 #[inline]
div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint)1405 fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) {
1406 div_rem(self, other)
1407 }
1408
1409 /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
1410 ///
1411 /// The result is always positive.
1412 #[inline]
gcd(&self, other: &Self) -> Self1413 fn gcd(&self, other: &Self) -> Self {
1414 let (res, _, _) = extended_gcd(Cow::Borrowed(self), Cow::Borrowed(other), false);
1415 res.into_biguint().unwrap()
1416 }
1417
1418 /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
1419 #[inline]
lcm(&self, other: &BigUint) -> BigUint1420 fn lcm(&self, other: &BigUint) -> BigUint {
1421 self / self.gcd(other) * other
1422 }
1423
1424 /// Deprecated, use `is_multiple_of` instead.
1425 #[inline]
divides(&self, other: &BigUint) -> bool1426 fn divides(&self, other: &BigUint) -> bool {
1427 self.is_multiple_of(other)
1428 }
1429
1430 /// Returns `true` if the number is a multiple of `other`.
1431 #[inline]
is_multiple_of(&self, other: &BigUint) -> bool1432 fn is_multiple_of(&self, other: &BigUint) -> bool {
1433 (self % other).is_zero()
1434 }
1435
1436 /// Returns `true` if the number is divisible by `2`.
1437 #[inline]
is_even(&self) -> bool1438 fn is_even(&self) -> bool {
1439 // Considering only the last digit.
1440 match self.data.first() {
1441 Some(x) => x.is_even(),
1442 None => true,
1443 }
1444 }
1445
1446 /// Returns `true` if the number is not divisible by `2`.
1447 #[inline]
is_odd(&self) -> bool1448 fn is_odd(&self) -> bool {
1449 !self.is_even()
1450 }
1451 }
1452
1453 #[inline]
fixpoint<F>(mut x: BigUint, max_bits: usize, f: F) -> BigUint where F: Fn(&BigUint) -> BigUint,1454 fn fixpoint<F>(mut x: BigUint, max_bits: usize, f: F) -> BigUint
1455 where
1456 F: Fn(&BigUint) -> BigUint,
1457 {
1458 let mut xn = f(&x);
1459
1460 // If the value increased, then the initial guess must have been low.
1461 // Repeat until we reverse course.
1462 while x < xn {
1463 // Sometimes an increase will go way too far, especially with large
1464 // powers, and then take a long time to walk back. We know an upper
1465 // bound based on bit size, so saturate on that.
1466 x = if xn.bits() > max_bits {
1467 BigUint::one() << max_bits
1468 } else {
1469 xn
1470 };
1471 xn = f(&x);
1472 }
1473
1474 // Now keep repeating while the estimate is decreasing.
1475 while x > xn {
1476 x = xn;
1477 xn = f(&x);
1478 }
1479 x
1480 }
1481
impl Roots for BigUint {
    // nth_root, sqrt and cbrt use Newton's method to compute
    // principal root of a given degree for a given integer.

    // Reference:
    // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14
    fn nth_root(&self, n: u32) -> Self {
        assert!(n > 0, "root degree n must be at least 1");

        // 0 and 1 are fixed points of every root.
        if self.is_zero() || self.is_one() {
            return self.clone();
        }

        match n {
            // Optimize for small n
            1 => return self.clone(),
            2 => return self.sqrt(),
            3 => return self.cbrt(),
            _ => (),
        }

        // The root of non-zero values less than 2ⁿ can only be 1.
        let bits = self.bits();
        if bits <= n as usize {
            return BigUint::one();
        }

        // If we fit in `u64`, compute the root that way.
        if let Some(x) = self.to_u64() {
            return x.nth_root(n).into();
        }

        // Upper bound on the root's bit size: ceil(bits / n).
        let max_bits = bits / n as usize + 1;

        let guess = if let Some(f) = self.to_f64() {
            // We fit in `f64` (lossy), so get a better initial guess from that.
            // ⁿ√f == exp(ln(f) / n)
            BigUint::from_f64(exp(ln(f) / f64::from(n))).unwrap()
        } else {
            // Try to guess by scaling down such that it does fit in `f64`.
            // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ)
            let nsz = n as usize;
            // `to_f64` failed, so bits > f64::MAX_EXP - 1; the subtraction
            // cannot underflow here.
            let extra_bits = bits - (f64::MAX_EXP as usize - 1);
            let root_scale = (extra_bits + (nsz - 1)) / nsz;
            let scale = root_scale * nsz;
            if scale < bits && bits - scale > nsz {
                (self >> scale).nth_root(n) << root_scale
            } else {
                // Scaled value too small for a meaningful recursive root;
                // fall back to the saturating upper bound.
                BigUint::one() << max_bits
            }
        };

        // Newton iteration: s' = ((n-1)·s + a / sⁿ⁻¹) / n
        let n_min_1 = n - 1;
        fixpoint(guess, max_bits, move |s| {
            let q = self / s.pow(n_min_1);
            let t = n_min_1 * s + q;
            t / n
        })
    }

    // Reference:
    // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13
    fn sqrt(&self) -> Self {
        // 0 and 1 are fixed points.
        if self.is_zero() || self.is_one() {
            return self.clone();
        }

        // If we fit in `u64`, compute the root that way.
        if let Some(x) = self.to_u64() {
            return x.sqrt().into();
        }

        // Upper bound on the root's bit size.
        let bits = self.bits();
        let max_bits = bits / 2 as usize + 1;

        let guess = if let Some(f) = self.to_f64() {
            // We fit in `f64` (lossy), so get a better initial guess from that.
            BigUint::from_f64(sqrt(f)).unwrap()
        } else {
            // Try to guess by scaling down such that it does fit in `f64`.
            // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ)
            let extra_bits = bits - (f64::MAX_EXP as usize - 1);
            let root_scale = (extra_bits + 1) / 2;
            let scale = root_scale * 2;
            (self >> scale).sqrt() << root_scale
        };

        // Newton iteration: s' = (s + a/s) / 2
        fixpoint(guess, max_bits, move |s| {
            let q = self / s;
            let t = s + q;
            t >> 1
        })
    }

    fn cbrt(&self) -> Self {
        // 0 and 1 are fixed points.
        if self.is_zero() || self.is_one() {
            return self.clone();
        }

        // If we fit in `u64`, compute the root that way.
        if let Some(x) = self.to_u64() {
            return x.cbrt().into();
        }

        // Upper bound on the root's bit size.
        let bits = self.bits();
        let max_bits = bits / 3 as usize + 1;

        let guess = if let Some(f) = self.to_f64() {
            // We fit in `f64` (lossy), so get a better initial guess from that.
            BigUint::from_f64(cbrt(f)).unwrap()
        } else {
            // Try to guess by scaling down such that it does fit in `f64`.
            // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ)
            let extra_bits = bits - (f64::MAX_EXP as usize - 1);
            let root_scale = (extra_bits + 2) / 3;
            let scale = root_scale * 3;
            (self >> scale).cbrt() << root_scale
        };

        // Newton iteration: s' = (2·s + a/s²) / 3
        fixpoint(guess, max_bits, move |s| {
            let q = self / (s * s);
            let t = (s << 1) + q;
            t / 3u32
        })
    }
}
1607
high_bits_to_u64(v: &BigUint) -> u641608 fn high_bits_to_u64(v: &BigUint) -> u64 {
1609 match v.data.len() {
1610 0 => 0,
1611 1 => v.data[0] as u64,
1612 _ => {
1613 let mut bits = v.bits();
1614 let mut ret = 0u64;
1615 let mut ret_bits = 0;
1616
1617 for d in v.data.iter().rev() {
1618 let digit_bits = (bits - 1) % big_digit::BITS + 1;
1619 let bits_want = cmp::min(64 - ret_bits, digit_bits);
1620
1621 if bits_want != 64 {
1622 ret <<= bits_want;
1623 }
1624 ret |= *d as u64 >> (digit_bits - bits_want);
1625 ret_bits += bits_want;
1626 bits -= bits_want;
1627
1628 if ret_bits == 64 {
1629 break;
1630 }
1631 }
1632
1633 ret
1634 }
1635 }
1636 }
1637
impl ToPrimitive for BigUint {
    #[inline]
    fn to_i64(&self) -> Option<i64> {
        // Via u64; fails when the value exceeds i64::MAX.
        self.to_u64().as_ref().and_then(u64::to_i64)
    }

    #[inline]
    #[cfg(has_i128)]
    fn to_i128(&self) -> Option<i128> {
        // Via u128; fails when the value exceeds i128::MAX.
        self.to_u128().as_ref().and_then(u128::to_i128)
    }

    #[inline]
    fn to_u64(&self) -> Option<u64> {
        let mut ret: u64 = 0;
        let mut bits = 0;

        // Accumulate limbs little-endian; any limb at or above bit 64
        // means the value does not fit.
        for i in self.data.iter() {
            if bits >= 64 {
                return None;
            }

            ret += (*i as u64) << bits;
            bits += big_digit::BITS;
        }

        Some(ret)
    }

    #[inline]
    #[cfg(has_i128)]
    fn to_u128(&self) -> Option<u128> {
        let mut ret: u128 = 0;
        let mut bits = 0;

        // Accumulate limbs little-endian; any limb at or above bit 128
        // means the value does not fit.
        for i in self.data.iter() {
            if bits >= 128 {
                return None;
            }

            ret |= (*i as u128) << bits;
            bits += big_digit::BITS;
        }

        Some(ret)
    }

    #[inline]
    fn to_f32(&self) -> Option<f32> {
        // Use the top 64 bits as the mantissa and scale by the number of
        // bits below them; the conversion is lossy beyond f32 precision.
        let mantissa = high_bits_to_u64(self);
        let exponent = self.bits() - fls(mantissa);

        if exponent > f32::MAX_EXP as usize {
            None
        } else {
            let ret = (mantissa as f32) * 2.0f32.powi(exponent as i32);
            // The product may still round up to infinity near the maximum.
            if ret.is_infinite() {
                None
            } else {
                Some(ret)
            }
        }
    }

    #[inline]
    fn to_f64(&self) -> Option<f64> {
        // Same scheme as to_f32, with f64 range and precision.
        let mantissa = high_bits_to_u64(self);
        let exponent = self.bits() - fls(mantissa);

        if exponent > f64::MAX_EXP as usize {
            None
        } else {
            let ret = (mantissa as f64) * 2.0f64.powi(exponent as i32);
            // The product may still round up to infinity near the maximum.
            if ret.is_infinite() {
                None
            } else {
                Some(ret)
            }
        }
    }
}
1719
impl FromPrimitive for BigUint {
    #[inline]
    fn from_i64(n: i64) -> Option<BigUint> {
        // Negative values cannot be represented in an unsigned integer.
        if n >= 0 {
            Some(BigUint::from(n as u64))
        } else {
            None
        }
    }

    #[inline]
    #[cfg(has_i128)]
    fn from_i128(n: i128) -> Option<BigUint> {
        // Negative values cannot be represented in an unsigned integer.
        if n >= 0 {
            Some(BigUint::from(n as u128))
        } else {
            None
        }
    }

    #[inline]
    fn from_u64(n: u64) -> Option<BigUint> {
        // Every u64 is representable.
        Some(BigUint::from(n))
    }

    #[inline]
    #[cfg(has_i128)]
    fn from_u128(n: u128) -> Option<BigUint> {
        // Every u128 is representable.
        Some(BigUint::from(n))
    }

    #[inline]
    fn from_f64(mut n: f64) -> Option<BigUint> {
        // handle NAN, INFINITY, NEG_INFINITY
        if !n.is_finite() {
            return None;
        }

        // match the rounding of casting from float to int
        n = FloatCore::trunc(n);

        // handle 0.x, -0.x
        if n.is_zero() {
            return Some(BigUint::zero());
        }

        let (mantissa, exponent, sign) = FloatCore::integer_decode(n);

        if sign == -1 {
            return None;
        }

        // Reconstruct the integer as mantissa * 2^exponent.
        let mut ret = BigUint::from(mantissa);
        if exponent > 0 {
            ret = ret << exponent as usize;
        } else if exponent < 0 {
            ret = ret >> (-exponent) as usize;
        }
        Some(ret)
    }
}
1781
// With 32-bit digits a u64 is split across up to two limbs.
#[cfg(not(feature = "u64_digit"))]
impl From<u64> for BigUint {
    #[inline]
    fn from(mut n: u64) -> Self {
        let mut ret: BigUint = Zero::zero();

        // Peel off one limb at a time, little-endian; stopping at zero
        // keeps the representation normalized (no high zero limbs).
        while n != 0 {
            ret.data.push(n as BigDigit);
            // don't overflow if BITS is 64:
            n = (n >> 1) >> (big_digit::BITS - 1);
        }

        ret
    }
}
1797
#[cfg(feature = "u64_digit")]
impl From<u64> for BigUint {
    #[inline]
    fn from(n: u64) -> Self {
        // A u64 is exactly one native limb; `new_native` normalizes the
        // representation when `n` is zero.
        let mut limbs = SmallVec::new();
        limbs.push(n);
        BigUint::new_native(limbs)
    }
}
1805
#[cfg(has_i128)]
impl From<u128> for BigUint {
    #[inline]
    fn from(mut n: u128) -> Self {
        let mut ret: BigUint = Zero::zero();

        // Peel off one limb at a time, little-endian; stopping at zero
        // keeps the representation normalized (no high zero limbs).
        while n != 0 {
            ret.data.push(n as BigDigit);
            n >>= big_digit::BITS;
        }

        ret
    }
}
1820
// Implements `From<$T>` for unsigned types that widen into u64, reusing
// the `From<u64>` implementation above.
macro_rules! impl_biguint_from_uint {
    ($T:ty) => {
        impl From<$T> for BigUint {
            #[inline]
            fn from(n: $T) -> Self {
                BigUint::from(n as u64)
            }
        }
    };
}
1831
// Smaller unsigned types (and usize) convert through u64.
impl_biguint_from_uint!(u8);
impl_biguint_from_uint!(u16);
impl_biguint_from_uint!(u32);
impl_biguint_from_uint!(usize);
1836
1837 /// A generic trait for converting a value to a `BigUint`.
1838 pub trait ToBigUint {
1839 /// Converts the value of `self` to a `BigUint`.
to_biguint(&self) -> Option<BigUint>1840 fn to_biguint(&self) -> Option<BigUint>;
1841 }
1842
1843 impl ToBigUint for BigUint {
1844 #[inline]
to_biguint(&self) -> Option<BigUint>1845 fn to_biguint(&self) -> Option<BigUint> {
1846 Some(self.clone())
1847 }
1848 }
1849
1850 /// A generic trait for converting a value to a `BigUint`, and consuming the value.
1851 pub trait IntoBigUint {
1852 /// Converts the value of `self` to a `BigUint`.
into_biguint(self) -> Option<BigUint>1853 fn into_biguint(self) -> Option<BigUint>;
1854 }
1855
1856 impl IntoBigUint for BigUint {
1857 #[inline]
into_biguint(self) -> Option<BigUint>1858 fn into_biguint(self) -> Option<BigUint> {
1859 Some(self)
1860 }
1861 }
1862
// Implements both `ToBigUint` (borrowing) and `IntoBigUint` (consuming)
// for a primitive type via the given `FromPrimitive` constructor, which
// returns None for unrepresentable values (negative or non-finite).
macro_rules! impl_to_biguint {
    ($T:ty, $from_ty:path) => {
        impl ToBigUint for $T {
            #[inline]
            fn to_biguint(&self) -> Option<BigUint> {
                $from_ty(*self)
            }
        }

        impl IntoBigUint for $T {
            #[inline]
            fn into_biguint(self) -> Option<BigUint> {
                $from_ty(self)
            }
        }
    };
}
1880
// Signed sources succeed only for non-negative values; float sources are
// truncated first (see `FromPrimitive::from_f64` above).
impl_to_biguint!(isize, FromPrimitive::from_isize);
impl_to_biguint!(i8, FromPrimitive::from_i8);
impl_to_biguint!(i16, FromPrimitive::from_i16);
impl_to_biguint!(i32, FromPrimitive::from_i32);
impl_to_biguint!(i64, FromPrimitive::from_i64);
#[cfg(has_i128)]
impl_to_biguint!(i128, FromPrimitive::from_i128);

impl_to_biguint!(usize, FromPrimitive::from_usize);
impl_to_biguint!(u8, FromPrimitive::from_u8);
impl_to_biguint!(u16, FromPrimitive::from_u16);
impl_to_biguint!(u32, FromPrimitive::from_u32);
impl_to_biguint!(u64, FromPrimitive::from_u64);
#[cfg(has_i128)]
impl_to_biguint!(u128, FromPrimitive::from_u128);

impl_to_biguint!(f32, FromPrimitive::from_f32);
impl_to_biguint!(f64, FromPrimitive::from_f64);
1899
// Extract bitwise digits that evenly divide BigDigit
fn to_bitwise_digits_le(u: &BigUint, bits: usize) -> Vec<u8> {
    debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0);

    let last_i = u.data.len() - 1;
    let mask: BigDigit = (1 << bits) - 1;
    let digits_per_big_digit = big_digit::BITS / bits;
    // Exact count of output digits: ceil(u.bits() / bits).
    let digits = (u.bits() + bits - 1) / bits;
    let mut res = Vec::with_capacity(digits);

    // Every limb except the most significant contributes a full set of
    // output digits, including internal zeros.
    for mut r in u.data[..last_i].iter().cloned() {
        for _ in 0..digits_per_big_digit {
            res.push((r & mask) as u8);
            r >>= bits;
        }
    }

    // The top limb stops as soon as it is exhausted, so no leading zero
    // digits are emitted.
    let mut r = u.data[last_i];
    while r != 0 {
        res.push((r & mask) as u8);
        r >>= bits;
    }

    res
}
1925
// Extract bitwise digits that don't evenly divide BigDigit
fn to_inexact_bitwise_digits_le(u: &BigUint, bits: usize) -> Vec<u8> {
    debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0);

    let mask: BigDigit = (1 << bits) - 1;
    // Upper bound on the number of output digits: ceil(u.bits() / bits).
    let digits = (u.bits() + bits - 1) / bits;
    let mut res = Vec::with_capacity(digits);

    // `r` is a small bit buffer carrying bits across limb boundaries;
    // `rbits` counts how many buffered bits are valid.
    let mut r = 0;
    let mut rbits = 0;

    for c in &u.data {
        r |= *c << rbits;
        rbits += big_digit::BITS;

        while rbits >= bits {
            res.push((r & mask) as u8);
            r >>= bits;

            // r had more bits than it could fit - grab the bits we lost
            if rbits > big_digit::BITS {
                r = *c >> (big_digit::BITS - (rbits - bits));
            }

            rbits -= bits;
        }
    }

    // Flush any bits left in the buffer as the final partial digit.
    if rbits != 0 {
        res.push(r as u8);
    }

    // Strip high zero digits so the result has no leading zeros.
    while let Some(&0) = res.last() {
        res.pop();
    }

    res
}
1964
// Extract little-endian radix digits
#[inline(always)] // forced inline to get const-prop for radix=10
fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec<u8> {
    debug_assert!(!u.is_zero() && !radix.is_power_of_two());

    // Estimate how big the result will be, so we can pre-allocate it.
    let bits = ilog2(radix);
    let radix_digits = idiv_ceil(u.bits(), bits);
    let mut res = Vec::with_capacity(radix_digits as usize);
    let mut digits = u.clone();

    // `base` is the largest power of `radix` that fits in one BigDigit;
    // dividing by it yields `power` radix digits per big division.
    let (base, power) = get_radix_base(radix);
    let radix = radix as BigDigit;

    while digits.data.len() > 1 {
        let (q, mut r) = div_rem_digit(digits, base);
        // Split the remainder into `power` radix digits.
        for _ in 0..power {
            res.push((r % radix) as u8);
            r /= radix;
        }
        digits = q;
    }

    // The last limb stops as soon as it is exhausted, avoiding leading
    // zero digits in the output.
    let mut r = digits.data[0];
    while r != 0 {
        res.push((r % radix) as u8);
        r /= radix;
    }

    res
}
1996
to_radix_le(u: &BigUint, radix: u32) -> Vec<u8>1997 pub fn to_radix_le(u: &BigUint, radix: u32) -> Vec<u8> {
1998 if u.is_zero() {
1999 vec![0]
2000 } else if radix.is_power_of_two() {
2001 // Powers of two can use bitwise masks and shifting instead of division
2002 let bits = ilog2(radix);
2003 if big_digit::BITS % bits == 0 {
2004 to_bitwise_digits_le(u, bits)
2005 } else {
2006 to_inexact_bitwise_digits_le(u, bits)
2007 }
2008 } else if radix == 10 {
2009 // 10 is so common that it's worth separating out for const-propagation.
2010 // Optimizers can often turn constant division into a faster multiplication.
2011 to_radix_digits_le(u, 10)
2012 } else {
2013 to_radix_digits_le(u, radix)
2014 }
2015 }
2016
to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8>2017 pub fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8> {
2018 assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
2019
2020 if u.is_zero() {
2021 return vec![b'0'];
2022 }
2023
2024 let mut res = to_radix_le(u, radix);
2025
2026 // Now convert everything to ASCII digits.
2027 for r in &mut res {
2028 debug_assert!((*r as u32) < radix);
2029 if *r < 10 {
2030 *r += b'0';
2031 } else {
2032 *r += b'a' - 10;
2033 }
2034 }
2035 res
2036 }
2037
// With 32-bit digits the u32 limbs already are `BigDigit`s; just move them
// into the SmallVec.
#[cfg(not(feature = "u64_digit"))]
#[inline]
fn ensure_big_digit(raw: Vec<u32>) -> SmallVec<[BigDigit; VEC_SIZE]> {
    raw.into()
}

// With 64-bit digits each pair of u32 limbs must be packed into one u64.
#[cfg(feature = "u64_digit")]
#[inline]
fn ensure_big_digit(raw: Vec<u32>) -> SmallVec<[BigDigit; VEC_SIZE]> {
    ensure_big_digit_slice(&raw)
}
2049
#[cfg(feature = "u64_digit")]
#[inline]
fn ensure_big_digit_slice(raw: &[u32]) -> SmallVec<[BigDigit; VEC_SIZE]> {
    // Pack pairs of little-endian u32 limbs into one u64 limb each; an odd
    // trailing limb becomes a limb on its own (high half zero).
    raw.chunks(2)
        .map(|pair| {
            let lo = BigDigit::from(pair[0]);
            match pair.get(1) {
                Some(&hi) => lo | (BigDigit::from(hi) << 32),
                None => lo,
            }
        })
        .collect()
}
2064
2065 impl BigUint {
2066 /// Creates and initializes a `BigUint`.
2067 ///
2068 /// The digits are in little-endian base 2<sup>32</sup>.
2069 #[inline]
new(digits: Vec<u32>) -> BigUint2070 pub fn new(digits: Vec<u32>) -> BigUint {
2071 Self::new_native(ensure_big_digit(digits))
2072 }
2073
2074 /// Creates and initializes a `BigUint`.
2075 ///
2076 /// The digits are in little-endian base matching `BigDigit`.
2077 #[inline]
new_native(digits: SmallVec<[BigDigit; VEC_SIZE]>) -> BigUint2078 pub fn new_native(digits: SmallVec<[BigDigit; VEC_SIZE]>) -> BigUint {
2079 BigUint { data: digits }.normalized()
2080 }
2081
2082 /// Creates and initializes a `BigUint`.
2083 ///
2084 /// The digits are in little-endian base 2<sup>32</sup>.
2085 #[inline]
from_slice(slice: &[u32]) -> BigUint2086 pub fn from_slice(slice: &[u32]) -> BigUint {
2087 BigUint::new(slice.to_vec())
2088 }
2089
2090 /// Creates and initializes a `BigUint`.
2091 ///
2092 /// The digits are in little-endian base matching `BigDigit`
2093 #[inline]
from_slice_native(slice: &[BigDigit]) -> BigUint2094 pub fn from_slice_native(slice: &[BigDigit]) -> BigUint {
2095 BigUint::new_native(slice.into())
2096 }
2097
get_limb(&self, i: usize) -> BigDigit2098 pub fn get_limb(&self, i: usize) -> BigDigit {
2099 self.data[i]
2100 }
2101
    /// Assign a value to a `BigUint`.
    ///
    /// The digits are in little-endian base 2<sup>32</sup>.
    #[cfg(not(feature = "u64_digit"))]
    #[inline]
    pub fn assign_from_slice(&mut self, slice: &[u32]) {
        // u32 limbs already match the native digit width here.
        self.assign_from_slice_native(slice);
    }
    #[cfg(feature = "u64_digit")]
    #[inline]
    pub fn assign_from_slice(&mut self, slice: &[u32]) {
        // Pack pairs of u32 limbs into native u64 digits first.
        let slice_digits = ensure_big_digit_slice(slice);
        self.assign_from_slice_native(&slice_digits);
    }
2116
    /// Assign a value to a `BigUint`.
    ///
    /// The digits are in little-endian with the base matching `BigDigit`.
    #[inline]
    pub fn assign_from_slice_native(&mut self, slice: &[BigDigit]) {
        // Resize then copy reuses the existing allocation where possible.
        self.data.resize(slice.len(), 0);
        self.data.clone_from_slice(slice);
        // Strip any high zero limbs the caller may have passed.
        self.normalize();
    }
2126
2127 /// Creates and initializes a `BigUint`.
2128 ///
2129 /// The bytes are in big-endian byte order.
2130 ///
2131 /// # Examples
2132 ///
2133 /// ```
2134 /// use num_bigint_dig::BigUint;
2135 ///
2136 /// assert_eq!(BigUint::from_bytes_be(b"A"),
2137 /// BigUint::parse_bytes(b"65", 10).unwrap());
2138 /// assert_eq!(BigUint::from_bytes_be(b"AA"),
2139 /// BigUint::parse_bytes(b"16705", 10).unwrap());
2140 /// assert_eq!(BigUint::from_bytes_be(b"AB"),
2141 /// BigUint::parse_bytes(b"16706", 10).unwrap());
2142 /// assert_eq!(BigUint::from_bytes_be(b"Hello world!"),
2143 /// BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
2144 /// ```
2145 #[inline]
from_bytes_be(bytes: &[u8]) -> BigUint2146 pub fn from_bytes_be(bytes: &[u8]) -> BigUint {
2147 if bytes.is_empty() {
2148 Zero::zero()
2149 } else {
2150 let mut v = bytes.to_vec();
2151 v.reverse();
2152 BigUint::from_bytes_le(&*v)
2153 }
2154 }
2155
2156 /// Creates and initializes a `BigUint`.
2157 ///
2158 /// The bytes are in little-endian byte order.
2159 #[inline]
from_bytes_le(bytes: &[u8]) -> BigUint2160 pub fn from_bytes_le(bytes: &[u8]) -> BigUint {
2161 if bytes.is_empty() {
2162 Zero::zero()
2163 } else {
2164 from_bitwise_digits_le(bytes, 8)
2165 }
2166 }
2167
2168 /// Creates and initializes a `BigUint`. The input slice must contain
2169 /// ascii/utf8 characters in [0-9a-zA-Z].
2170 /// `radix` must be in the range `2...36`.
2171 ///
2172 /// The function `from_str_radix` from the `Num` trait provides the same logic
2173 /// for `&str` buffers.
2174 ///
2175 /// # Examples
2176 ///
2177 /// ```
2178 /// use num_bigint_dig::{BigUint, ToBigUint};
2179 ///
2180 /// assert_eq!(BigUint::parse_bytes(b"1234", 10), ToBigUint::to_biguint(&1234));
2181 /// assert_eq!(BigUint::parse_bytes(b"ABCD", 16), ToBigUint::to_biguint(&0xABCD));
2182 /// assert_eq!(BigUint::parse_bytes(b"G", 16), None);
2183 /// ```
2184 #[inline]
parse_bytes(buf: &[u8], radix: u32) -> Option<BigUint>2185 pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigUint> {
2186 str::from_utf8(buf)
2187 .ok()
2188 .and_then(|s| BigUint::from_str_radix(s, radix).ok())
2189 }
2190
    /// Creates and initializes a `BigUint`. Each u8 of the input slice is
    /// interpreted as one digit of the number
    /// and must therefore be less than `radix`.
    ///
    /// The bytes are in big-endian byte order.
    /// `radix` must be in the range `2...256`.
    ///
    /// # Examples
    ///
    /// ```
    /// use num_bigint_dig::{BigUint};
    ///
    /// let inbase190 = &[15, 33, 125, 12, 14];
    /// let a = BigUint::from_radix_be(inbase190, 190).unwrap();
    /// assert_eq!(a.to_radix_be(190), inbase190);
    /// ```
    pub fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
        assert!(
            2 <= radix && radix <= 256,
            "The radix must be within 2...256"
        );

        // Every digit must be valid for the radix (radix 256 accepts any byte).
        if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
            return None;
        }

        let res = if radix.is_power_of_two() {
            // Powers of two can use bitwise masks and shifting instead of multiplication
            let bits = ilog2(radix);
            // The bitwise helpers expect little-endian digit order.
            let mut v = Vec::from(buf);
            v.reverse();
            if big_digit::BITS % bits == 0 {
                from_bitwise_digits_le(&v, bits)
            } else {
                from_inexact_bitwise_digits_le(&v, bits)
            }
        } else {
            from_radix_digits_be(buf, radix)
        };

        Some(res)
    }
2233
    /// Creates and initializes a `BigUint`. Each u8 of the input slice is
    /// interpreted as one digit of the number
    /// and must therefore be less than `radix`.
    ///
    /// The bytes are in little-endian byte order.
    /// `radix` must be in the range `2...256`.
    ///
    /// # Examples
    ///
    /// ```
    /// use num_bigint_dig::{BigUint};
    ///
    /// let inbase190 = &[14, 12, 125, 33, 15];
    /// let a = BigUint::from_radix_le(inbase190, 190).unwrap();
    /// assert_eq!(a.to_radix_le(190), inbase190);
    /// ```
    pub fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
        assert!(
            2 <= radix && radix <= 256,
            "The radix must be within 2...256"
        );

        // Every digit must be valid for the radix (radix 256 accepts any byte).
        if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
            return None;
        }

        let res = if radix.is_power_of_two() {
            // Powers of two can use bitwise masks and shifting instead of multiplication
            let bits = ilog2(radix);
            if big_digit::BITS % bits == 0 {
                from_bitwise_digits_le(buf, bits)
            } else {
                from_inexact_bitwise_digits_le(buf, bits)
            }
        } else {
            // The general-radix helper expects big-endian digit order.
            let mut v = Vec::from(buf);
            v.reverse();
            from_radix_digits_be(&v, radix)
        };

        Some(res)
    }
2276
2277 /// Returns the byte representation of the `BigUint` in big-endian byte order.
2278 ///
2279 /// # Examples
2280 ///
2281 /// ```
2282 /// use num_bigint_dig::BigUint;
2283 ///
2284 /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
2285 /// assert_eq!(i.to_bytes_be(), vec![4, 101]);
2286 /// ```
2287 #[inline]
to_bytes_be(&self) -> Vec<u8>2288 pub fn to_bytes_be(&self) -> Vec<u8> {
2289 let mut v = self.to_bytes_le();
2290 v.reverse();
2291 v
2292 }
2293
2294 /// Returns the byte representation of the `BigUint` in little-endian byte order.
2295 ///
2296 /// # Examples
2297 ///
2298 /// ```
2299 /// use num_bigint_dig::BigUint;
2300 ///
2301 /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
2302 /// assert_eq!(i.to_bytes_le(), vec![101, 4]);
2303 /// ```
2304 #[inline]
to_bytes_le(&self) -> Vec<u8>2305 pub fn to_bytes_le(&self) -> Vec<u8> {
2306 if self.is_zero() {
2307 vec![0]
2308 } else {
2309 to_bitwise_digits_le(self, 8)
2310 }
2311 }
2312
2313 /// Returns the integer formatted as a string in the given radix.
2314 /// `radix` must be in the range `2...36`.
2315 ///
2316 /// # Examples
2317 ///
2318 /// ```
2319 /// use num_bigint_dig::BigUint;
2320 ///
2321 /// let i = BigUint::parse_bytes(b"ff", 16).unwrap();
2322 /// assert_eq!(i.to_str_radix(16), "ff");
2323 /// ```
    #[inline]
    pub fn to_str_radix(&self, radix: u32) -> String {
        // Digits come back least-significant first; flip into reading order.
        let mut v = to_str_radix_reversed(self, radix);
        v.reverse();
        // SAFETY: assumes `to_str_radix_reversed` emits only ASCII
        // digit/letter bytes (radix <= 36), making the buffer valid UTF-8 —
        // TODO confirm against that helper if its alphabet ever changes.
        unsafe { String::from_utf8_unchecked(v) }
    }
2330
2331 /// Returns the integer in the requested base in big-endian digit order.
2332 /// The output is not given in a human readable alphabet but as a zero
2333 /// based u8 number.
2334 /// `radix` must be in the range `2...256`.
2335 ///
2336 /// # Examples
2337 ///
2338 /// ```
2339 /// use num_bigint_dig::BigUint;
2340 ///
2341 /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_be(159),
2342 /// vec![2, 94, 27]);
2343 /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
2344 /// ```
2345 #[inline]
to_radix_be(&self, radix: u32) -> Vec<u8>2346 pub fn to_radix_be(&self, radix: u32) -> Vec<u8> {
2347 let mut v = to_radix_le(self, radix);
2348 v.reverse();
2349 v
2350 }
2351
2352 /// Returns the integer in the requested base in little-endian digit order.
2353 /// The output is not given in a human readable alphabet but as a zero
2354 /// based u8 number.
2355 /// `radix` must be in the range `2...256`.
2356 ///
2357 /// # Examples
2358 ///
2359 /// ```
2360 /// use num_bigint_dig::BigUint;
2361 ///
2362 /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_le(159),
2363 /// vec![27, 94, 2]);
2364 /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
2365 /// ```
    #[inline]
    pub fn to_radix_le(&self, radix: u32) -> Vec<u8> {
        // Thin wrapper over the module-level free function of the same name.
        to_radix_le(self, radix)
    }
2370
2371 /// Determines the fewest bits necessary to express the `BigUint`.
2372 #[inline]
bits(&self) -> usize2373 pub fn bits(&self) -> usize {
2374 if self.is_zero() {
2375 return 0;
2376 }
2377 let zeros = self.data.last().unwrap().leading_zeros();
2378 self.data.len() * big_digit::BITS - zeros as usize
2379 }
2380
2381 /// Strips off trailing zero bigdigits - comparisons require the last element in the vector to
2382 /// be nonzero.
2383 #[inline]
normalize(&mut self)2384 pub(crate) fn normalize(&mut self) {
2385 while let Some(&0) = self.data.last() {
2386 self.data.pop();
2387 }
2388 }
2389
2390 /// Returns a normalized `BigUint`.
    #[inline]
    pub(crate) fn normalized(mut self) -> BigUint {
        // By-value convenience wrapper around `normalize`, for call chaining.
        self.normalize();
        self
    }
2396
2397 /// Returns `(self ^ exponent) % modulus`.
2398 ///
2399 /// Panics if the modulus is zero.
modpow(&self, exponent: &Self, modulus: &Self) -> Self2400 pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
2401 assert!(!modulus.is_zero(), "divide by zero!");
2402
2403 // For an odd modulus, we can use Montgomery multiplication in base 2^32.
2404 if modulus.is_odd() {
2405 return monty_modpow(self, exponent, modulus);
2406 }
2407
2408 // Otherwise do basically the same as `num::pow`, but with a modulus.
2409 let one = BigUint::one();
2410 if exponent.is_zero() {
2411 return one;
2412 }
2413
2414 let mut base = self % modulus;
2415 let mut exp = exponent.clone();
2416 while exp.is_even() {
2417 base = &base * &base % modulus;
2418 exp >>= 1;
2419 }
2420 if exp == one {
2421 return base;
2422 }
2423
2424 let mut acc = base.clone();
2425 while exp > one {
2426 exp >>= 1;
2427 base = &base * &base % modulus;
2428 if exp.is_odd() {
2429 acc = acc * &base % modulus;
2430 }
2431 }
2432 acc
2433 }
2434
2435 /// Returns the truncated principal square root of `self` --
2436 /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt)
sqrt(&self) -> Self2437 pub fn sqrt(&self) -> Self {
2438 Roots::sqrt(self)
2439 }
2440
2441 /// Returns the truncated principal cube root of `self` --
2442 /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
cbrt(&self) -> Self2443 pub fn cbrt(&self) -> Self {
2444 Roots::cbrt(self)
2445 }
2446
2447 /// Returns the truncated principal `n`th root of `self` --
2448 /// see [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
nth_root(&self, n: u32) -> Self2449 pub fn nth_root(&self, n: u32) -> Self {
2450 Roots::nth_root(self, n)
2451 }
2452
trailing_zeros(&self) -> Option<usize>2453 pub fn trailing_zeros(&self) -> Option<usize> {
2454 trailing_zeros(self)
2455 }
2456
2457 /// Sets the value to the provided digit, reusing internal storage.
set_digit(&mut self, digit: BigDigit)2458 pub fn set_digit(&mut self, digit: BigDigit) {
2459 if self.is_zero() {
2460 self.data.resize(1, digit);
2461 } else {
2462 self.data.resize(1, 0);
2463 self.data[0] = digit;
2464 }
2465 }
2466 }
2467
2468 /// Returns the number of least-significant bits that are zero,
2469 /// or `None` if the entire number is zero.
trailing_zeros(u: &BigUint) -> Option<usize>2470 pub fn trailing_zeros(u: &BigUint) -> Option<usize> {
2471 u.data
2472 .iter()
2473 .enumerate()
2474 .find(|&(_, &digit)| digit != 0)
2475 .map(|(i, digit)| i * big_digit::BITS + digit.trailing_zeros() as usize)
2476 }
2477
// Provide `Sum` and `Product` implementations for iterators of `BigUint`
// via the crate-local helper macros.
impl_sum_iter_type!(BigUint);
impl_product_iter_type!(BigUint);
2480
/// Access to the raw little-endian digit storage of a big-integer type.
pub trait IntDigits {
    /// Borrows the digits, least significant first.
    fn digits(&self) -> &[BigDigit];
    /// Mutably borrows the underlying digit vector.
    fn digits_mut(&mut self) -> &mut SmallVec<[BigDigit; VEC_SIZE]>;
    /// Strips trailing zero digits so the top digit is nonzero.
    fn normalize(&mut self);
    /// Current capacity of the digit storage, in digits.
    fn capacity(&self) -> usize;
    /// Number of stored digits.
    fn len(&self) -> usize;
}
2488
impl IntDigits for BigUint {
    // All methods simply expose the underlying `SmallVec` of digits.
    #[inline]
    fn digits(&self) -> &[BigDigit] {
        &self.data
    }
    #[inline]
    fn digits_mut(&mut self) -> &mut SmallVec<[BigDigit; VEC_SIZE]> {
        &mut self.data
    }
    #[inline]
    fn normalize(&mut self) {
        self.normalize();
    }
    #[inline]
    fn capacity(&self) -> usize {
        self.data.capacity()
    }
    #[inline]
    fn len(&self) -> usize {
        self.data.len()
    }
}
2511
/// Combine four `u32`s into a single `u128`.
///
/// `a` is the most significant word and `d` the least.
#[cfg(has_i128)]
#[inline]
#[allow(dead_code)]
fn u32_to_u128(a: u32, b: u32, c: u32, d: u32) -> u128 {
    (u128::from(a) << 96) | (u128::from(b) << 64) | (u128::from(c) << 32) | u128::from(d)
}
2519
/// Split a single `u128` into four `u32`.
///
/// The words are returned most significant first.
#[cfg(has_i128)]
#[inline]
#[allow(dead_code)]
fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) {
    let hi = (n >> 96) as u32;
    let mid_hi = (n >> 64) as u32;
    let mid_lo = (n >> 32) as u32;
    let lo = n as u32;
    (hi, mid_hi, mid_lo, lo)
}
2532
#[cfg(feature = "serde")]
#[cfg(not(feature = "u64_digit"))]
impl serde::Serialize for BigUint {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Note: do not change the serialization format, or it may break forward
        // and backward compatibility of serialized data! If we ever change the
        // internal representation, we should still serialize in base-`u32`.
        //
        // With 32-bit digits the internal storage is already base-`u32`, so
        // the digit slice (little-endian digit order) is serialized directly.
        let data: &[u32] = &self.data.as_slice();
        data.serialize(serializer)
    }
}
2547
#[cfg(feature = "serde")]
#[cfg(feature = "u64_digit")]
impl serde::Serialize for BigUint {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Serialize in base-`u32` so the wire format matches the
        // non-`u64_digit` representation (see the note there: the format
        // must stay stable for forward/backward compatibility).
        let last = if self.data.is_empty() {
            0
        } else {
            self.data.len() - 1
        };
        let data: Vec<u32> = self
            .data
            .iter()
            .enumerate()
            .flat_map(|(i, n)| {
                // Each 64-bit digit splits into two u32 halves, low first.
                // For the most significant digit, skip the high half when the
                // digit fits in a u32 (`<=`, not `<`: a top digit equal to
                // `u32::MAX` must serialize as a single word, otherwise a
                // trailing zero u32 is emitted and the output differs from
                // the normalized base-`u32` form).
                if i == last && n <= &(u32::MAX as u64) {
                    vec![*n as u32]
                } else {
                    vec![*n as u32, (n >> 32) as u32]
                }
            })
            .collect();
        data.serialize(serializer)
    }
}
2575
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for BigUint {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // The wire format is a sequence of base-2^32 digits, least
        // significant first, regardless of the internal digit size.
        let data: Vec<u32> = Vec::deserialize(deserializer)?;
        // `BigUint::new` converts the u32 digits to the native digit size;
        // presumably it also normalizes, so trailing zero words in the input
        // are tolerated — TODO confirm against its definition.
        Ok(BigUint::new(data))
    }
}
2586
/// Returns `(base, power)` where `base == radix.pow(power)` is the greatest
/// power of the radix that fits in a `BigDigit`.
///
/// Only non-power-of-two radices are supported here; powers of two take the
/// bitwise conversion paths instead (see the `debug_assert!` below).
#[inline]
fn get_radix_base(radix: u32) -> (BigDigit, usize) {
    debug_assert!(
        2 <= radix && radix <= 256,
        "The radix must be within 2...256"
    );
    debug_assert!(!radix.is_power_of_two());

    // To generate this table:
    //    for radix in 2u64..257 {
    //        let mut power = big_digit::BITS / fls(radix as u64);
    //        let mut base = radix.pow(power as u32);
    //
    //        while let Some(b) = base.checked_mul(radix) {
    //            if b > big_digit::MAX {
    //                break;
    //            }
    //            base = b;
    //            power += 1;
    //        }
    //
    //        println!("({:10}, {:2}), // {:2}", base, power, radix);
    //    }
    // and
    //    for radix in 2u64..257 {
    //        let mut power = 64 / fls(radix as u64);
    //        let mut base = radix.pow(power as u32);
    //
    //        while let Some(b) = base.checked_mul(radix) {
    //            base = b;
    //            power += 1;
    //        }
    //
    //        println!("({:20}, {:2}), // {:2}", base, power, radix);
    //    }
    match big_digit::BITS {
        // 32-bit digits: bases fit in a u32. Power-of-two radices are left
        // as (0, 0) placeholders — they are unreachable per the debug_assert.
        32 => {
            const BASES: [(u32, usize); 257] = [
                (0, 0),
                (0, 0),
                (0, 0), // 2
                (3_486_784_401, 20), // 3
                (0, 0), // 4
                (1_220_703_125, 13), // 5
                (2_176_782_336, 12), // 6
                (1_977_326_743, 11), // 7
                (0, 0), // 8
                (3_486_784_401, 10), // 9
                (1_000_000_000, 9), // 10
                (2_357_947_691, 9), // 11
                (429_981_696, 8), // 12
                (815_730_721, 8), // 13
                (1_475_789_056, 8), // 14
                (2_562_890_625, 8), // 15
                (0, 0), // 16
                (410_338_673, 7), // 17
                (612_220_032, 7), // 18
                (893_871_739, 7), // 19
                (1_280_000_000, 7), // 20
                (1_801_088_541, 7), // 21
                (2_494_357_888, 7), // 22
                (3_404_825_447, 7), // 23
                (191_102_976, 6), // 24
                (244_140_625, 6), // 25
                (308_915_776, 6), // 26
                (387_420_489, 6), // 27
                (481_890_304, 6), // 28
                (594_823_321, 6), // 29
                (729_000_000, 6), // 30
                (887_503_681, 6), // 31
                (0, 0), // 32
                (1_291_467_969, 6), // 33
                (1_544_804_416, 6), // 34
                (1_838_265_625, 6), // 35
                (2_176_782_336, 6), // 36
                (2_565_726_409, 6), // 37
                (3_010_936_384, 6), // 38
                (3_518_743_761, 6), // 39
                (4_096_000_000, 6), // 40
                (115_856_201, 5), // 41
                (130_691_232, 5), // 42
                (147_008_443, 5), // 43
                (164_916_224, 5), // 44
                (184_528_125, 5), // 45
                (205_962_976, 5), // 46
                (229_345_007, 5), // 47
                (254_803_968, 5), // 48
                (282_475_249, 5), // 49
                (312_500_000, 5), // 50
                (345_025_251, 5), // 51
                (380_204_032, 5), // 52
                (418_195_493, 5), // 53
                (459_165_024, 5), // 54
                (503_284_375, 5), // 55
                (550_731_776, 5), // 56
                (601_692_057, 5), // 57
                (656_356_768, 5), // 58
                (714_924_299, 5), // 59
                (777_600_000, 5), // 60
                (844_596_301, 5), // 61
                (916_132_832, 5), // 62
                (992_436_543, 5), // 63
                (0, 0), // 64
                (1_160_290_625, 5), // 65
                (1_252_332_576, 5), // 66
                (1_350_125_107, 5), // 67
                (1_453_933_568, 5), // 68
                (1_564_031_349, 5), // 69
                (1_680_700_000, 5), // 70
                (1_804_229_351, 5), // 71
                (1_934_917_632, 5), // 72
                (2_073_071_593, 5), // 73
                (2_219_006_624, 5), // 74
                (2_373_046_875, 5), // 75
                (2_535_525_376, 5), // 76
                (2_706_784_157, 5), // 77
                (2_887_174_368, 5), // 78
                (3_077_056_399, 5), // 79
                (3_276_800_000, 5), // 80
                (3_486_784_401, 5), // 81
                (3_707_398_432, 5), // 82
                (3_939_040_643, 5), // 83
                (4_182_119_424, 5), // 84
                (52_200_625, 4), // 85
                (54_700_816, 4), // 86
                (57_289_761, 4), // 87
                (59_969_536, 4), // 88
                (62_742_241, 4), // 89
                (65_610_000, 4), // 90
                (68_574_961, 4), // 91
                (71_639_296, 4), // 92
                (74_805_201, 4), // 93
                (78_074_896, 4), // 94
                (81_450_625, 4), // 95
                (84_934_656, 4), // 96
                (88_529_281, 4), // 97
                (92_236_816, 4), // 98
                (96_059_601, 4), // 99
                (100_000_000, 4), // 100
                (104_060_401, 4), // 101
                (108_243_216, 4), // 102
                (112_550_881, 4), // 103
                (116_985_856, 4), // 104
                (121_550_625, 4), // 105
                (126_247_696, 4), // 106
                (131_079_601, 4), // 107
                (136_048_896, 4), // 108
                (141_158_161, 4), // 109
                (146_410_000, 4), // 110
                (151_807_041, 4), // 111
                (157_351_936, 4), // 112
                (163_047_361, 4), // 113
                (168_896_016, 4), // 114
                (174_900_625, 4), // 115
                (181_063_936, 4), // 116
                (187_388_721, 4), // 117
                (193_877_776, 4), // 118
                (200_533_921, 4), // 119
                (207_360_000, 4), // 120
                (214_358_881, 4), // 121
                (221_533_456, 4), // 122
                (228_886_641, 4), // 123
                (236_421_376, 4), // 124
                (244_140_625, 4), // 125
                (252_047_376, 4), // 126
                (260_144_641, 4), // 127
                (0, 0), // 128
                (276_922_881, 4), // 129
                (285_610_000, 4), // 130
                (294_499_921, 4), // 131
                (303_595_776, 4), // 132
                (312_900_721, 4), // 133
                (322_417_936, 4), // 134
                (332_150_625, 4), // 135
                (342_102_016, 4), // 136
                (352_275_361, 4), // 137
                (362_673_936, 4), // 138
                (373_301_041, 4), // 139
                (384_160_000, 4), // 140
                (395_254_161, 4), // 141
                (406_586_896, 4), // 142
                (418_161_601, 4), // 143
                (429_981_696, 4), // 144
                (442_050_625, 4), // 145
                (454_371_856, 4), // 146
                (466_948_881, 4), // 147
                (479_785_216, 4), // 148
                (492_884_401, 4), // 149
                (506_250_000, 4), // 150
                (519_885_601, 4), // 151
                (533_794_816, 4), // 152
                (547_981_281, 4), // 153
                (562_448_656, 4), // 154
                (577_200_625, 4), // 155
                (592_240_896, 4), // 156
                (607_573_201, 4), // 157
                (623_201_296, 4), // 158
                (639_128_961, 4), // 159
                (655_360_000, 4), // 160
                (671_898_241, 4), // 161
                (688_747_536, 4), // 162
                (705_911_761, 4), // 163
                (723_394_816, 4), // 164
                (741_200_625, 4), // 165
                (759_333_136, 4), // 166
                (777_796_321, 4), // 167
                (796_594_176, 4), // 168
                (815_730_721, 4), // 169
                (835_210_000, 4), // 170
                (855_036_081, 4), // 171
                (875_213_056, 4), // 172
                (895_745_041, 4), // 173
                (916_636_176, 4), // 174
                (937_890_625, 4), // 175
                (959_512_576, 4), // 176
                (981_506_241, 4), // 177
                (1_003_875_856, 4), // 178
                (1_026_625_681, 4), // 179
                (1_049_760_000, 4), // 180
                (1_073_283_121, 4), // 181
                (1_097_199_376, 4), // 182
                (1_121_513_121, 4), // 183
                (1_146_228_736, 4), // 184
                (1_171_350_625, 4), // 185
                (1_196_883_216, 4), // 186
                (1_222_830_961, 4), // 187
                (1_249_198_336, 4), // 188
                (1_275_989_841, 4), // 189
                (1_303_210_000, 4), // 190
                (1_330_863_361, 4), // 191
                (1_358_954_496, 4), // 192
                (1_387_488_001, 4), // 193
                (1_416_468_496, 4), // 194
                (1_445_900_625, 4), // 195
                (1_475_789_056, 4), // 196
                (1_506_138_481, 4), // 197
                (1_536_953_616, 4), // 198
                (1_568_239_201, 4), // 199
                (1_600_000_000, 4), // 200
                (1_632_240_801, 4), // 201
                (1_664_966_416, 4), // 202
                (1_698_181_681, 4), // 203
                (1_731_891_456, 4), // 204
                (1_766_100_625, 4), // 205
                (1_800_814_096, 4), // 206
                (1_836_036_801, 4), // 207
                (1_871_773_696, 4), // 208
                (1_908_029_761, 4), // 209
                (1_944_810_000, 4), // 210
                (1_982_119_441, 4), // 211
                (2_019_963_136, 4), // 212
                (2_058_346_161, 4), // 213
                (2_097_273_616, 4), // 214
                (2_136_750_625, 4), // 215
                (2_176_782_336, 4), // 216
                (2_217_373_921, 4), // 217
                (2_258_530_576, 4), // 218
                (2_300_257_521, 4), // 219
                (2_342_560_000, 4), // 220
                (2_385_443_281, 4), // 221
                (2_428_912_656, 4), // 222
                (2_472_973_441, 4), // 223
                (2_517_630_976, 4), // 224
                (2_562_890_625, 4), // 225
                (2_608_757_776, 4), // 226
                (2_655_237_841, 4), // 227
                (2_702_336_256, 4), // 228
                (2_750_058_481, 4), // 229
                (2_798_410_000, 4), // 230
                (2_847_396_321, 4), // 231
                (2_897_022_976, 4), // 232
                (2_947_295_521, 4), // 233
                (2_998_219_536, 4), // 234
                (3_049_800_625, 4), // 235
                (3_102_044_416, 4), // 236
                (3_154_956_561, 4), // 237
                (3_208_542_736, 4), // 238
                (3_262_808_641, 4), // 239
                (3_317_760_000, 4), // 240
                (3_373_402_561, 4), // 241
                (3_429_742_096, 4), // 242
                (3_486_784_401, 4), // 243
                (3_544_535_296, 4), // 244
                (3_603_000_625, 4), // 245
                (3_662_186_256, 4), // 246
                (3_722_098_081, 4), // 247
                (3_782_742_016, 4), // 248
                (3_844_124_001, 4), // 249
                (3_906_250_000, 4), // 250
                (3_969_126_001, 4), // 251
                (4_032_758_016, 4), // 252
                (4_097_152_081, 4), // 253
                (4_162_314_256, 4), // 254
                (4_228_250_625, 4), // 255
                (0, 0), // 256
            ];

            let (base, power) = BASES[radix as usize];
            (base as BigDigit, power)
        }
        // 64-bit digits: same table, computed for u64. Power-of-two entries
        // here are real values but still unreachable per the debug_assert.
        64 => {
            const BASES: [(u64, usize); 257] = [
                (0, 0),
                (0, 0),
                (9_223_372_036_854_775_808, 63), // 2
                (12_157_665_459_056_928_801, 40), // 3
                (4_611_686_018_427_387_904, 31), // 4
                (7_450_580_596_923_828_125, 27), // 5
                (4_738_381_338_321_616_896, 24), // 6
                (3_909_821_048_582_988_049, 22), // 7
                (9_223_372_036_854_775_808, 21), // 8
                (12_157_665_459_056_928_801, 20), // 9
                (10_000_000_000_000_000_000, 19), // 10
                (5_559_917_313_492_231_481, 18), // 11
                (2_218_611_106_740_436_992, 17), // 12
                (8_650_415_919_381_337_933, 17), // 13
                (2_177_953_337_809_371_136, 16), // 14
                (6_568_408_355_712_890_625, 16), // 15
                (1_152_921_504_606_846_976, 15), // 16
                (2_862_423_051_509_815_793, 15), // 17
                (6_746_640_616_477_458_432, 15), // 18
                (15_181_127_029_874_798_299, 15), // 19
                (1_638_400_000_000_000_000, 14), // 20
                (3_243_919_932_521_508_681, 14), // 21
                (6_221_821_273_427_820_544, 14), // 22
                (11_592_836_324_538_749_809, 14), // 23
                (876_488_338_465_357_824, 13), // 24
                (1_490_116_119_384_765_625, 13), // 25
                (2_481_152_873_203_736_576, 13), // 26
                (4_052_555_153_018_976_267, 13), // 27
                (6_502_111_422_497_947_648, 13), // 28
                (10_260_628_712_958_602_189, 13), // 29
                (15_943_230_000_000_000_000, 13), // 30
                (787_662_783_788_549_761, 12), // 31
                (1_152_921_504_606_846_976, 12), // 32
                (1_667_889_514_952_984_961, 12), // 33
                (2_386_420_683_693_101_056, 12), // 34
                (3_379_220_508_056_640_625, 12), // 35
                (4_738_381_338_321_616_896, 12), // 36
                (6_582_952_005_840_035_281, 12), // 37
                (9_065_737_908_494_995_456, 12), // 38
                (12_381_557_655_576_425_121, 12), // 39
                (16_777_216_000_000_000_000, 12), // 40
                (550_329_031_716_248_441, 11), // 41
                (717_368_321_110_468_608, 11), // 42
                (929_293_739_471_222_707, 11), // 43
                (1_196_683_881_290_399_744, 11), // 44
                (1_532_278_301_220_703_125, 11), // 45
                (1_951_354_384_207_722_496, 11), // 46
                (2_472_159_215_084_012_303, 11), // 47
                (3_116_402_981_210_161_152, 11), // 48
                (3_909_821_048_582_988_049, 11), // 49
                (4_882_812_500_000_000_000, 11), // 50
                (6_071_163_615_208_263_051, 11), // 51
                (7_516_865_509_350_965_248, 11), // 52
                (9_269_035_929_372_191_597, 11), // 53
                (11_384_956_040_305_711_104, 11), // 54
                (13_931_233_916_552_734_375, 11), // 55
                (16_985_107_389_382_393_856, 11), // 56
                (362_033_331_456_891_249, 10), // 57
                (430_804_206_899_405_824, 10), // 58
                (511_116_753_300_641_401, 10), // 59
                (604_661_760_000_000_000, 10), // 60
                (713_342_911_662_882_601, 10), // 61
                (839_299_365_868_340_224, 10), // 62
                (984_930_291_881_790_849, 10), // 63
                (1_152_921_504_606_846_976, 10), // 64
                (1_346_274_334_462_890_625, 10), // 65
                (1_568_336_880_910_795_776, 10), // 66
                (1_822_837_804_551_761_449, 10), // 67
                (2_113_922_820_157_210_624, 10), // 68
                (2_446_194_060_654_759_801, 10), // 69
                (2_824_752_490_000_000_000, 10), // 70
                (3_255_243_551_009_881_201, 10), // 71
                (3_743_906_242_624_487_424, 10), // 72
                (4_297_625_829_703_557_649, 10), // 73
                (4_923_990_397_355_877_376, 10), // 74
                (5_631_351_470_947_265_625, 10), // 75
                (6_428_888_932_339_941_376, 10), // 76
                (7_326_680_472_586_200_649, 10), // 77
                (8_335_775_831_236_199_424, 10), // 78
                (9_468_276_082_626_847_201, 10), // 79
                (10_737_418_240_000_000_000, 10), // 80
                (12_157_665_459_056_928_801, 10), // 81
                (13_744_803_133_596_058_624, 10), // 82
                (15_516_041_187_205_853_449, 10), // 83
                (17_490_122_876_598_091_776, 10), // 84
                (231_616_946_283_203_125, 9), // 85
                (257_327_417_311_663_616, 9), // 86
                (285_544_154_243_029_527, 9), // 87
                (316_478_381_828_866_048, 9), // 88
                (350_356_403_707_485_209, 9), // 89
                (387_420_489_000_000_000, 9), // 90
                (427_929_800_129_788_411, 9), // 91
                (472_161_363_286_556_672, 9), // 92
                (520_411_082_988_487_293, 9), // 93
                (572_994_802_228_616_704, 9), // 94
                (630_249_409_724_609_375, 9), // 95
                (692_533_995_824_480_256, 9), // 96
                (760_231_058_654_565_217, 9), // 97
                (833_747_762_130_149_888, 9), // 98
                (913_517_247_483_640_899, 9), // 99
                (1_000_000_000_000_000_000, 9), // 100
                (1_093_685_272_684_360_901, 9), // 101
                (1_195_092_568_622_310_912, 9), // 102
                (1_304_773_183_829_244_583, 9), // 103
                (1_423_311_812_421_484_544, 9), // 104
                (1_551_328_215_978_515_625, 9), // 105
                (1_689_478_959_002_692_096, 9), // 106
                (1_838_459_212_420_154_507, 9), // 107
                (1_999_004_627_104_432_128, 9), // 108
                (2_171_893_279_442_309_389, 9), // 109
                (2_357_947_691_000_000_000, 9), // 110
                (2_558_036_924_386_500_591, 9), // 111
                (2_773_078_757_450_186_752, 9), // 112
                (3_004_041_937_984_268_273, 9), // 113
                (3_251_948_521_156_637_184, 9), // 114
                (3_517_876_291_919_921_875, 9), // 115
                (3_802_961_274_698_203_136, 9), // 116
                (4_108_400_332_687_853_397, 9), // 117
                (4_435_453_859_151_328_768, 9), // 118
                (4_785_448_563_124_474_679, 9), // 119
                (5_159_780_352_000_000_000, 9), // 120
                (5_559_917_313_492_231_481, 9), // 121
                (5_987_402_799_531_080_192, 9), // 122
                (6_443_858_614_676_334_363, 9), // 123
                (6_930_988_311_686_938_624, 9), // 124
                (7_450_580_596_923_828_125, 9), // 125
                (8_004_512_848_309_157_376, 9), // 126
                (8_594_754_748_609_397_887, 9), // 127
                (9_223_372_036_854_775_808, 9), // 128
                (9_892_530_380_752_880_769, 9), // 129
                (10_604_499_373_000_000_000, 9), // 130
                (11_361_656_654_439_817_571, 9), // 131
                (12_166_492_167_065_567_232, 9), // 132
                (13_021_612_539_908_538_853, 9), // 133
                (13_929_745_610_903_012_864, 9), // 134
                (14_893_745_087_865_234_375, 9), // 135
                (15_916_595_351_771_938_816, 9), // 136
                (17_001_416_405_572_203_977, 9), // 137
                (18_151_468_971_815_029_248, 9), // 138
                (139_353_667_211_683_681, 8), // 139
                (147_578_905_600_000_000, 8), // 140
                (156_225_851_787_813_921, 8), // 141
                (165_312_903_998_914_816, 8), // 142
                (174_859_124_550_883_201, 8), // 143
                (184_884_258_895_036_416, 8), // 144
                (195_408_755_062_890_625, 8), // 145
                (206_453_783_524_884_736, 8), // 146
                (218_041_257_467_152_161, 8), // 147
                (230_193_853_492_166_656, 8), // 148
                (242_935_032_749_128_801, 8), // 149
                (256_289_062_500_000_000, 8), // 150
                (270_281_038_127_131_201, 8), // 151
                (284_936_905_588_473_856, 8), // 152
                (300_283_484_326_400_961, 8), // 153
                (316_348_490_636_206_336, 8), // 154
                (333_160_561_500_390_625, 8), // 155
                (350_749_278_894_882_816, 8), // 156
                (369_145_194_573_386_401, 8), // 157
                (388_379_855_336_079_616, 8), // 158
                (408_485_828_788_939_521, 8), // 159
                (429_496_729_600_000_000, 8), // 160
                (451_447_246_258_894_081, 8), // 161
                (474_373_168_346_071_296, 8), // 162
                (498_311_414_318_121_121, 8), // 163
                (523_300_059_815_673_856, 8), // 164
                (549_378_366_500_390_625, 8), // 165
                (576_586_811_427_594_496, 8), // 166
                (604_967_116_961_135_041, 8), // 167
                (634_562_281_237_118_976, 8), // 168
                (665_416_609_183_179_841, 8), // 169
                (697_575_744_100_000_000, 8), // 170
                (731_086_699_811_838_561, 8), // 171
                (765_997_893_392_859_136, 8), // 172
                (802_359_178_476_091_681, 8), // 173
                (840_221_879_151_902_976, 8), // 174
                (879_638_824_462_890_625, 8), // 175
                (920_664_383_502_155_776, 8), // 176
                (963_354_501_121_950_081, 8), // 177
                (1_007_766_734_259_732_736, 8), // 178
                (1_053_960_288_888_713_761, 8), // 179
                (1_101_996_057_600_000_000, 8), // 180
                (1_151_936_657_823_500_641, 8), // 181
                (1_203_846_470_694_789_376, 8), // 182
                (1_257_791_680_575_160_641, 8), // 183
                (1_313_840_315_232_157_696, 8), // 184
                (1_372_062_286_687_890_625, 8), // 185
                (1_432_529_432_742_502_656, 8), // 186
                (1_495_315_559_180_183_521, 8), // 187
                (1_560_496_482_665_168_896, 8), // 188
                (1_628_150_074_335_205_281, 8), // 189
                (1_698_356_304_100_000_000, 8), // 190
                (1_771_197_285_652_216_321, 8), // 191
                (1_846_757_322_198_614_016, 8), // 192
                (1_925_122_952_918_976_001, 8), // 193
                (2_006_383_000_160_502_016, 8), // 194
                (2_090_628_617_375_390_625, 8), // 195
                (2_177_953_337_809_371_136, 8), // 196
                (2_268_453_123_948_987_361, 8), // 197
                (2_362_226_417_735_475_456, 8), // 198
                (2_459_374_191_553_118_401, 8), // 199
                (2_560_000_000_000_000_000, 8), // 200
                (2_664_210_032_449_121_601, 8), // 201
                (2_772_113_166_407_885_056, 8), // 202
                (2_883_821_021_683_985_761, 8), // 203
                (2_999_448_015_365_799_936, 8), // 204
                (3_119_111_417_625_390_625, 8), // 205
                (3_242_931_408_352_297_216, 8), // 206
                (3_371_031_134_626_313_601, 8), // 207
                (3_503_536_769_037_500_416, 8), // 208
                (3_640_577_568_861_717_121, 8), // 209
                (3_782_285_936_100_000_000, 8), // 210
                (3_928_797_478_390_152_481, 8), // 211
                (4_080_251_070_798_954_496, 8), // 212
                (4_236_788_918_503_437_921, 8), // 213
                (4_398_556_620_369_715_456, 8), // 214
                (4_565_703_233_437_890_625, 8), // 215
                (4_738_381_338_321_616_896, 8), // 216
                (4_916_747_105_530_914_241, 8), // 217
                (5_100_960_362_726_891_776, 8), // 218
                (5_291_184_662_917_065_441, 8), // 219
                (5_487_587_353_600_000_000, 8), // 220
                (5_690_339_646_868_044_961, 8), // 221
                (5_899_616_690_476_974_336, 8), // 222
                (6_115_597_639_891_380_481, 8), // 223
                (6_338_465_731_314_712_576, 8), // 224
                (6_568_408_355_712_890_625, 8), // 225
                (6_805_617_133_840_466_176, 8), // 226
                (7_050_287_992_278_341_281, 8), // 227
                (7_302_621_240_492_097_536, 8), // 228
                (7_562_821_648_920_027_361, 8), // 229
                (7_831_098_528_100_000_000, 8), // 230
                (8_107_665_808_844_335_041, 8), // 231
                (8_392_742_123_471_896_576, 8), // 232
                (8_686_550_888_106_661_441, 8), // 233
                (8_989_320_386_052_055_296, 8), // 234
                (9_301_283_852_250_390_625, 8), // 235
                (9_622_679_558_836_781_056, 8), // 236
                (9_953_750_901_796_946_721, 8), // 237
                (10_294_746_488_738_365_696, 8), // 238
                (10_645_920_227_784_266_881, 8), // 239
                (11_007_531_417_600_000_000, 8), // 240
                (11_379_844_838_561_358_721, 8), // 241
                (11_763_130_845_074_473_216, 8), // 242
                (12_157_665_459_056_928_801, 8), // 243
                (12_563_730_464_589_807_616, 8), // 244
                (12_981_613_503_750_390_625, 8), // 245
                (13_411_608_173_635_297_536, 8), // 246
                (13_854_014_124_583_882_561, 8), // 247
                (14_309_137_159_611_744_256, 8), // 248
                (14_777_289_335_064_248_001, 8), // 249
                (15_258_789_062_500_000_000, 8), // 250
                (15_753_961_211_814_252_001, 8), // 251
                (16_263_137_215_612_256_256, 8), // 252
                (16_786_655_174_842_630_561, 8), // 253
                (17_324_859_965_700_833_536, 8), // 254
                (17_878_103_347_812_890_625, 8), // 255
                (72_057_594_037_927_936, 7), // 256
            ];

            let (base, power) = BASES[radix as usize];
            (base as BigDigit, power)
        }
        _ => panic!("Invalid bigdigit size"),
    }
}
3155
#[cfg(not(feature = "u64_digit"))]
#[test]
fn test_from_slice() {
    // With 32-bit digits, `from_slice` copies digits verbatim and strips
    // trailing zero digits (normalization).
    fn check(slice: &[u32], data: &[BigDigit]) {
        assert_eq!(&BigUint::from_slice(slice).data[..], data);
    }
    check(&[1], &[1]);
    check(&[0, 0, 0], &[]);
    check(&[1, 2, 0, 0], &[1, 2]);
    check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
    check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
    check(&[-1i32 as u32], &[-1i32 as BigDigit]);
}
3169
#[cfg(feature = "u64_digit")]
#[test]
fn test_from_slice() {
    // With 64-bit digits, adjacent u32 inputs pack in pairs:
    // [lo, hi] -> lo + (hi << 32), e.g. [1, 2] -> 8_589_934_593.
    fn check(slice: &[u32], data: &[BigDigit]) {
        assert_eq!(
            &BigUint::from_slice(slice).data[..],
            data,
            "from {:?}, to {:?}",
            slice,
            data
        );
    }
    check(&[1], &[1]);
    check(&[0, 0, 0], &[]);
    check(&[1, 2], &[8_589_934_593]);
    check(&[1, 2, 0, 0], &[8_589_934_593]);
    check(&[0, 0, 1, 2], &[0, 8_589_934_593]);
    check(&[0, 0, 1, 2, 0, 0], &[0, 8_589_934_593]);
    check(&[-1i32 as u32], &[(-1i32 as u32) as BigDigit]);
}
3190
#[test]
fn test_from_slice_native() {
    // `from_slice_native` takes digits in the native `BigDigit` width and
    // must strip trailing zero digits (normalization).
    fn check(slice: &[BigDigit], data: &[BigDigit]) {
        // `assert_eq!` (rather than `assert!(a == b)`) reports both values
        // on failure, matching the style of `test_from_slice` above.
        assert_eq!(&BigUint::from_slice_native(slice).data[..], data);
    }
    check(&[1], &[1]);
    check(&[0, 0, 0], &[]);
    check(&[1, 2, 0, 0], &[1, 2]);
    check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
    check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
    check(&[-1i32 as BigDigit], &[-1i32 as BigDigit]);
}
3203
#[test]
fn test_assign_from_slice_native() {
    // `assign_from_slice_native` must fully replace the previous contents
    // (not merge with them) and normalize trailing zeros.
    fn check(slice: &[BigDigit], data: &[BigDigit]) {
        let mut p = BigUint::from_slice_native(&[2627, 0, 9182, 42]);
        p.assign_from_slice_native(slice);
        // `assert_eq!` (rather than `assert!(a == b)`) reports both values
        // on failure, matching the style of `test_from_slice` above.
        assert_eq!(&p.data[..], data);
    }
    check(&[1], &[1]);
    check(&[0, 0, 0], &[]);
    check(&[1, 2, 0, 0], &[1, 2]);
    check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
    check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
    check(&[-1i32 as BigDigit], &[-1i32 as BigDigit]);
}
3218
#[cfg(has_i128)]
#[test]
fn test_u32_u128() {
    let m32 = u32::max_value();

    // Zero maps to four zero limbs.
    assert_eq!(u32_from_u128(0u128), (0, 0, 0, 0));

    // All-ones fills every limb.
    assert_eq!(u32_from_u128(u128::max_value()), (m32, m32, m32, m32));

    // Values fitting in fewer limbs leave the high limbs zero.
    assert_eq!(u32_from_u128(m32 as u128), (0, 0, 0, m32));
    assert_eq!(u32_from_u128(u64::max_value() as u128), (0, 0, m32, m32));

    // 2^64 - 1 + 2^32 - 1: the carry propagates into the third limb.
    assert_eq!(
        u32_from_u128((u64::max_value() as u128) + m32 as u128),
        (0, 1, 0, m32 - 1)
    );

    // 2 * 2^64 + 2^32.
    assert_eq!(u32_from_u128(36_893_488_151_714_070_528), (0, 2, 1, 0));
}
3250
#[cfg(has_i128)]
#[test]
fn test_u128_u32_roundtrip() {
    // Splitting a u128 into four u32 limbs and reassembling must be lossless.
    let values = [
        0u128,
        1u128,
        u64::max_value() as u128 * 3,
        u32::max_value() as u128,
        u64::max_value() as u128,
        (u64::max_value() as u128) + u32::max_value() as u128,
        u128::max_value(),
    ];

    for &val in values.iter() {
        let (a, b, c, d) = u32_from_u128(val);
        assert_eq!(u32_to_u128(a, b, c, d), val);
    }
}
3270
3271 // Mod Inverse
3272
3273 impl<'a> ModInverse<&'a BigUint> for BigUint {
3274 type Output = BigInt;
mod_inverse(self, m: &'a BigUint) -> Option<BigInt>3275 fn mod_inverse(self, m: &'a BigUint) -> Option<BigInt> {
3276 mod_inverse(Cow::Owned(self), Cow::Borrowed(m))
3277 }
3278 }
3279
3280 impl ModInverse<BigUint> for BigUint {
3281 type Output = BigInt;
mod_inverse(self, m: BigUint) -> Option<BigInt>3282 fn mod_inverse(self, m: BigUint) -> Option<BigInt> {
3283 mod_inverse(Cow::Owned(self), Cow::Owned(m))
3284 }
3285 }
3286
3287 impl<'a> ModInverse<&'a BigInt> for BigUint {
3288 type Output = BigInt;
mod_inverse(self, m: &'a BigInt) -> Option<BigInt>3289 fn mod_inverse(self, m: &'a BigInt) -> Option<BigInt> {
3290 mod_inverse(Cow::Owned(self), Cow::Owned(m.to_biguint().unwrap()))
3291 }
3292 }
3293 impl ModInverse<BigInt> for BigUint {
3294 type Output = BigInt;
mod_inverse(self, m: BigInt) -> Option<BigInt>3295 fn mod_inverse(self, m: BigInt) -> Option<BigInt> {
3296 mod_inverse(Cow::Owned(self), Cow::Owned(m.into_biguint().unwrap()))
3297 }
3298 }
3299
3300 impl<'a, 'b> ModInverse<&'b BigUint> for &'a BigUint {
3301 type Output = BigInt;
3302
mod_inverse(self, m: &'b BigUint) -> Option<BigInt>3303 fn mod_inverse(self, m: &'b BigUint) -> Option<BigInt> {
3304 mod_inverse(Cow::Borrowed(self), Cow::Borrowed(m))
3305 }
3306 }
3307
3308 impl<'a> ModInverse<BigUint> for &'a BigUint {
3309 type Output = BigInt;
3310
mod_inverse(self, m: BigUint) -> Option<BigInt>3311 fn mod_inverse(self, m: BigUint) -> Option<BigInt> {
3312 mod_inverse(Cow::Borrowed(self), Cow::Owned(m))
3313 }
3314 }
3315
3316 impl<'a, 'b> ModInverse<&'b BigInt> for &'a BigUint {
3317 type Output = BigInt;
3318
mod_inverse(self, m: &'b BigInt) -> Option<BigInt>3319 fn mod_inverse(self, m: &'b BigInt) -> Option<BigInt> {
3320 mod_inverse(Cow::Borrowed(self), Cow::Owned(m.to_biguint().unwrap()))
3321 }
3322 }
3323
3324 // Extended GCD
3325
3326 impl<'a> ExtendedGcd<&'a BigUint> for BigUint {
extended_gcd(self, other: &'a BigUint) -> (BigInt, BigInt, BigInt)3327 fn extended_gcd(self, other: &'a BigUint) -> (BigInt, BigInt, BigInt) {
3328 let (a, b, c) = extended_gcd(Cow::Owned(self), Cow::Borrowed(other), true);
3329 (a, b.unwrap(), c.unwrap())
3330 }
3331 }
3332
3333 impl<'a> ExtendedGcd<&'a BigInt> for BigUint {
extended_gcd(self, other: &'a BigInt) -> (BigInt, BigInt, BigInt)3334 fn extended_gcd(self, other: &'a BigInt) -> (BigInt, BigInt, BigInt) {
3335 let (a, b, c) = extended_gcd(
3336 Cow::Owned(self),
3337 Cow::Owned(other.to_biguint().unwrap()),
3338 true,
3339 );
3340 (a, b.unwrap(), c.unwrap())
3341 }
3342 }
3343
3344 impl<'a, 'b> ExtendedGcd<&'b BigInt> for &'a BigUint {
extended_gcd(self, other: &'b BigInt) -> (BigInt, BigInt, BigInt)3345 fn extended_gcd(self, other: &'b BigInt) -> (BigInt, BigInt, BigInt) {
3346 let (a, b, c) = extended_gcd(
3347 Cow::Borrowed(self),
3348 Cow::Owned(other.to_biguint().unwrap()),
3349 true,
3350 );
3351 (a, b.unwrap(), c.unwrap())
3352 }
3353 }
3354
3355 impl<'a, 'b> ExtendedGcd<&'b BigUint> for &'a BigUint {
extended_gcd(self, other: &'b BigUint) -> (BigInt, BigInt, BigInt)3356 fn extended_gcd(self, other: &'b BigUint) -> (BigInt, BigInt, BigInt) {
3357 let (a, b, c) = extended_gcd(Cow::Borrowed(self), Cow::Borrowed(other), true);
3358 (a, b.unwrap(), c.unwrap())
3359 }
3360 }
3361
#[test]
fn test_set_digit() {
    // `set_digit` must collapse any starting value — single-digit,
    // multi-digit, or zero — down to exactly one digit.
    let starts: &[&[u32]] = &[&[3], &[3, 2], &[]];
    for &start in starts {
        let mut a = BigUint::new(start.to_vec());
        a.set_digit(4);
        assert_eq!(a.data.len(), 1);
        assert_eq!(a.data[0], 4);
    }
}
3379