//! Building-blocks for arbitrary-precision math.
//!
//! These algorithms assume little-endian order for the large integer
//! buffers, so for a `vec![0, 1, 2, 3]`, `3` is the most significant limb,
//! and `0` is the least significant limb.

// We have a lot of higher-order, efficient math routines that may
// come in handy later. These aren't trivial to implement, so keep them.
#![allow(dead_code)]

use util::*;

// ALIASES
// -------

//  Type for a single limb of the big integer.
//
//  A limb is analogous to a digit in base10, except, it stores 32-bit
//  or 64-bit numbers instead.
//
//  This should cover all known 64-bit platforms supported by Rust.
//      https://forge.rust-lang.org/platform-support.html
//
//  Platforms where native 128-bit multiplication is explicitly supported:
//      - x86_64 (Supported via `MUL`).
//      - mips64 (Supported via `DMULTU`, whose result is read from `HI` and `LO`).
//
//  Platforms where native 64-bit multiplication is supported and the
//  high and low words of the product can be extracted:
//      - aarch64 (Requires `UMULH` and `MUL` to capture high and low bits).
//      - powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits).
//
//  Platforms where native 128-bit multiplication is not supported,
//  requiring software emulation:
//      - sparc64 (`UMUL` only supports double-word arguments).
cfg_if! {
if #[cfg(limb_width_64)] {
    pub type Limb = u64;
    type Wide = u128;
    type SignedWide = i128;
} else {
    pub type Limb = u32;
    type Wide = u64;
    type SignedWide = i64;
}}   // cfg_if

/// Cast to limb type.
#[inline]
pub(super) fn as_limb<T: Integer>(t: T)
    -> Limb
{
    as_cast(t)
}

/// Cast to wide type.
#[inline]
fn as_wide<T: Integer>(t: T)
    -> Wide
{
    as_cast(t)
}

/// Cast to signed wide type.
#[inline]
fn as_signed_wide<T: Integer>(t: T)
    -> SignedWide
{
    as_cast(t)
}

// SPLIT
// -----

/// Split u16 into limbs, in little-endian order.
#[inline]
fn split_u16(x: u16) -> [Limb; 1] {
    [as_limb(x)]
}

/// Split u32 into limbs, in little-endian order.
#[inline]
fn split_u32(x: u32) -> [Limb; 1] {
    [as_limb(x)]
}

/// Split u64 into limbs, in little-endian order.
#[cfg(limb_width_32)]
#[inline]
fn split_u64(x: u64) -> [Limb; 2] {
    [as_limb(x), as_limb(x >> 32)]
}

/// Split u64 into limbs, in little-endian order.
#[cfg(limb_width_64)]
#[inline]
fn split_u64(x: u64) -> [Limb; 1] {
    [as_limb(x)]
}

/// Split u128 into limbs, in little-endian order.
#[cfg(all(has_i128, limb_width_32))]
#[inline]
fn split_u128(x: u128) -> [Limb; 4] {
    [as_limb(x), as_limb(x >> 32), as_limb(x >> 64), as_limb(x >> 96)]
}

/// Split u128 into limbs, in little-endian order.
#[cfg(all(has_i128, limb_width_64))]
#[inline]
fn split_u128(x: u128) -> [Limb; 2] {
    [as_limb(x), as_limb(x >> 64)]
}
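
// A minimal sketch of the little-endian split order: with 32-bit limbs, the
// low word comes first. Hypothetical test, gated on the same cfgs used by
// `split_u64` above.
#[cfg(all(test, limb_width_32))]
#[test]
fn split_u64_is_little_endian() {
    // 0x0000_0001_0000_0002 has high word 1 and low word 2.
    assert_eq!(split_u64(0x0000_0001_0000_0002), [2, 1]);
}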

// HI BITS
// -------

// NONZERO

/// Check if any of the remaining bits are non-zero.
#[inline]
pub fn nonzero<T: Integer>(x: &[T], rindex: usize) -> bool {
    let len = x.len();
    let slc = &x[..len-rindex];
    slc.iter().rev().any(|&x| x != T::ZERO)
}
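
// A small sketch of the `rindex` semantics: the top `rindex` limbs are
// skipped, and only the remaining (less-significant) limbs are checked.
// Hypothetical test for illustration.
#[cfg(test)]
#[test]
fn nonzero_skips_top_limbs() {
    // Little-endian: `1` is the most-significant limb.
    assert!(nonzero(&[0u32, 0, 1], 0));
    // Skipping the top limb leaves only zeros.
    assert!(!nonzero(&[0u32, 0, 1], 1));
}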

// HI16

/// Shift 16-bit integer to high 16-bits.
#[inline]
fn u16_to_hi16_1(r0: u16) -> (u16, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}

/// Shift 2 16-bit integers to high 16-bits.
#[inline]
fn u16_to_hi16_2(r0: u16, r1: u16) -> (u16, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    let rs = 16 - ls;
    let v = match ls {
        0 => r0,
        _ => (r0 << ls) | (r1 >> rs),
    };
    let n = r1 << ls != 0;
    (v, n)
}

/// Shift 32-bit integer to high 16-bits.
#[inline]
fn u32_to_hi16_1(r0: u32) -> (u16, bool) {
    let r0 = u32_to_hi32_1(r0).0;
    ((r0 >> 16).as_u16(), r0.as_u16() != 0)
}

/// Shift 2 32-bit integers to high 16-bits.
#[inline]
fn u32_to_hi16_2(r0: u32, r1: u32) -> (u16, bool) {
    let (r0, n) = u32_to_hi32_2(r0, r1);
    ((r0 >> 16).as_u16(), n || r0.as_u16() != 0)
}

/// Shift 64-bit integer to high 16-bits.
#[inline]
fn u64_to_hi16_1(r0: u64) -> (u16, bool) {
    let r0 = u64_to_hi64_1(r0).0;
    ((r0 >> 48).as_u16(), r0.as_u16() != 0)
}

/// Shift 2 64-bit integers to high 16-bits.
#[inline]
fn u64_to_hi16_2(r0: u64, r1: u64) -> (u16, bool) {
    let (r0, n) = u64_to_hi64_2(r0, r1);
    ((r0 >> 48).as_u16(), n || r0.as_u16() != 0)
}

/// Trait to export the high 16-bits from a little-endian slice.
trait Hi16<T>: SliceLike<T> {
    /// Get the hi16 bits from a 1-limb slice.
    fn hi16_1(&self) -> (u16, bool);

    /// Get the hi16 bits from a 2-limb slice.
    fn hi16_2(&self) -> (u16, bool);

    /// High-level exporter to extract the high 16 bits from a little-endian slice.
    fn hi16(&self) -> (u16, bool) {
        match self.len() {
            0 => (0, false),
            1 => self.hi16_1(),
            _ => self.hi16_2(),
        }
    }
}

impl Hi16<u16> for [u16] {
    #[inline]
    fn hi16_1(&self) -> (u16, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0];
        u16_to_hi16_1(r0)
    }

    #[inline]
    fn hi16_2(&self) -> (u16, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0];
        let r1 = rview[1];
        let (v, n) = u16_to_hi16_2(r0, r1);
        (v, n || nonzero(self, 2))
    }
}

impl Hi16<u32> for [u32] {
    #[inline]
    fn hi16_1(&self) -> (u16, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0];
        u32_to_hi16_1(r0)
    }

    #[inline]
    fn hi16_2(&self) -> (u16, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0];
        let r1 = rview[1];
        let (v, n) = u32_to_hi16_2(r0, r1);
        (v, n || nonzero(self, 2))
    }
}

impl Hi16<u64> for [u64] {
    #[inline]
    fn hi16_1(&self) -> (u16, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0];
        u64_to_hi16_1(r0)
    }

    #[inline]
    fn hi16_2(&self) -> (u16, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0];
        let r1 = rview[1];
        let (v, n) = u64_to_hi16_2(r0, r1);
        (v, n || nonzero(self, 2))
    }
}

// HI32

/// Shift 32-bit integer to high 32-bits.
#[inline]
fn u32_to_hi32_1(r0: u32) -> (u32, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}

/// Shift 2 32-bit integers to high 32-bits.
#[inline]
fn u32_to_hi32_2(r0: u32, r1: u32) -> (u32, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    let rs = 32 - ls;
    let v = match ls {
        0 => r0,
        _ => (r0 << ls) | (r1 >> rs),
    };
    let n = r1 << ls != 0;
    (v, n)
}

/// Shift 64-bit integer to high 32-bits.
#[inline]
fn u64_to_hi32_1(r0: u64) -> (u32, bool) {
    let r0 = u64_to_hi64_1(r0).0;
    ((r0 >> 32).as_u32(), r0.as_u32() != 0)
}

/// Shift 2 64-bit integers to high 32-bits.
#[inline]
fn u64_to_hi32_2(r0: u64, r1: u64) -> (u32, bool) {
    let (r0, n) = u64_to_hi64_2(r0, r1);
    ((r0 >> 32).as_u32(), n || r0.as_u32() != 0)
}

/// Trait to export the high 32-bits from a little-endian slice.
trait Hi32<T>: SliceLike<T> {
    /// Get the hi32 bits from a 1-limb slice.
    fn hi32_1(&self) -> (u32, bool);

    /// Get the hi32 bits from a 2-limb slice.
    fn hi32_2(&self) -> (u32, bool);

    /// Get the hi32 bits from a 3-limb slice.
    fn hi32_3(&self) -> (u32, bool);

    /// High-level exporter to extract the high 32 bits from a little-endian slice.
    fn hi32(&self) -> (u32, bool) {
        match self.len() {
            0 => (0, false),
            1 => self.hi32_1(),
            2 => self.hi32_2(),
            _ => self.hi32_3(),
        }
    }
}

impl Hi32<u16> for [u16] {
    #[inline]
    fn hi32_1(&self) -> (u32, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        u32_to_hi32_1(rview[0].as_u32())
    }

    #[inline]
    fn hi32_2(&self) -> (u32, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0].as_u32() << 16;
        let r1 = rview[1].as_u32();
        u32_to_hi32_1(r0 | r1)
    }

    #[inline]
    fn hi32_3(&self) -> (u32, bool) {
        debug_assert!(self.len() >= 3);
        let rview = self.rview();
        let r0 = rview[0].as_u32();
        let r1 = rview[1].as_u32() << 16;
        let r2 = rview[2].as_u32();
        let (v, n) = u32_to_hi32_2(r0, r1 | r2);
        (v, n || nonzero(self, 3))
    }
}

impl Hi32<u32> for [u32] {
    #[inline]
    fn hi32_1(&self) -> (u32, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0];
        u32_to_hi32_1(r0)
    }

    #[inline]
    fn hi32_2(&self) -> (u32, bool) {
        debug_assert!(self.len() >= 2);
        let rview = self.rview();
        let r0 = rview[0];
        let r1 = rview[1];
        let (v, n) = u32_to_hi32_2(r0, r1);
        (v, n || nonzero(self, 2))
    }

    #[inline]
    fn hi32_3(&self) -> (u32, bool) {
        self.hi32_2()
    }
}

impl Hi32<u64> for [u64] {
    #[inline]
    fn hi32_1(&self) -> (u32, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0];
        u64_to_hi32_1(r0)
    }

    #[inline]
    fn hi32_2(&self) -> (u32, bool) {
        debug_assert!(self.len() >= 2);
        let rview = self.rview();
        let r0 = rview[0];
        let r1 = rview[1];
        let (v, n) = u64_to_hi32_2(r0, r1);
        (v, n || nonzero(self, 2))
    }

    #[inline]
    fn hi32_3(&self) -> (u32, bool) {
        self.hi32_2()
    }
}

// HI64

/// Shift 64-bit integer to high 64-bits.
#[inline]
fn u64_to_hi64_1(r0: u64) -> (u64, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}

/// Shift 2 64-bit integers to high 64-bits.
#[inline]
fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    let rs = 64 - ls;
    let v = match ls {
        0 => r0,
        _ => (r0 << ls) | (r1 >> rs),
    };
    let n = r1 << ls != 0;
    (v, n)
}
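
// A worked example of the two-limb normalization: for `r0 = 1`, there are
// 63 leading zeros, so the high word keeps the top bit of `r0` plus the top
// 63 bits of `r1`, and the truncated low bit of `r1` sets the "nonzero" flag.
// Hypothetical test for illustration.
#[cfg(test)]
#[test]
fn u64_to_hi64_2_example() {
    let (v, n) = u64_to_hi64_2(1, 3);
    assert_eq!(v, 0x8000_0000_0000_0001);
    assert!(n);
}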

/// Trait to export the high 64-bits from a little-endian slice.
trait Hi64<T>: SliceLike<T> {
    /// Get the hi64 bits from a 1-limb slice.
    fn hi64_1(&self) -> (u64, bool);

    /// Get the hi64 bits from a 2-limb slice.
    fn hi64_2(&self) -> (u64, bool);

    /// Get the hi64 bits from a 3-limb slice.
    fn hi64_3(&self) -> (u64, bool);

    /// Get the hi64 bits from a 4-limb slice.
    fn hi64_4(&self) -> (u64, bool);

    /// Get the hi64 bits from a 5-limb slice.
    fn hi64_5(&self) -> (u64, bool);

    /// High-level exporter to extract the high 64 bits from a little-endian slice.
    fn hi64(&self) -> (u64, bool) {
        match self.len() {
            0 => (0, false),
            1 => self.hi64_1(),
            2 => self.hi64_2(),
            3 => self.hi64_3(),
            4 => self.hi64_4(),
            _ => self.hi64_5(),
        }
    }
}

impl Hi64<u16> for [u16] {
    #[inline]
    fn hi64_1(&self) -> (u64, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0].as_u64();
        u64_to_hi64_1(r0)
    }

    #[inline]
    fn hi64_2(&self) -> (u64, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0].as_u64() << 16;
        let r1 = rview[1].as_u64();
        u64_to_hi64_1(r0 | r1)
    }

    #[inline]
    fn hi64_3(&self) -> (u64, bool) {
        debug_assert!(self.len() == 3);
        let rview = self.rview();
        let r0 = rview[0].as_u64() << 32;
        let r1 = rview[1].as_u64() << 16;
        let r2 = rview[2].as_u64();
        u64_to_hi64_1(r0 | r1 | r2)
    }

    #[inline]
    fn hi64_4(&self) -> (u64, bool) {
        debug_assert!(self.len() == 4);
        let rview = self.rview();
        let r0 = rview[0].as_u64() << 48;
        let r1 = rview[1].as_u64() << 32;
        let r2 = rview[2].as_u64() << 16;
        let r3 = rview[3].as_u64();
        u64_to_hi64_1(r0 | r1 | r2 | r3)
    }

    #[inline]
    fn hi64_5(&self) -> (u64, bool) {
        debug_assert!(self.len() >= 5);
        let rview = self.rview();
        let r0 = rview[0].as_u64();
        let r1 = rview[1].as_u64() << 48;
        let r2 = rview[2].as_u64() << 32;
        let r3 = rview[3].as_u64() << 16;
        let r4 = rview[4].as_u64();
        let (v, n) = u64_to_hi64_2(r0, r1 | r2 | r3 | r4);
        (v, n || nonzero(self, 5))
    }
}

impl Hi64<u32> for [u32] {
    #[inline]
    fn hi64_1(&self) -> (u64, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0].as_u64();
        u64_to_hi64_1(r0)
    }

    #[inline]
    fn hi64_2(&self) -> (u64, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0].as_u64() << 32;
        let r1 = rview[1].as_u64();
        u64_to_hi64_1(r0 | r1)
    }

    #[inline]
    fn hi64_3(&self) -> (u64, bool) {
        debug_assert!(self.len() >= 3);
        let rview = self.rview();
        let r0 = rview[0].as_u64();
        let r1 = rview[1].as_u64() << 32;
        let r2 = rview[2].as_u64();
        let (v, n) = u64_to_hi64_2(r0, r1 | r2);
        (v, n || nonzero(self, 3))
    }

    #[inline]
    fn hi64_4(&self) -> (u64, bool) {
        self.hi64_3()
    }

    #[inline]
    fn hi64_5(&self) -> (u64, bool) {
        self.hi64_3()
    }
}

impl Hi64<u64> for [u64] {
    #[inline]
    fn hi64_1(&self) -> (u64, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0];
        u64_to_hi64_1(r0)
    }

    #[inline]
    fn hi64_2(&self) -> (u64, bool) {
        debug_assert!(self.len() >= 2);
        let rview = self.rview();
        let r0 = rview[0];
        let r1 = rview[1];
        let (v, n) = u64_to_hi64_2(r0, r1);
        (v, n || nonzero(self, 2))
    }

    #[inline]
    fn hi64_3(&self) -> (u64, bool) {
        self.hi64_2()
    }

    #[inline]
    fn hi64_4(&self) -> (u64, bool) {
        self.hi64_2()
    }

    #[inline]
    fn hi64_5(&self) -> (u64, bool) {
        self.hi64_2()
    }
}

// HI128

/// Shift 128-bit integer to high 128-bits.
#[cfg(has_i128)]
#[inline]
fn u128_to_hi128_1(r0: u128) -> (u128, bool) {
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}

/// Shift 2 128-bit integers to high 128-bits.
#[cfg(has_i128)]
#[inline]
fn u128_to_hi128_2(r0: u128, r1: u128) -> (u128, bool) {
    let ls = r0.leading_zeros();
    let rs = 128 - ls;
    let v = (r0 << ls) | (r1 >> rs);
    let n = r1 << ls != 0;
    (v, n)
}

/// Trait to export the high 128-bits from a little-endian slice.
#[cfg(has_i128)]
trait Hi128<T>: SliceLike<T> {
    /// Get the hi128 bits from a 1-limb slice.
    fn hi128_1(&self) -> (u128, bool);

    /// Get the hi128 bits from a 2-limb slice.
    fn hi128_2(&self) -> (u128, bool);

    /// Get the hi128 bits from a 3-limb slice.
    fn hi128_3(&self) -> (u128, bool);

    /// Get the hi128 bits from a 4-limb slice.
    fn hi128_4(&self) -> (u128, bool);

    /// Get the hi128 bits from a 5-limb slice.
    fn hi128_5(&self) -> (u128, bool);

    /// Get the hi128 bits from a 6-limb slice.
    fn hi128_6(&self) -> (u128, bool);

    /// Get the hi128 bits from a 7-limb slice.
    fn hi128_7(&self) -> (u128, bool);

    /// Get the hi128 bits from an 8-limb slice.
    fn hi128_8(&self) -> (u128, bool);

    /// Get the hi128 bits from a 9-limb slice.
    fn hi128_9(&self) -> (u128, bool);

    /// High-level exporter to extract the high 128 bits from a little-endian slice.
    fn hi128(&self) -> (u128, bool) {
        match self.len() {
            0 => (0, false),
            1 => self.hi128_1(),
            2 => self.hi128_2(),
            3 => self.hi128_3(),
            4 => self.hi128_4(),
            5 => self.hi128_5(),
            6 => self.hi128_6(),
            7 => self.hi128_7(),
            8 => self.hi128_8(),
            _ => self.hi128_9(),
        }
    }
}

#[cfg(has_i128)]
impl Hi128<u16> for [u16] {
    #[inline]
    fn hi128_1(&self) -> (u128, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0].as_u128();
        u128_to_hi128_1(r0)
    }

    #[inline]
    fn hi128_2(&self) -> (u128, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 16;
        let r1 = rview[1].as_u128();
        u128_to_hi128_1(r0 | r1)
    }

    #[inline]
    fn hi128_3(&self) -> (u128, bool) {
        debug_assert!(self.len() == 3);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 32;
        let r1 = rview[1].as_u128() << 16;
        let r2 = rview[2].as_u128();
        u128_to_hi128_1(r0 | r1 | r2)
    }

    #[inline]
    fn hi128_4(&self) -> (u128, bool) {
        debug_assert!(self.len() == 4);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 48;
        let r1 = rview[1].as_u128() << 32;
        let r2 = rview[2].as_u128() << 16;
        let r3 = rview[3].as_u128();
        u128_to_hi128_1(r0 | r1 | r2 | r3)
    }

    #[inline]
    fn hi128_5(&self) -> (u128, bool) {
        debug_assert!(self.len() == 5);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 64;
        let r1 = rview[1].as_u128() << 48;
        let r2 = rview[2].as_u128() << 32;
        let r3 = rview[3].as_u128() << 16;
        let r4 = rview[4].as_u128();
        u128_to_hi128_1(r0 | r1 | r2 | r3 | r4)
    }

    #[inline]
    fn hi128_6(&self) -> (u128, bool) {
        debug_assert!(self.len() == 6);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 80;
        let r1 = rview[1].as_u128() << 64;
        let r2 = rview[2].as_u128() << 48;
        let r3 = rview[3].as_u128() << 32;
        let r4 = rview[4].as_u128() << 16;
        let r5 = rview[5].as_u128();
        u128_to_hi128_1(r0 | r1 | r2 | r3 | r4 | r5)
    }

    #[inline]
    fn hi128_7(&self) -> (u128, bool) {
        debug_assert!(self.len() == 7);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 96;
        let r1 = rview[1].as_u128() << 80;
        let r2 = rview[2].as_u128() << 64;
        let r3 = rview[3].as_u128() << 48;
        let r4 = rview[4].as_u128() << 32;
        let r5 = rview[5].as_u128() << 16;
        let r6 = rview[6].as_u128();
        u128_to_hi128_1(r0 | r1 | r2 | r3 | r4 | r5 | r6)
    }

    #[inline]
    fn hi128_8(&self) -> (u128, bool) {
        debug_assert!(self.len() == 8);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 112;
        let r1 = rview[1].as_u128() << 96;
        let r2 = rview[2].as_u128() << 80;
        let r3 = rview[3].as_u128() << 64;
        let r4 = rview[4].as_u128() << 48;
        let r5 = rview[5].as_u128() << 32;
        let r6 = rview[6].as_u128() << 16;
        let r7 = rview[7].as_u128();
        u128_to_hi128_1(r0 | r1 | r2 | r3 | r4 | r5 | r6 | r7)
    }

    #[inline]
    fn hi128_9(&self) -> (u128, bool) {
        debug_assert!(self.len() >= 9);
        let rview = self.rview();
        let r0 = rview[0].as_u128();
        let r1 = rview[1].as_u128() << 112;
        let r2 = rview[2].as_u128() << 96;
        let r3 = rview[3].as_u128() << 80;
        let r4 = rview[4].as_u128() << 64;
        let r5 = rview[5].as_u128() << 48;
        let r6 = rview[6].as_u128() << 32;
        let r7 = rview[7].as_u128() << 16;
        let r8 = rview[8].as_u128();
        let (v, n) = u128_to_hi128_2(r0, r1 | r2 | r3 | r4 | r5 | r6 | r7 | r8);
        (v, n || nonzero(self, 9))
    }
}

#[cfg(has_i128)]
impl Hi128<u32> for [u32] {
    #[inline]
    fn hi128_1(&self) -> (u128, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0].as_u128();
        u128_to_hi128_1(r0)
    }

    #[inline]
    fn hi128_2(&self) -> (u128, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 32;
        let r1 = rview[1].as_u128();
        u128_to_hi128_1(r0 | r1)
    }

    #[inline]
    fn hi128_3(&self) -> (u128, bool) {
        debug_assert!(self.len() == 3);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 64;
        let r1 = rview[1].as_u128() << 32;
        let r2 = rview[2].as_u128();
        u128_to_hi128_1(r0 | r1 | r2)
    }

    #[inline]
    fn hi128_4(&self) -> (u128, bool) {
        debug_assert!(self.len() == 4);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 96;
        let r1 = rview[1].as_u128() << 64;
        let r2 = rview[2].as_u128() << 32;
        let r3 = rview[3].as_u128();
        u128_to_hi128_1(r0 | r1 | r2 | r3)
    }

    #[inline]
    fn hi128_5(&self) -> (u128, bool) {
        debug_assert!(self.len() >= 5);
        let rview = self.rview();
        let r0 = rview[0].as_u128();
        let r1 = rview[1].as_u128() << 96;
        let r2 = rview[2].as_u128() << 64;
        let r3 = rview[3].as_u128() << 32;
        let r4 = rview[4].as_u128();
        let (v, n) = u128_to_hi128_2(r0, r1 | r2 | r3 | r4);
        (v, n || nonzero(self, 5))
    }

    #[inline]
    fn hi128_6(&self) -> (u128, bool) {
        self.hi128_5()
    }

    #[inline]
    fn hi128_7(&self) -> (u128, bool) {
        self.hi128_5()
    }

    #[inline]
    fn hi128_8(&self) -> (u128, bool) {
        self.hi128_5()
    }

    #[inline]
    fn hi128_9(&self) -> (u128, bool) {
        self.hi128_5()
    }
}

#[cfg(has_i128)]
impl Hi128<u64> for [u64] {
    #[inline]
    fn hi128_1(&self) -> (u128, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0].as_u128();
        u128_to_hi128_1(r0)
    }

    #[inline]
    fn hi128_2(&self) -> (u128, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0].as_u128() << 64;
        let r1 = rview[1].as_u128();
        u128_to_hi128_1(r0 | r1)
    }

    #[inline]
    fn hi128_3(&self) -> (u128, bool) {
        debug_assert!(self.len() >= 3);
        let rview = self.rview();
        let r0 = rview[0].as_u128();
        let r1 = rview[1].as_u128() << 64;
        let r2 = rview[2].as_u128();
        let (v, n) = u128_to_hi128_2(r0, r1 | r2);
        (v, n || nonzero(self, 3))
    }

    #[inline]
    fn hi128_4(&self) -> (u128, bool) {
        self.hi128_3()
    }

    #[inline]
    fn hi128_5(&self) -> (u128, bool) {
        self.hi128_3()
    }

    #[inline]
    fn hi128_6(&self) -> (u128, bool) {
        self.hi128_3()
    }

    #[inline]
    fn hi128_7(&self) -> (u128, bool) {
        self.hi128_3()
    }

    #[inline]
    fn hi128_8(&self) -> (u128, bool) {
        self.hi128_3()
    }

    #[inline]
    fn hi128_9(&self) -> (u128, bool) {
        self.hi128_3()
    }
}

// SCALAR
// ------

// Scalar-to-scalar operations, as building-blocks for arbitrary-precision
// operations.

pub(in atof::algorithm) mod scalar {

use super::*;

// ADDITION

/// Add two small integers and return the resulting value and if overflow happens.
#[inline]
pub fn add(x: Limb, y: Limb)
    -> (Limb, bool)
{
    x.overflowing_add(y)
}

/// AddAssign two small integers and return if overflow happens.
#[inline]
pub fn iadd(x: &mut Limb, y: Limb)
    -> bool
{
    let t = add(*x, y);
    *x = t.0;
    t.1
}

// SUBTRACTION

/// Subtract two small integers and return the resulting value and if overflow happens.
#[inline]
pub fn sub(x: Limb, y: Limb)
    -> (Limb, bool)
{
    x.overflowing_sub(y)
}

/// SubAssign two small integers and return if overflow happens.
#[inline]
pub fn isub(x: &mut Limb, y: Limb)
    -> bool
{
    let t = sub(*x, y);
    *x = t.0;
    t.1
}

// MULTIPLICATION

/// Multiply two small integers (with carry) and return the overflow contribution.
///
/// Returns the (low, high) components.
#[inline]
pub fn mul(x: Limb, y: Limb, carry: Limb)
    -> (Limb, Limb)
{
    // Cannot overflow, as long as wide is 2x as wide. This is because
    // the following is always true:
    // `Wide::max_value() - (Narrow::max_value() * Narrow::max_value()) >= Narrow::max_value()`
    let z: Wide = as_wide(x) * as_wide(y) + as_wide(carry);
    (as_limb(z), as_limb(z >> <Limb as Integer>::BITS))
}
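
// A worked example of the carry-propagating multiply: for any limb width,
// `max * 3 + 2 == 3 * 2^BITS - 1`, which splits into a low limb of all ones
// and a high limb of 2. Hypothetical test for illustration.
#[cfg(test)]
#[test]
fn mul_carry_example() {
    let max = Limb::max_value();
    // max * 3 + 2 = (2^BITS - 1) * 3 + 2 = 2 * 2^BITS + (2^BITS - 1).
    assert_eq!(mul(max, 3, 2), (max, 2));
}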

/// MulAssign two small integers (with carry) and return the new carry.
#[inline]
pub fn imul(x: &mut Limb, y: Limb, carry: Limb)
    -> Limb
{
    let t = mul(*x, y, carry);
    *x = t.0;
    t.1
}

// DIVISION

/// Divide two small integers (with remainder) and return the remainder contribution.
///
/// Returns the (value, remainder) components.
#[inline]
pub fn div(x: Limb, y: Limb, rem: Limb)
    -> (Limb, Limb)
{
    // Cannot overflow, as long as wide is 2x as wide.
    let x = as_wide(x) | (as_wide(rem) << <Limb as Integer>::BITS);
    let y = as_wide(y);
    (as_limb(x / y), as_limb(x % y))
}
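
// A worked example of the wide division: the remainder from the previous
// (more-significant) limb becomes the high word of the numerator, so
// `div(0, 3, 1)` divides `2^BITS` by 3. Hypothetical test, assuming 64-bit
// limbs for the concrete values.
#[cfg(all(test, limb_width_64))]
#[test]
fn div_remainder_example() {
    // 2^64 = 3 * 6148914691236517205 + 1.
    assert_eq!(div(0, 3, 1), (6148914691236517205, 1));
}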

/// DivAssign two small integers and return the remainder.
#[inline]
pub fn idiv(x: &mut Limb, y: Limb, rem: Limb)
    -> Limb
{
    let t = div(*x, y, rem);
    *x = t.0;
    t.1
}

}   // scalar

// SMALL
// -----

// Large-to-small operations, to modify a big integer from a native scalar.

pub(in atof::algorithm) mod small {

use lib::iter;
use super::*;
use super::super::small_powers::*;
use super::super::large_powers::*;

// PROPERTIES

/// Get the number of leading zero values in the storage.
/// Assumes the value is normalized.
#[inline]
pub fn leading_zero_limbs(_: &[Limb]) -> usize {
    0
}

/// Get the number of trailing zero values in the storage.
/// Assumes the value is normalized.
#[inline]
pub fn trailing_zero_limbs(x: &[Limb]) -> usize {
    let mut iter = x.iter().enumerate();
    let opt = iter.find(|&tup| !tup.1.is_zero());
    let value = opt
        .map(|t| t.0)
        .unwrap_or(x.len());

    value
}

/// Get number of leading zero bits in the storage.
#[inline]
pub fn leading_zeros(x: &[Limb]) -> usize {
    if x.is_empty() {
        0
    } else {
        x.rindex(0).leading_zeros().as_usize()
    }
}

/// Get number of trailing zero bits in the storage.
/// Assumes the value is normalized.
#[inline]
pub fn trailing_zeros(x: &[Limb]) -> usize {
    // Get the index of the first non-zero limb, from the least-significant end.
    let index = trailing_zero_limbs(x);
    let mut count = index.saturating_mul(<Limb as Integer>::BITS);
    if let Some(value) = x.get(index) {
        count = count.saturating_add(value.trailing_zeros().as_usize());
    }
    count
}

// BIT LENGTH

/// Calculate the bit-length of the big-integer.
#[inline]
pub fn bit_length(x: &[Limb]) -> usize {
    // Avoid overflowing, calculate via total number of bits
    // minus leading zero bits.
    let nlz = leading_zeros(x);
    <Limb as Integer>::BITS.checked_mul(x.len())
        .map(|v| v - nlz)
        .unwrap_or(usize::max_value())
}
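
// A quick sketch of the bit-length calculation: total bits minus the leading
// zeros of the most-significant limb. Hypothetical test, assuming 64-bit
// limbs for the concrete values.
#[cfg(all(test, limb_width_64))]
#[test]
fn bit_length_example() {
    assert_eq!(bit_length(&[1]), 1);
    // Little-endian: [0, 1] is 2^64, which needs 65 bits.
    assert_eq!(bit_length(&[0, 1]), 65);
}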

// LIMB LENGTH

/// Calculate the limb-length of the big-integer.
#[inline]
pub fn limb_length(x: &[Limb]) -> usize {
    x.len()
}

// SHR

/// Shift-right bits inside a buffer.
///
/// Returns the truncated bits.
///
/// Assumes `n < <Limb as Integer>::BITS`, IE, internally shifting bits.
#[inline]
pub fn ishr_bits<T>(x: &mut T, n: usize)
    -> Limb
    where T: CloneableVecLike<Limb>
{
    // Need to shift by the number of `bits % <Limb as Integer>::BITS`.
    let bits = <Limb as Integer>::BITS;
    debug_assert!(n < bits && n != 0);

    // Internally, for each item, we shift right by n, and add the previous
    // left-shifted limb-bits.
    // For example, we transform (for u8) shifted right 2, to:
    //      b10100100 b01000010
    //        b101001 b00010000
    let lshift = bits - n;
    let rshift = n;
    let mut prev: Limb = 0;
    for xi in x.iter_mut().rev() {
        let tmp = *xi;
        *xi >>= rshift;
        *xi |= prev << lshift;
        prev = tmp;
    }

    prev & lower_n_mask(as_limb(rshift))
}
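
// A worked single-limb example: shifting 0b1101 right by 2 leaves 0b11 in
// the buffer and returns the truncated bits 0b01. Hypothetical test,
// assuming `Vec<Limb>` implements `CloneableVecLike<Limb>` and is in scope,
// as with the other containers used in this crate.
#[cfg(test)]
#[test]
fn ishr_bits_example() {
    let mut x: Vec<Limb> = Vec::new();
    x.push(0b1101);
    assert_eq!(ishr_bits(&mut x, 2), 0b01);
    assert_eq!(&x[..], &[0b11]);
}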

/// Shift-right `n` limbs inside a buffer and return if all the truncated limbs are zero.
///
/// Assumes `n` is not 0.
#[inline]
pub fn ishr_limbs<T>(x: &mut T, n: usize)
    -> bool
    where T: CloneableVecLike<Limb>
{
    debug_assert!(n != 0);

    if n >= x.len() {
        x.clear();
        false
    } else {
        let is_zero = (&x[..n]).iter().all(|v| v.is_zero());
        x.remove_many(0..n);
        is_zero
    }
}

/// Shift-right buffer by n bits and return if we should round-up.
pub fn ishr<T>(x: &mut T, n: usize)
    -> bool
    where T: CloneableVecLike<Limb>
{
    let bits = <Limb as Integer>::BITS;
    // Need to drop limbs for the number of `bits / <Limb as Integer>::BITS`,
    // and shift-right with carry for `bits % <Limb as Integer>::BITS`.
    let rem = n % bits;
    let div = n / bits;
    let is_zero = match div.is_zero() {
        true  => true,
        false => ishr_limbs(x, div),
    };
    let truncated = match rem.is_zero() {
        true  => 0,
        false => ishr_bits(x, rem),
    };

    // Calculate if we need to roundup.
    let roundup = {
        let halfway = lower_n_halfway(as_limb(rem));
        if truncated > halfway {
            // Above halfway
            true
        } else if truncated == halfway {
            // Exactly halfway, if !is_zero, we have a tie-breaker,
            // otherwise, we follow round-to-nearest, tie-even rules.
            // Cannot be empty, since truncated is non-zero.
            !is_zero || x[0].is_odd()
        } else {
            // Below halfway
            false
        }
    };

    // Normalize the data
    normalize(x);

    roundup
}
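
// A rounding sketch: 0b110 >> 2 is 1.5, an exact tie, and the resulting
// low bit (1) is odd, so round-to-nearest, tie-even rounds up. Hypothetical
// test, assuming `Vec<Limb>` implements `CloneableVecLike<Limb>` and that
// `lower_n_halfway(2)` is `0b10`.
#[cfg(test)]
#[test]
fn ishr_roundup_example() {
    let mut x: Vec<Limb> = Vec::new();
    x.push(0b110);
    assert!(ishr(&mut x, 2));
    assert_eq!(&x[..], &[0b1]);
}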

/// Shift-right buffer by n bits, returning the result and if we should round-up.
#[inline]
pub fn shr<T>(x: &[Limb], n: usize)
    -> (T, bool)
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    let roundup = ishr(&mut z, n);
    (z, roundup)
}

// SHL

/// Shift-left bits inside a buffer.
///
/// Assumes `n < <Limb as Integer>::BITS`, IE, internally shifting bits.
#[inline]
pub fn ishl_bits<T>(x: &mut T, n: usize)
    where T: CloneableVecLike<Limb>
{
    // Need to shift by the number of `bits % <Limb as Integer>::BITS`.
    let bits = <Limb as Integer>::BITS;
    debug_assert!(n < bits);
    if n.is_zero() {
        return;
    }

    // Internally, for each item, we shift left by n, and add the previous
    // right shifted limb-bits.
    // For example, we transform (for u8) shifted left 2, to:
    //      b10100100 b01000010
    //      b10 b10010001 b00001000
    let rshift = bits - n;
    let lshift = n;
    let mut prev: Limb = 0;
    for xi in x.iter_mut() {
        let tmp = *xi;
        *xi <<= lshift;
        *xi |= prev >> rshift;
        prev = tmp;
    }

    // Always push the carry, even if it creates a non-normal result.
    let carry = prev >> rshift;
    if carry != 0 {
        x.push(carry);
    }
}

/// Shift-left bits inside a buffer.
///
/// Assumes `n < <Limb as Integer>::BITS`, IE, internally shifting bits.
#[inline]
pub fn shl_bits<T>(x: &[Limb], n: usize)
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    ishl_bits(&mut z, n);
    z
}

/// Shift-left `n` limbs inside a buffer.
///
/// Assumes `n` is not 0.
#[inline]
pub fn ishl_limbs<T>(x: &mut T, n: usize)
    where T: CloneableVecLike<Limb>
{
    debug_assert!(n != 0);
    if !x.is_empty() {
        x.insert_many(0, iter::repeat(0).take(n));
    }
}

/// Shift-left buffer by n bits.
#[inline]
pub fn ishl<T>(x: &mut T, n: usize)
    where T: CloneableVecLike<Limb>
{
    let bits = <Limb as Integer>::BITS;
    // Need to pad with zeros for the number of `bits / <Limb as Integer>::BITS`,
    // and shift-left with carry for `bits % <Limb as Integer>::BITS`.
    let rem = n % bits;
    let div = n / bits;
    ishl_bits(x, rem);
    if !div.is_zero() {
        ishl_limbs(x, div);
    }
}
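
// A sketch of the combined shift: shifting by `BITS + 1` shifts in one whole
// zero limb plus one extra bit. Hypothetical test, assuming `Vec<Limb>`
// implements `CloneableVecLike<Limb>`.
#[cfg(test)]
#[test]
fn ishl_example() {
    let mut x: Vec<Limb> = Vec::new();
    x.push(1);
    ishl(&mut x, <Limb as Integer>::BITS + 1);
    assert_eq!(&x[..], &[0, 2]);
}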

/// Shift-left buffer by n bits.
#[inline]
pub fn shl<T>(x: &[Limb], n: usize)
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    ishl(&mut z, n);
    z
}

// NORMALIZE

/// Normalize the container by popping any leading zeros.
#[inline]
pub fn normalize<T>(x: &mut T)
    where T: CloneableVecLike<Limb>
{
    // Pop all zero limbs from the back, which are the most-significant
    // limbs in little-endian order, so the buffer stays normalized.
    while !x.is_empty() && x.rindex(0).is_zero() {
        x.pop();
    }
}

// ADDITION

/// Implied AddAssign implementation for adding a small integer to bigint.
///
/// Allows us to choose a start-index in x to store, to allow incrementing
/// from a non-zero start.
pub fn iadd_impl<T>(x: &mut T, y: Limb, xstart: usize)
    where T: CloneableVecLike<Limb>
{
    if x.len() <= xstart {
        x.push(y);
    } else {
        // Initial add
        let mut carry = scalar::iadd(&mut x[xstart], y);

        // Increment until overflow stops occurring.
        let mut size = xstart + 1;
        while carry && size < x.len() {
            carry = scalar::iadd(&mut x[size], 1);
            size += 1;
        }

        // If we overflowed the buffer entirely, need to add 1 to the end
        // of the buffer.
        if carry {
            x.push(1);
        }
    }
}

/// AddAssign small integer to bigint.
#[inline]
pub fn iadd<T>(x: &mut T, y: Limb)
    where T: CloneableVecLike<Limb>
{
    iadd_impl(x, y, 0);
}
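
// A carry-propagation sketch: adding 1 to a saturated limb overflows into
// a new most-significant limb. Hypothetical test, assuming `Vec<Limb>`
// implements `CloneableVecLike<Limb>`.
#[cfg(test)]
#[test]
fn iadd_carry_example() {
    let mut x: Vec<Limb> = Vec::new();
    x.push(Limb::max_value());
    iadd(&mut x, 1);
    assert_eq!(&x[..], &[0, 1]);
}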

/// Add small integer to bigint.
#[inline]
pub fn add<T>(x: &[Limb], y: Limb)
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    iadd(&mut z, y);
    z
}

// SUBTRACTION

/// SubAssign small integer to bigint.
/// Does not do overflowing subtraction.
pub fn isub_impl<T>(x: &mut T, y: Limb, xstart: usize)
    where T: CloneableVecLike<Limb>
{
    debug_assert!(x.len() > xstart && (x[xstart] >= y || x.len() > xstart+1));

    // Initial subtraction
    let mut carry = scalar::isub(&mut x[xstart], y);

    // Increment until overflow stops occurring.
    let mut size = xstart + 1;
    while carry && size < x.len() {
        carry = scalar::isub(&mut x[size], 1);
        size += 1;
    }
    normalize(x);
}

/// SubAssign small integer to bigint.
/// Does not do overflowing subtraction.
#[inline]
pub fn isub<T>(x: &mut T, y: Limb)
    where T: CloneableVecLike<Limb>
{
    isub_impl(x, y, 0);
}

/// Sub small integer to bigint.
#[inline]
pub fn sub<T>(x: &[Limb], y: Limb)
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    isub(&mut z, y);
    z
}

// MULTIPLICATION

/// MulAssign small integer to bigint.
#[inline]
pub fn imul<T>(x: &mut T, y: Limb)
    where T: CloneableVecLike<Limb>
{
    // Multiply iteratively over all elements, adding the carry each time.
    let mut carry: Limb = 0;
    for xi in x.iter_mut() {
        carry = scalar::imul(xi, y, carry);
    }

    // Overflow of value, add to end.
    if carry != 0 {
        x.push(carry);
    }
}
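
// A carry sketch for the scalar multiply: doubling a saturated limb pushes
// the carry as a new most-significant limb. Hypothetical test, assuming
// `Vec<Limb>` implements `CloneableVecLike<Limb>`.
#[cfg(test)]
#[test]
fn imul_carry_example() {
    let mut x: Vec<Limb> = Vec::new();
    x.push(Limb::max_value());
    imul(&mut x, 2);
    // (2^BITS - 1) * 2 = 2^(BITS+1) - 2: the low limb loses only its low bit.
    assert_eq!(&x[..], &[Limb::max_value() - 1, 1]);
}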

/// Mul small integer to bigint.
#[inline]
pub fn mul<T>(x: &[Limb], y: Limb)
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    imul(&mut z, y);
    z
}

/// MulAssign by a power.
///
/// Theoretically...
///
/// Use an exponentiation by squaring method, since it reduces the time
/// complexity of the multiplication to ~`O(log(n))` for the squaring,
/// and `O(n*m)` for the result. Since `m` is typically a lower-order
/// factor, this significantly reduces the number of multiplications
/// we need to do. Iteratively multiplying by small powers follows
/// the nth triangular number series, which scales as `O(p^2)`, but
/// where `p` is `n+m`. In short, it scales very poorly.
///
/// Practically...
///
/// Exponentiation by Squaring:
///     running 2 tests
///     test bigcomp_f32_lexical ... bench:       1,018 ns/iter (+/- 78)
///     test bigcomp_f64_lexical ... bench:       3,639 ns/iter (+/- 1,007)
///
/// Exponentiation by Iterative Small Powers:
///     running 2 tests
///     test bigcomp_f32_lexical ... bench:         518 ns/iter (+/- 31)
///     test bigcomp_f64_lexical ... bench:         583 ns/iter (+/- 47)
///
/// Exponentiation by Iterative Large Powers (of 2):
///     running 2 tests
///     test bigcomp_f32_lexical ... bench:         671 ns/iter (+/- 31)
///     test bigcomp_f64_lexical ... bench:       1,394 ns/iter (+/- 47)
///
/// Even using worst-case scenarios, exponentiation by squaring is
/// significantly slower for our workloads. Just multiply by small powers,
/// in simple cases, and use precalculated large powers in other cases.
pub fn imul_power<T>(x: &mut T, radix: u32, n: u32)
    where T: CloneableVecLike<Limb>
{
    use super::large::KARATSUBA_CUTOFF;

    let small_powers = get_small_powers(radix);
    let large_powers = get_large_powers(radix);

    if n == 0 {
        // No exponent, just return.
        // The 0-index of the large powers is `2^0`, which is 1, so we want
        // to make sure we don't take that path with a literal 0.
        return;
    }

    // We want to use the asymptotically faster algorithm if we're going
    // to be using Karatsuba multiplication sometime during the result,
    // otherwise, just multiply iteratively by small powers.
    let bit_length = 32 - n.leading_zeros().as_usize();
    debug_assert!(bit_length != 0 && bit_length <= large_powers.len());
    if x.len() + large_powers[bit_length-1].len() < 2*KARATSUBA_CUTOFF {
        // We can use iterative small powers to make this faster for the
        // easy cases.

        // Multiply by the largest small power until n < step.
        let step = small_powers.len() - 1;
        let power = small_powers[step];
        let mut n = n.as_usize();
        while n >= step {
            imul(x, power);
            n -= step;
        }

        // Multiply by the remainder.
        imul(x, small_powers[n]);
    } else {
        // In theory, this code should be asymptotically a lot faster,
        // in practice, our small::imul seems to be the limiting step,
        // and large imul is slow as well.

        // Multiply by higher order powers.
        let mut idx: usize = 0;
        let mut bit: usize = 1;
        let mut n = n.as_usize();
        while n != 0 {
            if n & bit != 0 {
                debug_assert!(idx < large_powers.len());
                large::imul(x, large_powers[idx]);
                n ^= bit;
            }
            idx += 1;
            bit <<= 1;
        }
    }
}

/// Mul by a power.
#[inline]
pub fn mul_power<T>(x: &[Limb], radix: u32, n: u32)
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    imul_power(&mut z, radix, n);
    z
}

// DIVISION

/// DivAssign small integer to bigint and get the remainder.
pub fn idiv<T>(x: &mut T, y: Limb)
    -> Limb
    where T: CloneableVecLike<Limb>
{
    // Divide iteratively over all elements, carrying the remainder each time.
    let mut rem: Limb = 0;
    for xi in x.iter_mut().rev() {
        rem = scalar::idiv(xi, y, rem);
    }
    normalize(x);

    rem
}

/// Div small integer to bigint and get the remainder.
#[inline]
pub fn div<T>(x: &[Limb], y: Limb)
    -> (T, Limb)
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    let rem = idiv(&mut z, y);
    (z, rem)
}

// POWER

/// Calculate x^n, using exponentiation by squaring.
///
/// This algorithm is slow, using `mul_power` should generally be preferred,
/// as although it's not asymptotically faster, it precalculates a lot
/// of results.
pub fn ipow<T>(x: &mut T, mut n: Limb)
    where T: CloneableVecLike<Limb>
{
    // Store `x` as 1, and switch `base` to `x`.
    let mut base = T::default();
    base.push(1);
    mem::swap(x, &mut base);

    // Do main algorithm.
    loop {
        if n.is_odd() {
            large::imul(x, &base);
        }
        n /= 2;

        // We need to break as a post-condition, since the real work
        // is in the `imul` and `mul` algorithms.
        if n.is_zero() {
            break;
        } else {
            base = large::mul(&base, &base);
        }
    }
}

/// Calculate x^n, using exponentiation by squaring.
#[inline]
pub fn pow<T>(x: &[Limb], n: Limb)
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    ipow(&mut z, n);
    z
}
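
// A quick sketch of exponentiation by squaring: 3^5 = 243 in a single limb.
// Hypothetical test, assuming `Vec<Limb>` implements `CloneableVecLike<Limb>`.
#[cfg(test)]
#[test]
fn pow_example() {
    let x: Vec<Limb> = pow(&[3], 5);
    assert_eq!(&x[..], &[243]);
}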

}   // small

// LARGE
// -----

// Large-to-large operations, to modify a big integer from another big integer.

pub(in atof::algorithm) mod large {

use lib::cmp;
use super::*;

// RELATIVE OPERATORS

/// Compare `x` to `y`, in little-endian order.
#[inline]
pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering {
    if x.len() > y.len() {
        return cmp::Ordering::Greater;
    } else if x.len() < y.len() {
        return cmp::Ordering::Less;
    } else {
        let iter = x.iter().rev().zip(y.iter().rev());
        for (&xi, &yi) in iter {
            if xi > yi {
                return cmp::Ordering::Greater;
            } else if xi < yi {
                return cmp::Ordering::Less;
            }
        }
        // Equal case.
        return cmp::Ordering::Equal;
    }
}
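
// A comparison sketch: a longer (normalized) buffer is always greater, and
// equal-length buffers compare from the most-significant limb down.
// Hypothetical test for illustration.
#[cfg(test)]
#[test]
fn compare_example() {
    assert_eq!(compare(&[0, 1], &[9]), cmp::Ordering::Greater);
    assert_eq!(compare(&[5, 1], &[0, 2]), cmp::Ordering::Less);
}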

/// Check if x is greater than y.
#[inline]
pub fn greater(x: &[Limb], y: &[Limb]) -> bool {
    compare(x, y) == cmp::Ordering::Greater
}

/// Check if x is greater than or equal to y.
#[inline]
pub fn greater_equal(x: &[Limb], y: &[Limb]) -> bool {
    !less(x, y)
}

/// Check if x is less than y.
#[inline]
pub fn less(x: &[Limb], y: &[Limb]) -> bool {
    compare(x, y) == cmp::Ordering::Less
}

/// Check if x is less than or equal to y.
#[inline]
pub fn less_equal(x: &[Limb], y: &[Limb]) -> bool {
    !greater(x, y)
}

/// Check if x is equal to y.
/// Slightly optimized for equality comparisons, since it reduces the number
/// of comparisons relative to `compare`.
#[inline]
pub fn equal(x: &[Limb], y: &[Limb]) -> bool {
    let mut iter = x.iter().rev().zip(y.iter().rev());
    x.len() == y.len() && iter.all(|(&xi, &yi)| xi == yi)
}
// ADDITION

/// Implied AddAssign implementation for bigints.
///
/// Allows us to choose a start-index in x to store, so we can avoid
/// padding the buffer with zeros when not needed, optimized for vectors.
pub fn iadd_impl<T>(x: &mut T, y: &[Limb], xstart: usize)
    where T: CloneableVecLike<Limb>
{
    // The effective x buffer is from `xstart..x.len()`, so we need to treat
    // that as the current range. If the effective y buffer is longer, need
    // to resize to that, + the start index.
    if y.len() > x.len() - xstart {
        x.resize(y.len() + xstart, 0);
    }

    // Iteratively add elements from y to x.
    let mut carry = false;
    for (xi, yi) in (&mut x[xstart..]).iter_mut().zip(y.iter()) {
        // Only one op of the two can overflow, since we added at max
        // Limb::max_value() + Limb::max_value(). Add the previous carry,
        // and store the current carry for the next.
        let mut tmp = scalar::iadd(xi, *yi);
        if carry {
            tmp |= scalar::iadd(xi, 1);
        }
        carry = tmp;
    }

    // Overflow from the previous bit.
    if carry {
        small::iadd_impl(x, 1, y.len()+xstart);
    }
}
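
// A sketch of the large add with carry: adding [1] to a saturated single
// limb overflows into a new limb via the small-add path at the end.
// Hypothetical test, assuming `Vec<Limb>` implements `CloneableVecLike<Limb>`.
#[cfg(test)]
#[test]
fn iadd_impl_example() {
    let mut x: Vec<Limb> = Vec::new();
    x.push(Limb::max_value());
    iadd_impl(&mut x, &[1], 0);
    assert_eq!(&x[..], &[0, 1]);
}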
1686 
1687 /// AddAssign bigint to bigint.
1688 #[inline]
iadd<T>(x: &mut T, y: &[Limb]) where T: CloneableVecLike<Limb>1689 pub fn iadd<T>(x: &mut T, y: &[Limb])
1690     where T: CloneableVecLike<Limb>
1691 {
1692     iadd_impl(x, y, 0)
1693 }
1694 
1695 /// Add bigint to bigint.
1696 #[inline]
add<T>(x: &[Limb], y: &[Limb]) -> T where T: CloneableVecLike<Limb>1697 pub fn add<T>(x: &[Limb], y: &[Limb])
1698     -> T
1699     where T: CloneableVecLike<Limb>
1700 {
1701     let mut z = T::default();
1702     z.extend_from_slice(x);
1703     iadd(&mut z, y);
1704     z
1705 }
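
// Minimal sketch of the ripple carry (assuming `Vec<Limb>` is in scope and
// implements `CloneableVecLike<Limb>`, as the collection types used by the
// tests do): adding `[1]` to `[Limb::max_value()]` overflows the low limb,
// and the carry spills into a newly-allocated high limb.
#[cfg(test)]
#[test]
fn iadd_carry_example_test() {
    let mut x: Vec<Limb> = vec![Limb::max_value()];
    iadd(&mut x, &[1]);
    assert_eq!(x, vec![0, 1]);
}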

// SUBTRACTION

/// SubAssign bigint from bigint.
pub fn isub<T>(x: &mut T, y: &[Limb])
    where T: CloneableVecLike<Limb>
{
    // Basic underflow checks.
    debug_assert!(greater_equal(x, y));

    // Iteratively subtract elements of y from x.
    let mut carry = false;
    for (xi, yi) in x.iter_mut().zip(y.iter()) {
        // Only one op of the two can underflow, since we subtracted at
        // most Limb::max_value(). Subtract the previous borrow, and store
        // the current borrow for the next.
        let mut tmp = scalar::isub(xi, *yi);
        if carry {
            tmp |= scalar::isub(xi, 1);
        }
        carry = tmp;
    }

    if carry {
        small::isub_impl(x, 1, y.len());
    } else {
        small::normalize(x);
    }
}

/// Sub bigint from bigint.
#[inline]
pub fn sub<T>(x: &[Limb], y: &[Limb])
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    isub(&mut z, y);
    z
}
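
// Companion sketch for the borrow chain (same `Vec<Limb>` assumption as
// above): subtracting `[1]` from `[0, 1]` borrows from the high limb, and
// normalization then strips the resulting leading zero limb.
#[cfg(test)]
#[test]
fn isub_borrow_example_test() {
    let mut x: Vec<Limb> = vec![0, 1];
    isub(&mut x, &[1]);
    assert_eq!(x, vec![Limb::max_value()]);
}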

// MULTIPLICATION

/// Number of limbs at which to bottom out to asymptotically slower algorithms.
///
/// Karatsuba tends to out-perform long-multiplication at ~320-640 bits,
/// so we go halfway, while Newton division tends to out-perform
/// Algorithm D at ~1024 bits. We can tune this for optimal performance.
pub const KARATSUBA_CUTOFF: usize = 32;

/// Grade-school multiplication algorithm.
///
/// Slow, naive algorithm, using limb-bit bases and just shifting left for
/// each iteration. This could be optimized with numerous other algorithms,
/// but it's extremely simple, and works in O(n*m) time, which is fine
/// by me. Each iteration, of which there are `m` iterations, requires
/// `n` multiplications and `n` additions: grade-school multiplication.
fn long_mul<T>(x: &[Limb], y: &[Limb])
    -> T
    where T: CloneableVecLike<Limb>
{
    // Using the immutable value, multiply by all the scalars in y, using
    // the algorithm defined above. Use a single buffer to avoid
    // frequent reallocations. Handle the first case to avoid a redundant
    // addition, since we know y.len() >= 1.
    let mut z: T = small::mul(x, y[0]);
    z.resize(x.len() + y.len(), 0);

    // Handle the iterative cases.
    for (i, &yi) in y[1..].iter().enumerate() {
        let zi: T = small::mul(x, yi);
        iadd_impl(&mut z, &zi, i+1);
    }

    small::normalize(&mut z);

    z
}
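
// Worked example in the limb radix B = 2^BITS (a sketch, with the same
// `Vec<Limb>` assumption as above): (B + 2) * (B + 3) = B^2 + 5*B + 6,
// which in little-endian limbs is [2, 1] * [3, 1] = [6, 5, 1].
#[cfg(test)]
#[test]
fn long_mul_example_test() {
    let x: Vec<Limb> = vec![2, 1];
    let y: Vec<Limb> = vec![3, 1];
    let z: Vec<Limb> = long_mul(&x, &y);
    assert_eq!(z, vec![6, 5, 1]);
}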

/// Split a buffer at index m, into (lo, hi) halves.
#[inline]
pub fn karatsuba_split<'a>(z: &'a [Limb], m: usize)
    -> (&'a [Limb], &'a [Limb])
{
    (&z[..m], &z[m..])
}

/// Karatsuba multiplication algorithm with roughly equal input sizes.
///
/// Assumes `y.len() >= x.len()`.
fn karatsuba_mul<T>(x: &[Limb], y: &[Limb])
    -> T
    where T: CloneableVecLike<Limb>
{
    if y.len() <= KARATSUBA_CUTOFF {
        // Bottom out to long multiplication for small cases.
        long_mul(x, y)
    } else if x.len() < y.len() / 2 {
        karatsuba_uneven_mul(x, y)
    } else {
        // Do our 3 multiplications.
        let m = y.len() / 2;
        let (xl, xh) = karatsuba_split(x, m);
        let (yl, yh) = karatsuba_split(y, m);
        let sumx: T = add(xl, xh);
        let sumy: T = add(yl, yh);
        let z0: T = karatsuba_mul(xl, yl);
        let mut z1: T = karatsuba_mul(&sumx, &sumy);
        let z2: T = karatsuba_mul(xh, yh);
        // Properly scale z1, which is `z1 - z2 - z0`.
        isub(&mut z1, &z2);
        isub(&mut z1, &z0);

        // Create our result, which is equal to, in little-endian order:
        // [z0, z1 - z2 - z0, z2]
        //  z1 must be shifted m limbs (B^m) over.
        //  z2 must be shifted 2*m limbs (B^(2m)) over.
        let mut result = T::default();
        let len = z0.len().max(m + z1.len()).max(2*m + z2.len());
        result.reserve_exact(len);
        result.extend_from_slice(&z0);
        iadd_impl(&mut result, &z1, m);
        iadd_impl(&mut result, &z2, 2*m);

        result
    }
}
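
// The three-multiplication identity behind the branch above is
// (xh*B^m + xl)(yh*B^m + yl) = z2*B^(2m) + (z1 - z0 - z2)*B^m + z0, with
// z0 = xl*yl, z2 = xh*yh, z1 = (xl + xh)(yl + yh). A quick cross-check
// against the schoolbook algorithm (a sketch; the input lengths are chosen
// just above KARATSUBA_CUTOFF so the recursive branch is exercised):
#[cfg(test)]
#[test]
fn karatsuba_matches_long_mul_test() {
    let x: Vec<Limb> = (1..41).map(|i| i as Limb).collect();
    let y: Vec<Limb> = (2..42).map(|i| i as Limb).collect();
    let expected: Vec<Limb> = long_mul(&x, &y);
    let actual: Vec<Limb> = karatsuba_mul(&x, &y);
    assert_eq!(actual, expected);
}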

/// Karatsuba multiplication algorithm where y is substantially larger than x.
///
/// Assumes `y.len() >= x.len()`.
fn karatsuba_uneven_mul<T>(x: &[Limb], mut y: &[Limb])
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut result = T::default();
    result.resize(x.len() + y.len(), 0);

    // This is effectively grade-school multiplication between
    // two numbers, except we're using splits on `y`, and the intermediate
    // step is a Karatsuba multiplication.
    let mut start = 0;
    while y.len() != 0 {
        let m = x.len().min(y.len());
        let (yl, yh) = karatsuba_split(y, m);
        let prod: T = karatsuba_mul(x, yl);
        iadd_impl(&mut result, &prod, start);
        y = yh;
        start += m;
    }
    small::normalize(&mut result);

    result
}
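
// Sketch of the uneven path (same `Vec<Limb>` assumption): a 2-limb x
// against a 7-limb y walks y in 2-limb chunks, so the loop runs four times
// (the last chunk has a single limb), and the result must agree with
// schoolbook multiplication.
#[cfg(test)]
#[test]
fn karatsuba_uneven_example_test() {
    let x: Vec<Limb> = vec![7, 11];
    let y: Vec<Limb> = vec![1, 2, 3, 4, 5, 6, 7];
    let expected: Vec<Limb> = long_mul(&x, &y);
    let actual: Vec<Limb> = karatsuba_uneven_mul(&x, &y);
    assert_eq!(actual, expected);
}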

/// Forwarder to the proper Karatsuba algorithm.
#[inline]
fn karatsuba_mul_fwd<T>(x: &[Limb], y: &[Limb])
    -> T
    where T: CloneableVecLike<Limb>
{
    if x.len() < y.len() {
        karatsuba_mul(x, y)
    } else {
        karatsuba_mul(y, x)
    }
}

/// MulAssign bigint to bigint.
#[inline]
pub fn imul<T>(x: &mut T, y: &[Limb])
    where T: CloneableVecLike<Limb>
{
    if y.len() == 1 {
        small::imul(x, y[0]);
    } else {
        // Use the Karatsuba forwarder, which bottoms out to long
        // multiplication below `KARATSUBA_CUTOFF`, so small inputs
        // don't pay for the recursion overhead.
        *x = karatsuba_mul_fwd(x, y);
    }
}

/// Mul bigint to bigint.
#[inline]
pub fn mul<T>(x: &[Limb], y: &[Limb])
    -> T
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    imul(&mut z, y);
    z
}

// DIVISION

/// Constants for Algorithm D.
const ALGORITHM_D_B: Wide = 1 << <Limb as Integer>::BITS;
const ALGORITHM_D_M: Wide = ALGORITHM_D_B - 1;

/// Calculate qhat (an estimate for the quotient).
///
/// This is step D3 in Algorithm D in "The Art of Computer Programming".
/// Assumes `x.len() > y.len()` and `y.len() >= 2`.
///
/// * `j`   - Current index on the iteration of the loop.
#[inline]
fn calculate_qhat(x: &[Limb], y: &[Limb], j: usize)
    -> Wide
{
    let n = y.len();

    // Estimate qhat of q[j]
    // Original Code:
    //  qhat = (x[j+n]*B + x[j+n-1])/y[n-1];
    //  rhat = (x[j+n]*B + x[j+n-1]) - qhat*y[n-1];
    let x_jn = as_wide(x[j+n]);
    let x_jn1 = as_wide(x[j+n-1]);
    let num = (x_jn << <Limb as Integer>::BITS) + x_jn1;
    let den = as_wide(y[n-1]);
    let mut qhat = num / den;
    let mut rhat = num - qhat * den;

    // Scale qhat and rhat
    // Original Code:
    //  again:
    //    if (qhat >= B || qhat*y[n-2] > B*rhat + x[j+n-2])
    //    { qhat = qhat - 1;
    //      rhat = rhat + y[n-1];
    //      if (rhat < B) goto again;
    //    }
    let x_jn2 = as_wide(x[j+n-2]);
    let y_n2 = as_wide(y[n-2]);
    let y_n1 = as_wide(y[n-1]);
    // This only happens when the leading bit of qhat is set.
    while qhat >= ALGORITHM_D_B || qhat * y_n2 > (rhat << <Limb as Integer>::BITS) + x_jn2 {
        qhat -= 1;
        rhat += y_n1;
        if rhat >= ALGORITHM_D_B {
            break;
        }
    }

    qhat
}
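
// Worked D3 sketch (assuming 32-bit limbs, so B = 2^32): with the
// normalized divisor y = [0, 0x80000000] and dividend prefix
// x = [0, 0, 0x40000000], the two high limbs of x give
// num = 0x40000000 * B, so qhat = num / 0x80000000 = 0x80000000 with
// rhat = 0, and the correction loop never fires.
#[cfg(all(test, limb_width_32))]
#[test]
fn calculate_qhat_example_test() {
    let x: [Limb; 3] = [0, 0, 0x40000000];
    let y: [Limb; 2] = [0, 0x80000000];
    assert_eq!(calculate_qhat(&x, &y, 0), 0x80000000);
}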

/// Multiply and subtract.
///
/// This is step D4 in Algorithm D in "The Art of Computer Programming",
/// and returns the remainder.
#[inline]
fn multiply_and_subtract<T>(x: &mut T, y: &T, qhat: Wide, j: usize)
    -> SignedWide
    where T: CloneableVecLike<Limb>
{
    let n = y.len();

    // Multiply and subtract
    // Original Code:
    //  k = 0;
    //  for (i = 0; i < n; i++) {
    //     p = qhat*y[i];
    //     t = x[i+j] - k - (p & 0xFFFFFFFFLL);
    //     x[i+j] = t;
    //     k = (p >> 32) - (t >> 32);
    //  }
    //  t = x[j+n] - k;
    //  x[j+n] = t;
    let mut k: SignedWide = 0;
    let mut t: SignedWide;
    for i in 0..n {
        let x_ij = as_signed_wide(x[i+j]);
        let y_i = as_wide(y[i]);
        let p = qhat * y_i;
        t = x_ij.wrapping_sub(k).wrapping_sub(as_signed_wide(p & ALGORITHM_D_M));
        x[i+j] = as_limb(t);
        k = as_signed_wide(p >> <Limb as Integer>::BITS) - (t >> <Limb as Integer>::BITS);
    }
    t = as_signed_wide(x[j+n]) - k;
    x[j+n] = as_limb(t);

    t
}

/// Calculate the quotient from the estimate and the test.
///
/// This mixes steps D5 and D6 in Algorithm D, so the algorithm can work
/// in single passes, without a quotient buffer.
#[inline]
fn test_quotient(qhat: Wide, t: SignedWide)
    -> Wide
{
    if t < 0 {
        qhat - 1
    } else {
        qhat
    }
}

/// Add back.
///
/// This is step D6 in Algorithm D in "The Art of Computer Programming",
/// and adds back the remainder in the very unlikely scenario we overestimated
/// the quotient by 1. Subtract 1 from the quotient, and add back the
/// remainder.
///
/// This step should be specifically debugged, due to its low likelihood,
/// since the probability is ~2/b, where b in this case is 2^32 or 2^64.
#[inline]
fn add_back<T>(x: &mut T, y: &T, mut t: SignedWide, j: usize)
    where T: CloneableVecLike<Limb>
{
    let n = y.len();

    // Store quotient digits
    // If we subtracted too much, add back.
    // Original Code:
    //  q[j] = qhat;              // Store quotient digit.
    //  if (t < 0) {              // If we subtracted too
    //     q[j] = q[j] - 1;       // much, add back.
    //     k = 0;
    //     for (i = 0; i < n; i++) {
    //        t = (unsigned long long)x[i+j] + y[i] + k;
    //        x[i+j] = t;
    //        k = t >> 32;
    //     }
    //     x[j+n] = x[j+n] + k;
    //  }
    if t < 0 {
        let mut k: SignedWide = 0;
        for i in 0..n {
            t = as_signed_wide(as_wide(x[i+j]) + as_wide(y[i])) + k;
            x[i+j] = as_limb(t);
            k = t >> <Limb as Integer>::BITS;
        }
        let x_jn = as_signed_wide(x[j+n]) + k;
        x[j+n] = as_limb(x_jn);
    }
}

/// Calculate the remainder from the quotient.
///
/// This is step D8 in Algorithm D in "The Art of Computer Programming",
/// and "unnormalizes" to calculate the remainder from the quotient.
#[inline]
fn calculate_remainder<T>(x: &[Limb], y: &[Limb], s: usize)
    -> T
    where T: CloneableVecLike<Limb>
{
    // Calculate the remainder.
    // Original Code:
    //  for (i = 0; i < n-1; i++)
    //     r[i] = (x[i] >> s) | ((unsigned long long)x[i+1] << (32-s));
    //  r[n-1] = x[n-1] >> s;
    let n = y.len();
    let mut r = T::default();
    r.reserve_exact(n);
    let rs = <Limb as Integer>::BITS - s;
    for i in 0..n-1 {
        let xi = as_wide(x[i]) >> s;
        let xi1 = as_wide(x[i+1]) << rs;
        let ri = xi | xi1;
        r.push(as_limb(ri));
    }
    let x_n1 = x[n-1] >> s;
    r.push(as_limb(x_n1));

    r
}

/// Implementation of Knuth's Algorithm D, returning the quotient and remainder.
///
/// `x` is the dividend, and `y` is the divisor.
/// Assumes `x.len() > y.len()` and `y.len() >= 2`.
///
/// Based off the Hacker's Delight implementation of Knuth's Algorithm D
/// in "The Art of Computer Programming".
///     http://www.hackersdelight.org/hdcodetxt/divmnu64.c.txt
///
/// All Hacker's Delight code is public domain, so this routine shall
/// also be placed in the public domain. See:
///     https://www.hackersdelight.org/permissions.htm
fn algorithm_d_div<T>(x: &[Limb], y: &[Limb])
    -> (T, T)
    where T: CloneableVecLike<Limb>
{
    // Normalize the divisor so the leading bit is set to 1.
    // x is the dividend, y is the divisor.
    // Need a leading zero on the numerator.
    let s = y.rindex(0).leading_zeros().as_usize();
    let m = x.len();
    let n = y.len();
    let mut xn: T = small::shl_bits(x, s);
    let yn: T = small::shl_bits(y, s);
    xn.push(0);

    // Store certain variables for the algorithm.
    let mut q = T::default();
    q.resize(m-n+1, 0);
    for j in (0..m-n+1).rev() {
        // Estimate the quotient
        let mut qhat = calculate_qhat(&xn, &yn, j);
        if qhat != 0 {
            let t = multiply_and_subtract(&mut xn, &yn, qhat, j);
            qhat = test_quotient(qhat, t);
            add_back(&mut xn, &yn, t, j);
        }
        q[j] = as_limb(qhat);
    }
    let mut r = calculate_remainder(&xn, &yn, s);

    // Normalize our results
    small::normalize(&mut q);
    small::normalize(&mut r);

    (q, r)
}

/// DivAssign bigint to bigint.
pub fn idiv<T>(x: &mut T, y: &[Limb])
    -> T
    where T: CloneableVecLike<Limb>
{
    debug_assert!(y.len() != 0);

    if x.len() < y.len() {
        // Can optimize easily, since the quotient is 0,
        // and the remainder is x. Put before `y.len() == 1`, since
        // it optimizes when `x.len() == 0` nicely.
        let mut r = T::default();
        mem::swap(x, &mut r);
        r
    } else if y.len() == 1 {
        // Can optimize for division by a small value.
        let mut r = T::default();
        r.push(small::idiv(x, y[0]));
        r
    } else {
        let (q, r) = algorithm_d_div(x, y);
        *x = q;
        r
    }
}

/// Div bigint to bigint.
#[inline]
pub fn div<T>(x: &[Limb], y: &[Limb])
    -> (T, T)
    where T: CloneableVecLike<Limb>
{
    let mut z = T::default();
    z.extend_from_slice(x);
    let rem = idiv(&mut z, y);
    (z, rem)
}
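
// End-to-end sketch of Algorithm D via `div` (same `Vec<Limb>` assumption):
// (B + 1)^2 = B^2 + 2*B + 1 divided by B + 1 gives quotient B + 1 and
// remainder 0, i.e. [1, 2, 1] / [1, 1] = ([1, 1], []) after normalization.
#[cfg(test)]
#[test]
fn algorithm_d_div_example_test() {
    let x: Vec<Limb> = vec![1, 2, 1];
    let y: Vec<Limb> = vec![1, 1];
    let (q, r): (Vec<Limb>, Vec<Limb>) = div(&x, &y);
    assert_eq!(q, vec![1, 1]);
    assert!(r.is_empty());
}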

/// Emit a single digit for the quotient and store the remainder in-place.
///
/// An extremely efficient division algorithm for small quotients, requiring
/// you to know the full range of the quotient prior to use. For example,
/// with a quotient that can range from [0, 10), you must have 4 leading
/// zeros in the divisor, so we can use a single-limb division to get
/// an accurate estimate of the quotient. Since we always underestimate
/// the quotient, we can add 1 and then emit the digit.
///
/// Requires a non-normalized denominator, with at least [1-6] leading
/// zeros, depending on the base (for example, 1 for base2, 6 for base36).
///
/// Adapted from David M. Gay's dtoa, and therefore under an MIT license:
///     www.netlib.org/fp/dtoa.c
pub fn quorem<T>(x: &mut T, y: &T)
    -> Limb
    where T: CloneableVecLike<Limb>
{
    debug_assert!(y.len() > 0);
    let mask = as_wide(Limb::max_value());

    // Numerator is smaller than the denominator, so the quotient is always 0.
    let m = x.len();
    let n = y.len();
    if m < n {
        return 0;
    }

    // Calculate our initial estimate for q.
    let mut q = x[m-1] / (y[n-1] + 1);

    // Need to calculate the remainder if we don't have a 0 quotient.
    if q != 0 {
        let mut borrow: Wide = 0;
        let mut carry: Wide = 0;
        for j in 0..m {
            let p = as_wide(y[j]) * as_wide(q) + carry;
            carry = p >> <Limb as Integer>::BITS;
            let t = as_wide(x[j]).wrapping_sub(p & mask).wrapping_sub(borrow);
            borrow = (t >> <Limb as Integer>::BITS) & 1;
            x[j] = as_limb(t);
        }
        small::normalize(x);
    }

    // Check if we underestimated q.
    if greater_equal(x, y) {
        q += 1;
        let mut borrow: Wide = 0;
        let mut carry: Wide = 0;
        for j in 0..m {
            let p = as_wide(y[j]) + carry;
            carry = p >> <Limb as Integer>::BITS;
            let t = as_wide(x[j]).wrapping_sub(p & mask).wrapping_sub(borrow);
            borrow = (t >> <Limb as Integer>::BITS) & 1;
            x[j] = as_limb(t);
        }
        small::normalize(x);
    }

    q
}
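
// Single-limb sketch of the estimate-then-correct flow (same `Vec<Limb>`
// assumption): for 10 / 3, the initial estimate is 10 / (3 + 1) = 2, the
// in-place remainder 10 - 2*3 = 4 is still >= 3, so q is bumped to 3 and
// the remainder becomes 1.
#[cfg(test)]
#[test]
fn quorem_example_test() {
    let mut x: Vec<Limb> = vec![10];
    let y: Vec<Limb> = vec![3];
    assert_eq!(quorem(&mut x, &y), 3);
    assert_eq!(x, vec![1]);
}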

}   // large

use lib::cmp;
use super::small_powers::*;
use super::large_powers::*;

/// Generate the imul_pown wrappers.
macro_rules! imul_power {
    ($name:ident, $base:expr) => (
        /// Multiply by a power of $base.
        #[inline]
        fn $name(&mut self, n: u32) {
            self.imul_power_impl($base, n)
        }
    );
}

// TRAITS
// ------

/// Traits for shared operations for big integers.
///
/// None of these are implemented using the normal operator traits, since
/// these are very expensive operations, and we want to deliberately
/// and explicitly use these functions.
pub(in atof::algorithm) trait SharedOps: Clone + Sized + Default {
    /// Underlying storage type for a SharedOps.
    type StorageType: CloneableVecLike<Limb>;

    // DATA

    /// Get access to the underlying data.
    fn data<'a>(&'a self) -> &'a Self::StorageType;

    /// Get mutable access to the underlying data.
    fn data_mut<'a>(&'a mut self) -> &'a mut Self::StorageType;

    // ZERO

    /// Check if the value is a normalized 0.
    #[inline]
    fn is_zero(&self) -> bool {
        self.limb_length() == 0
    }

    // RELATIVE OPERATIONS

    /// Compare self to y.
    #[inline]
    fn compare(&self, y: &Self) -> cmp::Ordering {
        large::compare(self.data(), y.data())
    }

    /// Check if self is greater than y.
    #[inline]
    fn greater(&self, y: &Self) -> bool {
        large::greater(self.data(), y.data())
    }

    /// Check if self is greater than or equal to y.
    #[inline]
    fn greater_equal(&self, y: &Self) -> bool {
        large::greater_equal(self.data(), y.data())
    }

    /// Check if self is less than y.
    #[inline]
    fn less(&self, y: &Self) -> bool {
        large::less(self.data(), y.data())
    }

    /// Check if self is less than or equal to y.
    #[inline]
    fn less_equal(&self, y: &Self) -> bool {
        large::less_equal(self.data(), y.data())
    }

    /// Check if self is equal to y.
    #[inline]
    fn equal(&self, y: &Self) -> bool {
        large::equal(self.data(), y.data())
    }

    // PROPERTIES

    /// Get the number of leading (most-significant) zero limbs in the storage.
    /// Assumes the value is normalized.
    #[inline]
    fn leading_zero_limbs(&self) -> usize {
        small::leading_zero_limbs(self.data())
    }

    /// Get the number of trailing (least-significant) zero limbs in the storage.
    /// Assumes the value is normalized.
    #[inline]
    fn trailing_zero_limbs(&self) -> usize {
        small::trailing_zero_limbs(self.data())
    }

    /// Get number of leading zero bits in the storage.
    /// Assumes the value is normalized.
    #[inline]
    fn leading_zeros(&self) -> usize {
        small::leading_zeros(self.data())
    }

    /// Get number of trailing zero bits in the storage.
    /// Assumes the value is normalized.
    #[inline]
    fn trailing_zeros(&self) -> usize {
        small::trailing_zeros(self.data())
    }

    /// Calculate the bit-length of the big-integer.
    /// Returns usize::max_value() if the value overflows,
    /// i.e., if `self.data().len() > usize::max_value() / 8`.
    #[inline]
    fn bit_length(&self) -> usize {
        small::bit_length(self.data())
    }

    /// Calculate the limb-length of the big-integer.
    #[inline]
    fn limb_length(&self) -> usize {
        small::limb_length(self.data())
    }

    /// Get the high 16 bits from the bigint, and if there are remaining bits.
    #[inline]
    fn hi16(&self) -> (u16, bool) {
        self.data().as_slice().hi16()
    }

    /// Get the high 32 bits from the bigint, and if there are remaining bits.
    #[inline]
    fn hi32(&self) -> (u32, bool) {
        self.data().as_slice().hi32()
    }

    /// Get the high 64 bits from the bigint, and if there are remaining bits.
    #[inline]
    fn hi64(&self) -> (u64, bool) {
        self.data().as_slice().hi64()
    }

    /// Get the high 128 bits from the bigint, and if there are remaining bits.
    #[cfg(has_i128)]
    #[inline]
    fn hi128(&self) -> (u128, bool) {
        self.data().as_slice().hi128()
    }

    /// Pad the buffer with `n` zero limbs at the least-significant end.
    #[inline]
    fn pad_zero_digits(&mut self, n: usize) -> usize {
        small::ishl_limbs(self.data_mut(), n);
        n
    }

    // INTEGER CONVERSIONS

    // CREATION

    /// Create new big integer from u16.
    #[inline]
    fn from_u16(x: u16) -> Self {
        let mut v = Self::default();
        let slc = split_u16(x);
        v.data_mut().extend_from_slice(&slc);
        v.normalize();
        v
    }

    /// Create new big integer from u32.
    #[inline]
    fn from_u32(x: u32) -> Self {
        let mut v = Self::default();
        let slc = split_u32(x);
        v.data_mut().extend_from_slice(&slc);
        v.normalize();
        v
    }

    /// Create new big integer from u64.
    #[inline]
    fn from_u64(x: u64) -> Self {
        let mut v = Self::default();
        let slc = split_u64(x);
        v.data_mut().extend_from_slice(&slc);
        v.normalize();
        v
    }

    /// Create new big integer from u128.
    #[cfg(has_i128)]
    #[inline]
    fn from_u128(x: u128) -> Self {
        let mut v = Self::default();
        let slc = split_u128(x);
        v.data_mut().extend_from_slice(&slc);
        v.normalize();
        v
    }

    // NORMALIZE

    /// Normalize the integer, so any leading (most-significant) zero limbs
    /// are removed.
    #[inline]
    fn normalize(&mut self) {
        small::normalize(self.data_mut());
    }

    /// Get if the big integer is normalized.
    #[inline]
    fn is_normalized(&self) -> bool {
        self.data().is_empty() || !self.data().rindex(0).is_zero()
    }

    // SHIFTS

    /// Shift-left the entire buffer n bits.
    #[inline]
    fn ishl(&mut self, n: usize) {
        small::ishl(self.data_mut(), n);
    }

    /// Shift-left the entire buffer n bits.
    fn shl(&self, n: usize) -> Self {
        let mut x = self.clone();
        x.ishl(n);
        x
    }

    /// Shift-right the entire buffer n bits.
    fn ishr(&mut self, n: usize, mut roundup: bool) {
        roundup &= small::ishr(self.data_mut(), n);

        // Round-up the least significant bit.
        if roundup {
            if self.data().is_empty() {
                self.data_mut().push(1);
            } else {
                self.data_mut()[0] += 1;
            }
        }
    }

    /// Shift-right the entire buffer n bits.
    fn shr(&self, n: usize, roundup: bool) -> Self {
        let mut x = self.clone();
        x.ishr(n, roundup);
        x
    }
}

/// Trait for small operations for arbitrary-precision numbers.
pub(in atof::algorithm) trait SmallOps: SharedOps {
    // SMALL POWERS

    /// Get the small powers from the radix.
    #[inline]
    fn small_powers(radix: u32) -> &'static [Limb] {
        get_small_powers(radix)
    }

    /// Get the large powers from the radix.
    #[inline]
    fn large_powers(radix: u32) -> &'static [&'static [Limb]] {
        get_large_powers(radix)
    }

    // ADDITION

    /// AddAssign small integer.
    #[inline]
    fn iadd_small(&mut self, y: Limb) {
        small::iadd(self.data_mut(), y);
    }

    /// Add small integer to a copy of self.
    #[inline]
    fn add_small(&self, y: Limb) -> Self {
        let mut x = self.clone();
        x.iadd_small(y);
        x
    }

    // SUBTRACTION

    /// SubAssign small integer.
    /// Warning: Does no underflow checking, self must be >= y.
    #[inline]
    fn isub_small(&mut self, y: Limb) {
        small::isub(self.data_mut(), y);
    }

    /// Sub small integer from a copy of self.
    /// Warning: Does no underflow checking, self must be >= y.
    #[inline]
    fn sub_small(&mut self, y: Limb) -> Self {
        let mut x = self.clone();
        x.isub_small(y);
        x
    }

    // MULTIPLICATION

    /// MulAssign small integer.
    #[inline]
    fn imul_small(&mut self, y: Limb) {
        small::imul(self.data_mut(), y);
    }

    /// Mul small integer to a copy of self.
    #[inline]
    fn mul_small(&self, y: Limb) -> Self {
        let mut x = self.clone();
        x.imul_small(y);
        x
    }

    /// MulAssign by a power.
    #[inline]
    fn imul_power_impl(&mut self, radix: u32, n: u32) {
        small::imul_power(self.data_mut(), radix, n);
    }

    /// MulAssign by a power of the given radix, dispatching to the
    /// specialized implementations below.
    fn imul_power(&mut self, radix: u32, n: u32) {
        match radix {
            2  => self.imul_pow2(n),
            3  => self.imul_pow3(n),
            4  => self.imul_pow4(n),
            5  => self.imul_pow5(n),
            6  => self.imul_pow6(n),
            7  => self.imul_pow7(n),
            8  => self.imul_pow8(n),
            9  => self.imul_pow9(n),
            10 => self.imul_pow10(n),
            11 => self.imul_pow11(n),
            12 => self.imul_pow12(n),
            13 => self.imul_pow13(n),
            14 => self.imul_pow14(n),
            15 => self.imul_pow15(n),
            16 => self.imul_pow16(n),
            17 => self.imul_pow17(n),
            18 => self.imul_pow18(n),
            19 => self.imul_pow19(n),
            20 => self.imul_pow20(n),
            21 => self.imul_pow21(n),
            22 => self.imul_pow22(n),
            23 => self.imul_pow23(n),
            24 => self.imul_pow24(n),
            25 => self.imul_pow25(n),
            26 => self.imul_pow26(n),
            27 => self.imul_pow27(n),
            28 => self.imul_pow28(n),
            29 => self.imul_pow29(n),
            30 => self.imul_pow30(n),
            31 => self.imul_pow31(n),
            32 => self.imul_pow32(n),
            33 => self.imul_pow33(n),
            34 => self.imul_pow34(n),
            35 => self.imul_pow35(n),
            36 => self.imul_pow36(n),
            _  => unreachable!()
        }
    }

    /// Multiply by a power of 2.
    fn imul_pow2(&mut self, n: u32) {
        self.ishl(n.as_usize())
    }

    imul_power!(imul_pow3, 3);

    /// Multiply by a power of 4.
    #[inline]
    fn imul_pow4(&mut self, n: u32) {
        self.imul_pow2(2*n);
    }

    imul_power!(imul_pow5, 5);

    /// Multiply by a power of 6.
    #[inline]
    fn imul_pow6(&mut self, n: u32) {
        self.imul_pow3(n);
        self.imul_pow2(n);
    }

    imul_power!(imul_pow7, 7);

    /// Multiply by a power of 8.
    #[inline]
    fn imul_pow8(&mut self, n: u32) {
        self.imul_pow2(3*n);
    }

    /// Multiply by a power of 9.
    #[inline]
    fn imul_pow9(&mut self, n: u32) {
        self.imul_pow3(n);
        self.imul_pow3(n);
    }

    /// Multiply by a power of 10.
    #[inline]
    fn imul_pow10(&mut self, n: u32) {
        self.imul_pow5(n);
        self.imul_pow2(n);
    }

    imul_power!(imul_pow11, 11);

    /// Multiply by a power of 12.
    #[inline]
    fn imul_pow12(&mut self, n: u32) {
        self.imul_pow3(n);
        self.imul_pow4(n);
    }

    imul_power!(imul_pow13, 13);

    /// Multiply by a power of 14.
    #[inline]
    fn imul_pow14(&mut self, n: u32) {
        self.imul_pow7(n);
        self.imul_pow2(n);
    }

    /// Multiply by a power of 15.
    #[inline]
    fn imul_pow15(&mut self, n: u32) {
        self.imul_pow3(n);
        self.imul_pow5(n);
    }

    /// Multiply by a power of 16.
    #[inline]
    fn imul_pow16(&mut self, n: u32) {
        self.imul_pow2(4*n);
    }

    imul_power!(imul_pow17, 17);

    /// Multiply by a power of 18.
    #[inline]
    fn imul_pow18(&mut self, n: u32) {
        self.imul_pow9(n);
        self.imul_pow2(n);
    }

    imul_power!(imul_pow19, 19);

    /// Multiply by a power of 20.
    #[inline]
    fn imul_pow20(&mut self, n: u32) {
        self.imul_pow5(n);
        self.imul_pow4(n);
    }

    /// Multiply by a power of 21.
    #[inline]
    fn imul_pow21(&mut self, n: u32) {
        self.imul_pow3(n);
        self.imul_pow7(n);
    }

    /// Multiply by a power of 22.
    #[inline]
    fn imul_pow22(&mut self, n: u32) {
        self.imul_pow11(n);
        self.imul_pow2(n);
    }

    imul_power!(imul_pow23, 23);

    /// Multiply by a power of 24.
    #[inline]
    fn imul_pow24(&mut self, n: u32) {
        self.imul_pow3(n);
        self.imul_pow8(n);
    }

    /// Multiply by a power of 25.
    #[inline]
    fn imul_pow25(&mut self, n: u32) {
        self.imul_pow5(n);
        self.imul_pow5(n);
    }

    /// Multiply by a power of 26.
    #[inline]
    fn imul_pow26(&mut self, n: u32) {
        self.imul_pow13(n);
        self.imul_pow2(n);
    }

    /// Multiply by a power of 27.
    #[inline]
    fn imul_pow27(&mut self, n: u32) {
        self.imul_pow9(n);
        self.imul_pow3(n);
    }

    /// Multiply by a power of 28.
    #[inline]
    fn imul_pow28(&mut self, n: u32) {
        self.imul_pow7(n);
        self.imul_pow4(n);
    }

    imul_power!(imul_pow29, 29);

    /// Multiply by a power of 30.
    #[inline]
    fn imul_pow30(&mut self, n: u32) {
        self.imul_pow15(n);
        self.imul_pow2(n);
    }

    imul_power!(imul_pow31, 31);

    /// Multiply by a power of 32.
    #[inline]
    fn imul_pow32(&mut self, n: u32) {
        self.imul_pow2(5*n);
    }

    /// Multiply by a power of 33.
    #[inline]
    fn imul_pow33(&mut self, n: u32) {
        self.imul_pow3(n);
        self.imul_pow11(n);
    }

    /// Multiply by a power of 34.
    #[inline]
    fn imul_pow34(&mut self, n: u32) {
        self.imul_pow17(n);
        self.imul_pow2(n);
    }

    /// Multiply by a power of 35.
    #[inline]
    fn imul_pow35(&mut self, n: u32) {
        self.imul_pow5(n);
        self.imul_pow7(n);
    }

    /// Multiply by a power of 36.
    #[inline]
    fn imul_pow36(&mut self, n: u32) {
        self.imul_pow9(n);
        self.imul_pow4(n);
    }

    // DIVISION

    /// DivAssign small integer, and return the remainder.
    #[inline]
    fn idiv_small(&mut self, y: Limb) -> Limb {
        small::idiv(self.data_mut(), y)
    }

    /// Div small integer to a copy of self, and return the remainder.
    #[inline]
    fn div_small(&self, y: Limb) -> (Self, Limb) {
        let mut x = self.clone();
        let rem = x.idiv_small(y);
        (x, rem)
    }

    // POWER

    /// Calculate self^n.
    #[inline]
    fn ipow(&mut self, n: Limb) {
        small::ipow(self.data_mut(), n);
    }

    /// Calculate self^n.
    #[inline]
    fn pow(&self, n: Limb) -> Self {
        let mut x = self.clone();
        x.ipow(n);
        x
    }
}

/// Trait for large operations for arbitrary-precision numbers.
pub(in atof::algorithm) trait LargeOps: SmallOps {
    // ADDITION

    /// AddAssign large integer.
    #[inline]
    fn iadd_large(&mut self, y: &Self) {
        large::iadd(self.data_mut(), y.data());
    }

    /// Add large integer to a copy of self.
    #[inline]
    fn add_large(&mut self, y: &Self) -> Self {
        let mut x = self.clone();
        x.iadd_large(y);
        x
    }

    // SUBTRACTION

    /// SubAssign large integer.
    /// Warning: Does no underflow checking, self must be >= y.
    #[inline]
    fn isub_large(&mut self, y: &Self) {
        large::isub(self.data_mut(), y.data());
    }

    /// Sub large integer from a copy of self.
    /// Warning: Does no underflow checking, self must be >= y.
    #[inline]
    fn sub_large(&mut self, y: &Self) -> Self {
        let mut x = self.clone();
        x.isub_large(y);
        x
    }

    // MULTIPLICATION

    /// MulAssign large integer.
    #[inline]
    fn imul_large(&mut self, y: &Self) {
        large::imul(self.data_mut(), y.data());
    }

    /// Mul large integer to a copy of self.
    #[inline]
    fn mul_large(&mut self, y: &Self) -> Self {
        let mut x = self.clone();
        x.imul_large(y);
        x
    }

    // DIVISION

    /// DivAssign large integer and get the remainder.
    #[inline]
    fn idiv_large(&mut self, y: &Self) -> Self {
        let mut rem = Self::default();
        *rem.data_mut() = large::idiv(self.data_mut(), y.data());
        rem
    }

    /// Div large integer to a copy of self and get the quotient and remainder.
    #[inline]
    fn div_large(&mut self, y: &Self) -> (Self, Self) {
        let mut x = self.clone();
        let rem = x.idiv_large(y);
        (x, rem)
    }

    /// Calculate the fast quotient for a single limb-bit quotient.
    ///
    /// This requires a non-normalized divisor, where there are at least
    /// `integral_binary_factor` leading 0 bits, to ensure at maximum a single
    /// digit will be produced for a single base.
    ///
    /// Warning: This is not a general-purpose division algorithm,
    /// it is highly specialized for peeling off singular digits.
    #[inline]
    fn quorem(&mut self, y: &Self) -> Limb {
        large::quorem(self.data_mut(), y.data())
    }
}

#[cfg(test)]
mod tests {
    use util::test::*;
    use super::*;

    #[derive(Clone, Default)]
    struct Bigint {
        data: DataType,
    }

    impl Bigint {
        #[inline]
        pub fn new() -> Bigint {
            Bigint { data: stackvec![] }
        }
    }

    impl SharedOps for Bigint {
        type StorageType = DataType;

        #[inline]
        fn data<'a>(&'a self) -> &'a Self::StorageType {
            &self.data
        }

        #[inline]
        fn data_mut<'a>(&'a mut self) -> &'a mut Self::StorageType {
            &mut self.data
        }
    }

    impl SmallOps for Bigint {
    }

    impl LargeOps for Bigint {
    }

    // SHARED OPS

    #[test]
    fn greater_test() {
        // Simple
        let x = Bigint { data: from_u32(&[1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(!x.greater(&y));
        assert!(!x.greater(&x));
        assert!(y.greater(&x));

        // Check asymmetric
        let x = Bigint { data: from_u32(&[5, 1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(x.greater(&y));
        assert!(!x.greater(&x));
        assert!(!y.greater(&x));

        // Check when we use reverse ordering properly.
        let x = Bigint { data: from_u32(&[5, 1, 9]) };
        let y = Bigint { data: from_u32(&[6, 2, 8]) };
        assert!(x.greater(&y));
        assert!(!x.greater(&x));
        assert!(!y.greater(&x));

        // Complex scenario, check it properly uses reverse ordering.
        let x = Bigint { data: from_u32(&[0, 1, 9]) };
        let y = Bigint { data: from_u32(&[4294967295, 0, 9]) };
        assert!(x.greater(&y));
        assert!(!x.greater(&x));
        assert!(!y.greater(&x));
    }

    #[test]
    fn greater_equal_test() {
        // Simple
        let x = Bigint { data: from_u32(&[1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(!x.greater_equal(&y));
        assert!(x.greater_equal(&x));
        assert!(y.greater_equal(&x));

        // Check asymmetric
        let x = Bigint { data: from_u32(&[5, 1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(x.greater_equal(&y));
        assert!(x.greater_equal(&x));
        assert!(!y.greater_equal(&x));

        // Check when we use reverse ordering properly.
        let x = Bigint { data: from_u32(&[5, 1, 9]) };
        let y = Bigint { data: from_u32(&[6, 2, 8]) };
        assert!(x.greater_equal(&y));
        assert!(x.greater_equal(&x));
        assert!(!y.greater_equal(&x));

        // Complex scenario, check it properly uses reverse ordering.
        let x = Bigint { data: from_u32(&[0, 1, 9]) };
        let y = Bigint { data: from_u32(&[4294967295, 0, 9]) };
        assert!(x.greater_equal(&y));
        assert!(x.greater_equal(&x));
        assert!(!y.greater_equal(&x));
    }

    #[test]
    fn equal_test() {
        // Simple
        let x = Bigint { data: from_u32(&[1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(!x.equal(&y));
        assert!(x.equal(&x));
        assert!(!y.equal(&x));

        // Check asymmetric
        let x = Bigint { data: from_u32(&[5, 1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(!x.equal(&y));
        assert!(x.equal(&x));
        assert!(!y.equal(&x));

        // Check when we use reverse ordering properly.
        let x = Bigint { data: from_u32(&[5, 1, 9]) };
        let y = Bigint { data: from_u32(&[6, 2, 8]) };
        assert!(!x.equal(&y));
        assert!(x.equal(&x));
        assert!(!y.equal(&x));

        // Complex scenario, check it properly uses reverse ordering.
        let x = Bigint { data: from_u32(&[0, 1, 9]) };
        let y = Bigint { data: from_u32(&[4294967295, 0, 9]) };
        assert!(!x.equal(&y));
        assert!(x.equal(&x));
        assert!(!y.equal(&x));
    }

    #[test]
    fn less_test() {
        // Simple
        let x = Bigint { data: from_u32(&[1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(x.less(&y));
        assert!(!x.less(&x));
        assert!(!y.less(&x));

        // Check asymmetric
        let x = Bigint { data: from_u32(&[5, 1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(!x.less(&y));
        assert!(!x.less(&x));
        assert!(y.less(&x));

        // Check when we use reverse ordering properly.
        let x = Bigint { data: from_u32(&[5, 1, 9]) };
        let y = Bigint { data: from_u32(&[6, 2, 8]) };
        assert!(!x.less(&y));
        assert!(!x.less(&x));
        assert!(y.less(&x));

        // Complex scenario, check it properly uses reverse ordering.
        let x = Bigint { data: from_u32(&[0, 1, 9]) };
        let y = Bigint { data: from_u32(&[4294967295, 0, 9]) };
        assert!(!x.less(&y));
        assert!(!x.less(&x));
        assert!(y.less(&x));
    }

    #[test]
    fn less_equal_test() {
        // Simple
        let x = Bigint { data: from_u32(&[1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(x.less_equal(&y));
        assert!(x.less_equal(&x));
        assert!(!y.less_equal(&x));

        // Check asymmetric
        let x = Bigint { data: from_u32(&[5, 1]) };
        let y = Bigint { data: from_u32(&[2]) };
        assert!(!x.less_equal(&y));
        assert!(x.less_equal(&x));
        assert!(y.less_equal(&x));

        // Check when we use reverse ordering properly.
        let x = Bigint { data: from_u32(&[5, 1, 9]) };
        let y = Bigint { data: from_u32(&[6, 2, 8]) };
        assert!(!x.less_equal(&y));
        assert!(x.less_equal(&x));
        assert!(y.less_equal(&x));

        // Complex scenario, check it properly uses reverse ordering.
        let x = Bigint { data: from_u32(&[0, 1, 9]) };
        let y = Bigint { data: from_u32(&[4294967295, 0, 9]) };
        assert!(!x.less_equal(&y));
        assert!(x.less_equal(&x));
        assert!(y.less_equal(&x));
    }

    #[test]
    fn leading_zero_limbs_test() {
        assert_eq!(Bigint::new().leading_zero_limbs(), 0);

        assert_eq!(Bigint::from_u16(0xF).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u32(0xFF).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u64(0xFF00000000).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u128(0xFF000000000000000000000000).leading_zero_limbs(), 0);

        assert_eq!(Bigint::from_u16(0xF).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u32(0xF).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u64(0xF00000000).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u128(0xF000000000000000000000000).leading_zero_limbs(), 0);

        assert_eq!(Bigint::from_u16(0xF0).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u32(0xF0).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u64(0xF000000000).leading_zero_limbs(), 0);
        assert_eq!(Bigint::from_u128(0xF0000000000000000000000000).leading_zero_limbs(), 0);
    }

    #[test]
    fn trailing_zero_limbs_test() {
        assert_eq!(Bigint::new().trailing_zero_limbs(), 0);

        assert_eq!(Bigint { data: stackvec![0xFF] }.trailing_zero_limbs(), 0);
        assert_eq!(Bigint { data: stackvec![0, 0xFF000] }.trailing_zero_limbs(), 1);
        assert_eq!(Bigint { data: stackvec![0, 0, 0, 0xFF000] }.trailing_zero_limbs(), 3);
    }

    #[test]
    fn leading_zeros_test() {
        assert_eq!(Bigint::new().leading_zeros(), 0);

        assert_eq!(Bigint::from_u16(0xFF).leading_zeros(), <Limb as Integer>::BITS-8);
        assert_eq!(Bigint::from_u32(0xFF).leading_zeros(), <Limb as Integer>::BITS-8);
        assert_eq!(Bigint::from_u64(0xFF00000000).leading_zeros(), 24);
        assert_eq!(Bigint::from_u128(0xFF000000000000000000000000).leading_zeros(), 24);

        assert_eq!(Bigint::from_u16(0xF).leading_zeros(), <Limb as Integer>::BITS-4);
        assert_eq!(Bigint::from_u32(0xF).leading_zeros(), <Limb as Integer>::BITS-4);
        assert_eq!(Bigint::from_u64(0xF00000000).leading_zeros(), 28);
        assert_eq!(Bigint::from_u128(0xF000000000000000000000000).leading_zeros(), 28);

        assert_eq!(Bigint::from_u16(0xF0).leading_zeros(), <Limb as Integer>::BITS-8);
        assert_eq!(Bigint::from_u32(0xF0).leading_zeros(), <Limb as Integer>::BITS-8);
        assert_eq!(Bigint::from_u64(0xF000000000).leading_zeros(), 24);
        assert_eq!(Bigint::from_u128(0xF0000000000000000000000000).leading_zeros(), 24);
    }

    #[test]
    fn trailing_zeros_test() {
        assert_eq!(Bigint::new().trailing_zeros(), 0);

        assert_eq!(Bigint::from_u16(0xFF).trailing_zeros(), 0);
        assert_eq!(Bigint::from_u32(0xFF).trailing_zeros(), 0);
        assert_eq!(Bigint::from_u64(0xFF00000000).trailing_zeros(), 32);
        assert_eq!(Bigint::from_u128(0xFF000000000000000000000000).trailing_zeros(), 96);

        assert_eq!(Bigint::from_u16(0xF).trailing_zeros(), 0);
        assert_eq!(Bigint::from_u32(0xF).trailing_zeros(), 0);
        assert_eq!(Bigint::from_u64(0xF00000000).trailing_zeros(), 32);
        assert_eq!(Bigint::from_u128(0xF000000000000000000000000).trailing_zeros(), 96);

        assert_eq!(Bigint::from_u16(0xF0).trailing_zeros(), 4);
        assert_eq!(Bigint::from_u32(0xF0).trailing_zeros(), 4);
        assert_eq!(Bigint::from_u64(0xF000000000).trailing_zeros(), 36);
        assert_eq!(Bigint::from_u128(0xF0000000000000000000000000).trailing_zeros(), 100);
    }

    #[test]
    fn hi32_test() {
        assert_eq!(Bigint::from_u16(0xA).hi32(), (0xA0000000, false));
        assert_eq!(Bigint::from_u32(0xAB).hi32(), (0xAB000000, false));
        assert_eq!(Bigint::from_u64(0xAB00000000).hi32(), (0xAB000000, false));
        assert_eq!(Bigint::from_u64(0xA23456789A).hi32(), (0xA2345678, true));
    }

    #[test]
    fn hi64_test() {
        assert_eq!(Bigint::from_u16(0xA).hi64(), (0xA000000000000000, false));
        assert_eq!(Bigint::from_u32(0xAB).hi64(), (0xAB00000000000000, false));
        assert_eq!(Bigint::from_u64(0xAB00000000).hi64(), (0xAB00000000000000, false));
        assert_eq!(Bigint::from_u64(0xA23456789A).hi64(), (0xA23456789A000000, false));
        assert_eq!(Bigint::from_u128(0xABCDEF0123456789ABCDEF0123).hi64(), (0xABCDEF0123456789, true));
    }

    #[test]
    fn hi128_test() {
        assert_eq!(Bigint::from_u128(0xABCDEF0123456789ABCDEF0123).hi128(), (0xABCDEF0123456789ABCDEF0123000000, false));
        assert_eq!(Bigint::from_u128(0xABCDEF0123456789ABCDEF0123456789).hi128(), (0xABCDEF0123456789ABCDEF0123456789, false));
        assert_eq!(Bigint { data: from_u32(&[0x34567890, 0xBCDEF012, 0x3456789A, 0xBCDEF012, 0xA]) }.hi128(), (0xABCDEF0123456789ABCDEF0123456789, false));
        assert_eq!(Bigint { data: from_u32(&[0x34567891, 0xBCDEF012, 0x3456789A, 0xBCDEF012, 0xA]) }.hi128(), (0xABCDEF0123456789ABCDEF0123456789, true));
    }

    #[test]
    fn pad_zero_digits_test() {
        let mut x = Bigint { data: stackvec![0, 0, 0, 1] };
        x.pad_zero_digits(3);
        assert_eq!(x.data.as_slice(), &[0, 0, 0, 0, 0, 0, 1]);

        let mut x = Bigint { data: stackvec![1] };
        x.pad_zero_digits(1);
        assert_eq!(x.data.as_slice(), &[0, 1]);
    }
3200 
3201     #[test]
shl_test()3202     fn shl_test() {
3203         // Pattern generated via `''.join(["1" +"0"*i for i in range(20)])`
3204         let mut big = Bigint { data: from_u32(&[0xD2210408]) };
3205         big.ishl(5);
3206         assert_eq!(big.data, from_u32(&[0x44208100, 0x1A]));
3207         big.ishl(32);
3208         assert_eq!(big.data, from_u32(&[0, 0x44208100, 0x1A]));
3209         big.ishl(27);
3210         assert_eq!(big.data, from_u32(&[0, 0, 0xD2210408]));
3211 
3212         // 96-bits of previous pattern
3213         let mut big = Bigint { data: from_u32(&[0x20020010, 0x8040100, 0xD2210408]) };
3214         big.ishl(5);
3215         assert_eq!(big.data, from_u32(&[0x400200, 0x802004, 0x44208101, 0x1A]));
3216         big.ishl(32);
3217         assert_eq!(big.data, from_u32(&[0, 0x400200, 0x802004, 0x44208101, 0x1A]));
3218         big.ishl(27);
3219         assert_eq!(big.data, from_u32(&[0, 0, 0x20020010, 0x8040100, 0xD2210408]));
3220     }

    #[test]
    fn shr_test() {
        // Simple case.
        let mut big = Bigint { data: from_u32(&[0xD2210408]) };
        big.ishr(5, false);
        assert_eq!(big.data, from_u32(&[0x6910820]));
        big.ishr(27, false);
        assert_eq!(big.data, from_u32(&[]));

        // Pattern generated via `''.join(["1" + "0"*i for i in range(20)])`
        let mut big = Bigint { data: from_u32(&[0x20020010, 0x8040100, 0xD2210408]) };
        big.ishr(5, false);
        assert_eq!(big.data, from_u32(&[0x1001000, 0x40402008, 0x6910820]));
        big.ishr(32, false);
        assert_eq!(big.data, from_u32(&[0x40402008, 0x6910820]));
        big.ishr(27, false);
        assert_eq!(big.data, from_u32(&[0xD2210408]));

        // Check no round-up with a halfway value and an even result.
        let mut big = Bigint { data: from_u32(&[0xD2210408]) };
        big.ishr(3, true);
        assert_eq!(big.data, from_u32(&[0x1A442081]));
        big.ishr(1, true);
        assert_eq!(big.data, from_u32(&[0xD221040]));

        let mut big = Bigint { data: from_u32(&[0xD2210408]) };
        big.ishr(4, true);
        assert_eq!(big.data, from_u32(&[0xD221040]));

        // Check round-up with a halfway value and an odd result.
        let mut big = Bigint { data: from_u32(&[0xD2210438]) };
        big.ishr(3, true);
        assert_eq!(big.data, from_u32(&[0x1A442087]));
        big.ishr(1, true);
        assert_eq!(big.data, from_u32(&[0xD221044]));

        let mut big = Bigint { data: from_u32(&[0xD2210438]) };
        big.ishr(5, true);
        assert_eq!(big.data, from_u32(&[0x6910822]));
    }
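
    // The rounding cases above follow round-half-to-even: `ishr(n, true)`
    // rounds up only when the truncated bits exceed the halfway point, or
    // when they are exactly halfway and the truncated result is odd. For
    // example, 0xD2210438 >> 5 truncates 0b11000 (above half of 0b100000),
    // so 0x6910821 rounds up to 0x6910822; but 0xD2210408 >> 4 truncates
    // exactly 0b1000 and 0xD221040 is even, so it stays.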

    #[test]
    fn bit_length_test() {
        let x = Bigint { data: from_u32(&[0, 0, 0, 1]) };
        assert_eq!(x.bit_length(), 97);

        let x = Bigint { data: from_u32(&[0, 0, 0, 3]) };
        assert_eq!(x.bit_length(), 98);

        let x = Bigint { data: from_u32(&[1<<31]) };
        assert_eq!(x.bit_length(), 32);
    }
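
    // With 32-bit limbs, `bit_length()` is 32 * (limbs - 1) plus the bit
    // width of the most significant limb: [0, 0, 0, 1] gives 3 * 32 + 1 = 97,
    // and a single limb of 1 << 31 gives 32.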

    // SMALL OPS

    #[test]
    fn iadd_small_test() {
        // Overflow check (single limb).
        // 0xFFFFFFFF + 5 overflows the limb: the low limb wraps to 4, and
        // the carry pushes a new most-significant limb of 1.
        let mut x = Bigint { data: from_u32(&[4294967295]) };
        x.iadd_small(5);
        assert_eq!(x.data, from_u32(&[4, 1]));

        // No overflow, single value
        let mut x = Bigint { data: from_u32(&[5]) };
        x.iadd_small(7);
        assert_eq!(x.data, from_u32(&[12]));

        // Single carry, internal overflow
        let mut x = Bigint::from_u64(0x80000000FFFFFFFF);
        x.iadd_small(7);
        assert_eq!(x.data, from_u32(&[6, 0x80000001]));

        // Double carry, overflow
        let mut x = Bigint::from_u64(0xFFFFFFFFFFFFFFFF);
        x.iadd_small(7);
        assert_eq!(x.data, from_u32(&[6, 0, 1]));
    }
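
    // A minimal sketch of the ripple-carry loop these cases exercise (the
    // name and slice-based storage are illustrative assumptions, not the
    // actual `Bigint` internals). A non-zero returned carry means the number
    // grew by one limb, as in the overflow asserts above.
    fn small_add_sketch(limbs: &mut [u32], y: u32) -> u32 {
        let mut carry = y as u64;
        for limb in limbs.iter_mut() {
            // The high 32 bits of each sum carry into the next limb.
            let sum = *limb as u64 + carry;
            *limb = sum as u32;
            carry = sum >> 32;
            if carry == 0 {
                break;
            }
        }
        carry as u32
    }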

    #[test]
    fn isub_small_test() {
        // Borrow across the limb boundary, shrinking back to one limb.
        let mut x = Bigint { data: from_u32(&[4, 1]) };
        x.isub_small(5);
        assert_eq!(x.data, from_u32(&[4294967295]));

        // No borrow, single value.
        let mut x = Bigint { data: from_u32(&[12]) };
        x.isub_small(7);
        assert_eq!(x.data, from_u32(&[5]));

        // Single borrow, internal underflow.
        let mut x = Bigint { data: from_u32(&[6, 0x80000001]) };
        x.isub_small(7);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFF, 0x80000000]));

        // Double borrow; the zeroed top limb is trimmed.
        let mut x = Bigint { data: from_u32(&[6, 0, 1]) };
        x.isub_small(7);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFF, 0xFFFFFFFF]));
    }

    #[test]
    fn imul_small_test() {
        // No overflow check, 1 limb.
        let mut x = Bigint { data: from_u32(&[5]) };
        x.imul_small(7);
        assert_eq!(x.data, from_u32(&[35]));

        // No overflow check, 2 limbs.
        let mut x = Bigint::from_u64(0x4000000040000);
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[0x00140000, 0x140000]));

        // Overflow, 1 carry.
        let mut x = Bigint { data: from_u32(&[0x33333334]) };
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[4, 1]));

        // Overflow, 1 carry, internal.
        let mut x = Bigint::from_u64(0x133333334);
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[4, 6]));

        // Overflow, 2 carries.
        let mut x = Bigint::from_u64(0x3333333333333334);
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[4, 0, 1]));
    }
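
    // Worked example for the single-carry case above: 0x33333334 * 5 is
    // 0x100000004, which no longer fits in one limb, so the low limb holds
    // 0x4 and the carry of 0x1 becomes a new high limb.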

    #[test]
    fn idiv_small_test() {
        // A quotient of 0 is trimmed to an empty limb buffer.
        let mut x = Bigint { data: from_u32(&[4]) };
        assert_eq!(x.idiv_small(7), 4);
        assert_eq!(x.data, from_u32(&[]));

        let mut x = Bigint { data: from_u32(&[3]) };
        assert_eq!(x.idiv_small(7), 3);
        assert_eq!(x.data, from_u32(&[]));

        // Non-trivial quotient and remainder: 15 / 10 = 1 rem 5.
        let mut x = Bigint { data: from_u32(&[15]) };
        assert_eq!(x.idiv_small(10), 5);
        assert_eq!(x.data, from_u32(&[1]));

        // Check 1 carry.
        let mut x = Bigint::from_u64(0x133333334);
        assert_eq!(x.idiv_small(5), 1);
        assert_eq!(x.data, from_u32(&[0x3D70A3D7]));

        // Check 2 carries.
        let mut x = Bigint::from_u64(0x3333333333333334);
        assert_eq!(x.idiv_small(5), 4);
        assert_eq!(x.data, from_u32(&[0xD70A3D70, 0xA3D70A3]));
    }
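
    // A minimal sketch of the short-division loop, walking limbs from most
    // to least significant and folding the running remainder into each
    // partial dividend (names and storage are illustrative assumptions; the
    // real routine also trims trailing zero limbs from the quotient):
    fn short_div_sketch(limbs: &mut [u32], d: u32) -> u32 {
        let mut rem: u64 = 0;
        for limb in limbs.iter_mut().rev() {
            // Partial dividend: remainder from the limb above, then this limb.
            let part = (rem << 32) | (*limb as u64);
            *limb = (part / d as u64) as u32;
            rem = part % d as u64;
        }
        rem as u32
    }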

    #[test]
    fn ipow_test() {
        let x = Bigint { data: from_u32(&[5]) };
        assert_eq!(x.pow(2).data, from_u32(&[25]));
        assert_eq!(x.pow(15).data, from_u32(&[452807053, 7]));
        assert_eq!(x.pow(16).data, from_u32(&[2264035265, 35]));
        assert_eq!(x.pow(17).data, from_u32(&[2730241733, 177]));
        assert_eq!(x.pow(302).data, from_u32(&[2443090281, 2149694430, 2297493928, 1584384001, 1279504719, 1930002239, 3312868939, 3735173465, 3523274756, 2025818732, 1641675015, 2431239749, 4292780461, 3719612855, 4174476133, 3296847770, 2677357556, 638848153, 2198928114, 3285049351, 2159526706, 626302612]));
    }
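
    // Sanity check on the expected limbs: 5^15 = 30517578125
    // = 7 * 2^32 + 452807053, matching `from_u32(&[452807053, 7])`.
    //
    // A scalar sketch of exponentiation by squaring, a common strategy for a
    // bignum `pow` (an assumption for illustration; the actual `pow` may
    // differ). Debug builds panic on overflow once the result exceeds u128.
    fn pow_sketch(mut base: u128, mut exp: u32) -> u128 {
        let mut acc: u128 = 1;
        while exp > 0 {
            if exp & 1 == 1 {
                acc *= base; // fold in the power for this set bit
            }
            base *= base; // square for the next bit
            exp >>= 1;
        }
        acc
    }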

    // LARGE OPS

    #[test]
    fn iadd_large_test() {
        // Overflow, both single values
        let mut x = Bigint { data: from_u32(&[4294967295]) };
        let y = Bigint { data: from_u32(&[5]) };
        x.iadd_large(&y);
        assert_eq!(x.data, from_u32(&[4, 1]));

        // No overflow, single value
        let mut x = Bigint { data: from_u32(&[5]) };
        let y = Bigint { data: from_u32(&[7]) };
        x.iadd_large(&y);
        assert_eq!(x.data, from_u32(&[12]));

        // Single carry, internal overflow
        let mut x = Bigint::from_u64(0x80000000FFFFFFFF);
        let y = Bigint { data: from_u32(&[7]) };
        x.iadd_large(&y);
        assert_eq!(x.data, from_u32(&[6, 0x80000001]));

        // 1st limb overflows, 2nd doesn't.
        let mut x = Bigint::from_u64(0x7FFFFFFFFFFFFFFF);
        let y = Bigint::from_u64(0x7FFFFFFFFFFFFFFF);
        x.iadd_large(&y);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFE, 0xFFFFFFFF]));

        // Both limbs overflow.
        let mut x = Bigint::from_u64(0x8FFFFFFFFFFFFFFF);
        let y = Bigint::from_u64(0x7FFFFFFFFFFFFFFF);
        x.iadd_large(&y);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]));
    }
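
    // Worked example for the double-overflow case:
    // 0x8FFFFFFFFFFFFFFF + 0x7FFFFFFFFFFFFFFF = 0x10FFFFFFFFFFFFFFE, stored
    // little-endian as [0xFFFFFFFE, 0x0FFFFFFF, 1].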

    #[test]
    fn isub_large_test() {
        // Borrow across the limb boundary, single-limb result.
        let mut x = Bigint { data: from_u32(&[4, 1]) };
        let y = Bigint { data: from_u32(&[5]) };
        x.isub_large(&y);
        assert_eq!(x.data, from_u32(&[4294967295]));

        // No borrow, single value.
        let mut x = Bigint { data: from_u32(&[12]) };
        let y = Bigint { data: from_u32(&[7]) };
        x.isub_large(&y);
        assert_eq!(x.data, from_u32(&[5]));

        // Single borrow, internal underflow.
        let mut x = Bigint { data: from_u32(&[6, 0x80000001]) };
        let y = Bigint { data: from_u32(&[7]) };
        x.isub_large(&y);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFF, 0x80000000]));

        // Zeros out entirely.
        let mut x = Bigint { data: from_u32(&[0xFFFFFFFF, 0x7FFFFFFF]) };
        let y = Bigint { data: from_u32(&[0xFFFFFFFF, 0x7FFFFFFF]) };
        x.isub_large(&y);
        assert_eq!(x.data, from_u32(&[]));

        // 1st limb borrows, 2nd doesn't; the zeroed top limb is trimmed.
        let mut x = Bigint { data: from_u32(&[0xFFFFFFFE, 0x80000000]) };
        let y = Bigint { data: from_u32(&[0xFFFFFFFF, 0x7FFFFFFF]) };
        x.isub_large(&y);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFF]));
    }

    #[test]
    fn imul_large_test() {
        // Multiply by an empty (zero) value.
        let mut x = Bigint { data: from_u32(&[0xFFFFFFFF]) };
        let y = Bigint { data: from_u32(&[]) };
        x.imul_large(&y);
        assert_eq!(x.data, from_u32(&[]));

        // Simple case.
        let mut x = Bigint { data: from_u32(&[0xFFFFFFFF]) };
        let y = Bigint { data: from_u32(&[5]) };
        x.imul_large(&y);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFB, 0x4]));

        // Large u32 operands, but still single-limb inputs.
        let mut x = Bigint { data: from_u32(&[0xFFFFFFFF]) };
        let y = Bigint { data: from_u32(&[0xFFFFFFFE]) };
        x.imul_large(&y);
        assert_eq!(x.data, from_u32(&[0x2, 0xFFFFFFFD]));

        // Multiply two multi-limb values together.
        let mut x = Bigint { data: from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]) };
        let y = Bigint { data: from_u32(&[0x99999999, 0x99999999, 0xCCCD9999, 0xCCCC]) };
        x.imul_large(&y);
        assert_eq!(x.data, from_u32(&[0xCCCCCCCE, 0x5CCCCCCC, 0x9997FFFF, 0x33319999, 0x999A7333, 0xD999]));
    }
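
    // Worked example for the two large single-limb operands:
    // 0xFFFFFFFF * 0xFFFFFFFE = 0xFFFFFFFD00000002, stored little-endian as
    // [0x2, 0xFFFFFFFD].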

    #[test]
    fn imul_karatsuba_mul_test() {
        // Operands large enough to trigger `karatsuba_mul`.
        let mut x = Bigint { data: from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]) };
        let y = Bigint { data: from_u32(&[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) };
        x.imul_large(&y);
        assert_eq!(x.data, from_u32(&[4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1340, 1435, 1508, 1558, 1584, 1585, 1560, 1508, 1428, 1319, 1180, 1010, 808, 573, 304]));

        // Operands of mismatched length, to trigger `karatsuba_uneven_mul`.
        let mut x = Bigint { data: from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]) };
        let y = Bigint { data: from_u32(&[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37]) };
        x.imul_large(&y);
        assert_eq!(x.data, from_u32(&[4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1360, 1496, 1632, 1768, 1904, 2040, 2176, 2312, 2448, 2584, 2720, 2856, 2992, 3128, 3264, 3400, 3536, 3672, 3770, 3829, 3848, 3826, 3762, 3655, 3504, 3308, 3066, 2777, 2440, 2054, 1618, 1131, 592]));
    }
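
    // A scalar sketch of the Karatsuba recurrence these tests exercise
    // (illustrative only; the real routines operate on limb slices). For
    // x = x1 * B + x0 and y = y1 * B + y0:
    //   z0 = x0 * y0
    //   z2 = x1 * y1
    //   z1 = (x0 + x1) * (y0 + y1) - z0 - z2
    //   x * y = z2 * B^2 + z1 * B + z0
    // which needs three half-width multiplications instead of four.
    fn karatsuba_sketch(x: u32, y: u32) -> u64 {
        let b = 1u64 << 16;
        let (x0, x1) = ((x & 0xFFFF) as u64, (x >> 16) as u64);
        let (y0, y1) = ((y & 0xFFFF) as u64, (y >> 16) as u64);
        let z0 = x0 * y0;
        let z2 = x1 * y1;
        let z1 = (x0 + x1) * (y0 + y1) - z0 - z2;
        z2 * b * b + z1 * b + z0
    }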

    #[test]
    fn idiv_large_test() {
        // Simple case.
        let mut x = Bigint { data: from_u32(&[0xFFFFFFFF]) };
        let y = Bigint { data: from_u32(&[5]) };
        let rem = x.idiv_large(&y);
        assert_eq!(x.data, from_u32(&[0x33333333]));
        assert_eq!(rem.data, from_u32(&[0]));

        // Two-limb case.
        let mut x = Bigint { data: from_u32(&[0x2, 0xFFFFFFFF]) };
        let y = Bigint { data: from_u32(&[0xFFFFFFFE]) };
        let rem = x.idiv_large(&y);
        assert_eq!(x.data, from_u32(&[1, 1]));
        assert_eq!(rem.data, from_u32(&[4]));

        // Larger multi-limb case.
        let mut x = Bigint { data: from_u32(&[0xCCCCCCCF, 0x5CCCCCCC, 0x9997FFFF, 0x33319999, 0x999A7333, 0xD999]) };
        let y = Bigint { data: from_u32(&[0x99999999, 0x99999999, 0xCCCD9999, 0xCCCC]) };
        let rem = x.idiv_large(&y);
        assert_eq!(x.data, from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]));
        assert_eq!(rem.data, from_u32(&[1]));

        // Extremely large case, reusing the Karatsuba multiplication example
        // above with the third limb bumped from 28 to 29, so the division
        // leaves a remainder of 2^64.
        let mut x = Bigint { data: from_u32(&[4, 13, 29, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1340, 1435, 1508, 1558, 1584, 1585, 1560, 1508, 1428, 1319, 1180, 1010, 808, 573, 304]) };
        let y = Bigint { data: from_u32(&[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) };
        let rem = x.idiv_large(&y);
        assert_eq!(x.data, from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]));
        assert_eq!(rem.data, from_u32(&[0, 0, 1]));
    }
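
    // `idiv_large` returns the remainder and leaves the quotient in place,
    // so `quotient * y + rem` reconstructs the original value. For the
    // two-limb case: [1, 1] is 0x100000001, and
    // 0x100000001 * 0xFFFFFFFE + 4 = 0xFFFFFFFF00000002 = [0x2, 0xFFFFFFFF].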

    #[test]
    fn quorem_test() {
        let mut x = Bigint::from_u128(42535295865117307932921825928971026432);
        let y = Bigint::from_u128(17218479456385750618067377696052635483);
        assert_eq!(x.quorem(&y), 2);
        assert_eq!(x.data, from_u32(&[1873752394, 3049207402, 3024501058, 102215382]));
    }
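
    // In the case above the dividend is exactly 2^125; `quorem` returns the
    // integer quotient (2) and leaves the remainder, 2^125 - 2 * y, in `x`.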
}