//! Building-blocks for arbitrary-precision math.
//!
//! These algorithms assume little-endian order for the large integer
//! buffers, so for a `vec![0, 1, 2, 3]`, `3` is the most significant limb,
//! and `0` is the least significant limb.
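//!
//! For example, with 32-bit limbs, `vec![0, 1, 2, 3]` stores the value
//! `3 * 2^96 + 2 * 2^64 + 1 * 2^32 + 0`.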

use crate::large_powers;
use crate::lib::ops::RangeBounds;
use crate::lib::{cmp, iter, mem, ops, ptr};
use crate::num::*;
use crate::slice::*;
use crate::small_powers::*;

// ALIASES
// -------

// Type for a single limb of the big integer.
//
// A limb is analogous to a digit in base10, except it stores 32-bit
// or 64-bit numbers instead.
//
// This should cover all known 64-bit platforms supported by Rust.
// https://forge.rust-lang.org/platform-support.html
//
// Platforms where native 128-bit multiplication is explicitly supported:
//  - x86_64 (Supported via `MUL`).
//  - mips64 (Supported via `DMULTU`, from which `HI` and `LO` can be read).
//
// Platforms where native 64-bit multiplication is supported and
// you can extract hi-lo for 64-bit multiplications:
//  - aarch64 (Requires `UMULH` and `MUL` to capture high and low bits).
//  - powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits).
//
// Platforms where native 128-bit multiplication is not supported,
// requiring software emulation:
//  - sparc64 (`UMUL` only supports double-word arguments).

// 32-BIT LIMB
#[cfg(limb_width_32)]
pub type Limb = u32;

#[cfg(limb_width_32)]
pub const POW5_LIMB: &[Limb] = &POW5_32;

#[cfg(limb_width_32)]
pub const POW10_LIMB: &[Limb] = &POW10_32;

#[cfg(limb_width_32)]
type Wide = u64;

// 64-BIT LIMB
#[cfg(limb_width_64)]
pub type Limb = u64;

#[cfg(limb_width_64)]
pub const POW5_LIMB: &[Limb] = &POW5_64;

#[cfg(limb_width_64)]
pub const POW10_LIMB: &[Limb] = &POW10_64;

#[cfg(limb_width_64)]
type Wide = u128;

// Maximum denominator is 767 mantissa digits + 324 exponent,
// or 1091 digits, or approximately 3600 bits (round up to 4k).
#[cfg(all(feature = "no_alloc", limb_width_32))]
pub(crate) type LimbVecType = arrayvec::ArrayVec<[Limb; 128]>;

#[cfg(all(feature = "no_alloc", limb_width_64))]
pub(crate) type LimbVecType = arrayvec::ArrayVec<[Limb; 64]>;

#[cfg(not(feature = "no_alloc"))]
pub(crate) type LimbVecType = crate::lib::Vec<Limb>;

/// Cast to limb type.
#[inline(always)]
pub(crate) fn as_limb<T: Integer>(t: T) -> Limb {
    Limb::as_cast(t)
}

/// Cast to wide type.
#[inline(always)]
fn as_wide<T: Integer>(t: T) -> Wide {
    Wide::as_cast(t)
}

// SPLIT
// -----

/// Split u64 into limbs, in little-endian order.
#[inline]
#[cfg(limb_width_32)]
fn split_u64(x: u64) -> [Limb; 2] {
    [as_limb(x), as_limb(x >> 32)]
}

/// Split u64 into limbs, in little-endian order.
#[inline]
#[cfg(limb_width_64)]
fn split_u64(x: u64) -> [Limb; 1] {
    [as_limb(x)]
}

// HI64
// ----

// NONZERO

/// Check if any of the remaining bits are non-zero.
#[inline]
pub fn nonzero<T: Integer>(x: &[T], rindex: usize) -> bool {
    let len = x.len();
    let slc = &x[..len - rindex];
    slc.iter().rev().any(|&x| x != T::ZERO)
}

/// Shift 64-bit integer to high 64-bits.
#[inline]
fn u64_to_hi64_1(r0: u64) -> (u64, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}

/// Shift 2 64-bit integers to high 64-bits.
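///
/// As a worked example: `u64_to_hi64_2(0xAB, 0xCD00_0000_0000_0000)` shifts
/// `r0` left by 56 bits and pulls the top 8 bits of `r1` in below it,
/// returning `(0xABCD_0000_0000_0000, false)`, since no nonzero bits of
/// `r1` are truncated.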
#[inline]
fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
    debug_assert!(r0 != 0);
    let ls = r0.leading_zeros();
    let rs = 64 - ls;
    let v = match ls {
        0 => r0,
        _ => (r0 << ls) | (r1 >> rs),
    };
    let n = (r1 << ls) != 0;
    (v, n)
}

/// Trait to export the high 64-bits from a little-endian slice.
trait Hi64<T>: Slice<T> {
    /// Get the hi64 bits from a 1-limb slice.
    fn hi64_1(&self) -> (u64, bool);

    /// Get the hi64 bits from a 2-limb slice.
    fn hi64_2(&self) -> (u64, bool);

    /// Get the hi64 bits from a 3-limb slice.
    fn hi64_3(&self) -> (u64, bool);

    /// Get the hi64 bits from a 4-limb slice.
    fn hi64_4(&self) -> (u64, bool);

    /// Get the hi64 bits from a 5-limb slice.
    fn hi64_5(&self) -> (u64, bool);

    /// High-level exporter to extract the high 64 bits from a little-endian slice.
    #[inline]
    fn hi64(&self) -> (u64, bool) {
        match self.len() {
            0 => (0, false),
            1 => self.hi64_1(),
            2 => self.hi64_2(),
            3 => self.hi64_3(),
            4 => self.hi64_4(),
            _ => self.hi64_5(),
        }
    }
}

impl Hi64<u32> for [u32] {
    #[inline]
    fn hi64_1(&self) -> (u64, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0].as_u64();
        u64_to_hi64_1(r0)
    }

    #[inline]
    fn hi64_2(&self) -> (u64, bool) {
        debug_assert!(self.len() == 2);
        let rview = self.rview();
        let r0 = rview[0].as_u64() << 32;
        let r1 = rview[1].as_u64();
        u64_to_hi64_1(r0 | r1)
    }

    #[inline]
    fn hi64_3(&self) -> (u64, bool) {
        debug_assert!(self.len() >= 3);
        let rview = self.rview();
        let r0 = rview[0].as_u64();
        let r1 = rview[1].as_u64() << 32;
        let r2 = rview[2].as_u64();
        let (v, n) = u64_to_hi64_2(r0, r1 | r2);
        (v, n || nonzero(self, 3))
    }

    #[inline]
    fn hi64_4(&self) -> (u64, bool) {
        self.hi64_3()
    }

    #[inline]
    fn hi64_5(&self) -> (u64, bool) {
        self.hi64_3()
    }
}

impl Hi64<u64> for [u64] {
    #[inline]
    fn hi64_1(&self) -> (u64, bool) {
        debug_assert!(self.len() == 1);
        let rview = self.rview();
        let r0 = rview[0];
        u64_to_hi64_1(r0)
    }

    #[inline]
    fn hi64_2(&self) -> (u64, bool) {
        debug_assert!(self.len() >= 2);
        let rview = self.rview();
        let r0 = rview[0];
        let r1 = rview[1];
        let (v, n) = u64_to_hi64_2(r0, r1);
        (v, n || nonzero(self, 2))
    }

    #[inline]
    fn hi64_3(&self) -> (u64, bool) {
        self.hi64_2()
    }

    #[inline]
    fn hi64_4(&self) -> (u64, bool) {
        self.hi64_2()
    }

    #[inline]
    fn hi64_5(&self) -> (u64, bool) {
        self.hi64_2()
    }
}

// SEQUENCES
// ---------

/// Insert multiple elements at position `index`.
///
/// Shifts all elements at or after `index` toward the back of the buffer.
/// It uses size hints to try to minimize the number of moves,
/// however, it does not rely on them. We cannot internally allocate, so
/// if we overstep the lower_size_bound, we have to do extensive
/// moves to shift each item back incrementally.
///
/// This implementation is adapted from [`smallvec`], which has a battle-tested
/// implementation that has been revised for at least one security advisory
/// warning. Smallvec is similarly licensed under an MIT/Apache dual license.
///
/// [`smallvec`]: https://github.com/servo/rust-smallvec
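///
/// For example (a small usage sketch): calling
/// `insert_many(&mut vec, 0, iter::repeat(0).take(2))` on a buffer holding
/// `[1, 2]` yields `[0, 0, 1, 2]`, which is how `ishl_limbs` pads the
/// low-order limbs below.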
pub fn insert_many<Iter>(vec: &mut LimbVecType, index: usize, iterable: Iter)
where
    Iter: iter::IntoIterator<Item = Limb>,
{
    let mut iter = iterable.into_iter();
    if index == vec.len() {
        return vec.extend(iter);
    }

    let (lower_size_bound, _) = iter.size_hint();
    assert!(lower_size_bound <= core::isize::MAX as usize); // Ensure offset is indexable
    assert!(index + lower_size_bound >= index); // Protect against overflow

    let mut num_added = 0;
    let old_len = vec.len();
    assert!(index <= old_len);

    unsafe {
        // Reserve space for `lower_size_bound` elements.
        reserve(vec, lower_size_bound);
        let start = vec.as_mut_ptr();
        let ptr = start.add(index);

        // Move the trailing elements.
        ptr::copy(ptr, ptr.add(lower_size_bound), old_len - index);

        // In case the iterator panics, don't double-drop the items we just copied above.
        vec.set_len(0);
        let mut guard = DropOnPanic {
            start,
            skip: index..(index + lower_size_bound),
            len: old_len + lower_size_bound,
        };

        while num_added < lower_size_bound {
            let element = match iter.next() {
                Some(x) => x,
                None => break,
            };
            let cur = ptr.add(num_added);
            ptr::write(cur, element);
            guard.skip.start += 1;
            num_added += 1;
        }

        if num_added < lower_size_bound {
            // Iterator provided fewer elements than the hint. Move the tail backward.
            ptr::copy(ptr.add(lower_size_bound), ptr.add(num_added), old_len - index);
        }
        // There are no more duplicate or uninitialized slots, so the guard is not needed.
        vec.set_len(old_len + num_added);
        mem::forget(guard);
    }

    // Insert any remaining elements one-by-one.
    for element in iter {
        vec.insert(index + num_added, element);
        num_added += 1;
    }

    struct DropOnPanic<T> {
        start: *mut T,
        skip: ops::Range<usize>, // Space we copied-out-of, but haven't written-to yet.
        len: usize,
    }

    impl<T> DropOnPanic<T> {
        // Contains methods for Rustc versions < 1.35.0.
        // Remove when support is dropped for < 1.35.0.
        #[inline]
        fn contains(&self, item: &usize) -> bool {
            (match self.skip.start_bound() {
                ops::Bound::Included(ref start) => *start <= item,
                ops::Bound::Excluded(ref start) => *start < item,
                ops::Bound::Unbounded => true,
            }) && (match self.skip.end_bound() {
                ops::Bound::Included(ref end) => item <= *end,
                ops::Bound::Excluded(ref end) => item < *end,
                ops::Bound::Unbounded => true,
            })
        }
    }

    impl<T> Drop for DropOnPanic<T> {
        fn drop(&mut self) {
            for i in 0..self.len {
                if !self.contains(&i) {
                    unsafe {
                        ptr::drop_in_place(self.start.add(i));
                    }
                }
            }
        }
    }
}

/// Resize arrayvec to size.
#[inline]
#[cfg(feature = "no_alloc")]
fn resize(vec: &mut LimbVecType, len: usize, value: Limb) {
    assert!(len <= vec.capacity());
    let old_len = vec.len();
    if len > old_len {
        vec.extend(iter::repeat(value).take(len - old_len));
    } else {
        vec.truncate(len);
    }
}

/// Resize vec to size.
#[inline]
#[cfg(not(feature = "no_alloc"))]
fn resize(vec: &mut LimbVecType, len: usize, value: Limb) {
    vec.resize(len, value)
}

/// Reserve arrayvec capacity.
#[inline]
#[cfg(feature = "no_alloc")]
pub(crate) fn reserve(vec: &mut LimbVecType, capacity: usize) {
    assert!(vec.len() + capacity <= vec.capacity());
}

/// Reserve vec capacity.
#[inline]
#[cfg(not(feature = "no_alloc"))]
pub(crate) fn reserve(vec: &mut LimbVecType, capacity: usize) {
    vec.reserve(capacity)
}

/// Reserve exact arrayvec capacity.
#[inline]
#[cfg(feature = "no_alloc")]
fn reserve_exact(vec: &mut LimbVecType, capacity: usize) {
    assert!(vec.len() + capacity <= vec.capacity());
}

/// Reserve exact vec capacity.
#[inline]
#[cfg(not(feature = "no_alloc"))]
fn reserve_exact(vec: &mut LimbVecType, capacity: usize) {
    vec.reserve_exact(capacity)
}

// SCALAR
// ------

// Scalar-to-scalar operations, for building-blocks for arbitrary-precision
// operations.

mod scalar {
    use super::*;

    // ADDITION

    /// Add two small integers and return the resulting value and if overflow happens.
    #[inline]
    pub fn add(x: Limb, y: Limb) -> (Limb, bool) {
        x.overflowing_add(y)
    }

    /// AddAssign two small integers and return if overflow happens.
    #[inline]
    pub fn iadd(x: &mut Limb, y: Limb) -> bool {
        let t = add(*x, y);
        *x = t.0;
        t.1
    }

    // SUBTRACTION

    /// Subtract two small integers and return the resulting value and if overflow happens.
    #[inline]
    pub fn sub(x: Limb, y: Limb) -> (Limb, bool) {
        x.overflowing_sub(y)
    }

    /// SubAssign two small integers and return if overflow happens.
    #[inline]
    pub fn isub(x: &mut Limb, y: Limb) -> bool {
        let t = sub(*x, y);
        *x = t.0;
        t.1
    }

    // MULTIPLICATION

    /// Multiply two small integers (with carry) and return the overflow contribution.
    ///
    /// Returns the (low, high) components.
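    ///
    /// A quick sanity check on the "cannot overflow" claim below, using
    /// 32-bit limbs and a 64-bit wide type: the largest possible intermediate
    /// is `(2^32 - 1)^2 + (2^32 - 1) = 2^64 - 2^32`, which still fits in a `u64`.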
    #[inline]
    pub fn mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
        // Cannot overflow, as long as wide is 2x as wide. This is because
        // the following is always true:
        // `Wide::max_value() - (Narrow::max_value() * Narrow::max_value()) >= Narrow::max_value()`
        let z: Wide = as_wide(x) * as_wide(y) + as_wide(carry);
        let bits = mem::size_of::<Limb>() * 8;
        (as_limb(z), as_limb(z >> bits))
    }

    /// Multiply two small integers (with carry) in place and return the carry.
    #[inline]
    pub fn imul(x: &mut Limb, y: Limb, carry: Limb) -> Limb {
        let t = mul(*x, y, carry);
        *x = t.0;
        t.1
    }
} // scalar

// SMALL
// -----

// Large-to-small operations, to modify a big integer from a native scalar.

mod small {
    use super::*;

    // ADDITION

    /// Implied AddAssign implementation for adding a small integer to bigint.
    ///
    /// Allows us to choose a start-index in x to store, to allow incrementing
    /// from a non-zero start.
    #[inline]
    pub fn iadd_impl(x: &mut LimbVecType, y: Limb, xstart: usize) {
        if x.len() <= xstart {
            x.push(y);
        } else {
            // Initial add
            let mut carry = scalar::iadd(&mut x[xstart], y);

            // Increment until overflow stops occurring.
            let mut size = xstart + 1;
            while carry && size < x.len() {
                carry = scalar::iadd(&mut x[size], 1);
                size += 1;
            }

            // If we overflowed the buffer entirely, need to add 1 to the end
            // of the buffer.
            if carry {
                x.push(1);
            }
        }
    }

    /// AddAssign small integer to bigint.
    #[inline]
    pub fn iadd(x: &mut LimbVecType, y: Limb) {
        iadd_impl(x, y, 0);
    }

    // SUBTRACTION

    /// SubAssign small integer to bigint.
    /// Does not do overflowing subtraction.
    #[inline]
    pub fn isub_impl(x: &mut LimbVecType, y: Limb, xstart: usize) {
        debug_assert!(x.len() > xstart && (x[xstart] >= y || x.len() > xstart + 1));

        // Initial subtraction
        let mut carry = scalar::isub(&mut x[xstart], y);

        // Decrement until the borrow stops occurring.
        let mut size = xstart + 1;
        while carry && size < x.len() {
            carry = scalar::isub(&mut x[size], 1);
            size += 1;
        }
        normalize(x);
    }

    // MULTIPLICATION

    /// MulAssign small integer to bigint.
    #[inline]
    pub fn imul(x: &mut LimbVecType, y: Limb) {
        // Multiply iteratively over all elements, adding the carry each time.
        let mut carry: Limb = 0;
        for xi in x.iter_mut() {
            carry = scalar::imul(xi, y, carry);
        }

        // Overflow of value, add to end.
        if carry != 0 {
            x.push(carry);
        }
    }

    /// Mul small integer to bigint.
    #[inline]
    pub fn mul(x: &[Limb], y: Limb) -> LimbVecType {
        let mut z = LimbVecType::default();
        z.extend(x.iter().cloned());
        imul(&mut z, y);
        z
    }

    /// MulAssign by a power.
    ///
    /// Theoretically...
    ///
    /// Use an exponentiation by squaring method, since it reduces the time
    /// complexity of the multiplication to ~`O(log(n))` for the squaring,
    /// and `O(n*m)` for the result. Since `m` is typically a lower-order
    /// factor, this significantly reduces the number of multiplications
    /// we need to do. Iteratively multiplying by small powers follows
    /// the nth triangular number series, which scales as `O(p^2)`, but
    /// where `p` is `n+m`. In short, it scales very poorly.
    ///
    /// Practically....
    ///
    /// Exponentiation by Squaring:
    ///     running 2 tests
    ///     test bigcomp_f32_lexical ... bench:       1,018 ns/iter (+/- 78)
    ///     test bigcomp_f64_lexical ... bench:       3,639 ns/iter (+/- 1,007)
    ///
    /// Exponentiation by Iterative Small Powers:
    ///     running 2 tests
    ///     test bigcomp_f32_lexical ... bench:         518 ns/iter (+/- 31)
    ///     test bigcomp_f64_lexical ... bench:         583 ns/iter (+/- 47)
    ///
    /// Exponentiation by Iterative Large Powers (of 2):
    ///     running 2 tests
    ///     test bigcomp_f32_lexical ... bench:         671 ns/iter (+/- 31)
    ///     test bigcomp_f64_lexical ... bench:       1,394 ns/iter (+/- 47)
    ///
    /// Even using worst-case scenarios, exponentiation by squaring is
    /// significantly slower for our workloads. Just multiply by small powers,
    /// in simple cases, and use precalculated large powers in other cases.
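    ///
    /// As a concrete sketch of the large-power path (assuming
    /// `large_powers::POW5[i]` stores the limbs of `5^(2^i)`): for
    /// `n = 10 = 0b1010`, bits 1 and 3 are set, so we multiply by `5^2`
    /// and then by `5^8`, giving `5^10` overall.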
    pub fn imul_pow5(x: &mut LimbVecType, n: u32) {
        use super::large::KARATSUBA_CUTOFF;

        let small_powers = POW5_LIMB;
        let large_powers = large_powers::POW5;

        if n == 0 {
            // No exponent, just return.
            // The 0-index of the large powers is `2^0`, which is 1, so we want
            // to make sure we don't take that path with a literal 0.
            return;
        }

        // We want to use the asymptotically faster algorithm if we're going
        // to be using Karatsuba multiplication sometime during the result,
        // otherwise, just use exponentiation by squaring.
        let bit_length = 32 - n.leading_zeros().as_usize();
        debug_assert!(bit_length != 0 && bit_length <= large_powers.len());
        if x.len() + large_powers[bit_length - 1].len() < 2 * KARATSUBA_CUTOFF {
            // We can use iterative small powers to make this faster for the
            // easy cases.

            // Multiply by the largest small power until n < step.
            let step = small_powers.len() - 1;
            let power = small_powers[step];
            let mut n = n.as_usize();
            while n >= step {
                imul(x, power);
                n -= step;
            }

            // Multiply by the remainder.
            imul(x, small_powers[n]);
        } else {
            // In theory, this code should be asymptotically a lot faster;
            // in practice, our small::imul seems to be the limiting step,
            // and large imul is slow as well.

            // Multiply by higher order powers.
            let mut idx: usize = 0;
            let mut bit: usize = 1;
            let mut n = n.as_usize();
            while n != 0 {
                if n & bit != 0 {
                    debug_assert!(idx < large_powers.len());
                    large::imul(x, large_powers[idx]);
                    n ^= bit;
                }
                idx += 1;
                bit <<= 1;
            }
        }
    }

    // BIT LENGTH

    /// Get number of leading zero bits in the storage.
    #[inline]
    pub fn leading_zeros(x: &[Limb]) -> usize {
        if x.is_empty() {
            0
        } else {
            x.rindex(0).leading_zeros().as_usize()
        }
    }

    /// Calculate the bit-length of the big-integer.
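    ///
    /// For example, with 32-bit limbs, `[0, 0, 0, 1]` spans 4 limbs
    /// (128 bits) and the top limb has 31 leading zeros, so the bit
    /// length is `128 - 31 = 97`.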
    #[inline]
    pub fn bit_length(x: &[Limb]) -> usize {
        let bits = mem::size_of::<Limb>() * 8;
        // Avoid overflowing, calculate via total number of bits
        // minus leading zero bits.
        let nlz = leading_zeros(x);
        bits.checked_mul(x.len()).map(|v| v - nlz).unwrap_or(usize::max_value())
    }

    // SHL

    /// Shift-left bits inside a buffer.
    ///
    /// Assumes `n < Limb::BITS`, i.e., internally shifting bits.
    #[inline]
    pub fn ishl_bits(x: &mut LimbVecType, n: usize) {
        // Need to shift by `bits % Limb::BITS` bits.
        let bits = mem::size_of::<Limb>() * 8;
        debug_assert!(n < bits);
        if n == 0 {
            return;
        }

        // Internally, for each item, we shift left by n, and add the previous
        // right shifted limb-bits.
        // For example, we transform (for u8) shifted left 2, to:
        //      b10100100 b01000010
        //      b10 b10010001 b00001000
        let rshift = bits - n;
        let lshift = n;
        let mut prev: Limb = 0;
        for xi in x.iter_mut() {
            let tmp = *xi;
            *xi <<= lshift;
            *xi |= prev >> rshift;
            prev = tmp;
        }

        // Always push the carry, even if it creates a non-normal result.
        let carry = prev >> rshift;
        if carry != 0 {
            x.push(carry);
        }
    }

    /// Shift-left `n` digits inside a buffer.
    ///
    /// Assumes `n` is not 0.
    #[inline]
    pub fn ishl_limbs(x: &mut LimbVecType, n: usize) {
        debug_assert!(n != 0);
        if !x.is_empty() {
            insert_many(x, 0, iter::repeat(0).take(n));
        }
    }

    /// Shift-left buffer by n bits.
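    ///
    /// For example, with 32-bit limbs, shifting by 37 bits shifts every
    /// limb left by 5 bits (carrying across limbs) and then prepends one
    /// zero limb, since `37 = 1 * 32 + 5`.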
    #[inline]
    pub fn ishl(x: &mut LimbVecType, n: usize) {
        let bits = mem::size_of::<Limb>() * 8;
        // Need to pad with zeros for the number of `bits / Limb::BITS`,
        // and shift-left with carry for `bits % Limb::BITS`.
        let rem = n % bits;
        let div = n / bits;
        ishl_bits(x, rem);
        if div != 0 {
            ishl_limbs(x, div);
        }
    }

    // NORMALIZE

    /// Normalize the container by popping any leading zeros.
    #[inline]
    pub fn normalize(x: &mut LimbVecType) {
        // Remove leading zero if we cause underflow. Since we're dividing
        // by a small power, we have at max 1 int removed.
        while !x.is_empty() && *x.rindex(0) == 0 {
            x.pop();
        }
    }
} // small

// LARGE
// -----

// Large-to-large operations, to modify a big integer from a native scalar.

mod large {
    use super::*;

    // RELATIVE OPERATORS

    /// Compare `x` to `y`, in little-endian order.
    #[inline]
    pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering {
        if x.len() > y.len() {
            return cmp::Ordering::Greater;
        } else if x.len() < y.len() {
            return cmp::Ordering::Less;
        } else {
            let iter = x.iter().rev().zip(y.iter().rev());
            for (&xi, &yi) in iter {
                if xi > yi {
                    return cmp::Ordering::Greater;
                } else if xi < yi {
                    return cmp::Ordering::Less;
                }
            }
            // Equal case.
            return cmp::Ordering::Equal;
        }
    }

    /// Check if x is less than y.
    #[inline]
    pub fn less(x: &[Limb], y: &[Limb]) -> bool {
        compare(x, y) == cmp::Ordering::Less
    }

    /// Check if x is greater than or equal to y.
    #[inline]
    pub fn greater_equal(x: &[Limb], y: &[Limb]) -> bool {
        !less(x, y)
    }

    // ADDITION

    /// Implied AddAssign implementation for bigints.
    ///
    /// Allows us to choose a start-index in x to store, so we can avoid
    /// padding the buffer with zeros when not needed, optimized for vectors.
    pub fn iadd_impl(x: &mut LimbVecType, y: &[Limb], xstart: usize) {
        // The effective x buffer is from `xstart..x.len()`, so we need to treat
        // that as the current range. If the effective y buffer is longer, need
        // to resize to that, + the start index.
        if y.len() > x.len() - xstart {
            resize(x, y.len() + xstart, 0);
        }

        // Iteratively add elements from y to x.
        let mut carry = false;
        for (xi, yi) in (&mut x[xstart..]).iter_mut().zip(y.iter()) {
            // Only one op of the two can overflow, since we added at max
            // Limb::max_value() + Limb::max_value(). Add the previous carry,
            // and store the current carry for the next.
            let mut tmp = scalar::iadd(xi, *yi);
            if carry {
                tmp |= scalar::iadd(xi, 1);
            }
            carry = tmp;
        }

        // Overflow from the previous bit.
        if carry {
            small::iadd_impl(x, 1, y.len() + xstart);
        }
    }

    /// AddAssign bigint to bigint.
    #[inline]
    pub fn iadd(x: &mut LimbVecType, y: &[Limb]) {
        iadd_impl(x, y, 0)
    }

    /// Add bigint to bigint.
    #[inline]
    pub fn add(x: &[Limb], y: &[Limb]) -> LimbVecType {
        let mut z = LimbVecType::default();
        z.extend(x.iter().cloned());
        iadd(&mut z, y);
        z
    }

    // SUBTRACTION

    /// SubAssign bigint to bigint.
    pub fn isub(x: &mut LimbVecType, y: &[Limb]) {
        // Basic underflow checks.
        debug_assert!(greater_equal(x, y));

        // Iteratively subtract elements of y from x.
        let mut carry = false;
        for (xi, yi) in x.iter_mut().zip(y.iter()) {
            // Only one op of the two can borrow: if the first subtraction
            // wraps, its result is at least 1, so subtracting the borrow
            // cannot wrap a second time. Apply the previous borrow, and
            // store the current borrow for the next limb.
            let mut tmp = scalar::isub(xi, *yi);
            if carry {
                tmp |= scalar::isub(xi, 1);
            }
            carry = tmp;
        }

        if carry {
            small::isub_impl(x, 1, y.len());
        } else {
            small::normalize(x);
        }
    }

    // MULTIPLICATION

    /// Number of digits to bottom-out to asymptotically slow algorithms.
    ///
    /// Karatsuba tends to out-perform long-multiplication at ~320-640 bits,
    /// so we go halfway, while Newton division tends to out-perform
    /// Algorithm D at ~1024 bits. We can toggle this for optimal performance.
    pub const KARATSUBA_CUTOFF: usize = 32;

    /// Grade-school multiplication algorithm.
    ///
    /// Slow, naive algorithm, using limb-bit bases and just shifting left for
    /// each iteration. This could be optimized with numerous other algorithms,
    /// but it's extremely simple, and works in O(n*m) time, which is fine
    /// by me. Each iteration, of which there are `m` iterations, requires
    /// `n` multiplications, and `n` additions, or grade-school multiplication.
    fn long_mul(x: &[Limb], y: &[Limb]) -> LimbVecType {
        // Using the immutable value, multiply by all the scalars in y, using
        // the algorithm defined above. Use a single buffer to avoid
        // frequent reallocations. Handle the first case to avoid a redundant
        // addition, since we know y.len() >= 1.
        let mut z: LimbVecType = small::mul(x, y[0]);
        resize(&mut z, x.len() + y.len(), 0);

        // Handle the iterative cases.
        for (i, &yi) in y[1..].iter().enumerate() {
            let zi: LimbVecType = small::mul(x, yi);
            iadd_impl(&mut z, &zi, i + 1);
        }

        small::normalize(&mut z);

        z
    }

    /// Split a buffer into two at index `m`, returning (lo, hi).
    #[inline]
    pub fn karatsuba_split<'a>(z: &'a [Limb], m: usize) -> (&'a [Limb], &'a [Limb]) {
        (&z[..m], &z[m..])
    }

    /// Karatsuba multiplication algorithm with roughly equal input sizes.
    ///
    /// Assumes `y.len() >= x.len()`.
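    ///
    /// For reference, the identity this relies on: with `x = xl + xh*B^m` and
    /// `y = yl + yh*B^m` (where `B` is the limb base), the product is
    /// `x*y = z2*B^(2m) + (z1 - z2 - z0)*B^m + z0`, with `z0 = xl*yl`,
    /// `z2 = xh*yh`, and `z1 = (xl + xh)*(yl + yh)`.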
    fn karatsuba_mul(x: &[Limb], y: &[Limb]) -> LimbVecType {
        if y.len() <= KARATSUBA_CUTOFF {
            // Bottom-out to long multiplication for small cases.
            long_mul(x, y)
        } else if x.len() < y.len() / 2 {
            karatsuba_uneven_mul(x, y)
        } else {
            // Do our 3 multiplications.
            let m = y.len() / 2;
            let (xl, xh) = karatsuba_split(x, m);
            let (yl, yh) = karatsuba_split(y, m);
            let sumx = add(xl, xh);
            let sumy = add(yl, yh);
            let z0 = karatsuba_mul(xl, yl);
            let mut z1 = karatsuba_mul(&sumx, &sumy);
            let z2 = karatsuba_mul(xh, yh);
            // Properly scale z1, which is `z1 - z2 - z0`.
            isub(&mut z1, &z2);
            isub(&mut z1, &z0);

            // Create our result, which is equal to, in little-endian order:
            // [z0, z1 - z2 - z0, z2]
            // z1 must be shifted m digits (2^(32m)) over.
            // z2 must be shifted 2*m digits (2^(64m)) over.
            let mut result = LimbVecType::default();
            let len = z0.len().max(m + z1.len()).max(2 * m + z2.len());
            reserve_exact(&mut result, len);
            result.extend(z0.iter().cloned());
            iadd_impl(&mut result, &z1, m);
            iadd_impl(&mut result, &z2, 2 * m);

            result
        }
    }

    /// Karatsuba multiplication algorithm where y is substantially larger than x.
    ///
    /// Assumes `y.len() >= x.len()`.
    fn karatsuba_uneven_mul(x: &[Limb], mut y: &[Limb]) -> LimbVecType {
        let mut result = LimbVecType::default();
        resize(&mut result, x.len() + y.len(), 0);

        // This effectively is like grade-school multiplication between
        // two numbers, except we're using splits on `y`, and the intermediate
        // step is a Karatsuba multiplication.
        let mut start = 0;
        while y.len() != 0 {
            let m = x.len().min(y.len());
            let (yl, yh) = karatsuba_split(y, m);
            let prod = karatsuba_mul(x, yl);
            iadd_impl(&mut result, &prod, start);
            y = yh;
            start += m;
        }
        small::normalize(&mut result);

        result
    }

    /// Forwarder to the proper Karatsuba algorithm.
    #[inline]
    fn karatsuba_mul_fwd(x: &[Limb], y: &[Limb]) -> LimbVecType {
        if x.len() < y.len() {
            karatsuba_mul(x, y)
        } else {
            karatsuba_mul(y, x)
        }
    }

    /// MulAssign bigint to bigint.
    #[inline]
    pub fn imul(x: &mut LimbVecType, y: &[Limb]) {
        if y.len() == 1 {
            small::imul(x, y[0]);
        } else {
            // We're not really in a condition where using Karatsuba
            // multiplication makes sense, so we're just going to use long
            // multiplication. ~20% speedup compared to:
            //      *x = karatsuba_mul_fwd(x, y);
            *x = long_mul(y, x);
        }
    }
} // large

// TRAITS
// ------

/// Traits for shared operations for big integers.
///
/// None of these are implemented using normal traits, since these
/// are very expensive operations, and we want to deliberately
/// and explicitly use these functions.
pub(crate) trait Math: Clone + Sized + Default {
    // DATA

    /// Get access to the underlying data
    fn data<'a>(&'a self) -> &'a LimbVecType;

    /// Get access to the underlying data
    fn data_mut<'a>(&'a mut self) -> &'a mut LimbVecType;

    // RELATIVE OPERATIONS

    /// Compare self to y.
    #[inline]
    fn compare(&self, y: &Self) -> cmp::Ordering {
        large::compare(self.data(), y.data())
    }

    // PROPERTIES

    /// Get the high 64-bits from the bigint and if there are remaining bits.
    #[inline]
    fn hi64(&self) -> (u64, bool) {
        self.data().as_slice().hi64()
    }

    /// Calculate the bit-length of the big-integer.
    /// Returns usize::max_value() if the value overflows,
    /// i.e., if `self.data().len() > usize::max_value() / 8`.
    #[inline]
    fn bit_length(&self) -> usize {
        small::bit_length(self.data())
    }

    // INTEGER CONVERSIONS

    /// Create new big integer from u64.
    #[inline]
    fn from_u64(x: u64) -> Self {
        let mut v = Self::default();
        let slc = split_u64(x);
        v.data_mut().extend(slc.iter().cloned());
        v.normalize();
        v
    }

    // NORMALIZE

    /// Normalize the integer, so any leading zero values are removed.
    #[inline]
    fn normalize(&mut self) {
        small::normalize(self.data_mut());
    }

    // ADDITION

    /// AddAssign small integer.
    #[inline]
    fn iadd_small(&mut self, y: Limb) {
        small::iadd(self.data_mut(), y);
    }

    // MULTIPLICATION

    /// MulAssign small integer.
    #[inline]
    fn imul_small(&mut self, y: Limb) {
        small::imul(self.data_mut(), y);
    }

    /// Multiply by a power of 2.
    #[inline]
    fn imul_pow2(&mut self, n: u32) {
        self.ishl(n.as_usize())
    }

    /// Multiply by a power of 5.
    #[inline]
    fn imul_pow5(&mut self, n: u32) {
        small::imul_pow5(self.data_mut(), n)
    }

    /// MulAssign by a power of 10.
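    ///
    /// Since `10^n = 5^n * 2^n`, this is implemented as a multiplication by
    /// `5^n` followed by a left-shift of `n` bits.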
    #[inline]
    fn imul_pow10(&mut self, n: u32) {
        self.imul_pow5(n);
        self.imul_pow2(n);
    }

    // SHIFTS

    /// Shift-left the entire buffer n bits.
    #[inline]
    fn ishl(&mut self, n: usize) {
        small::ishl(self.data_mut(), n);
    }
}

// TESTS
// -----

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Clone, Default)]
    struct Bigint {
        data: LimbVecType,
    }

    impl Math for Bigint {
        #[inline]
        fn data<'a>(&'a self) -> &'a LimbVecType {
            &self.data
        }

        #[inline]
        fn data_mut<'a>(&'a mut self) -> &'a mut LimbVecType {
            &mut self.data
        }
    }

    #[cfg(limb_width_32)]
    pub(crate) fn from_u32(x: &[u32]) -> LimbVecType {
        x.iter().cloned().collect()
    }

    #[cfg(limb_width_64)]
    pub(crate) fn from_u32(x: &[u32]) -> LimbVecType {
        let mut v = LimbVecType::default();
        for xi in x.chunks(2) {
            match xi.len() {
                1 => v.push(xi[0] as u64),
                2 => v.push(((xi[1] as u64) << 32) | (xi[0] as u64)),
                _ => unreachable!(),
            }
        }

        v
    }

    #[test]
    fn compare_test() {
        // Simple
        let x = Bigint {
            data: from_u32(&[1]),
        };
        let y = Bigint {
            data: from_u32(&[2]),
        };
        assert_eq!(x.compare(&y), cmp::Ordering::Less);
        assert_eq!(x.compare(&x), cmp::Ordering::Equal);
        assert_eq!(y.compare(&x), cmp::Ordering::Greater);

        // Check asymmetric
        let x = Bigint {
            data: from_u32(&[5, 1]),
        };
        let y = Bigint {
            data: from_u32(&[2]),
        };
        assert_eq!(x.compare(&y), cmp::Ordering::Greater);
        assert_eq!(x.compare(&x), cmp::Ordering::Equal);
        assert_eq!(y.compare(&x), cmp::Ordering::Less);

        // Check when we use reverse ordering properly.
        let x = Bigint {
            data: from_u32(&[5, 1, 9]),
        };
        let y = Bigint {
            data: from_u32(&[6, 2, 8]),
        };
        assert_eq!(x.compare(&y), cmp::Ordering::Greater);
        assert_eq!(x.compare(&x), cmp::Ordering::Equal);
        assert_eq!(y.compare(&x), cmp::Ordering::Less);

        // Complex scenario, check it properly uses reverse ordering.
        let x = Bigint {
            data: from_u32(&[0, 1, 9]),
        };
        let y = Bigint {
            data: from_u32(&[4294967295, 0, 9]),
        };
        assert_eq!(x.compare(&y), cmp::Ordering::Greater);
        assert_eq!(x.compare(&x), cmp::Ordering::Equal);
        assert_eq!(y.compare(&x), cmp::Ordering::Less);
    }

    #[test]
    fn hi64_test() {
        assert_eq!(Bigint::from_u64(0xA).hi64(), (0xA000000000000000, false));
        assert_eq!(Bigint::from_u64(0xAB).hi64(), (0xAB00000000000000, false));
        assert_eq!(Bigint::from_u64(0xAB00000000).hi64(), (0xAB00000000000000, false));
        assert_eq!(Bigint::from_u64(0xA23456789A).hi64(), (0xA23456789A000000, false));
    }

    #[test]
    fn bit_length_test() {
        let x = Bigint {
            data: from_u32(&[0, 0, 0, 1]),
        };
        assert_eq!(x.bit_length(), 97);

        let x = Bigint {
            data: from_u32(&[0, 0, 0, 3]),
        };
        assert_eq!(x.bit_length(), 98);

        let x = Bigint {
            data: from_u32(&[1 << 31]),
        };
        assert_eq!(x.bit_length(), 32);
    }

    #[test]
    fn iadd_small_test() {
        // Overflow check (single).
        // 0xFFFFFFFF + 5 wraps the limb around to 4 and carries into a new
        // high limb, so the result is [4, 1].
        let mut x = Bigint {
            data: from_u32(&[4294967295]),
        };
        x.iadd_small(5);
        assert_eq!(x.data, from_u32(&[4, 1]));

        // No overflow, single value
        let mut x = Bigint {
            data: from_u32(&[5]),
        };
        x.iadd_small(7);
        assert_eq!(x.data, from_u32(&[12]));

        // Single carry, internal overflow
        let mut x = Bigint::from_u64(0x80000000FFFFFFFF);
        x.iadd_small(7);
        assert_eq!(x.data, from_u32(&[6, 0x80000001]));

        // Double carry, overflow
        let mut x = Bigint::from_u64(0xFFFFFFFFFFFFFFFF);
        x.iadd_small(7);
        assert_eq!(x.data, from_u32(&[6, 0, 1]));
    }

    #[test]
    fn imul_small_test() {
        // No overflow check, 1-int.
        let mut x = Bigint {
            data: from_u32(&[5]),
        };
        x.imul_small(7);
        assert_eq!(x.data, from_u32(&[35]));

        // No overflow check, 2-ints.
        let mut x = Bigint::from_u64(0x4000000040000);
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[0x00140000, 0x140000]));

        // Overflow, 1 carry.
        let mut x = Bigint {
            data: from_u32(&[0x33333334]),
        };
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[4, 1]));

        // Overflow, 1 carry, internal.
        let mut x = Bigint::from_u64(0x133333334);
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[4, 6]));

        // Overflow, 2 carries.
        let mut x = Bigint::from_u64(0x3333333333333334);
        x.imul_small(5);
        assert_eq!(x.data, from_u32(&[4, 0, 1]));
    }

    #[test]
    fn shl_test() {
        // Pattern generated via `''.join(["1" +"0"*i for i in range(20)])`
        let mut big = Bigint {
            data: from_u32(&[0xD2210408]),
        };
        big.ishl(5);
        assert_eq!(big.data, from_u32(&[0x44208100, 0x1A]));
        big.ishl(32);
        assert_eq!(big.data, from_u32(&[0, 0x44208100, 0x1A]));
        big.ishl(27);
        assert_eq!(big.data, from_u32(&[0, 0, 0xD2210408]));

        // 96-bits of previous pattern
        let mut big = Bigint {
            data: from_u32(&[0x20020010, 0x8040100, 0xD2210408]),
        };
        big.ishl(5);
        assert_eq!(big.data, from_u32(&[0x400200, 0x802004, 0x44208101, 0x1A]));
        big.ishl(32);
        assert_eq!(big.data, from_u32(&[0, 0x400200, 0x802004, 0x44208101, 0x1A]));
        big.ishl(27);
        assert_eq!(big.data, from_u32(&[0, 0, 0x20020010, 0x8040100, 0xD2210408]));
    }
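
    // Quick sanity check for `imul_pow5`, using small values that stay
    // within a single limb on both 32-bit and 64-bit limb configurations.
    #[test]
    fn imul_pow5_test() {
        // 1 * 5^2 = 25.
        let mut x = Bigint::from_u64(1);
        x.imul_pow5(2);
        assert_eq!(x.data, from_u32(&[25]));

        // 5 * 5^3 = 625.
        let mut x = Bigint::from_u64(5);
        x.imul_pow5(3);
        assert_eq!(x.data, from_u32(&[625]));
    }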
}