1 /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
2
3 Contributed to the GNU project by Niels Möller
4
5 Copyright 1991-1997, 1999-2014 Free Software Foundation, Inc.
6
7 This file is part of the GNU MP Library.
8
9 The GNU MP Library is free software; you can redistribute it and/or modify
10 it under the terms of either:
11
12 * the GNU Lesser General Public License as published by the Free
13 Software Foundation; either version 3 of the License, or (at your
14 option) any later version.
15
16 or
17
18 * the GNU General Public License as published by the Free Software
19 Foundation; either version 2 of the License, or (at your option) any
20 later version.
21
22 or both in parallel, as here.
23
24 The GNU MP Library is distributed in the hope that it will be useful, but
25 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
26 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 for more details.
28
29 You should have received copies of the GNU General Public License and the
30 GNU Lesser General Public License along with the GNU MP Library. If not,
31 see https://www.gnu.org/licenses/. */
32
33 /* NOTE: All functions in this file which are not declared in
34 mini-gmp.h are internal, and are not intended to be compatible
35 neither with GMP nor with future versions of mini-gmp. */
36
37 /* Much of the material copied from GMP files, including: gmp-impl.h,
38 longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
39 mpn/generic/lshift.c, mpn/generic/mul_1.c,
40 mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
41 mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
42 mpn/generic/submul_1.c. */
43
44 #include <assert.h>
45 #include <ctype.h>
46 #include <limits.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50
51 #include "mini-gmp.h"
52
53
/* Macros */
/* Width of a limb (the unsigned word type all mpn arithmetic uses). */
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)

#define GMP_LIMB_MAX (~ (mp_limb_t) 0)
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))

/* Half-limb constants, used by the double-limb multiply and divide
   primitives below. */
#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)

#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))

#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
/* Negate signed x into unsigned type T without overflowing on the
   most negative value: (x+1) is representable, cast, then undo. */
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))

#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Evaluate x (a carry-returning expression) and assert it produced no
   carry or borrow. */
#define gmp_assert_nocarry(x) do { \
    mp_limb_t __cy = x; \
    assert (__cy == 0); \
    (void)(__cy); \
  } while (0)
77
/* Count leading zero bits of x into count.  x must be nonzero: the
   loops terminate only when a set bit is found.  Scans a byte at a
   time, then a bit at a time. */
#define gmp_clz(count, x) do { \
    mp_limb_t __clz_x = (x); \
    unsigned __clz_c; \
    for (__clz_c = 0; \
	 (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
	 __clz_c += 8) \
      __clz_x <<= 8; \
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
      __clz_x <<= 1; \
    (count) = __clz_c; \
  } while (0)

/* Count trailing zero bits of nonzero x, via clz of the isolated
   lowest set bit (x & -x). */
#define gmp_ctz(count, x) do { \
    mp_limb_t __ctz_x = (x); \
    unsigned __ctz_c = 0; \
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
    (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
  } while (0)
96
/* Double-limb add: <sh,sl> = <ah,al> + <bh,bl>, discarding carry out
   of the high limb. */
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)

/* Double-limb subtract: <sh,sl> = <ah,al> - <bh,bl>, discarding
   borrow out of the high limb. */
#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - ((al) < (bl)); \
    (sl) = __x; \
  } while (0)

/* Full double-limb product <w1,w0> = u * v, built from four half-limb
   cross products.  Assumes a limb splits evenly into two halves that
   fit in `unsigned`. */
#define gmp_umul_ppmm(w1, w0, u, v) \
  do { \
    mp_limb_t __x0, __x1, __x2, __x3; \
    unsigned __ul, __vl, __uh, __vh; \
    mp_limb_t __u = (u), __v = (v); \
    \
    __ul = __u & GMP_LLIMB_MASK; \
    __uh = __u >> (GMP_LIMB_BITS / 2); \
    __vl = __v & GMP_LLIMB_MASK; \
    __vh = __v >> (GMP_LIMB_BITS / 2); \
    \
    __x0 = (mp_limb_t) __ul * __vl; \
    __x1 = (mp_limb_t) __ul * __vh; \
    __x2 = (mp_limb_t) __uh * __vl; \
    __x3 = (mp_limb_t) __uh * __vh; \
    \
    __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
    __x1 += __x2;		/* but this indeed can */ \
    if (__x1 < __x2)		/* did we get it? */ \
      __x3 += GMP_HLIMB_BIT;	/* yes, add it in the proper pos. */ \
    \
    (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
    (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
  } while (0)
137
/* 2/1 division with precomputed inverse: divide <nh,nl> by normalized
   d (high bit set), di = inverse of d, producing quotient q and
   remainder r.  At most two adjustment steps are needed. */
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
  do { \
    mp_limb_t _qh, _ql, _r, _mask; \
    gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
    _r = (nl) - _qh * (d); \
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
    _qh += _mask; \
    _r += _mask & (d); \
    if (_r >= (d)) \
      { \
	_r -= (d); \
	_qh++; \
      } \
    \
    (r) = _r; \
    (q) = _qh; \
  } while (0)

/* 3/2 division with precomputed inverse: divide <n2,n1,n0> by the
   normalized two-limb divisor <d1,d0>, dinv = 3/2 inverse, producing
   quotient q and two-limb remainder <r1,r0>. */
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
  do { \
    mp_limb_t _q0, _t1, _t0, _mask; \
    gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
    \
    /* Compute the two most significant limbs of n - q'd */ \
    (r1) = (n1) - (d1) * (q); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
    gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
    (q)++; \
    \
    /* Conditionally adjust q and the remainders */ \
    _mask = - (mp_limb_t) ((r1) >= _q0); \
    (q) += _mask; \
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
    if ((r1) >= (d1)) \
      { \
	if ((r1) > (d1) || (r0) >= (d0)) \
	  { \
	    (q)++; \
	    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
	  } \
      } \
  } while (0)
183
/* Swap macros.  One per type, since C has no generic swap. */
#define MP_LIMB_T_SWAP(x, y) \
  do { \
    mp_limb_t __mp_limb_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_limb_t_swap__tmp; \
  } while (0)
#define MP_SIZE_T_SWAP(x, y) \
  do { \
    mp_size_t __mp_size_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_size_t_swap__tmp; \
  } while (0)
#define MP_BITCNT_T_SWAP(x,y) \
  do { \
    mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_bitcnt_t_swap__tmp; \
  } while (0)
#define MP_PTR_SWAP(x, y) \
  do { \
    mp_ptr __mp_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_ptr_swap__tmp; \
  } while (0)
#define MP_SRCPTR_SWAP(x, y) \
  do { \
    mp_srcptr __mp_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_srcptr_swap__tmp; \
  } while (0)

/* Swap an (pointer, size) operand pair in one go. */
#define MPN_PTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_PTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)
#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_SRCPTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)

#define MPZ_PTR_SWAP(x, y) \
  do { \
    mpz_ptr __mpz_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_ptr_swap__tmp; \
  } while (0)
#define MPZ_SRCPTR_SWAP(x, y) \
  do { \
    mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_srcptr_swap__tmp; \
  } while (0)
239
/* Public constant: number of bits per limb on this build. */
const int mp_bits_per_limb = GMP_LIMB_BITS;
241
242
243 /* Memory allocation and other helper functions. */
/* Fatal-error path: print MSG on stderr and abort.  Never returns. */
static void
gmp_die (const char *msg)
{
  fputs (msg, stderr);
  fputc ('\n', stderr);
  abort ();
}
250
/* Default allocation hook: malloc that aborts instead of returning
   NULL.  size must be positive. */
static void *
gmp_default_alloc (size_t size)
{
  void *block;

  assert (size > 0);

  block = malloc (size);
  if (block == NULL)
    gmp_die("gmp_default_alloc: Virtual memory exhausted.");

  return block;
}
264
/* Default reallocation hook.  old_size is part of the hook interface
   (see mp_set_memory_functions) but is not needed by the C library
   realloc.  Aborts on allocation failure. */
static void *
gmp_default_realloc (void *old, size_t old_size, size_t new_size)
{
  void *p;

  (void) old_size;		/* unused; required by the hook signature */

  p = realloc (old, new_size);

  if (!p)
    gmp_die("gmp_default_realloc: Virtual memory exhausted.");

  return p;
}
277
/* Default free hook; the size argument is part of the hook interface
   and is ignored here. */
static void
gmp_default_free (void *p, size_t size)
{
  (void) size;
  free (p);
}
283
/* Active memory-management hooks; replaceable via
   mp_set_memory_functions. */
static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
287
288 void
mp_get_memory_functions(void * (** alloc_func)(size_t),void * (** realloc_func)(void *,size_t,size_t),void (** free_func)(void *,size_t))289 mp_get_memory_functions (void *(**alloc_func) (size_t),
290 void *(**realloc_func) (void *, size_t, size_t),
291 void (**free_func) (void *, size_t))
292 {
293 if (alloc_func)
294 *alloc_func = gmp_allocate_func;
295
296 if (realloc_func)
297 *realloc_func = gmp_reallocate_func;
298
299 if (free_func)
300 *free_func = gmp_free_func;
301 }
302
303 void
mp_set_memory_functions(void * (* alloc_func)(size_t),void * (* realloc_func)(void *,size_t,size_t),void (* free_func)(void *,size_t))304 mp_set_memory_functions (void *(*alloc_func) (size_t),
305 void *(*realloc_func) (void *, size_t, size_t),
306 void (*free_func) (void *, size_t))
307 {
308 if (!alloc_func)
309 alloc_func = gmp_default_alloc;
310 if (!realloc_func)
311 realloc_func = gmp_default_realloc;
312 if (!free_func)
313 free_func = gmp_default_free;
314
315 gmp_allocate_func = alloc_func;
316 gmp_reallocate_func = realloc_func;
317 gmp_free_func = free_func;
318 }
319
/* Allocation shorthands.  gmp_free passes a size of 0; the default
   free hook ignores it. */
#define gmp_xalloc(size) ((*gmp_allocate_func)((size)))
#define gmp_free(p) ((*gmp_free_func) ((p), 0))
322
323 static mp_ptr
gmp_xalloc_limbs(mp_size_t size)324 gmp_xalloc_limbs (mp_size_t size)
325 {
326 return gmp_xalloc (size * sizeof (mp_limb_t));
327 }
328
329 static mp_ptr
gmp_xrealloc_limbs(mp_ptr old,mp_size_t size)330 gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
331 {
332 assert (size > 0);
333 return (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
334 }
335
336
337 /* MPN interface */
338
339 void
mpn_copyi(mp_ptr d,mp_srcptr s,mp_size_t n)340 mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
341 {
342 mp_size_t i;
343 for (i = 0; i < n; i++)
344 d[i] = s[i];
345 }
346
347 void
mpn_copyd(mp_ptr d,mp_srcptr s,mp_size_t n)348 mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
349 {
350 while (--n >= 0)
351 d[n] = s[n];
352 }
353
354 int
mpn_cmp(mp_srcptr ap,mp_srcptr bp,mp_size_t n)355 mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
356 {
357 while (--n >= 0)
358 {
359 if (ap[n] != bp[n])
360 return ap[n] > bp[n] ? 1 : -1;
361 }
362 return 0;
363 }
364
365 static int
mpn_cmp4(mp_srcptr ap,mp_size_t an,mp_srcptr bp,mp_size_t bn)366 mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
367 {
368 if (an != bn)
369 return an < bn ? -1 : 1;
370 else
371 return mpn_cmp (ap, bp, an);
372 }
373
374 static mp_size_t
mpn_normalized_size(mp_srcptr xp,mp_size_t n)375 mpn_normalized_size (mp_srcptr xp, mp_size_t n)
376 {
377 while (n > 0 && xp[n-1] == 0)
378 --n;
379 return n;
380 }
381
/* True iff {xp, n} represents the value zero. */
#define mpn_zero_p(xp, n) (mpn_normalized_size ((xp), (n)) == 0)
383
384 void
mpn_zero(mp_ptr rp,mp_size_t n)385 mpn_zero (mp_ptr rp, mp_size_t n)
386 {
387 while (--n >= 0)
388 rp[n] = 0;
389 }
390
391 mp_limb_t
mpn_add_1(mp_ptr rp,mp_srcptr ap,mp_size_t n,mp_limb_t b)392 mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
393 {
394 mp_size_t i;
395
396 assert (n > 0);
397 i = 0;
398 do
399 {
400 mp_limb_t r = ap[i] + b;
401 /* Carry out */
402 b = (r < b);
403 rp[i] = r;
404 }
405 while (++i < n);
406
407 return b;
408 }
409
410 mp_limb_t
mpn_add_n(mp_ptr rp,mp_srcptr ap,mp_srcptr bp,mp_size_t n)411 mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
412 {
413 mp_size_t i;
414 mp_limb_t cy;
415
416 for (i = 0, cy = 0; i < n; i++)
417 {
418 mp_limb_t a, b, r;
419 a = ap[i]; b = bp[i];
420 r = a + cy;
421 cy = (r < cy);
422 r += b;
423 cy += (r < b);
424 rp[i] = r;
425 }
426 return cy;
427 }
428
429 mp_limb_t
mpn_add(mp_ptr rp,mp_srcptr ap,mp_size_t an,mp_srcptr bp,mp_size_t bn)430 mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
431 {
432 mp_limb_t cy;
433
434 assert (an >= bn);
435
436 cy = mpn_add_n (rp, ap, bp, bn);
437 if (an > bn)
438 cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
439 return cy;
440 }
441
442 mp_limb_t
mpn_sub_1(mp_ptr rp,mp_srcptr ap,mp_size_t n,mp_limb_t b)443 mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
444 {
445 mp_size_t i;
446
447 assert (n > 0);
448
449 i = 0;
450 do
451 {
452 mp_limb_t a = ap[i];
453 /* Carry out */
454 mp_limb_t cy = a < b;;
455 rp[i] = a - b;
456 b = cy;
457 }
458 while (++i < n);
459
460 return b;
461 }
462
463 mp_limb_t
mpn_sub_n(mp_ptr rp,mp_srcptr ap,mp_srcptr bp,mp_size_t n)464 mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
465 {
466 mp_size_t i;
467 mp_limb_t cy;
468
469 for (i = 0, cy = 0; i < n; i++)
470 {
471 mp_limb_t a, b;
472 a = ap[i]; b = bp[i];
473 b += cy;
474 cy = (b < cy);
475 cy += (a < b);
476 rp[i] = a - b;
477 }
478 return cy;
479 }
480
481 mp_limb_t
mpn_sub(mp_ptr rp,mp_srcptr ap,mp_size_t an,mp_srcptr bp,mp_size_t bn)482 mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
483 {
484 mp_limb_t cy;
485
486 assert (an >= bn);
487
488 cy = mpn_sub_n (rp, ap, bp, bn);
489 if (an > bn)
490 cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
491 return cy;
492 }
493
/* Multiply {up,n} by the single limb VL; store the low limbs of the
   product at rp and return the final high limb (carry out).  n >= 1. */
mp_limb_t
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      /* hpl:lpl = ul * vl, a full double-limb product. */
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      /* Fold in the carry from the previous limb. */
      lpl += cl;
      cl = (lpl < cl) + hpl;

      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}
516
/* Add {up,n} * VL to {rp,n}; returns the carry out.  n >= 1. */
mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      /* hpl:lpl = ul * vl. */
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      /* Fold in the carry from the previous limb. */
      lpl += cl;
      cl = (lpl < cl) + hpl;

      /* Accumulate onto the existing limb at rp. */
      rl = *rp;
      lpl = rl + lpl;
      cl += lpl < rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}
542
/* Subtract {up,n} * VL from {rp,n}; returns the borrow out.  n >= 1. */
mp_limb_t
mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  assert (n >= 1);

  cl = 0;
  do
    {
      ul = *up++;
      /* hpl:lpl = ul * vl. */
      gmp_umul_ppmm (hpl, lpl, ul, vl);

      /* Fold in the borrow from the previous limb. */
      lpl += cl;
      cl = (lpl < cl) + hpl;

      /* Subtract from the existing limb at rp; lpl > rl detects the
	 borrow out of the subtraction. */
      rl = *rp;
      lpl = rl - lpl;
      cl += lpl > rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}
568
569 mp_limb_t
mpn_mul(mp_ptr rp,mp_srcptr up,mp_size_t un,mp_srcptr vp,mp_size_t vn)570 mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
571 {
572 assert (un >= vn);
573 assert (vn >= 1);
574
575 /* We first multiply by the low order limb. This result can be
576 stored, not added, to rp. We also avoid a loop for zeroing this
577 way. */
578
579 rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
580
581 /* Now accumulate the product of up[] and the next higher limb from
582 vp[]. */
583
584 while (--vn >= 1)
585 {
586 rp += 1, vp += 1;
587 rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
588 }
589 return rp[un];
590 }
591
592 void
mpn_mul_n(mp_ptr rp,mp_srcptr ap,mp_srcptr bp,mp_size_t n)593 mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
594 {
595 mpn_mul (rp, ap, n, bp, n);
596 }
597
598 void
mpn_sqr(mp_ptr rp,mp_srcptr ap,mp_size_t n)599 mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
600 {
601 mpn_mul (rp, ap, n, ap, n);
602 }
603
/* Shift {up,n} left by cnt bits (0 < cnt < GMP_LIMB_BITS) into
   {rp,n}; returns the bits shifted out of the top limb.  Works from
   the high end down, so overlapping rp >= up is safe. */
mp_limb_t
mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
  mp_limb_t high_limb, low_limb;
  unsigned int tnc;
  mp_limb_t retval;

  assert (n >= 1);
  assert (cnt >= 1);
  assert (cnt < GMP_LIMB_BITS);

  up += n;
  rp += n;

  tnc = GMP_LIMB_BITS - cnt;	/* complementary shift count */
  low_limb = *--up;
  retval = low_limb >> tnc;	/* bits pushed out the top */
  high_limb = (low_limb << cnt);

  while (--n != 0)
    {
      low_limb = *--up;
      *--rp = high_limb | (low_limb >> tnc);
      high_limb = (low_limb << cnt);
    }
  *--rp = high_limb;		/* lowest limb: zeros shifted in */

  return retval;
}
633
/* Shift {up,n} right by cnt bits (0 < cnt < GMP_LIMB_BITS) into
   {rp,n}; returns the bits shifted out of the bottom limb (in the
   high end of the returned limb).  Works from the low end up, so
   overlapping rp <= up is safe. */
mp_limb_t
mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
  mp_limb_t high_limb, low_limb;
  unsigned int tnc;
  mp_limb_t retval;

  assert (n >= 1);
  assert (cnt >= 1);
  assert (cnt < GMP_LIMB_BITS);

  tnc = GMP_LIMB_BITS - cnt;	/* complementary shift count */
  high_limb = *up++;
  retval = (high_limb << tnc);	/* bits dropped off the bottom */
  low_limb = high_limb >> cnt;

  while (--n != 0)
    {
      high_limb = *up++;
      *rp++ = low_limb | (high_limb << tnc);
      low_limb = high_limb >> cnt;
    }
  *rp = low_limb;		/* highest limb: zeros shifted in */

  return retval;
}
660
/* Shared helper for mpn_scan0/mpn_scan1.  LIMB holds the already
   masked (and, for scan0, complemented) bits of up[i]; UX is 0 when
   scanning for a one bit and GMP_LIMB_MAX when scanning for a zero
   bit.  Returns the absolute bit index of the first match; when the
   scan reaches index un it returns un*GMP_LIMB_BITS for a zero-bit
   scan and ~0 for a one-bit scan. */
static mp_bitcnt_t
mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
		 mp_limb_t ux)
{
  unsigned cnt;

  assert (ux == 0 || ux == GMP_LIMB_MAX);
  assert (0 <= i && i <= un );

  while (limb == 0)
    {
      i++;
      if (i == un)
	return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
      limb = ux ^ up[i];	/* complements the limb when ux == ~0 */
    }
  gmp_ctz (cnt, limb);
  return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
}
680
681 mp_bitcnt_t
mpn_scan1(mp_srcptr ptr,mp_bitcnt_t bit)682 mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
683 {
684 mp_size_t i;
685 i = bit / GMP_LIMB_BITS;
686
687 return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
688 i, ptr, i, 0);
689 }
690
691 mp_bitcnt_t
mpn_scan0(mp_srcptr ptr,mp_bitcnt_t bit)692 mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
693 {
694 mp_size_t i;
695 i = bit / GMP_LIMB_BITS;
696
697 return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
698 i, ptr, i, GMP_LIMB_MAX);
699 }
700
701
702 /* MPN division interface. */
/* Compute the 3/2 inverse of the normalized two-limb value <u1, u0>:
   m = floor ((B^3 - 1) / (B u1 + u0)) - B, with B = 2^GMP_LIMB_BITS.
   Requires the high bit of u1 set.  NOTE(review): the half-limb
   variables below are plain `unsigned`; this assumes a limb is at
   most twice the width of unsigned -- confirm for the target ABI. */
mp_limb_t
mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
{
  mp_limb_t r, p, m;
  unsigned ul, uh;
  unsigned ql, qh;

  /* First, do a 2/1 inverse. */
  /* The inverse m is defined as floor( (B^2 - 1 - u1)/u1 ), so that 0 <
   * B^2 - (B + m) u1 <= u1 */
  assert (u1 >= GMP_LIMB_HIGHBIT);

  ul = u1 & GMP_LLIMB_MASK;
  uh = u1 >> (GMP_LIMB_BITS / 2);

  qh = (unsigned)(~u1 / uh);
  r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;

  p = (mp_limb_t) qh * ul;
  /* Adjustment steps taken from udiv_qrnnd_c */
  if (r < p)
    {
      qh--;
      r += u1;
      if (r >= u1) /* i.e. we didn't get carry when adding to r */
	if (r < p)
	  {
	    qh--;
	    r += u1;
	  }
    }
  r -= p;

  /* Do a 3/2 division (with half limb size) */
  p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
  ql = (p >> (GMP_LIMB_BITS / 2)) + 1;

  /* By the 3/2 method, we don't need the high half limb. */
  r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;

  if (r >= (p << (GMP_LIMB_BITS / 2)))
    {
      ql--;
      r += u1;
    }
  m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
  if (r >= u1)
    {
      m++;
      r -= u1;
    }

  /* Refine the 2/1 inverse of u1 into a 3/2 inverse of <u1, u0>. */
  if (u0 > 0)
    {
      mp_limb_t th, tl;
      r = ~r;
      r += u0;
      if (r < u0)		/* carry: compensate in m */
	{
	  m--;
	  if (r >= u1)
	    {
	      m--;
	      r -= u1;
	    }
	  r -= u1;
	}
      gmp_umul_ppmm (th, tl, u0, m);
      r += th;
      if (r < th)
	{
	  m--;
	  m -= ((r > u1) | ((r == u1) & (tl > u0)));
	}
    }

  return m;
}
781
/* Precomputed data for dividing repeatedly by the same divisor: the
   normalization shift, the shifted top divisor limbs, and the
   fixed-point inverse used by the *_preinv division loops. */
struct gmp_div_inverse
{
  /* Normalization shift count. */
  unsigned shift;
  /* Normalized divisor (d0 unused for mpn_div_qr_1) */
  mp_limb_t d1, d0;
  /* Inverse, for 2/1 or 3/2. */
  mp_limb_t di;
};
791
792 static void
mpn_div_qr_1_invert(struct gmp_div_inverse * inv,mp_limb_t d)793 mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
794 {
795 unsigned shift;
796
797 assert (d > 0);
798 gmp_clz (shift, d);
799 inv->shift = shift;
800 inv->d1 = d << shift;
801 inv->di = mpn_invert_limb (inv->d1);
802 }
803
804 static void
mpn_div_qr_2_invert(struct gmp_div_inverse * inv,mp_limb_t d1,mp_limb_t d0)805 mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
806 mp_limb_t d1, mp_limb_t d0)
807 {
808 unsigned shift;
809
810 assert (d1 > 0);
811 gmp_clz (shift, d1);
812 inv->shift = shift;
813 if (shift > 0)
814 {
815 d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
816 d0 <<= shift;
817 }
818 inv->d1 = d1;
819 inv->d0 = d0;
820 inv->di = mpn_invert_3by2 (d1, d0);
821 }
822
/* Prepare INV for division by {dp, dn} (dn > 0).  Dispatches to the
   1- and 2-limb setups; for dn > 2 the shifted top two limbs are
   formed (pulling bits up from dp[dn-3] when a shift is needed) and
   their 3/2 inverse is taken. */
static void
mpn_div_qr_invert (struct gmp_div_inverse *inv,
		   mp_srcptr dp, mp_size_t dn)
{
  assert (dn > 0);

  if (dn == 1)
    mpn_div_qr_1_invert (inv, dp[0]);
  else if (dn == 2)
    mpn_div_qr_2_invert (inv, dp[1], dp[0]);
  else
    {
      unsigned shift;
      mp_limb_t d1, d0;

      d1 = dp[dn-1];
      d0 = dp[dn-2];
      assert (d1 > 0);
      gmp_clz (shift, d1);
      inv->shift = shift;
      if (shift > 0)
	{
	  /* dn > 2 guarantees dp[dn-3] exists. */
	  d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
	  d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
	}
      inv->d1 = d1;
      inv->d0 = d0;
      inv->di = mpn_invert_3by2 (d1, d0);
    }
}
853
854 /* Not matching current public gmp interface, rather corresponding to
855 the sbpi1_div_* functions. */
/* Not matching current public gmp interface, rather corresponding to
   the sbpi1_div_* functions. */

/* Divide {np, nn} by the single-limb divisor described by INV.  The
   quotient is stored at qp unless qp is NULL; the remainder is
   returned.  When the divisor required normalization, a shifted
   temporary copy of the dividend is used and freed at the end. */
static mp_limb_t
mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
		     const struct gmp_div_inverse *inv)
{
  mp_limb_t d, di;
  mp_limb_t r;
  mp_ptr tp = NULL;

  if (inv->shift > 0)
    {
      tp = gmp_xalloc_limbs (nn);
      /* The bits shifted out the top seed the remainder. */
      r = mpn_lshift (tp, np, nn, inv->shift);
      np = tp;
    }
  else
    r = 0;

  d = inv->d1;
  di = inv->di;
  while (--nn >= 0)
    {
      mp_limb_t q;

      gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
      if (qp)
	qp[nn] = q;
    }
  if (inv->shift > 0)
    gmp_free (tp);

  /* Undo the normalization shift on the remainder. */
  return r >> inv->shift;
}
888
889 static mp_limb_t
mpn_div_qr_1(mp_ptr qp,mp_srcptr np,mp_size_t nn,mp_limb_t d)890 mpn_div_qr_1 (mp_ptr qp, mp_srcptr np, mp_size_t nn, mp_limb_t d)
891 {
892 assert (d > 0);
893
894 /* Special case for powers of two. */
895 if ((d & (d-1)) == 0)
896 {
897 mp_limb_t r = np[0] & (d-1);
898 if (qp)
899 {
900 if (d <= 1)
901 mpn_copyi (qp, np, nn);
902 else
903 {
904 unsigned shift;
905 gmp_ctz (shift, d);
906 mpn_rshift (qp, np, nn, shift);
907 }
908 }
909 return r;
910 }
911 else
912 {
913 struct gmp_div_inverse inv;
914 mpn_div_qr_1_invert (&inv, d);
915 return mpn_div_qr_1_preinv (qp, np, nn, &inv);
916 }
917 }
918
/* Divide {np, nn} (nn >= 2) by the two-limb divisor described by INV.
   Quotient (nn-2+1 limbs) to qp unless NULL; two-limb remainder to
   {rp, 2}.  A shifted temporary dividend is used when the divisor
   required normalization. */
static void
mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
		     const struct gmp_div_inverse *inv)
{
  unsigned shift;
  mp_size_t i;
  mp_limb_t d1, d0, di, r1, r0;
  mp_ptr tp;

  assert (nn >= 2);
  shift = inv->shift;
  d1 = inv->d1;
  d0 = inv->d0;
  di = inv->di;

  if (shift > 0)
    {
      tp = gmp_xalloc_limbs (nn);
      /* Bits shifted out the top seed the high remainder limb. */
      r1 = mpn_lshift (tp, np, nn, shift);
      np = tp;
    }
  else
    r1 = 0;

  r0 = np[nn - 1];

  /* One quotient limb per iteration, most significant first. */
  i = nn - 2;
  do
    {
      mp_limb_t n0, q;
      n0 = np[i];
      gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);

      if (qp)
	qp[i] = q;
    }
  while (--i >= 0);

  if (shift > 0)
    {
      /* Undo the normalization shift on the two remainder limbs. */
      assert ((r0 << (GMP_LIMB_BITS - shift)) == 0);
      r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
      r1 >>= shift;

      gmp_free (tp);
    }

  rp[1] = r1;
  rp[0] = r0;
}
969
#if 0
/* Currently unused: direct two-limb division entry point, kept for
   reference.  Enable if a <d1, d0> interface is needed. */
static void
mpn_div_qr_2 (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
	      mp_limb_t d1, mp_limb_t d0)
{
  struct gmp_div_inverse inv;
  assert (nn >= 2);

  mpn_div_qr_2_invert (&inv, d1, d0);
  mpn_div_qr_2_preinv (qp, rp, np, nn, &inv);
}
#endif
982
/* Schoolbook division of the dividend {np, nn} (with its top limb
   passed separately as N1) by the normalized divisor {dp, dn}, dn > 2,
   using DINV, the 3/2 inverse of its top two limbs.  The quotient is
   written to qp unless NULL; the remainder ends up in the low dn limbs
   of np.  Corresponds to GMP's sbpi1_div_qr. */
static void
mpn_div_qr_pi1 (mp_ptr qp,
		mp_ptr np, mp_size_t nn, mp_limb_t n1,
		mp_srcptr dp, mp_size_t dn,
		mp_limb_t dinv)
{
  mp_size_t i;

  mp_limb_t d1, d0;
  mp_limb_t cy, cy1;
  mp_limb_t q;

  assert (dn > 2);
  assert (nn >= dn);

  d1 = dp[dn - 1];
  d0 = dp[dn - 2];

  assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
  /* Iteration variable is the index of the q limb.
   *
   * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
   * by <d1, d0, dp[dn-3], ..., dp[0] >
   */
  i = nn - dn;
  do
    {
      mp_limb_t n0 = np[dn-1+i];

      if (n1 == d1 && n0 == d0)
	{
	  /* Quotient limb saturates; subtract q*d directly. */
	  q = GMP_LIMB_MAX;
	  mpn_submul_1 (np+i, dp, dn, q);
	  n1 = np[dn-1+i];	/* update n1, last loop's value will now be invalid */
	}
      else
	{
	  gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);

	  /* Subtract q times the low dn-2 divisor limbs, then
	     propagate the resulting borrow into <n1, n0>. */
	  cy = mpn_submul_1 (np + i, dp, dn-2, q);

	  cy1 = n0 < cy;
	  n0 = n0 - cy;
	  cy = n1 < cy1;
	  n1 = n1 - cy1;
	  np[dn-2+i] = n0;

	  if (cy != 0)
	    {
	      /* q was one too large: add the divisor back. */
	      n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
	      q--;
	    }
	}

      if (qp)
	qp[i] = q;
    }
  while (--i >= 0);

  np[dn - 1] = n1;
}
1045
/* Divide {np, nn} by {dp, dn} using the precomputed inverse INV.
   Quotient to qp unless NULL; remainder left in the low dn limbs of
   np.  For dn > 2 the caller must pass an already-normalized dp
   matching inv->d1/d0 (the dividend is shifted in place here). */
static void
mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
		   mp_srcptr dp, mp_size_t dn,
		   const struct gmp_div_inverse *inv)
{
  assert (dn > 0);
  assert (nn >= dn);

  if (dn == 1)
    np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
  else if (dn == 2)
    mpn_div_qr_2_preinv (qp, np, np, nn, inv);
  else
    {
      mp_limb_t nh;
      unsigned shift;

      assert (inv->d1 == dp[dn-1]);
      assert (inv->d0 == dp[dn-2]);
      assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);

      shift = inv->shift;
      if (shift > 0)
	nh = mpn_lshift (np, np, nn, shift);	/* top bits become n1 */
      else
	nh = 0;

      mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);

      /* Undo the normalization shift on the remainder. */
      if (shift > 0)
	gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
    }
}
1079
/* Divide {np, nn} by {dp, dn}: quotient (nn-dn+1 limbs) to qp unless
   NULL, remainder left in the low dn limbs of np.  For dn > 2 with an
   unnormalized divisor, a shifted temporary copy of dp is made, since
   mpn_div_qr_preinv requires a normalized divisor in that case. */
static void
mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
{
  struct gmp_div_inverse inv;
  mp_ptr tp = NULL;

  assert (dn > 0);
  assert (nn >= dn);

  mpn_div_qr_invert (&inv, dp, dn);
  if (dn > 2 && inv.shift > 0)
    {
      tp = gmp_xalloc_limbs (dn);
      gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
      dp = tp;
    }
  mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
  if (tp)
    gmp_free (tp);
}
1100
1101
1102 /* MPN base conversion. */
/* MPN base conversion. */
/* If B is a power of two in [2, 256], return log2(B); otherwise 0. */
static unsigned
mpn_base_power_of_two_p (unsigned b)
{
  unsigned exp;

  for (exp = 1; exp <= 8; exp++)
    if (b == (1U << exp))
      return exp;
  return 0;
}
1119
/* Parameters for conversion in a non-power-of-two base. */
struct mpn_base_info
{
  /* bb is the largest power of the base which fits in one limb, and
     exp is the corresponding exponent. */
  unsigned exp;
  mp_limb_t bb;
};
1127
1128 static void
mpn_get_base_info(struct mpn_base_info * info,mp_limb_t b)1129 mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
1130 {
1131 mp_limb_t m;
1132 mp_limb_t p;
1133 unsigned exp;
1134
1135 m = GMP_LIMB_MAX / b;
1136 for (exp = 1, p = b; p <= m; exp++)
1137 p *= b;
1138
1139 info->exp = exp;
1140 info->bb = p;
1141 }
1142
1143 static mp_bitcnt_t
mpn_limb_size_in_base_2(mp_limb_t u)1144 mpn_limb_size_in_base_2 (mp_limb_t u)
1145 {
1146 unsigned shift;
1147
1148 assert (u > 0);
1149 gmp_clz (shift, u);
1150 return GMP_LIMB_BITS - shift;
1151 }
1152
/* Convert {up, un} to digits in base 2^bits, most significant digit
   first, storing them at sp; returns the digit count.  Digits are
   extracted by sliding a bits-wide window across limb boundaries. */
static size_t
mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
{
  unsigned char mask;
  size_t sn, j;
  mp_size_t i;
  int shift;

  /* Digit count from the exact bit length, rounded up. */
  sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
	+ bits - 1) / bits;

  mask = (1U << bits) - 1;

  for (i = 0, j = sn, shift = 0; j-- > 0;)
    {
      unsigned char digit = up[i] >> shift;

      shift += bits;

      /* When the window crosses into the next limb, pull in its low
	 bits. */
      if (shift >= GMP_LIMB_BITS && ++i < un)
	{
	  shift -= GMP_LIMB_BITS;
	  digit |= up[i] << (bits - shift);
	}
      sp[j] = digit & mask;
    }
  return sn;
}
1181
/* We generate digits from the least significant end, and reverse at
   the end. */

/* Emit the digits of the single limb W, least significant first,
   using BINV (the inverse of the normalized base); returns the digit
   count.  The caller is responsible for reversing the order. */
static size_t
mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
		  const struct gmp_div_inverse *binv)
{
  mp_size_t i;
  for (i = 0; w > 0; i++)
    {
      mp_limb_t h, l, r;

      /* Pre-shift w to match the normalized divisor in binv. */
      h = w >> (GMP_LIMB_BITS - binv->shift);
      l = w << binv->shift;

      gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
      assert ( (r << (GMP_LIMB_BITS - binv->shift)) == 0);
      r >>= binv->shift;	/* undo the normalization */

      sp[i] = r;
    }
  return i;
}
1204
/* Convert {up, un} to digits in a non-power-of-two BASE, most
   significant digit first; returns the digit count.  The number is
   peeled one limb-sized chunk (base^info->exp) at a time; each chunk
   is expanded into digits and the whole string is reversed at the
   end.  Destroys {up, un}. */
static size_t
mpn_get_str_other (unsigned char *sp,
		   int base, const struct mpn_base_info *info,
		   mp_ptr up, mp_size_t un)
{
  struct gmp_div_inverse binv;
  size_t sn;
  size_t i;

  mpn_div_qr_1_invert (&binv, base);

  sn = 0;

  if (un > 1)
    {
      struct gmp_div_inverse bbinv;
      mpn_div_qr_1_invert (&bbinv, info->bb);

      do
	{
	  mp_limb_t w;
	  size_t done;
	  /* w = up mod bb; up /= bb. */
	  w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
	  un -= (up[un-1] == 0);
	  done = mpn_limb_get_str (sp + sn, w, &binv);

	  /* Zero-pad each interior chunk to exactly info->exp digits. */
	  for (sn += done; done < info->exp; done++)
	    sp[sn++] = 0;
	}
      while (un > 1);
    }
  /* Leading (most significant) chunk, without padding. */
  sn += mpn_limb_get_str (sp + sn, up[0], &binv);

  /* Reverse order */
  for (i = 0; 2*i + 1 < sn; i++)
    {
      unsigned char t = sp[i];
      sp[i] = sp[sn - i - 1];
      sp[sn - i - 1] = t;
    }

  return sn;
}
1248
1249 size_t
mpn_get_str(unsigned char * sp,int base,mp_ptr up,mp_size_t un)1250 mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
1251 {
1252 unsigned bits;
1253
1254 assert (un > 0);
1255 assert (up[un-1] > 0);
1256
1257 bits = mpn_base_power_of_two_p (base);
1258 if (bits)
1259 return mpn_get_str_bits (sp, bits, up, un);
1260 else
1261 {
1262 struct mpn_base_info info;
1263
1264 mpn_get_base_info (&info, base);
1265 return mpn_get_str_other (sp, base, &info, up, un);
1266 }
1267 }
1268
/* Convert SN digits (most significant first) in base 2^bits at SP
   into limbs at rp; returns the normalized limb count.  Digits are
   packed from the least significant end, spilling into a new limb
   whenever the bit position crosses a limb boundary. */
static mp_size_t
mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
		  unsigned bits)
{
  mp_size_t rn;
  size_t j;
  unsigned shift;

  for (j = sn, rn = 0, shift = 0; j-- > 0; )
    {
      if (shift == 0)
	{
	  rp[rn++] = sp[j];
	  shift += bits;
	}
      else
	{
	  rp[rn-1] |= (mp_limb_t) sp[j] << shift;
	  shift += bits;
	  if (shift >= GMP_LIMB_BITS)
	    {
	      shift -= GMP_LIMB_BITS;
	      /* The digit straddled the boundary: its high bits start
		 the next limb. */
	      if (shift > 0)
		rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
	    }
	}
    }
  rn = mpn_normalized_size (rp, rn);
  return rn;
}
1299
/* Convert SN digits (most significant first) in base B at SP into
   limbs at rp; returns the limb count.  Digits are consumed in chunks
   of info->exp (a short leading chunk first), each chunk folded in by
   one multiply by info->bb = B^exp and one add. */
static mp_size_t
mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
		   mp_limb_t b, const struct mpn_base_info *info)
{
  mp_size_t rn;
  mp_limb_t w;
  unsigned k;
  size_t j;

  /* Length of the leading partial chunk, 1..exp digits. */
  k = 1 + (sn - 1) % info->exp;

  j = 0;
  w = sp[j++];
  while (--k != 0)
    w = w * b + sp[j++];

  rp[0] = w;

  for (rn = (w > 0); j < sn;)
    {
      mp_limb_t cy;

      /* Accumulate the next exp digits into a single limb... */
      w = sp[j++];
      for (k = 1; k < info->exp; k++)
	w = w * b + sp[j++];

      /* ...then fold it in: rp = rp * bb + w. */
      cy = mpn_mul_1 (rp, rp, rn, info->bb);
      cy += mpn_add_1 (rp, rp, rn, w);
      if (cy > 0)
	rp[rn++] = cy;
    }
  assert (j == sn);

  return rn;
}
1335
1336 mp_size_t
mpn_set_str(mp_ptr rp,const unsigned char * sp,size_t sn,int base)1337 mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
1338 {
1339 unsigned bits;
1340
1341 if (sn == 0)
1342 return 0;
1343
1344 bits = mpn_base_power_of_two_p (base);
1345 if (bits)
1346 return mpn_set_str_bits (rp, sp, sn, bits);
1347 else
1348 {
1349 struct mpn_base_info info;
1350
1351 mpn_get_base_info (&info, base);
1352 return mpn_set_str_other (rp, sp, sn, base, &info);
1353 }
1354 }
1355
1356
1357 /* MPZ interface */
1358 void
mpz_init(mpz_t r)1359 mpz_init (mpz_t r)
1360 {
1361 r->_mp_alloc = 1;
1362 r->_mp_size = 0;
1363 r->_mp_d = gmp_xalloc_limbs (1);
1364 }
1365
1366 /* The utility of this function is a bit limited, since many functions
1367 assigns the result variable using mpz_swap. */
1368 void
mpz_init2(mpz_t r,mp_bitcnt_t bits)1369 mpz_init2 (mpz_t r, mp_bitcnt_t bits)
1370 {
1371 mp_size_t rn;
1372
1373 bits -= (bits != 0); /* Round down, except if 0 */
1374 rn = 1 + bits / GMP_LIMB_BITS;
1375
1376 r->_mp_alloc = rn;
1377 r->_mp_size = 0;
1378 r->_mp_d = gmp_xalloc_limbs (rn);
1379 }
1380
/* Free the limb storage of r.  r must not be used again unless
   reinitialized with mpz_init.  */
void
mpz_clear (mpz_t r)
{
  gmp_free (r->_mp_d);
}
1386
1387 static void *
mpz_realloc(mpz_t r,mp_size_t size)1388 mpz_realloc (mpz_t r, mp_size_t size)
1389 {
1390 size = GMP_MAX (size, 1);
1391
1392 r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
1393 r->_mp_alloc = size;
1394
1395 if (GMP_ABS (r->_mp_size) > size)
1396 r->_mp_size = 0;
1397
1398 return r->_mp_d;
1399 }
1400
/* Ensure that Z has room for at least N limbs, reallocating only when
   needed.  Evaluates to the (possibly moved) limb pointer.  */
#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
			  ? mpz_realloc(z,n) \
			  : (z)->_mp_d)
1405
1406 /* MPZ assignment and basic conversions. */
1407 void
mpz_set_si(mpz_t r,signed long int x)1408 mpz_set_si (mpz_t r, signed long int x)
1409 {
1410 if (x >= 0)
1411 mpz_set_ui (r, x);
1412 else /* (x < 0) */
1413 {
1414 r->_mp_size = -1;
1415 r->_mp_d[0] = GMP_NEG_CAST (unsigned long int, x);
1416 }
1417 }
1418
1419 void
mpz_set_ui(mpz_t r,unsigned long int x)1420 mpz_set_ui (mpz_t r, unsigned long int x)
1421 {
1422 if (x > 0)
1423 {
1424 r->_mp_size = 1;
1425 r->_mp_d[0] = x;
1426 }
1427 else
1428 r->_mp_size = 0;
1429 }
1430
1431 void
mpz_set(mpz_t r,const mpz_t x)1432 mpz_set (mpz_t r, const mpz_t x)
1433 {
1434 /* Allow the NOP r == x */
1435 if (r != x)
1436 {
1437 mp_size_t n;
1438 mp_ptr rp;
1439
1440 n = GMP_ABS (x->_mp_size);
1441 rp = MPZ_REALLOC (r, n);
1442
1443 mpn_copyi (rp, x->_mp_d, n);
1444 r->_mp_size = x->_mp_size;
1445 }
1446 }
1447
/* Initialize r and set it to the signed value x.  */
void
mpz_init_set_si (mpz_t r, signed long int x)
{
  mpz_init (r);
  mpz_set_si (r, x);
}
1454
/* Initialize r and set it to the unsigned value x.  */
void
mpz_init_set_ui (mpz_t r, unsigned long int x)
{
  mpz_init (r);
  mpz_set_ui (r, x);
}
1461
/* Initialize r and set it to the value of x.  */
void
mpz_init_set (mpz_t r, const mpz_t x)
{
  mpz_init (r);
  mpz_set (r, x);
}
1468
1469 int
mpz_fits_slong_p(const mpz_t u)1470 mpz_fits_slong_p (const mpz_t u)
1471 {
1472 mp_size_t us = u->_mp_size;
1473
1474 if (us == 1)
1475 return u->_mp_d[0] < GMP_LIMB_HIGHBIT;
1476 else if (us == -1)
1477 return u->_mp_d[0] <= GMP_LIMB_HIGHBIT;
1478 else
1479 return (us == 0);
1480 }
1481
1482 int
mpz_fits_ulong_p(const mpz_t u)1483 mpz_fits_ulong_p (const mpz_t u)
1484 {
1485 mp_size_t us = u->_mp_size;
1486
1487 return (us == (us > 0));
1488 }
1489
/* Return the value of u as a long.  For values that do not fit, the
   result is reduced to the low limb with the top bit masked (positive)
   or forced (negative) — callers should check mpz_fits_slong_p
   first.  */
long int
mpz_get_si (const mpz_t u)
{
  mp_size_t us = u->_mp_size;

  if (us > 0)
    return (long) (u->_mp_d[0] & ~GMP_LIMB_HIGHBIT);
  else if (us < 0)
    /* Note: unary minus binds tighter than `|`; this is
       (-d[0]) | HIGHBIT, relying on two's complement wraparound.  */
    return (long) (- u->_mp_d[0] | GMP_LIMB_HIGHBIT);
  else
    return 0;
}
1502
1503 unsigned long int
mpz_get_ui(const mpz_t u)1504 mpz_get_ui (const mpz_t u)
1505 {
1506 return u->_mp_size == 0 ? 0 : u->_mp_d[0];
1507 }
1508
/* Number of limbs in the absolute value of u.  */
size_t
mpz_size (const mpz_t u)
{
  return GMP_ABS (u->_mp_size);
}
1514
1515 mp_limb_t
mpz_getlimbn(const mpz_t u,mp_size_t n)1516 mpz_getlimbn (const mpz_t u, mp_size_t n)
1517 {
1518 if (n >= 0 && n < GMP_ABS (u->_mp_size))
1519 return u->_mp_d[n];
1520 else
1521 return 0;
1522 }
1523
/* Reallocate x to room for at least n bits (at least one limb).  The
   value is preserved when it still fits, otherwise cleared to zero
   (see mpz_realloc).  */
void
mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
{
  mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
}
1529
1530 mp_srcptr
mpz_limbs_read(mpz_srcptr x)1531 mpz_limbs_read (mpz_srcptr x)
1532 {
1533 return x->_mp_d;;
1534 }
1535
/* Writable access to the limbs of x, ensuring room for n limbs.  The
   caller is expected to finish with mpz_limbs_finish to restore a
   valid size.  */
mp_ptr
mpz_limbs_modify (mpz_t x, mp_size_t n)
{
  assert (n > 0);
  return MPZ_REALLOC (x, n);
}
1542
/* Like mpz_limbs_modify; in this implementation the old limb contents
   are preserved, though the caller need not rely on them.  */
mp_ptr
mpz_limbs_write (mpz_t x, mp_size_t n)
{
  return mpz_limbs_modify (x, n);
}
1548
1549 void
mpz_limbs_finish(mpz_t x,mp_size_t xs)1550 mpz_limbs_finish (mpz_t x, mp_size_t xs)
1551 {
1552 mp_size_t xn;
1553 xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
1554 x->_mp_size = xs < 0 ? -xn : xn;
1555 }
1556
1557 mpz_srcptr
mpz_roinit_n(mpz_t x,mp_srcptr xp,mp_size_t xs)1558 mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
1559 {
1560 x->_mp_alloc = 0;
1561 x->_mp_d = (mp_ptr) xp;
1562 mpz_limbs_finish (x, xs);
1563 return x;
1564 }
1565
1566
/* Conversions and comparison to double. */
/* Set r = trunc(x).  NaN, infinities and |x| < 1 all yield zero.  */
void
mpz_set_d (mpz_t r, double x)
{
  int sign;
  mp_ptr rp;
  mp_size_t rn, i;
  double B;
  double Bi;
  mp_limb_t f;

  /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
     zero or infinity. */
  if (x != x || x == x * 0.5)
    {
      r->_mp_size = 0;
      return;
    }

  sign = x < 0.0 ;
  if (sign)
    x = - x;

  if (x < 1.0)
    {
      r->_mp_size = 0;
      return;
    }
  /* B is the limb base 2^GMP_LIMB_BITS as a double.  */
  B = 2.0 * (double) GMP_LIMB_HIGHBIT;
  Bi = 1.0 / B;
  /* Scale x below B, counting how many limbs the result needs.  */
  for (rn = 1; x >= B; rn++)
    x *= Bi;

  rp = MPZ_REALLOC (r, rn);

  /* Peel off one limb at a time, most significant first.  */
  f = (mp_limb_t) x;
  x -= f;
  assert (x < 1.0);
  i = rn-1;
  rp[i] = f;
  while (--i >= 0)
    {
      x = B * x;
      f = (mp_limb_t) x;
      x -= f;
      assert (x < 1.0);
      rp[i] = f;
    }

  r->_mp_size = sign ? - rn : rn;
}
1618
/* Initialize r and set it to trunc(x); see mpz_set_d for the handling
   of NaN and infinity.  */
void
mpz_init_set_d (mpz_t r, double x)
{
  mpz_init (r);
  mpz_set_d (r, x);
}
1625
1626 double
mpz_get_d(const mpz_t u)1627 mpz_get_d (const mpz_t u)
1628 {
1629 mp_size_t un;
1630 double x;
1631 double B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1632
1633 un = GMP_ABS (u->_mp_size);
1634
1635 if (un == 0)
1636 return 0.0;
1637
1638 x = u->_mp_d[--un];
1639 while (un > 0)
1640 x = B*x + u->_mp_d[--un];
1641
1642 if (u->_mp_size < 0)
1643 x = -x;
1644
1645 return x;
1646 }
1647
/* Compare |x| with |d|; returns <0, 0 or >0.  NOTE(review): the float
   comparisons below assume d is finite — confirm callers never pass
   NaN or infinity.  */
int
mpz_cmpabs_d (const mpz_t x, double d)
{
  mp_size_t xn;
  double B, Bi;
  mp_size_t i;

  xn = x->_mp_size;
  d = GMP_ABS (d);

  if (xn != 0)
    {
      xn = GMP_ABS (xn);

      B = 2.0 * (double) GMP_LIMB_HIGHBIT;
      Bi = 1.0 / B;

      /* Scale d so it can be compared with the top limb. */
      for (i = 1; i < xn; i++)
	d *= Bi;

      /* After dividing by B^(xn-1), d >= B means d has more limbs
	 than x.  */
      if (d >= B)
	return -1;

      /* Compare floor(d) to top limb, subtract and cancel when equal. */
      for (i = xn; i-- > 0;)
	{
	  mp_limb_t f, xl;

	  f = (mp_limb_t) d;
	  xl = x->_mp_d[i];
	  if (xl > f)
	    return 1;
	  else if (xl < f)
	    return -1;
	  /* Limbs equal so far; shift the next limb's worth of d up.  */
	  d = B * (d - f);
	}
    }
  /* All limbs matched (or x == 0): x is smaller iff some non-zero
     part of d remains.  */
  return - (d > 0.0);
}
1688
1689 int
mpz_cmp_d(const mpz_t x,double d)1690 mpz_cmp_d (const mpz_t x, double d)
1691 {
1692 if (x->_mp_size < 0)
1693 {
1694 if (d >= 0.0)
1695 return -1;
1696 else
1697 return -mpz_cmpabs_d (x, d);
1698 }
1699 else
1700 {
1701 if (d < 0.0)
1702 return 1;
1703 else
1704 return mpz_cmpabs_d (x, d);
1705 }
1706 }
1707
1708
1709 /* MPZ comparisons and the like. */
1710 int
mpz_sgn(const mpz_t u)1711 mpz_sgn (const mpz_t u)
1712 {
1713 mp_size_t usize = u->_mp_size;
1714
1715 return (usize > 0) - (usize < 0);
1716 }
1717
/* Compare u with the signed value v; returns <0, 0 or >0.  */
int
mpz_cmp_si (const mpz_t u, long v)
{
  mp_size_t usize = u->_mp_size;

  if (usize < -1)
    /* u is negative with at least two limbs, hence below any long.  */
    return -1;
  else if (v >= 0)
    return mpz_cmp_ui (u, v);
  else if (usize >= 0)
    /* v < 0 <= u.  */
    return 1;
  else /* usize == -1 */
    {
      /* Both negative, single limb: compare magnitudes, reversed.  */
      mp_limb_t ul = u->_mp_d[0];
      if ((mp_limb_t)GMP_NEG_CAST (unsigned long int, v) < ul)
	return -1;
      else
	return (mp_limb_t)GMP_NEG_CAST (unsigned long int, v) > ul;
    }
}
1738
1739 int
mpz_cmp_ui(const mpz_t u,unsigned long v)1740 mpz_cmp_ui (const mpz_t u, unsigned long v)
1741 {
1742 mp_size_t usize = u->_mp_size;
1743
1744 if (usize > 1)
1745 return 1;
1746 else if (usize < 0)
1747 return -1;
1748 else
1749 {
1750 mp_limb_t ul = (usize > 0) ? u->_mp_d[0] : 0;
1751 return (ul > v) - (ul < v);
1752 }
1753 }
1754
1755 int
mpz_cmp(const mpz_t a,const mpz_t b)1756 mpz_cmp (const mpz_t a, const mpz_t b)
1757 {
1758 mp_size_t asize = a->_mp_size;
1759 mp_size_t bsize = b->_mp_size;
1760
1761 if (asize != bsize)
1762 return (asize < bsize) ? -1 : 1;
1763 else if (asize >= 0)
1764 return mpn_cmp (a->_mp_d, b->_mp_d, asize);
1765 else
1766 return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
1767 }
1768
1769 int
mpz_cmpabs_ui(const mpz_t u,unsigned long v)1770 mpz_cmpabs_ui (const mpz_t u, unsigned long v)
1771 {
1772 mp_size_t un = GMP_ABS (u->_mp_size);
1773 mp_limb_t ul;
1774
1775 if (un > 1)
1776 return 1;
1777
1778 ul = (un == 1) ? u->_mp_d[0] : 0;
1779
1780 return (ul > v) - (ul < v);
1781 }
1782
/* Compare |u| with |v|; returns <0, 0 or >0.  */
int
mpz_cmpabs (const mpz_t u, const mpz_t v)
{
  return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
		   v->_mp_d, GMP_ABS (v->_mp_size));
}
1789
/* r = |u|.  r may alias u.  */
void
mpz_abs (mpz_t r, const mpz_t u)
{
  mpz_set (r, u);
  r->_mp_size = GMP_ABS (r->_mp_size);
}
1796
/* r = -u.  r may alias u.  */
void
mpz_neg (mpz_t r, const mpz_t u)
{
  mpz_set (r, u);
  r->_mp_size = -r->_mp_size;
}
1803
/* Exchange the values of u and v by swapping their fields; no limb
   data is copied.  */
void
mpz_swap (mpz_t u, mpz_t v)
{
  MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
  MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
  MP_PTR_SWAP (u->_mp_d, v->_mp_d);
}
1811
1812
/* MPZ addition and subtraction */

/* Adds to the absolute value. Returns new size, but doesn't store it. */
static mp_size_t
mpz_abs_add_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  mp_size_t an;
  mp_ptr rp;
  mp_limb_t cy;

  an = GMP_ABS (a->_mp_size);
  if (an == 0)
    {
      /* |a| == 0: result is just b.  A single limb always fits, since
	 _mp_alloc >= 1.  */
      r->_mp_d[0] = b;
      return b > 0;
    }

  rp = MPZ_REALLOC (r, an + 1);

  cy = mpn_add_1 (rp, a->_mp_d, an, b);
  /* Store the carry limb; it extends the size only when non-zero.  */
  rp[an] = cy;
  an += cy;

  return an;
}
1838
/* Subtract from the absolute value. Returns new size, (or -1 on underflow),
   but doesn't store it. */
static mp_size_t
mpz_abs_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_ptr rp = MPZ_REALLOC (r, an);

  if (an == 0)
    {
      /* 0 - b: magnitude b, negated size when b > 0.  */
      rp[0] = b;
      return -(b > 0);
    }
  else if (an == 1 && a->_mp_d[0] < b)
    {
      /* Single limb with |a| < b: result is -(b - |a|).  */
      rp[0] = b - a->_mp_d[0];
      return -1;
    }
  else
    {
      /* Here |a| >= b, so the subtraction cannot borrow.  */
      gmp_assert_nocarry (mpn_sub_1 (rp, a->_mp_d, an, b));
      return mpn_normalized_size (rp, an);
    }
}
1863
1864 void
mpz_add_ui(mpz_t r,const mpz_t a,unsigned long b)1865 mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b)
1866 {
1867 if (a->_mp_size >= 0)
1868 r->_mp_size = mpz_abs_add_ui (r, a, b);
1869 else
1870 r->_mp_size = -mpz_abs_sub_ui (r, a, b);
1871 }
1872
1873 void
mpz_sub_ui(mpz_t r,const mpz_t a,unsigned long b)1874 mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
1875 {
1876 if (a->_mp_size < 0)
1877 r->_mp_size = -mpz_abs_add_ui (r, a, b);
1878 else
1879 r->_mp_size = mpz_abs_sub_ui (r, a, b);
1880 }
1881
1882 void
mpz_ui_sub(mpz_t r,unsigned long a,const mpz_t b)1883 mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b)
1884 {
1885 if (b->_mp_size < 0)
1886 r->_mp_size = mpz_abs_add_ui (r, b, a);
1887 else
1888 r->_mp_size = -mpz_abs_sub_ui (r, b, a);
1889 }
1890
/* Set r's limbs to |a| + |b| and return the result size.  Does not
   store the size in r.  */
static mp_size_t
mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  mp_ptr rp;
  mp_limb_t cy;

  if (an < bn)
    {
      /* Make a the longer operand, as mpn_add expects.  */
      MPZ_SRCPTR_SWAP (a, b);
      MP_SIZE_T_SWAP (an, bn);
    }

  rp = MPZ_REALLOC (r, an + 1);
  cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);

  rp[an] = cy;

  return an + cy;
}
1912
/* Set r's limbs to ||a| - |b|| and return the signed result size
   (negative when |a| < |b|).  Does not store the size in r.  */
static mp_size_t
mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  int cmp;
  mp_ptr rp;

  /* Subtract the smaller magnitude from the larger.  */
  cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
  if (cmp > 0)
    {
      rp = MPZ_REALLOC (r, an);
      gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
      return mpn_normalized_size (rp, an);
    }
  else if (cmp < 0)
    {
      rp = MPZ_REALLOC (r, bn);
      gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
      return -mpn_normalized_size (rp, bn);
    }
  else
    return 0;
}
1937
1938 void
mpz_add(mpz_t r,const mpz_t a,const mpz_t b)1939 mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
1940 {
1941 mp_size_t rn;
1942
1943 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1944 rn = mpz_abs_add (r, a, b);
1945 else
1946 rn = mpz_abs_sub (r, a, b);
1947
1948 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
1949 }
1950
1951 void
mpz_sub(mpz_t r,const mpz_t a,const mpz_t b)1952 mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
1953 {
1954 mp_size_t rn;
1955
1956 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1957 rn = mpz_abs_sub (r, a, b);
1958 else
1959 rn = mpz_abs_add (r, a, b);
1960
1961 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
1962 }
1963
1964
1965 /* MPZ multiplication */
1966 void
mpz_mul_si(mpz_t r,const mpz_t u,long int v)1967 mpz_mul_si (mpz_t r, const mpz_t u, long int v)
1968 {
1969 if (v < 0)
1970 {
1971 mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v));
1972 mpz_neg (r, r);
1973 }
1974 else
1975 mpz_mul_ui (r, u, (unsigned long int) v);
1976 }
1977
/* r = u * v, for unsigned single-word v.  */
void
mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v)
{
  mp_size_t un, us;
  mp_ptr tp;
  mp_limb_t cy;

  us = u->_mp_size;

  if (us == 0 || v == 0)
    {
      r->_mp_size = 0;
      return;
    }

  un = GMP_ABS (us);

  tp = MPZ_REALLOC (r, un + 1);
  cy = mpn_mul_1 (tp, u->_mp_d, un, v);
  /* Store the carry limb; it extends the size only when non-zero.  */
  tp[un] = cy;

  un += (cy > 0);
  r->_mp_size = (us < 0) ? - un : un;
}
2002
/* r = u * v.  Computed into a temporary so r may alias u or v.  */
void
mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
{
  int sign;
  mp_size_t un, vn, rn;
  mpz_t t;
  mp_ptr tp;

  un = u->_mp_size;
  vn = v->_mp_size;

  if (un == 0 || vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* Negative iff exactly one operand is negative.  */
  sign = (un ^ vn) < 0;

  un = GMP_ABS (un);
  vn = GMP_ABS (vn);

  mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);

  tp = t->_mp_d;
  /* mpn_mul requires the first operand to be the longer one.  */
  if (un >= vn)
    mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
  else
    mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);

  /* The product occupies un + vn limbs, or one fewer.  */
  rn = un + vn;
  rn -= tp[rn-1] == 0;

  t->_mp_size = sign ? - rn : rn;
  mpz_swap (r, t);
  mpz_clear (t);
}
2040
/* r = u * 2^bits, i.e. a left shift by BITS bits.  */
void
mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
{
  mp_size_t un, rn;
  mp_size_t limbs;
  unsigned shift;
  mp_ptr rp;

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* Split the shift into whole limbs plus a sub-limb bit count.  */
  limbs = bits / GMP_LIMB_BITS;
  shift = bits % GMP_LIMB_BITS;

  rn = un + limbs + (shift > 0);
  rp = MPZ_REALLOC (r, rn);
  if (shift > 0)
    {
      mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
      rp[rn-1] = cy;
      rn -= (cy == 0);
    }
  else
    /* Limb-aligned shift: copy high-to-low, since the regions may
       overlap when r aliases u.  */
    mpn_copyd (rp + limbs, u->_mp_d, un);

  /* Zero the vacated low limbs.  */
  mpn_zero (rp, limbs);

  r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
}
2074
2075 void
mpz_addmul_ui(mpz_t r,const mpz_t u,unsigned long int v)2076 mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2077 {
2078 mpz_t t;
2079 mpz_init (t);
2080 mpz_mul_ui (t, u, v);
2081 mpz_add (r, r, t);
2082 mpz_clear (t);
2083 }
2084
2085 void
mpz_submul_ui(mpz_t r,const mpz_t u,unsigned long int v)2086 mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v)
2087 {
2088 mpz_t t;
2089 mpz_init (t);
2090 mpz_mul_ui (t, u, v);
2091 mpz_sub (r, r, t);
2092 mpz_clear (t);
2093 }
2094
2095 void
mpz_addmul(mpz_t r,const mpz_t u,const mpz_t v)2096 mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v)
2097 {
2098 mpz_t t;
2099 mpz_init (t);
2100 mpz_mul (t, u, v);
2101 mpz_add (r, r, t);
2102 mpz_clear (t);
2103 }
2104
2105 void
mpz_submul(mpz_t r,const mpz_t u,const mpz_t v)2106 mpz_submul (mpz_t r, const mpz_t u, const mpz_t v)
2107 {
2108 mpz_t t;
2109 mpz_init (t);
2110 mpz_mul (t, u, v);
2111 mpz_sub (r, r, t);
2112 mpz_clear (t);
2113 }
2114
2115
/* MPZ division */
/* Quotient rounding directions: FLOOR rounds toward -infinity, CEIL
   toward +infinity, TRUNC toward zero.  */
enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
2118
/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
static int
mpz_div_qr (mpz_t q, mpz_t r,
	    const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
{
  mp_size_t ns, ds, nn, dn, qs;
  ns = n->_mp_size;
  ds = d->_mp_size;

  if (ds == 0)
    gmp_die("mpz_div_qr: Divide by zero.");

  if (ns == 0)
    {
      if (q)
	q->_mp_size = 0;
      if (r)
	r->_mp_size = 0;
      return 0;
    }

  nn = GMP_ABS (ns);
  dn = GMP_ABS (ds);

  /* Sign of the quotient: negative iff n and d differ in sign.  */
  qs = ds ^ ns;

  if (nn < dn)
    {
      /* |n| < |d|: the truncated quotient is zero; only the rounding
	 mode can push it to +1 or -1.  */
      if (mode == GMP_DIV_CEIL && qs >= 0)
	{
	  /* q = 1, r = n - d */
	  if (r)
	    mpz_sub (r, n, d);
	  if (q)
	    mpz_set_ui (q, 1);
	}
      else if (mode == GMP_DIV_FLOOR && qs < 0)
	{
	  /* q = -1, r = n + d */
	  if (r)
	    mpz_add (r, n, d);
	  if (q)
	    mpz_set_si (q, -1);
	}
      else
	{
	  /* q = 0, r = n */
	  if (r)
	    mpz_set (r, n);
	  if (q)
	    q->_mp_size = 0;
	}
      return 1;
    }
  else
    {
      mp_ptr np, qp;
      mp_size_t qn, rn;
      mpz_t tq, tr;

      /* Work on temporaries, since n and d may alias q or r.  */
      mpz_init_set (tr, n);
      np = tr->_mp_d;

      qn = nn - dn + 1;

      if (q)
	{
	  mpz_init2 (tq, qn * GMP_LIMB_BITS);
	  qp = tq->_mp_d;
	}
      else
	qp = NULL;

      /* Truncating division; np is reduced to the remainder.  */
      mpn_div_qr (qp, np, nn, d->_mp_d, dn);

      if (qp)
	{
	  qn -= (qp[qn-1] == 0);

	  tq->_mp_size = qs < 0 ? -qn : qn;
	}
      rn = mpn_normalized_size (np, dn);
      tr->_mp_size = ns < 0 ? - rn : rn;

      /* Adjust the truncated result for floor/ceil rounding when the
	 remainder is non-zero.  */
      if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
	{
	  if (q)
	    mpz_sub_ui (tq, tq, 1);
	  if (r)
	    mpz_add (tr, tr, d);
	}
      else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
	{
	  if (q)
	    mpz_add_ui (tq, tq, 1);
	  if (r)
	    mpz_sub (tr, tr, d);
	}

      if (q)
	{
	  mpz_swap (tq, q);
	  mpz_clear (tq);
	}
      if (r)
	mpz_swap (tr, r);

      mpz_clear (tr);

      return rn != 0;
    }
}
2231
/* Quotient and remainder, rounding the quotient toward +infinity.  */
void
mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
}

/* Quotient and remainder, rounding the quotient toward -infinity.  */
void
mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
}

/* Quotient and remainder, rounding the quotient toward zero.  */
void
mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
}

/* Quotient only, rounded toward +infinity.  */
void
mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
}

/* Quotient only, rounded toward -infinity.  */
void
mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
}

/* Quotient only, rounded toward zero.  */
void
mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
}

/* Remainder only, for ceiling division.  */
void
mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
}

/* Remainder only, for floor division.  */
void
mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
}

/* Remainder only, for truncating division.  */
void
mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
}

/* r = n mod d; the rounding mode is chosen by the sign of d so that
   the result is always non-negative.  */
void
mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
}
2291
/* q = u / 2^bit_index, rounded according to MODE.  */
static void
mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
		enum mpz_div_round_mode mode)
{
  mp_size_t un, qn;
  mp_size_t limb_cnt;
  mp_ptr qp;
  int adjust;

  un = u->_mp_size;
  if (un == 0)
    {
      q->_mp_size = 0;
      return;
    }
  /* Split the shift into whole limbs plus a sub-limb bit count.  */
  limb_cnt = bit_index / GMP_LIMB_BITS;
  qn = GMP_ABS (un) - limb_cnt;
  bit_index %= GMP_LIMB_BITS;

  if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
    /* Rounding away from zero: bump the magnitude iff any discarded
       bit is set.  */
    /* Note: Below, the final indexing at limb_cnt is valid because at
       that point we have qn > 0. */
    adjust = (qn <= 0
	      || !mpn_zero_p (u->_mp_d, limb_cnt)
	      || (u->_mp_d[limb_cnt]
		  & (((mp_limb_t) 1 << bit_index) - 1)));
  else
    adjust = 0;

  if (qn <= 0)
    qn = 0;

  else
    {
      qp = MPZ_REALLOC (q, qn);

      if (bit_index != 0)
	{
	  mpn_rshift (qp, u->_mp_d + limb_cnt, qn, (int)bit_index);
	  qn -= qp[qn - 1] == 0;
	}
      else
	{
	  mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
	}
    }

  q->_mp_size = qn;

  /* Apply the rounding adjustment to the magnitude, then the sign.  */
  if (adjust)
    mpz_add_ui (q, q, 1);
  if (un < 0)
    mpz_neg (q, q);
}
2346
/* r = u mod 2^bit_index, the remainder matching mpz_div_q_2exp's
   quotient for the given MODE.  */
static void
mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
		enum mpz_div_round_mode mode)
{
  mp_size_t us, un, rn;
  mp_ptr rp;
  mp_limb_t mask;

  us = u->_mp_size;
  if (us == 0 || bit_index == 0)
    {
      r->_mp_size = 0;
      return;
    }
  /* Limbs needed to hold bit_index bits.  */
  rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
  assert (rn > 0);

  rp = MPZ_REALLOC (r, rn);
  un = GMP_ABS (us);

  /* Mask selecting the kept bits of the top limb.  */
  mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);

  if (rn > un)
    {
      /* Quotient (with truncation) is zero, and remainder is
	 non-zero */
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
	{
	  /* Have to negate and sign extend. */
	  mp_size_t i;
	  mp_limb_t cy;

	  /* Two's-complement negate the un low limbs...  */
	  for (cy = 1, i = 0; i < un; i++)
	    {
	      mp_limb_t s = ~u->_mp_d[i] + cy;
	      cy = s < cy;
	      rp[i] = s;
	    }
	  assert (cy == 0);
	  /* ...then sign-extend with all-ones limbs up to the mask.  */
	  for (; i < rn - 1; i++)
	    rp[i] = GMP_LIMB_MAX;

	  rp[rn-1] = mask;
	  us = -us;
	}
      else
	{
	  /* Just copy */
	  if (r != u)
	    mpn_copyi (rp, u->_mp_d, un);

	  rn = un;
	}
    }
  else
    {
      if (r != u)
	mpn_copyi (rp, u->_mp_d, rn - 1);

      rp[rn-1] = u->_mp_d[rn-1] & mask;

      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
	{
	  /* If r != 0, compute 2^{bit_count} - r. */
	  mp_size_t i;

	  for (i = 0; i < rn && rp[i] == 0; i++)
	    ;
	  if (i < rn)
	    {
	      /* r > 0, need to flip sign. */
	      rp[i] = ~rp[i] + 1;
	      while (++i < rn)
		rp[i] = ~rp[i];

	      rp[rn-1] &= mask;

	      /* us is not used for anything else, so we can modify it
		 here to indicate flipped sign. */
	      us = -us;
	    }
	}
    }
  rn = mpn_normalized_size (rp, rn);
  r->_mp_size = us < 0 ? -rn : rn;
}
2433
/* q = u / 2^cnt, rounded toward +infinity.  */
void
mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
}

/* q = u / 2^cnt, rounded toward -infinity (arithmetic shift).  */
void
mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

/* q = u / 2^cnt, rounded toward zero.  */
void
mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
}

/* r = u mod 2^cnt, for ceiling division.  */
void
mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
}

/* r = u mod 2^cnt, for floor division.  */
void
mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

/* r = u mod 2^cnt, for truncating division.  */
void
mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
}
2469
/* q = n / d, where d is assumed to divide n exactly; asserts (via
   gmp_assert_nocarry) that no remainder is left.  */
void
mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
{
  gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
}
2475
/* Return non-zero iff d divides n (mpz_div_qr returns 1 iff the
   remainder is non-zero).  */
int
mpz_divisible_p (const mpz_t n, const mpz_t d)
{
  return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
}
2481
2482 int
mpz_congruent_p(const mpz_t a,const mpz_t b,const mpz_t m)2483 mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m)
2484 {
2485 mpz_t t;
2486 int res;
2487
2488 /* a == b (mod 0) iff a == b */
2489 if (mpz_sgn (m) == 0)
2490 return (mpz_cmp (a, b) == 0);
2491
2492 mpz_init (t);
2493 mpz_sub (t, a, b);
2494 res = mpz_divisible_p (t, m);
2495 mpz_clear (t);
2496
2497 return res;
2498 }
2499
/* Divide n by the single-word divisor d with rounding mode MODE.
   q and/or r may be NULL.  Returns the absolute value of the
   remainder.  */
static unsigned long
mpz_div_qr_ui (mpz_t q, mpz_t r,
	       const mpz_t n, unsigned long d, enum mpz_div_round_mode mode)
{
  mp_size_t ns, qn;
  mp_ptr qp;
  mp_limb_t rl;
  mp_size_t rs;

  ns = n->_mp_size;
  if (ns == 0)
    {
      if (q)
	q->_mp_size = 0;
      if (r)
	r->_mp_size = 0;
      return 0;
    }

  qn = GMP_ABS (ns);
  if (q)
    qp = MPZ_REALLOC (q, qn);
  else
    qp = NULL;

  rl = mpn_div_qr_1 (qp, n->_mp_d, qn, d);
  assert (rl < d);

  /* Truncating division: the remainder takes the sign of n.  */
  rs = rl > 0;
  rs = (ns < 0) ? -rs : rs;

  /* Floor with negative n, or ceil with non-negative n: round the
     quotient away from zero and complement the remainder.  */
  if (rl > 0 && ( (mode == GMP_DIV_FLOOR && ns < 0)
		  || (mode == GMP_DIV_CEIL && ns >= 0)))
    {
      if (q)
	gmp_assert_nocarry (mpn_add_1 (qp, qp, qn, 1));
      rl = d - rl;
      rs = -rs;
    }

  if (r)
    {
      /* A single limb always fits, since _mp_alloc >= 1.  */
      r->_mp_d[0] = rl;
      r->_mp_size = rs;
    }
  if (q)
    {
      qn -= (qp[qn-1] == 0);
      assert (qn == 0 || qp[qn-1] > 0);

      q->_mp_size = (ns < 0) ? - qn : qn;
    }

  return rl;
}
2555
/* Quotient and remainder by a single word, rounding toward +infinity;
   returns |remainder|.  */
unsigned long
mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
}

/* Quotient and remainder by a single word, rounding toward -infinity;
   returns |remainder|.  */
unsigned long
mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
}

/* Quotient and remainder by a single word, rounding toward zero;
   returns |remainder|.  */
unsigned long
mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
}

/* Quotient only, rounded toward +infinity; returns |remainder|.  */
unsigned long
mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
}

/* Quotient only, rounded toward -infinity; returns |remainder|.  */
unsigned long
mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
}

/* Quotient only, rounded toward zero; returns |remainder|.  */
unsigned long
mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
}

/* Remainder only, for ceiling division; returns |remainder|.  */
unsigned long
mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
}
/* Remainder only, for floor division; returns |remainder|.  */
unsigned long
mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}
/* Remainder only, for truncating division; returns |remainder|.  */
unsigned long
mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
}

/* Return |remainder| of ceiling division by a single word.  */
unsigned long
mpz_cdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
}

/* Return |remainder| of floor division by a single word.  */
unsigned long
mpz_fdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
}

/* Return |remainder| of truncating division by a single word.  */
unsigned long
mpz_tdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
}

/* r = n mod d (floor convention); returns the remainder as well.  */
unsigned long
mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}

/* q = n / d where d is assumed to divide n exactly; asserts no
   remainder.  */
void
mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
}

/* Return non-zero iff d divides n.  */
int
mpz_divisible_ui_p (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
}
2643
2644
/* GCD */
/* gcd of two single limbs, by the binary (Stein) algorithm.  At most
   one of u, v may be zero.  */
static mp_limb_t
mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
{
  unsigned shift;

  assert ( (u | v) > 0);

  if (u == 0)
    return v;
  else if (v == 0)
    return u;

  /* Factor out the common power of two; it multiplies the result.  */
  gmp_ctz (shift, u | v);

  u >>= shift;
  v >>= shift;

  /* Make u odd (at least one of u, v is odd after the shift).  */
  if ( (u & 1) == 0)
    MP_LIMB_T_SWAP (u, v);

  while ( (v & 1) == 0)
    v >>= 1;

  /* Invariant: u and v both odd.  Their difference is even, so its
     factors of two can be stripped without changing the gcd.  */
  while (u != v)
    {
      if (u > v)
	{
	  u -= v;
	  do
	    u >>= 1;
	  while ( (u & 1) == 0);
	}
      else
	{
	  v -= u;
	  do
	    v >>= 1;
	  while ( (v & 1) == 0);
	}
    }
  /* Restore the common power of two.  */
  return u << shift;
}
2688
2689 unsigned long
mpz_gcd_ui(mpz_t g,const mpz_t u,unsigned long v)2690 mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v)
2691 {
2692 mp_size_t un;
2693
2694 if (v == 0)
2695 {
2696 if (g)
2697 mpz_abs (g, u);
2698 }
2699 else
2700 {
2701 un = GMP_ABS (u->_mp_size);
2702 if (un != 0)
2703 v = mpn_gcd_11 (mpn_div_qr_1 (NULL, u->_mp_d, un, v), v);
2704
2705 if (g)
2706 mpz_set_ui (g, v);
2707 }
2708
2709 return v;
2710 }
2711
/* Strip trailing zero bits from r in place, making it odd, and
   return the number of bits removed.  r must be strictly positive.  */
static mp_bitcnt_t
mpz_make_odd (mpz_t r)
{
  mp_bitcnt_t shift;

  assert (r->_mp_size > 0);
  /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
  shift = mpn_common_scan (r->_mp_d[0], 0, r->_mp_d, 0, 0);
  mpz_tdiv_q_2exp (r, r, shift);

  return shift;
}
2724
/* g = gcd (|u|, |v|).  Binary-flavored algorithm: common factors of
   two are stripped up front (and restored at the end), one division
   equalizes operand sizes, then odd operands are reduced by
   subtraction, finishing with a single-limb gcd when possible.  */
void
mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv;
  mp_bitcnt_t uz, vz, gz;

  /* gcd (0, v) = |v|; gcd (u, 0) = |u|.  */
  if (u->_mp_size == 0)
    {
      mpz_abs (g, v);
      return;
    }
  if (v->_mp_size == 0)
    {
      mpz_abs (g, u);
      return;
    }

  mpz_init (tu);
  mpz_init (tv);

  /* Strip factors of two; the smaller count is common to both and
     multiplies the gcd.  */
  mpz_abs (tu, u);
  uz = mpz_make_odd (tu);
  mpz_abs (tv, v);
  vz = mpz_make_odd (tv);
  gz = GMP_MIN (uz, vz);

  if (tu->_mp_size < tv->_mp_size)
    mpz_swap (tu, tv);

  /* One division brings the operands close in size; afterwards only
     subtractions are used.  */
  mpz_tdiv_r (tu, tu, tv);
  if (tu->_mp_size == 0)
    {
      mpz_swap (g, tv);
    }
  else
    for (;;)
      {
        int c;

        mpz_make_odd (tu);
        c = mpz_cmp (tu, tv);
        if (c == 0)
          {
            mpz_swap (g, tu);
            break;
          }
        if (c < 0)
          mpz_swap (tu, tv);

        /* When the smaller operand fits in one limb, finish with the
           single-limb binary gcd.  */
        if (tv->_mp_size == 1)
          {
            mp_limb_t vl = tv->_mp_d[0];
            mp_limb_t ul = mpz_tdiv_ui (tu, vl);
            mpz_set_ui (g, mpn_gcd_11 (ul, vl));
            break;
          }
        mpz_sub (tu, tu, tv);
      }
  mpz_clear (tu);
  mpz_clear (tv);
  /* Restore the common power of two.  */
  mpz_mul_2exp (g, g, gz);
}
2787
/* Extended gcd: g = gcd (|u|, |v|) with cofactors satisfying
   g = s u + t v.  Either of s, t may be NULL if that cofactor is not
   wanted.  Uses the same binary strategy as mpz_gcd while
   maintaining two cofactor rows (s0, s1) and (t0, t1); the power of
   two accumulated in the determinant is divided out at the end.  */
void
mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv, s0, s1, t0, t1;
  mp_bitcnt_t uz, vz, gz;
  mp_bitcnt_t power;

  if (u->_mp_size == 0)
    {
      /* g = 0 u + sgn(v) v */
      signed long sign = mpz_sgn (v);
      mpz_abs (g, v);
      if (s)
        mpz_set_ui (s, 0);
      if (t)
        mpz_set_si (t, sign);
      return;
    }

  if (v->_mp_size == 0)
    {
      /* g = sgn(u) u + 0 v */
      signed long sign = mpz_sgn (u);
      mpz_abs (g, u);
      if (s)
        mpz_set_si (s, sign);
      if (t)
        mpz_set_ui (t, 0);
      return;
    }

  mpz_init (tu);
  mpz_init (tv);
  mpz_init (s0);
  mpz_init (s1);
  mpz_init (t0);
  mpz_init (t1);

  mpz_abs (tu, u);
  uz = mpz_make_odd (tu);
  mpz_abs (tv, v);
  vz = mpz_make_odd (tv);
  gz = GMP_MIN (uz, vz);

  uz -= gz;
  vz -= gz;

  /* Cofactors corresponding to odd gcd. gz handled later. */
  if (tu->_mp_size < tv->_mp_size)
    {
      /* Swap the operands AND the roles of s/t so the identities
         below stay consistent.  */
      mpz_swap (tu, tv);
      MPZ_SRCPTR_SWAP (u, v);
      MPZ_PTR_SWAP (s, t);
      MP_BITCNT_T_SWAP (uz, vz);
    }

  /* Maintain
   *
   * u = t0 tu + t1 tv
   * v = s0 tu + s1 tv
   *
   * where u and v denote the inputs with common factors of two
   * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
   *
   * 2^p tu = s1 u - t1 v
   * 2^p tv = -s0 u + t0 v
   */

  /* After initial division, tu = q tv + tu', we have
   *
   * u = 2^uz (tu' + q tv)
   * v = 2^vz tv
   *
   * or
   *
   * t0 = 2^uz, t1 = 2^uz q
   * s0 = 0, s1 = 2^vz
   */

  mpz_setbit (t0, uz);
  mpz_tdiv_qr (t1, tu, tu, tv);
  mpz_mul_2exp (t1, t1, uz);

  mpz_setbit (s1, vz);
  power = uz + vz;

  if (tu->_mp_size > 0)
    {
      mp_bitcnt_t shift;
      shift = mpz_make_odd (tu);
      mpz_mul_2exp (t0, t0, shift);
      mpz_mul_2exp (s0, s0, shift);
      power += shift;

      for (;;)
        {
          int c;
          c = mpz_cmp (tu, tv);
          if (c == 0)
            break;

          if (c < 0)
            {
              /* tv = tv' + tu
               *
               * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
               * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */

              mpz_sub (tv, tv, tu);
              mpz_add (t0, t0, t1);
              mpz_add (s0, s0, s1);

              shift = mpz_make_odd (tv);
              mpz_mul_2exp (t1, t1, shift);
              mpz_mul_2exp (s1, s1, shift);
            }
          else
            {
              mpz_sub (tu, tu, tv);
              mpz_add (t1, t0, t1);
              mpz_add (s1, s0, s1);

              shift = mpz_make_odd (tu);
              mpz_mul_2exp (t0, t0, shift);
              mpz_mul_2exp (s0, s0, shift);
            }
          power += shift;
        }
    }

  /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
     cofactors. */

  mpz_mul_2exp (tv, tv, gz);
  mpz_neg (s0, s0);

  /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
     adjust cofactors, we need u / g and v / g */

  mpz_divexact (s1, v, tv);
  mpz_abs (s1, s1);
  mpz_divexact (t1, u, tv);
  mpz_abs (t1, t1);

  while (power-- > 0)
    {
      /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
      if (mpz_odd_p (s0) || mpz_odd_p (t0))
        {
          mpz_sub (s0, s0, s1);
          mpz_add (t0, t0, t1);
        }
      mpz_divexact_ui (s0, s0, 2);
      mpz_divexact_ui (t0, t0, 2);
    }

  /* Arrange so that |s| < |u| / 2g */
  mpz_add (s1, s0, s1);
  if (mpz_cmpabs (s0, s1) > 0)
    {
      mpz_swap (s0, s1);
      mpz_sub (t0, t0, t1);
    }
  /* Undo the sign normalization applied to the inputs.  */
  if (u->_mp_size < 0)
    mpz_neg (s0, s0);
  if (v->_mp_size < 0)
    mpz_neg (t0, t0);

  mpz_swap (g, tv);
  if (s)
    mpz_swap (s, s0);
  if (t)
    mpz_swap (t, t0);

  mpz_clear (tu);
  mpz_clear (tv);
  mpz_clear (s0);
  mpz_clear (s1);
  mpz_clear (t0);
  mpz_clear (t1);
}
2969
2970 void
mpz_lcm(mpz_t r,const mpz_t u,const mpz_t v)2971 mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
2972 {
2973 mpz_t g;
2974
2975 if (u->_mp_size == 0 || v->_mp_size == 0)
2976 {
2977 r->_mp_size = 0;
2978 return;
2979 }
2980
2981 mpz_init (g);
2982
2983 mpz_gcd (g, u, v);
2984 mpz_divexact (g, u, g);
2985 mpz_mul (r, g, v);
2986
2987 mpz_clear (g);
2988 mpz_abs (r, r);
2989 }
2990
2991 void
mpz_lcm_ui(mpz_t r,const mpz_t u,unsigned long v)2992 mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v)
2993 {
2994 if (v == 0 || u->_mp_size == 0)
2995 {
2996 r->_mp_size = 0;
2997 return;
2998 }
2999
3000 v /= mpz_gcd_ui (NULL, u, v);
3001 mpz_mul_ui (r, u, v);
3002
3003 mpz_abs (r, r);
3004 }
3005
/* Compute the multiplicative inverse of u modulo m.  Returns 1 and
   stores the inverse in r when gcd (u, m) == 1; otherwise returns 0
   and leaves r unchanged.  */
int
mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
{
  mpz_t g, tr;
  int invertible;

  /* No inverse exists for u == 0 or |m| <= 1.  */
  if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
    return 0;

  mpz_init (g);
  mpz_init (tr);

  /* g = gcd (u, m) = tr * u + (discarded) * m.  */
  mpz_gcdext (g, tr, NULL, u, m);
  invertible = (mpz_cmp_ui (g, 1) == 0);

  if (invertible)
    {
      /* Shift a negative cofactor into the canonical residue range
         by adding |m| once (add for m > 0, subtract for m < 0).  */
      if (tr->_mp_size < 0)
        {
          if (m->_mp_size >= 0)
            mpz_add (tr, tr, m);
          else
            mpz_sub (tr, tr, m);
        }
      mpz_swap (r, tr);
    }

  mpz_clear (g);
  mpz_clear (tr);
  return invertible;
}
3037
3038
3039 /* Higher level operations (sqrt, pow and root) */
3040
3041 void
mpz_pow_ui(mpz_t r,const mpz_t b,unsigned long e)3042 mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e)
3043 {
3044 unsigned long bit;
3045 mpz_t tr;
3046 mpz_init_set_ui (tr, 1);
3047
3048 bit = GMP_ULONG_HIGHBIT;
3049 do
3050 {
3051 mpz_mul (tr, tr, tr);
3052 if (e & bit)
3053 mpz_mul (tr, tr, b);
3054 bit >>= 1;
3055 }
3056 while (bit > 0);
3057
3058 mpz_swap (r, tr);
3059 mpz_clear (tr);
3060 }
3061
/* r = blimb^e.  The unsigned long base is wrapped in a read-only
   mpz; NOTE(review): this relies on unsigned long and mp_limb_t
   being the same type, as mini-gmp defines them -- confirm against
   mini-gmp.h.  */
void
mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
{
  mpz_t b;
  mpz_pow_ui (r, mpz_roinit_n (b, &blimb, 1), e);
}
3068
/* r = b^e mod m, with m != 0.  A negative exponent requires b to be
   invertible mod m (dies otherwise).  Left-to-right binary
   exponentiation; all intermediate reductions use a pre-inverted
   and, when necessary, *normalized* (left-shifted) copy of m, with
   the shift reapplied only in the final reduction.  */
void
mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
{
  mpz_t tr;
  mpz_t base;
  mp_size_t en, mn;
  mp_srcptr mp;
  struct gmp_div_inverse minv;
  unsigned shift;
  mp_ptr tp = NULL;

  en = GMP_ABS (e->_mp_size);
  mn = GMP_ABS (m->_mp_size);
  if (mn == 0)
    gmp_die ("mpz_powm: Zero modulo.");

  /* b^0 = 1 for any b (including 0).  */
  if (en == 0)
    {
      mpz_set_ui (r, 1);
      return;
    }

  mp = m->_mp_d;
  mpn_div_qr_invert (&minv, mp, mn);
  shift = minv.shift;

  if (shift > 0)
    {
      /* To avoid shifts, we do all our reductions, except the final
	 one, using a *normalized* m. */
      minv.shift = 0;

      tp = gmp_xalloc_limbs (mn);
      gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
      mp = tp;
    }

  mpz_init (base);

  if (e->_mp_size < 0)
    {
      if (!mpz_invert (base, b, m))
	gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
    }
  else
    {
      mp_size_t bn;
      mpz_abs (base, b);

      bn = base->_mp_size;
      if (bn >= mn)
	{
	  mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
	  bn = mn;
	}

      /* We have reduced the absolute value. Now take care of the
	 sign. Note that we get zero represented non-canonically as
	 m. */
      if (b->_mp_size < 0)
	{
	  mp_ptr bp = MPZ_REALLOC (base, mn);
	  gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
	  bn = mn;
	}
      base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
    }
  mpz_init_set_ui (tr, 1);

  /* Scan exponent limbs from most to least significant; square per
     bit, multiply by base on set bits, reduce whenever the
     accumulator grows past mn limbs.  */
  while (--en >= 0)
    {
      mp_limb_t w = e->_mp_d[en];
      mp_limb_t bit;

      bit = GMP_LIMB_HIGHBIT;
      do
	{
	  mpz_mul (tr, tr, tr);
	  if (w & bit)
	    mpz_mul (tr, tr, base);
	  if (tr->_mp_size > mn)
	    {
	      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
	      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
	    }
	  bit >>= 1;
	}
      while (bit > 0);
    }

  /* Final reduction */
  if (tr->_mp_size >= mn)
    {
      minv.shift = shift;
      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
    }
  if (tp)
    gmp_free (tp);

  mpz_swap (r, tr);
  mpz_clear (tr);
  mpz_clear (base);
}
3173
/* r = b^elimb mod m, with the unsigned long exponent wrapped in a
   read-only mpz (see the mp_limb_t note at mpz_ui_pow_ui).  */
void
mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
{
  mpz_t e;
  mpz_powm (r, b, mpz_roinit_n (e, &elimb, 1), m);
}
3180
3181 /* x=trunc(y^(1/z)), r=y-x^z */
void
mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
{
  int sgn;
  mpz_t t, u;

  sgn = y->_mp_size < 0;
  /* An even z (bit 0 clear) combined with negative y is an error.  */
  if ((~z & sgn) != 0)
    gmp_die ("mpz_rootrem: Negative argument, with even root.");
  if (z == 0)
    gmp_die ("mpz_rootrem: Zeroth root.");

  /* |y| <= 1: the root is y itself and the remainder is zero.  */
  if (mpz_cmpabs_ui (y, 1) <= 0) {
    if (x)
      mpz_set (x, y);
    if (r)
      r->_mp_size = 0;
    return;
  }

  mpz_init (u);
  {
    mp_bitcnt_t tb;
    /* Initial guess: a power of two at least as large as the root.  */
    tb = mpz_sizeinbase (y, 2) / z + 1;
    mpz_init2 (t, tb + 1);
    mpz_setbit (t, tb);
  }

  /* Newton iteration, converging from above; stop when it no longer
     decreases.  */
  if (z == 2) /* simplify sqrt loop: z-1 == 1 */
    do {
      mpz_swap (u, t);			/* u = x */
      mpz_tdiv_q (t, y, u);		/* t = y/x */
      mpz_add (t, t, u);		/* t = y/x + x */
      mpz_tdiv_q_2exp (t, t, 1);	/* x'= (y/x + x)/2 */
    } while (mpz_cmpabs (t, u) < 0);	/* |x'| < |x| */
  else /* z != 2 */ {
    mpz_t v;

    mpz_init (v);
    if (sgn)
      mpz_neg (t, t);

    do {
      mpz_swap (u, t);			/* u = x */
      mpz_pow_ui (t, u, z - 1);		/* t = x^(z-1) */
      mpz_tdiv_q (t, y, t);		/* t = y/x^(z-1) */
      mpz_mul_ui (v, u, z - 1);		/* v = x*(z-1) */
      mpz_add (t, t, v);		/* t = y/x^(z-1) + x*(z-1) */
      mpz_tdiv_q_ui (t, t, z);		/* x'=(y/x^(z-1) + x*(z-1))/z */
    } while (mpz_cmpabs (t, u) < 0);	/* |x'| < |x| */

    mpz_clear (v);
  }

  if (r) {
    mpz_pow_ui (t, u, z);
    mpz_sub (r, y, t);
  }
  if (x)
    mpz_swap (x, u);
  mpz_clear (u);
  mpz_clear (t);
}
3245
3246 int
mpz_root(mpz_t x,const mpz_t y,unsigned long z)3247 mpz_root (mpz_t x, const mpz_t y, unsigned long z)
3248 {
3249 int res;
3250 mpz_t r;
3251
3252 mpz_init (r);
3253 mpz_rootrem (x, r, y, z);
3254 res = r->_mp_size == 0;
3255 mpz_clear (r);
3256
3257 return res;
3258 }
3259
/* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL.
   Dies (via mpz_rootrem) if u is negative.  */
void
mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
{
  mpz_rootrem (s, r, u, 2);
}
3266
/* s = floor(sqrt(u)), discarding the remainder.  Dies (via
   mpz_rootrem) if u is negative.  */
void
mpz_sqrt (mpz_t s, const mpz_t u)
{
  mpz_rootrem (s, NULL, u, 2);
}
3272
3273 int
mpz_perfect_square_p(const mpz_t u)3274 mpz_perfect_square_p (const mpz_t u)
3275 {
3276 if (u->_mp_size <= 0)
3277 return (u->_mp_size == 0);
3278 else
3279 return mpz_root (NULL, u, 2);
3280 }
3281
3282 int
mpn_perfect_square_p(mp_srcptr p,mp_size_t n)3283 mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
3284 {
3285 mpz_t t;
3286
3287 assert (n > 0);
3288 assert (p [n-1] != 0);
3289 return mpz_root (NULL, mpz_roinit_n (t, p, n), 2);
3290 }
3291
/* Square root at the mpn level: sp gets floor(sqrt({p, n})), which
   occupies exactly (n+1)/2 limbs (asserted); if rp != NULL it gets
   the remainder.  Returns the limb count of the remainder.  */
mp_size_t
mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
{
  mpz_t s, r, u;
  mp_size_t res;

  assert (n > 0);
  assert (p [n-1] != 0);

  mpz_init (r);
  mpz_init (s);
  mpz_rootrem (s, r, mpz_roinit_n (u, p, n), 2);

  assert (s->_mp_size == (n+1)/2);
  mpn_copyd (sp, s->_mp_d, s->_mp_size);
  mpz_clear (s);
  res = r->_mp_size;
  if (rp)
    mpn_copyd (rp, r->_mp_d, res);
  mpz_clear (r);
  return res;
}
3314
3315 /* Combinatorics */
3316
3317 void
mpz_fac_ui(mpz_t x,unsigned long n)3318 mpz_fac_ui (mpz_t x, unsigned long n)
3319 {
3320 mpz_set_ui (x, n + (n == 0));
3321 while (n > 2)
3322 mpz_mul_ui (x, x, --n);
3323 }
3324
3325 void
mpz_bin_uiui(mpz_t r,unsigned long n,unsigned long k)3326 mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
3327 {
3328 mpz_t t;
3329
3330 mpz_set_ui (r, k <= n);
3331
3332 if (k > (n >> 1))
3333 k = (k <= n) ? n - k : 0;
3334
3335 mpz_init (t);
3336 mpz_fac_ui (t, k);
3337
3338 for (; k > 0; k--)
3339 mpz_mul_ui (r, r, n--);
3340
3341 mpz_divexact (r, r, t);
3342 mpz_clear (t);
3343 }
3344
3345
3346 /* Primality testing */
3347 static int
gmp_millerrabin(const mpz_t n,const mpz_t nm1,mpz_t y,const mpz_t q,mp_bitcnt_t k)3348 gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y,
3349 const mpz_t q, mp_bitcnt_t k)
3350 {
3351 assert (k > 0);
3352
3353 /* Caller must initialize y to the base. */
3354 mpz_powm (y, y, q, n);
3355
3356 if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
3357 return 1;
3358
3359 while (--k > 0)
3360 {
3361 mpz_powm_ui (y, y, 2, n);
3362 if (mpz_cmp (y, nm1) == 0)
3363 return 1;
3364 /* y == 1 means that the previous y was a non-trivial square root
3365 of 1 (mod n). y == 0 means that n is a power of the base.
3366 In either case, n is not prime. */
3367 if (mpz_cmp_ui (y, 1) <= 0)
3368 return 0;
3369 }
3370 return 0;
3371 }
3372
3373 /* This product is 0xc0cfd797, and fits in 32 bits. */
3374 #define GMP_PRIME_PRODUCT \
3375 (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3376
3377 /* Bit (p+1)/2 is set, for each odd prime <= 61 */
3378 #define GMP_PRIME_MASK 0xc96996dcUL
3379
/* Primality test on |n|: returns 2 when certainly prime, 1 when
   probably prime (Miller-Rabin with at most `reps` rounds), 0 when
   composite.  Small cases and small-prime divisibility are decided
   exactly before any Miller-Rabin round runs.  */
int
mpz_probab_prime_p (const mpz_t n, int reps)
{
  mpz_t nm1;
  mpz_t q;
  mpz_t y;
  mp_bitcnt_t k;
  int is_prime;
  int j;

  /* Note that we use the absolute value of n only, for compatibility
     with the real GMP. */
  if (mpz_even_p (n))
    return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;

  /* Above test excludes n == 0 */
  assert (n->_mp_size != 0);

  /* Odd |n| < 64: look the answer up in the prime bit mask.  */
  if (mpz_cmpabs_ui (n, 64) < 0)
    return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;

  /* Trial "division" by the primes up to 29 in one gcd.  */
  if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
    return 0;

  /* All prime factors are >= 31. */
  if (mpz_cmpabs_ui (n, 31*31) < 0)
    return 2;

  /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
     j^2 + j + 41 using Euler's polynomial. We potentially stop early,
     if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
     30 (a[30] == 971 > 31*31 == 961). */

  mpz_init (nm1);
  mpz_init (q);
  mpz_init (y);

  /* Find q and k, where q is odd and n = 1 + 2**k * q.  */
  nm1->_mp_size = mpz_abs_sub_ui (nm1, n, 1);
  k = mpz_scan1 (nm1, 0);
  mpz_tdiv_q_2exp (q, nm1, k);

  /* Bitwise & is intentional: both operands are 0 or 1 here.  */
  for (j = 0, is_prime = 1; is_prime & (j < reps); j++)
    {
      mpz_set_ui (y, (unsigned long) j*j+j+41);
      if (mpz_cmp (y, nm1) >= 0)
	{
	  /* Don't try any further bases. This "early" break does not affect
	     the result for any reasonable reps value (<=5000 was tested) */
	  assert (j >= 30);
	  break;
	}
      is_prime = gmp_millerrabin (n, nm1, y, q, k);
    }
  mpz_clear (nm1);
  mpz_clear (q);
  mpz_clear (y);

  return is_prime;
}
3440
3441
3442 /* Logical operations and bit manipulation. */
3443
3444 /* Numbers are treated as if represented in two's complement (and
3445 infinitely sign extended). For a negative values we get the two's
3446 complement from -x = ~x + 1, where ~ is bitwise complement.
3447 Negation transforms
3448
3449 xxxx10...0
3450
3451 into
3452
3453 yyyy10...0
3454
3455 where yyyy is the bitwise complement of xxxx. So least significant
3456 bits, up to and including the first one bit, are unchanged, and
3457 the more significant bits are all complemented.
3458
3459 To change a bit from zero to one in a negative number, subtract the
3460 corresponding power of two from the absolute value. This can never
3461 underflow. To change a bit from one to zero, add the corresponding
3462 power of two, and this might overflow. E.g., if x = -001111, the
3463 two's complement is 110001. Clearing the least significant bit, we
3464 get two's complement 110000, and -010000. */
3465
/* Return bit bit_index of d, with d interpreted as an infinitely
   sign-extended two's complement number: past-the-end bits are 0
   for d >= 0 and 1 for d < 0.  */
int
mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t limb_index;
  unsigned shift;
  mp_size_t ds;
  mp_size_t dn;
  mp_limb_t w;
  int bit;

  ds = d->_mp_size;
  dn = GMP_ABS (ds);
  limb_index = bit_index / GMP_LIMB_BITS;
  if (limb_index >= dn)
    return ds < 0;

  shift = bit_index % GMP_LIMB_BITS;
  w = d->_mp_d[limb_index];
  bit = (w >> shift) & 1;

  if (ds < 0)
    {
      /* d < 0. Check if any of the bits below is set: If so, our bit
	 must be complemented. */
      if (shift > 0 && (w << (GMP_LIMB_BITS - shift)) > 0)
	return bit ^ 1;
      while (--limb_index >= 0)
	if (d->_mp_d[limb_index] > 0)
	  return bit ^ 1;
    }
  return bit;
}
3498
/* Add 2^bit_index to the absolute value of d, preserving d's sign.
   Helper for the two's complement bit operations.  */
static void
mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_limb_t bit;
  mp_ptr dp;

  dn = GMP_ABS (d->_mp_size);

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  if (limb_index >= dn)
    {
      mp_size_t i;
      /* The bit should be set outside of the end of the number.
	 We have to increase the size of the number. */
      dp = MPZ_REALLOC (d, limb_index + 1);

      dp[limb_index] = bit;
      /* Zero-fill the gap between the old high limb and the new bit.  */
      for (i = dn; i < limb_index; i++)
	dp[i] = 0;
      dn = limb_index + 1;
    }
  else
    {
      mp_limb_t cy;

      dp = d->_mp_d;

      /* Add within the existing limbs; a carry out extends the
	 number by one limb.  */
      cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
      if (cy > 0)
	{
	  dp = MPZ_REALLOC (d, dn + 1);
	  dp[dn++] = cy;
	}
    }

  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}
3539
/* Subtract 2^bit_index from the absolute value of d, preserving d's
   sign.  Requires |d| >= 2^bit_index: the bit must lie within the
   limbs of d (asserted) and no borrow may propagate off the end.  */
static void
mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_ptr dp;
  mp_limb_t bit;

  dn = GMP_ABS (d->_mp_size);
  dp = d->_mp_d;

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  assert (limb_index < dn);

  gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
				 dn - limb_index, bit));
  /* The subtraction may have cleared the high limb(s).  */
  dn = mpn_normalized_size (dp, dn);
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}
3560
3561 void
mpz_setbit(mpz_t d,mp_bitcnt_t bit_index)3562 mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
3563 {
3564 if (!mpz_tstbit (d, bit_index))
3565 {
3566 if (d->_mp_size >= 0)
3567 mpz_abs_add_bit (d, bit_index);
3568 else
3569 mpz_abs_sub_bit (d, bit_index);
3570 }
3571 }
3572
3573 void
mpz_clrbit(mpz_t d,mp_bitcnt_t bit_index)3574 mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
3575 {
3576 if (mpz_tstbit (d, bit_index))
3577 {
3578 if (d->_mp_size >= 0)
3579 mpz_abs_sub_bit (d, bit_index);
3580 else
3581 mpz_abs_add_bit (d, bit_index);
3582 }
3583 }
3584
3585 void
mpz_combit(mpz_t d,mp_bitcnt_t bit_index)3586 mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
3587 {
3588 if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
3589 mpz_abs_sub_bit (d, bit_index);
3590 else
3591 mpz_abs_add_bit (d, bit_index);
3592 }
3593
/* r = ~u (one's complement), computed as -u - 1.  */
void
mpz_com (mpz_t r, const mpz_t u)
{
  mpz_neg (r, u);
  mpz_sub_ui (r, r, 1);
}
3600
/* r = u & v with operands interpreted as infinitely sign-extended
   two's complement numbers.  Negative operands are converted limb by
   limb on the fly (-x = ~x + 1, running carries uc/vc), and a
   negative result is converted back via rc.  */
void
mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Make u the longer operand.  */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* uc/vc/rc: two's complement conversion carries (0 or 1).
     ux/vx/rx: sign masks, all-ones for negative operands/result.  */
  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc & vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is positive, higher limbs don't matter. */
  rn = vx ? un : vn;

  rp = MPZ_REALLOC (r, rn + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = ( (ul & vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u; v contributes its sign extension vx.  */
  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = ( (ul & vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}
3672
/* r = u | v in two's complement, using the same on-the-fly sign
   conversion scheme as mpz_and (carries uc/vc/rc, masks ux/vx/rx).  */
void
mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Make u the longer operand.  */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc | vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is negative, by sign extension higher limbs
     don't matter. */
  rn = vx ? vn : un;

  rp = MPZ_REALLOC (r, rn + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = ( (ul | vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u; v contributes its sign extension vx.  */
  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = ( (ul | vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}
3745
/* r = u ^ v in two's complement, using the same on-the-fly sign
   conversion scheme as mpz_and.  The result always spans the longer
   operand's un limbs (plus a possible carry limb).  */
void
mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Make u the longer operand.  */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc ^ vc;

  ux = -uc;
  vx = -vc;
  rx = -rc;

  rp = MPZ_REALLOC (r, un + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = (ul ^ vl ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u; v's sign extension vx xors with rx, which
     together reduce to ux here (rx == ux ^ vx).  */
  for (; i < un; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = (ul ^ ux) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[un++] = rc;
  else
    un = mpn_normalized_size (rp, un);

  r->_mp_size = rx ? -un : un;
}
3814
3815 static unsigned
gmp_popcount_limb(mp_limb_t x)3816 gmp_popcount_limb (mp_limb_t x)
3817 {
3818 unsigned c;
3819
3820 /* Do 16 bits at a time, to avoid limb-sized constants. */
3821 for (c = 0; x > 0; x >>= 16)
3822 {
3823 unsigned w = ((x >> 1) & 0x5555) + (x & 0x5555);
3824 w = ((w >> 2) & 0x3333) + (w & 0x3333);
3825 w = ((w >> 4) & 0x0f0f) + (w & 0x0f0f);
3826 w = (w >> 8) + (w & 0x00ff);
3827 c += w;
3828 }
3829 return c;
3830 }
3831
3832 mp_bitcnt_t
mpn_popcount(mp_srcptr p,mp_size_t n)3833 mpn_popcount (mp_srcptr p, mp_size_t n)
3834 {
3835 mp_size_t i;
3836 mp_bitcnt_t c;
3837
3838 for (c = 0, i = 0; i < n; i++)
3839 c += gmp_popcount_limb (p[i]);
3840
3841 return c;
3842 }
3843
3844 mp_bitcnt_t
mpz_popcount(const mpz_t u)3845 mpz_popcount (const mpz_t u)
3846 {
3847 mp_size_t un;
3848
3849 un = u->_mp_size;
3850
3851 if (un < 0)
3852 return ~(mp_bitcnt_t) 0;
3853
3854 return mpn_popcount (u->_mp_d, un);
3855 }
3856
/* Hamming distance between u and v in two's complement.  Infinite
   (returned as ~0) when the signs differ.  When both are negative,
   both are complemented on the fly with the shared mask `comp` and
   carries uc/vc, which leaves the xor of corresponding bits intact.  */
mp_bitcnt_t
mpz_hamdist (const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_limb_t uc, vc, ul, vl, comp;
  mp_srcptr up, vp;
  mp_bitcnt_t c;

  un = u->_mp_size;
  vn = v->_mp_size;

  /* Opposite signs differ in infinitely many high bits.  */
  if ( (un ^ vn) < 0)
    return ~(mp_bitcnt_t) 0;

  comp = - (uc = vc = (un < 0));
  if (uc)
    {
      assert (vn < 0);
      un = -un;
      vn = -vn;
    }

  up = u->_mp_d;
  vp = v->_mp_d;

  /* Make {up, un} the longer operand.  */
  if (un < vn)
    MPN_SRCPTR_SWAP (up, un, vp, vn);

  for (i = 0, c = 0; i < vn; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ comp) + vc;
      vc = vl < vc;

      c += gmp_popcount_limb (ul ^ vl);
    }
  assert (vc == 0);

  /* Remaining limbs of the longer operand against the shorter one's
     sign extension (comp).  */
  for (; i < un; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      c += gmp_popcount_limb (ul ^ comp);
    }

  return c;
}
3907
/* Return the index of the least significant 1 bit at or above
   starting_bit, with u in two's complement; ~(mp_bitcnt_t) 0 when
   there is none (u >= 0 with no set bit past starting_bit).  */
mp_bitcnt_t
mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux;

  us = u->_mp_size;
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
     for u<0. Notice this test picks up any u==0 too. */
  if (i >= un)
    return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);

  up = u->_mp_d;
  ux = 0;
  limb = up[i];

  if (starting_bit != 0)
    {
      if (us < 0)
	{
	  /* Two's complement the limb at i: the +1 carry reaches it
	     only if all lower limbs are zero.  */
	  ux = mpn_zero_p (up, i);
	  limb = ~ limb + ux;
	  ux = - (mp_limb_t) (limb >= ux);
	}

      /* Mask to 0 all bits before starting_bit, thus ignoring them. */
      limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));
    }

  return mpn_common_scan (limb, i, up, un, ux);
}
3943
/* Return the index of the least significant 0 bit at or above
   starting_bit, with u in two's complement; ~(mp_bitcnt_t) 0 when
   there is none (u < 0 with no clear bit past starting_bit).
   Implemented by scanning for a 1 in the complement.  */
mp_bitcnt_t
mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux;

  us = u->_mp_size;
  ux = - (mp_limb_t) (us >= 0);
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
     u<0. Notice this test picks up all cases of u==0 too. */
  if (i >= un)
    return (ux ? starting_bit : ~(mp_bitcnt_t) 0);

  up = u->_mp_d;
  limb = up[i] ^ ux;

  if (ux == 0)
    limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */

  /* Mask all bits before starting_bit, thus ignoring them. */
  limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));

  return mpn_common_scan (limb, i, up, un, ux);
}
3972
3973
3974 /* MPZ base conversion. */
3975
/* Return the number of digits needed to write |u| in the given base
   (2 <= base <= 36), excluding any sign.  Exact for power-of-two
   bases by bit count; otherwise counted by repeated division, which
   is also exact.  Zero needs 1 digit.  */
size_t
mpz_sizeinbase (const mpz_t u, int base)
{
  mp_size_t un;
  mp_srcptr up;
  mp_ptr tp;
  mp_bitcnt_t bits;
  struct gmp_div_inverse bi;
  size_t ndigits;

  assert (base >= 2);
  assert (base <= 36);

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    return 1;

  up = u->_mp_d;

  bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
  switch (base)
    {
    case 2:
      return bits;
    case 4:
      return (bits + 1) / 2;
    case 8:
      return (bits + 2) / 3;
    case 16:
      return (bits + 3) / 4;
    case 32:
      return (bits + 4) / 5;
      /* FIXME: Do something more clever for the common case of base
	 10. */
    }

  /* General base: divide a scratch copy by the base until it is
     consumed, counting the steps.  */
  tp = gmp_xalloc_limbs (un);
  mpn_copyi (tp, up, un);
  mpn_div_qr_1_invert (&bi, base);

  ndigits = 0;
  do
    {
      ndigits++;
      mpn_div_qr_1_preinv (tp, tp, un, &bi);
      un -= (tp[un-1] == 0);
    }
  while (un > 0);

  gmp_free (tp);
  return ndigits;
}
4028
/* Convert u to a NUL-terminated string in the given base.  base in
   [2, 36] uses lower-case digits, negative base in [-36, -2] uses
   upper-case; base 0, 1 and -1 fall back to 10; |base| > 36 returns
   NULL.  If sp is NULL a sufficiently large buffer is allocated.  */
char *
mpz_get_str (char *sp, int base, const mpz_t u)
{
  unsigned bits;
  const char *digits;
  mp_size_t un;
  size_t i, sn;

  if (base >= 0)
    {
      digits = "0123456789abcdefghijklmnopqrstuvwxyz";
    }
  else
    {
      base = -base;
      digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    }
  if (base <= 1)
    base = 10;
  if (base > 36)
    return NULL;

  sn = 1 + mpz_sizeinbase (u, base);
  if (!sp)
    sp = gmp_xalloc (1 + sn);

  un = GMP_ABS (u->_mp_size);

  if (un == 0)
    {
      sp[0] = '0';
      sp[1] = '\0';
      return sp;
    }

  i = 0;

  if (u->_mp_size < 0)
    sp[i++] = '-';

  bits = mpn_base_power_of_two_p (base);

  /* The conversion routines first emit raw digit values; the loop
     at the end maps them to characters in place.  */
  if (bits)
    /* Not modified in this case. */
    sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
  else
    {
      struct mpn_base_info info;
      mp_ptr tp;

      mpn_get_base_info (&info, base);
      tp = gmp_xalloc_limbs (un);
      /* mpn_get_str_other destroys its input; work on a copy.  */
      mpn_copyi (tp, u->_mp_d, un);

      sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
      gmp_free (tp);
    }

  for (; i < sn; i++)
    sp[i] = digits[(unsigned char) sp[i]];

  sp[sn] = '\0';
  return sp;
}
4093
4094 int
mpz_set_str(mpz_t r,const char * sp,int base)4095 mpz_set_str (mpz_t r, const char *sp, int base)
4096 {
4097 unsigned bits;
4098 mp_size_t rn, alloc;
4099 mp_ptr rp;
4100 size_t sn;
4101 int sign;
4102 unsigned char *dp;
4103
4104 assert (base == 0 || (base >= 2 && base <= 36));
4105
4106 while (isspace( (unsigned char) *sp))
4107 sp++;
4108
4109 sign = (*sp == '-');
4110 sp += sign;
4111
4112 if (base == 0)
4113 {
4114 if (*sp == '0')
4115 {
4116 sp++;
4117 if (*sp == 'x' || *sp == 'X')
4118 {
4119 base = 16;
4120 sp++;
4121 }
4122 else if (*sp == 'b' || *sp == 'B')
4123 {
4124 base = 2;
4125 sp++;
4126 }
4127 else
4128 base = 8;
4129 }
4130 else
4131 base = 10;
4132 }
4133
4134 sn = strlen (sp);
4135 dp = gmp_xalloc (sn + (sn == 0));
4136
4137 for (sn = 0; *sp; sp++)
4138 {
4139 unsigned digit;
4140
4141 if (isspace ((unsigned char) *sp))
4142 continue;
4143 if (*sp >= '0' && *sp <= '9')
4144 digit = *sp - '0';
4145 else if (*sp >= 'a' && *sp <= 'z')
4146 digit = *sp - 'a' + 10;
4147 else if (*sp >= 'A' && *sp <= 'Z')
4148 digit = *sp - 'A' + 10;
4149 else
4150 digit = base; /* fail */
4151
4152 if (digit >= base)
4153 {
4154 gmp_free (dp);
4155 r->_mp_size = 0;
4156 return -1;
4157 }
4158
4159 dp[sn++] = digit;
4160 }
4161
4162 bits = mpn_base_power_of_two_p (base);
4163
4164 if (bits > 0)
4165 {
4166 alloc = (sn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
4167 rp = MPZ_REALLOC (r, alloc);
4168 rn = mpn_set_str_bits (rp, dp, sn, bits);
4169 }
4170 else
4171 {
4172 struct mpn_base_info info;
4173 mpn_get_base_info (&info, base);
4174 alloc = (sn + info.exp - 1) / info.exp;
4175 rp = MPZ_REALLOC (r, alloc);
4176 rn = mpn_set_str_other (rp, dp, sn, base, &info);
4177 }
4178 assert (rn <= alloc);
4179 gmp_free (dp);
4180
4181 r->_mp_size = sign ? - rn : rn;
4182
4183 return 0;
4184 }
4185
int
mpz_init_set_str (mpz_t r, const char *sp, int base)
{
  /* Initialize r, then parse the string sp in the given base (0 for
     auto-detection from a 0x/0b/0 prefix).  Returns mpz_set_str's
     status: 0 on success, -1 on a parse failure, in which case r is
     left initialized and equal to zero. */
  mpz_init (r);
  return mpz_set_str (r, sp, base);
}
4192
4193 size_t
mpz_out_str(FILE * stream,int base,const mpz_t x)4194 mpz_out_str (FILE *stream, int base, const mpz_t x)
4195 {
4196 char *str;
4197 size_t len;
4198
4199 str = mpz_get_str (NULL, base, x);
4200 len = strlen (str);
4201 len = fwrite (str, 1, len, stream);
4202 gmp_free (str);
4203 return len;
4204 }
4205
4206
/* Probe the byte order of the host: returns -1 on a little-endian
   machine (matching the mpz_import/mpz_export endian convention) and
   1 on a big-endian one, by inspecting the lowest-addressed byte of
   an int holding the value 2. */
static int
gmp_detect_endian (void)
{
  static const int probe = 2;
  const unsigned char *first_byte = (const unsigned char *) &probe;

  return 1 - (int) *first_byte;
}
4214
4215 /* Import and export. Does not support nails. */
/* Set r to the non-negative integer formed by `count` words of `size`
   bytes each read from src.  order selects the word order in memory
   (1 = most significant word first, -1 = least significant first);
   endian selects the byte order within each word (1 = big-endian,
   -1 = little-endian, 0 = native, detected at run time).  nails must
   be 0; any other value aborts via gmp_die. */
void
mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
	    size_t nails, const void *src)
{
  const unsigned char *p;
  ptrdiff_t word_step;
  mp_ptr rp;
  mp_size_t rn;

  /* The current (partial) limb. */
  mp_limb_t limb;
  /* The number of bytes already copied to this limb (starting from
     the low end). */
  size_t bytes;
  /* The index where the limb should be stored, when completed. */
  mp_size_t i;

  if (nails != 0)
    gmp_die ("mpz_import: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);

  if (endian == 0)
    endian = gmp_detect_endian ();

  p = (unsigned char *) src;

  /* The inner loop below moves p by -endian per byte, i.e. by
     -endian * size per word; word_step is the extra offset needed so
     that p then points at the least significant byte of the next
     word.  It is 2*size when the byte-scan direction opposes the
     word-to-word direction, 0 when they coincide. */
  word_step = (order != endian) ? 2 * size : 0;

  /* Process bytes from the least significant end, so point p at the
     least significant word. */
  if (order == 1)
    {
      p += size * (count - 1);
      word_step = - word_step;
    }

  /* And at least significant byte of that word. */
  if (endian == 1)
    p += (size - 1);

  /* Upper bound on the number of limbs needed for size*count bytes. */
  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
  rp = MPZ_REALLOC (r, rn);

  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
    {
      size_t j;
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
	{
	  /* Accumulate each byte into the current limb, low bytes
	     first; flush the limb once it is full. */
	  limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
	  if (bytes == sizeof(mp_limb_t))
	    {
	      rp[i++] = limb;
	      bytes = 0;
	      limb = 0;
	    }
	}
    }
  assert (i + (bytes > 0) == rn);
  /* Store a final partial limb, if any; otherwise strip high zero
     limbs so the size is normalized. */
  if (limb != 0)
    rp[i++] = limb;
  else
    i = mpn_normalized_size (rp, i);

  /* The result is always non-negative: sign is not encoded in the
     imported data. */
  r->_mp_size = i;
}
4283
4284 void *
mpz_export(void * r,size_t * countp,int order,size_t size,int endian,size_t nails,const mpz_t u)4285 mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
4286 size_t nails, const mpz_t u)
4287 {
4288 size_t count;
4289 mp_size_t un;
4290
4291 if (nails != 0)
4292 gmp_die ("mpz_import: Nails not supported.");
4293
4294 assert (order == 1 || order == -1);
4295 assert (endian >= -1 && endian <= 1);
4296 assert (size > 0 || u->_mp_size == 0);
4297
4298 un = u->_mp_size;
4299 count = 0;
4300 if (un != 0)
4301 {
4302 size_t k;
4303 unsigned char *p;
4304 ptrdiff_t word_step;
4305 /* The current (partial) limb. */
4306 mp_limb_t limb;
4307 /* The number of bytes left to to in this limb. */
4308 size_t bytes;
4309 /* The index where the limb was read. */
4310 mp_size_t i;
4311
4312 un = GMP_ABS (un);
4313
4314 /* Count bytes in top limb. */
4315 limb = u->_mp_d[un-1];
4316 assert (limb != 0);
4317
4318 k = 0;
4319 do {
4320 k++; limb >>= CHAR_BIT;
4321 } while (limb != 0);
4322
4323 count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;
4324
4325 if (!r)
4326 r = gmp_xalloc (count * size);
4327
4328 if (endian == 0)
4329 endian = gmp_detect_endian ();
4330
4331 p = (unsigned char *) r;
4332
4333 word_step = (order != endian) ? 2 * size : 0;
4334
4335 /* Process bytes from the least significant end, so point p at the
4336 least significant word. */
4337 if (order == 1)
4338 {
4339 p += size * (count - 1);
4340 word_step = - word_step;
4341 }
4342
4343 /* And at least significant byte of that word. */
4344 if (endian == 1)
4345 p += (size - 1);
4346
4347 for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
4348 {
4349 size_t j;
4350 for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4351 {
4352 if (bytes == 0)
4353 {
4354 if (i < un)
4355 limb = u->_mp_d[i++];
4356 bytes = sizeof (mp_limb_t);
4357 }
4358 *p = limb;
4359 limb >>= CHAR_BIT;
4360 bytes--;
4361 }
4362 }
4363 assert (i == un);
4364 assert (k == count);
4365 }
4366
4367 if (countp)
4368 *countp = count;
4369
4370 return r;
4371 }
4372