1 /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
2
3 Contributed to the GNU project by Niels Möller
4
5 Copyright 1991-1997, 1999-2014 Free Software Foundation, Inc.
6
7 This file is part of the GNU MP Library.
8
9 The GNU MP Library is free software; you can redistribute it and/or modify
10 it under the terms of either:
11
12 * the GNU Lesser General Public License as published by the Free
13 Software Foundation; either version 3 of the License, or (at your
14 option) any later version.
15
16 or
17
18 * the GNU General Public License as published by the Free Software
19 Foundation; either version 2 of the License, or (at your option) any
20 later version.
21
22 or both in parallel, as here.
23
24 The GNU MP Library is distributed in the hope that it will be useful, but
25 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
26 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 for more details.
28
29 You should have received copies of the GNU General Public License and the
30 GNU Lesser General Public License along with the GNU MP Library. If not,
31 see https://www.gnu.org/licenses/. */
32
/* NOTE: All functions in this file which are not declared in
   mini-gmp.h are internal, and are not intended to be compatible
   either with GMP or with future versions of mini-gmp. */
36
37 /* Much of the material copied from GMP files, including: gmp-impl.h,
38 longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
39 mpn/generic/lshift.c, mpn/generic/mul_1.c,
40 mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
41 mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
42 mpn/generic/submul_1.c. */
43
44 #include <assert.h>
45 #include <ctype.h>
46 #include <limits.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50
51 #include "mini-gmp.h"
52
53
/* Macros */
/* Number of bits in one mp_limb_t. */
#define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)

/* All-ones limb, and a limb with only the most significant bit set. */
#define GMP_LIMB_MAX (~ (mp_limb_t) 0)
#define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))

/* Lowest bit of the high half-limb, and mask covering the low half-limb. */
#define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
#define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)

/* Same constants for unsigned long. */
#define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
#define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))

/* Absolute value, and negation of x cast to type T written so the
   intermediate arithmetic avoids signed overflow on the most negative
   value. */
#define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
#define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))

#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Evaluate x (a limb-valued carry expression) and assert it is zero. */
#define gmp_assert_nocarry(x) do { \
    mp_limb_t __cy = x; \
    assert (__cy == 0); \
  } while (0)

/* Count leading zeros of x into count; x must be nonzero.  Skips a
   byte at a time, then a bit at a time. */
#define gmp_clz(count, x) do { \
    mp_limb_t __clz_x = (x); \
    unsigned __clz_c; \
    for (__clz_c = 0; \
	 (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0; \
	 __clz_c += 8) \
      __clz_x <<= 8; \
    for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++) \
      __clz_x <<= 1; \
    (count) = __clz_c; \
  } while (0)

/* Count trailing zeros of x into count; x must be nonzero.  Isolates
   the lowest set bit with x & -x, then uses gmp_clz. */
#define gmp_ctz(count, x) do { \
    mp_limb_t __ctz_x = (x); \
    unsigned __ctz_c = 0; \
    gmp_clz (__ctz_c, __ctz_x & - __ctz_x); \
    (count) = GMP_LIMB_BITS - 1 - __ctz_c; \
  } while (0)
95
/* Double-limb addition: (sh,sl) <- (ah,al) + (bh,bl), discarding any
   carry out of the high limb. */
#define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)

/* Double-limb subtraction: (sh,sl) <- (ah,al) - (bh,bl), discarding
   any borrow out of the high limb. */
#define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    mp_limb_t __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - ((al) < (bl)); \
    (sl) = __x; \
  } while (0)

/* Full limb product: (w1,w0) <- u * v, assembled from the four
   half-limb partial products. */
#define gmp_umul_ppmm(w1, w0, u, v) \
  do { \
    mp_limb_t __x0, __x1, __x2, __x3; \
    unsigned __ul, __vl, __uh, __vh; \
    mp_limb_t __u = (u), __v = (v); \
    \
    __ul = __u & GMP_LLIMB_MASK; \
    __uh = __u >> (GMP_LIMB_BITS / 2); \
    __vl = __v & GMP_LLIMB_MASK; \
    __vh = __v >> (GMP_LIMB_BITS / 2); \
    \
    __x0 = (mp_limb_t) __ul * __vl; \
    __x1 = (mp_limb_t) __ul * __vh; \
    __x2 = (mp_limb_t) __uh * __vl; \
    __x3 = (mp_limb_t) __uh * __vh; \
    \
    __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */ \
    __x1 += __x2; /* but this indeed can */ \
    if (__x1 < __x2) /* did we get it? */ \
      __x3 += GMP_HLIMB_BIT; /* yes, add it in the proper pos. */ \
    \
    (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2)); \
    (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK); \
  } while (0)

/* Divide the two-limb value (nh,nl) by the normalized single-limb
   divisor d, given its precomputed inverse di; yields quotient q and
   remainder r. */
#define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di) \
  do { \
    mp_limb_t _qh, _ql, _r, _mask; \
    gmp_umul_ppmm (_qh, _ql, (nh), (di)); \
    gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl)); \
    _r = (nl) - _qh * (d); \
    _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */ \
    _qh += _mask; \
    _r += _mask & (d); \
    if (_r >= (d)) \
      { \
	_r -= (d); \
	_qh++; \
      } \
    \
    (r) = _r; \
    (q) = _qh; \
  } while (0)

/* Divide the three-limb value (n2,n1,n0) by the normalized two-limb
   divisor (d1,d0), given its 3/2 inverse dinv; yields quotient q and
   two-limb remainder (r1,r0). */
#define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv) \
  do { \
    mp_limb_t _q0, _t1, _t0, _mask; \
    gmp_umul_ppmm ((q), _q0, (n2), (dinv)); \
    gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1)); \
    \
    /* Compute the two most significant limbs of n - q'd */ \
    (r1) = (n1) - (d1) * (q); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0)); \
    gmp_umul_ppmm (_t1, _t0, (d0), (q)); \
    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0); \
    (q)++; \
    \
    /* Conditionally adjust q and the remainders */ \
    _mask = - (mp_limb_t) ((r1) >= _q0); \
    (q) += _mask; \
    gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
    if ((r1) >= (d1)) \
      { \
	if ((r1) > (d1) || (r0) >= (d0)) \
	  { \
	    (q)++; \
	    gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0)); \
	  } \
      } \
  } while (0)
182
/* Swap macros.  Each exchanges the values of x and y, one macro per
   type since C has no generics at this language level. */
#define MP_LIMB_T_SWAP(x, y) \
  do { \
    mp_limb_t __mp_limb_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_limb_t_swap__tmp; \
  } while (0)
#define MP_SIZE_T_SWAP(x, y) \
  do { \
    mp_size_t __mp_size_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_size_t_swap__tmp; \
  } while (0)
#define MP_BITCNT_T_SWAP(x,y) \
  do { \
    mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_bitcnt_t_swap__tmp; \
  } while (0)
#define MP_PTR_SWAP(x, y) \
  do { \
    mp_ptr __mp_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_ptr_swap__tmp; \
  } while (0)
#define MP_SRCPTR_SWAP(x, y) \
  do { \
    mp_srcptr __mp_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mp_srcptr_swap__tmp; \
  } while (0)

/* Swap a limb pointer together with its size. */
#define MPN_PTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_PTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)
#define MPN_SRCPTR_SWAP(xp,xs, yp,ys) \
  do { \
    MP_SRCPTR_SWAP (xp, yp); \
    MP_SIZE_T_SWAP (xs, ys); \
  } while(0)

#define MPZ_PTR_SWAP(x, y) \
  do { \
    mpz_ptr __mpz_ptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_ptr_swap__tmp; \
  } while (0)
#define MPZ_SRCPTR_SWAP(x, y) \
  do { \
    mpz_srcptr __mpz_srcptr_swap__tmp = (x); \
    (x) = (y); \
    (y) = __mpz_srcptr_swap__tmp; \
  } while (0)
238
239 const int mp_bits_per_limb = GMP_LIMB_BITS;
240
241
242 /* Memory allocation and other helper functions. */
/* Print MSG followed by a newline to stderr, then terminate. */
static void
gmp_die (const char *msg)
{
  fputs (msg, stderr);
  fputc ('\n', stderr);
  abort ();
}
249
/* Default allocator: allocate SIZE bytes (SIZE must be positive);
   aborts via gmp_die instead of ever returning NULL. */
static void *
gmp_default_alloc (size_t size)
{
  void *mem;

  assert (size > 0);

  mem = malloc (size);
  if (mem == NULL)
    gmp_die("gmp_default_alloc: Virtual memory exhausted.");

  return mem;
}
263
264 static void *
gmp_default_realloc(void * old,size_t old_size,size_t new_size)265 gmp_default_realloc (void *old, size_t old_size, size_t new_size)
266 {
267 mp_ptr p;
268
269 p = realloc (old, new_size);
270
271 if (!p)
272 gmp_die("gmp_default_realoc: Virtual memory exhausted.");
273
274 return p;
275 }
276
/* Default deallocator.  The SIZE argument is required by the
   three-argument free interface but is not needed by free(). */
static void
gmp_default_free (void *p, size_t size)
{
  free (p);
}
282
283 static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
284 static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
285 static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
286
287 void
mp_get_memory_functions(void * (** alloc_func)(size_t),void * (** realloc_func)(void *,size_t,size_t),void (** free_func)(void *,size_t))288 mp_get_memory_functions (void *(**alloc_func) (size_t),
289 void *(**realloc_func) (void *, size_t, size_t),
290 void (**free_func) (void *, size_t))
291 {
292 if (alloc_func)
293 *alloc_func = gmp_allocate_func;
294
295 if (realloc_func)
296 *realloc_func = gmp_reallocate_func;
297
298 if (free_func)
299 *free_func = gmp_free_func;
300 }
301
302 void
mp_set_memory_functions(void * (* alloc_func)(size_t),void * (* realloc_func)(void *,size_t,size_t),void (* free_func)(void *,size_t))303 mp_set_memory_functions (void *(*alloc_func) (size_t),
304 void *(*realloc_func) (void *, size_t, size_t),
305 void (*free_func) (void *, size_t))
306 {
307 if (!alloc_func)
308 alloc_func = gmp_default_alloc;
309 if (!realloc_func)
310 realloc_func = gmp_default_realloc;
311 if (!free_func)
312 free_func = gmp_default_free;
313
314 gmp_allocate_func = alloc_func;
315 gmp_reallocate_func = realloc_func;
316 gmp_free_func = free_func;
317 }
318
319 #define gmp_xalloc(size) ((*gmp_allocate_func)((size)))
320 #define gmp_free(p) ((*gmp_free_func) ((p), 0))
321
322 static mp_ptr
gmp_xalloc_limbs(mp_size_t size)323 gmp_xalloc_limbs (mp_size_t size)
324 {
325 return gmp_xalloc (size * sizeof (mp_limb_t));
326 }
327
/* Grow OLD to hold SIZE limbs (SIZE must be positive).  The old byte
   count is not tracked, so 0 is passed for it; the default
   reallocator ignores it anyway. */
static mp_ptr
gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
{
  assert (size > 0);
  return (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
}
334
335
336 /* MPN interface */
337
338 void
mpn_copyi(mp_ptr d,mp_srcptr s,mp_size_t n)339 mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
340 {
341 mp_size_t i;
342 for (i = 0; i < n; i++)
343 d[i] = s[i];
344 }
345
346 void
mpn_copyd(mp_ptr d,mp_srcptr s,mp_size_t n)347 mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
348 {
349 while (n-- > 0)
350 d[n] = s[n];
351 }
352
353 int
mpn_cmp(mp_srcptr ap,mp_srcptr bp,mp_size_t n)354 mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
355 {
356 while (--n >= 0)
357 {
358 if (ap[n] != bp[n])
359 return ap[n] > bp[n] ? 1 : -1;
360 }
361 return 0;
362 }
363
364 static int
mpn_cmp4(mp_srcptr ap,mp_size_t an,mp_srcptr bp,mp_size_t bn)365 mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
366 {
367 if (an != bn)
368 return an < bn ? -1 : 1;
369 else
370 return mpn_cmp (ap, bp, an);
371 }
372
373 static mp_size_t
mpn_normalized_size(mp_srcptr xp,mp_size_t n)374 mpn_normalized_size (mp_srcptr xp, mp_size_t n)
375 {
376 for (; n > 0 && xp[n-1] == 0; n--)
377 ;
378 return n;
379 }
380
381 #define mpn_zero_p(xp, n) (mpn_normalized_size ((xp), (n)) == 0)
382
383 void
mpn_zero(mp_ptr rp,mp_size_t n)384 mpn_zero (mp_ptr rp, mp_size_t n)
385 {
386 mp_size_t i;
387
388 for (i = 0; i < n; i++)
389 rp[i] = 0;
390 }
391
392 mp_limb_t
mpn_add_1(mp_ptr rp,mp_srcptr ap,mp_size_t n,mp_limb_t b)393 mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
394 {
395 mp_size_t i;
396
397 assert (n > 0);
398 i = 0;
399 do
400 {
401 mp_limb_t r = ap[i] + b;
402 /* Carry out */
403 b = (r < b);
404 rp[i] = r;
405 }
406 while (++i < n);
407
408 return b;
409 }
410
411 mp_limb_t
mpn_add_n(mp_ptr rp,mp_srcptr ap,mp_srcptr bp,mp_size_t n)412 mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
413 {
414 mp_size_t i;
415 mp_limb_t cy;
416
417 for (i = 0, cy = 0; i < n; i++)
418 {
419 mp_limb_t a, b, r;
420 a = ap[i]; b = bp[i];
421 r = a + cy;
422 cy = (r < cy);
423 r += b;
424 cy += (r < b);
425 rp[i] = r;
426 }
427 return cy;
428 }
429
430 mp_limb_t
mpn_add(mp_ptr rp,mp_srcptr ap,mp_size_t an,mp_srcptr bp,mp_size_t bn)431 mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
432 {
433 mp_limb_t cy;
434
435 assert (an >= bn);
436
437 cy = mpn_add_n (rp, ap, bp, bn);
438 if (an > bn)
439 cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
440 return cy;
441 }
442
443 mp_limb_t
mpn_sub_1(mp_ptr rp,mp_srcptr ap,mp_size_t n,mp_limb_t b)444 mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
445 {
446 mp_size_t i;
447
448 assert (n > 0);
449
450 i = 0;
451 do
452 {
453 mp_limb_t a = ap[i];
454 /* Carry out */
455 mp_limb_t cy = a < b;;
456 rp[i] = a - b;
457 b = cy;
458 }
459 while (++i < n);
460
461 return b;
462 }
463
464 mp_limb_t
mpn_sub_n(mp_ptr rp,mp_srcptr ap,mp_srcptr bp,mp_size_t n)465 mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
466 {
467 mp_size_t i;
468 mp_limb_t cy;
469
470 for (i = 0, cy = 0; i < n; i++)
471 {
472 mp_limb_t a, b;
473 a = ap[i]; b = bp[i];
474 b += cy;
475 cy = (b < cy);
476 cy += (a < b);
477 rp[i] = a - b;
478 }
479 return cy;
480 }
481
482 mp_limb_t
mpn_sub(mp_ptr rp,mp_srcptr ap,mp_size_t an,mp_srcptr bp,mp_size_t bn)483 mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
484 {
485 mp_limb_t cy;
486
487 assert (an >= bn);
488
489 cy = mpn_sub_n (rp, ap, bp, bn);
490 if (an > bn)
491 cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
492 return cy;
493 }
494
495 mp_limb_t
mpn_mul_1(mp_ptr rp,mp_srcptr up,mp_size_t n,mp_limb_t vl)496 mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
497 {
498 mp_limb_t ul, cl, hpl, lpl;
499
500 assert (n >= 1);
501
502 cl = 0;
503 do
504 {
505 ul = *up++;
506 gmp_umul_ppmm (hpl, lpl, ul, vl);
507
508 lpl += cl;
509 cl = (lpl < cl) + hpl;
510
511 *rp++ = lpl;
512 }
513 while (--n != 0);
514
515 return cl;
516 }
517
518 mp_limb_t
mpn_addmul_1(mp_ptr rp,mp_srcptr up,mp_size_t n,mp_limb_t vl)519 mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
520 {
521 mp_limb_t ul, cl, hpl, lpl, rl;
522
523 assert (n >= 1);
524
525 cl = 0;
526 do
527 {
528 ul = *up++;
529 gmp_umul_ppmm (hpl, lpl, ul, vl);
530
531 lpl += cl;
532 cl = (lpl < cl) + hpl;
533
534 rl = *rp;
535 lpl = rl + lpl;
536 cl += lpl < rl;
537 *rp++ = lpl;
538 }
539 while (--n != 0);
540
541 return cl;
542 }
543
544 mp_limb_t
mpn_submul_1(mp_ptr rp,mp_srcptr up,mp_size_t n,mp_limb_t vl)545 mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
546 {
547 mp_limb_t ul, cl, hpl, lpl, rl;
548
549 assert (n >= 1);
550
551 cl = 0;
552 do
553 {
554 ul = *up++;
555 gmp_umul_ppmm (hpl, lpl, ul, vl);
556
557 lpl += cl;
558 cl = (lpl < cl) + hpl;
559
560 rl = *rp;
561 lpl = rl - lpl;
562 cl += lpl > rl;
563 *rp++ = lpl;
564 }
565 while (--n != 0);
566
567 return cl;
568 }
569
/* {RP,UN+VN} = {UP,UN} * {VP,VN}, with UN >= VN >= 1; returns the
   most significant result limb.  RP must not overlap the inputs. */
mp_limb_t
mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
{
  assert (un >= vn);
  assert (vn >= 1);

  /* We first multiply by the low order limb. This result can be
     stored, not added, to rp. We also avoid a loop for zeroing this
     way. */

  rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
  rp += 1, vp += 1, vn -= 1;

  /* Now accumulate the product of up[] and the next higher limb from
     vp[]. */

  while (vn >= 1)
    {
      rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
      rp += 1, vp += 1, vn -= 1;
    }
  /* rp has been advanced once per vp limb, so rp[un-1] is the
     original rp[un+vn-1], the high limb of the product. */
  return rp[un - 1];
}
593
/* {RP,2N} = {AP,N} * {BP,N}; thin wrapper around mpn_mul. */
void
mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
{
  mpn_mul (rp, ap, n, bp, n);
}
599
/* {RP,2N} = {AP,N}^2; no dedicated squaring code, just mpn_mul with
   both operands equal. */
void
mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
{
  mpn_mul (rp, ap, n, ap, n);
}
605
606 mp_limb_t
mpn_lshift(mp_ptr rp,mp_srcptr up,mp_size_t n,unsigned int cnt)607 mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
608 {
609 mp_limb_t high_limb, low_limb;
610 unsigned int tnc;
611 mp_size_t i;
612 mp_limb_t retval;
613
614 assert (n >= 1);
615 assert (cnt >= 1);
616 assert (cnt < GMP_LIMB_BITS);
617
618 up += n;
619 rp += n;
620
621 tnc = GMP_LIMB_BITS - cnt;
622 low_limb = *--up;
623 retval = low_limb >> tnc;
624 high_limb = (low_limb << cnt);
625
626 for (i = n; --i != 0;)
627 {
628 low_limb = *--up;
629 *--rp = high_limb | (low_limb >> tnc);
630 high_limb = (low_limb << cnt);
631 }
632 *--rp = high_limb;
633
634 return retval;
635 }
636
637 mp_limb_t
mpn_rshift(mp_ptr rp,mp_srcptr up,mp_size_t n,unsigned int cnt)638 mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
639 {
640 mp_limb_t high_limb, low_limb;
641 unsigned int tnc;
642 mp_size_t i;
643 mp_limb_t retval;
644
645 assert (n >= 1);
646 assert (cnt >= 1);
647 assert (cnt < GMP_LIMB_BITS);
648
649 tnc = GMP_LIMB_BITS - cnt;
650 high_limb = *up++;
651 retval = (high_limb << tnc);
652 low_limb = high_limb >> cnt;
653
654 for (i = n; --i != 0;)
655 {
656 high_limb = *up++;
657 *rp++ = low_limb | (high_limb << tnc);
658 low_limb = high_limb >> cnt;
659 }
660 *rp = low_limb;
661
662 return retval;
663 }
664
665 static mp_bitcnt_t
mpn_common_scan(mp_limb_t limb,mp_size_t i,mp_srcptr up,mp_size_t un,mp_limb_t ux)666 mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
667 mp_limb_t ux)
668 {
669 unsigned cnt;
670
671 assert (ux == 0 || ux == GMP_LIMB_MAX);
672 assert (0 <= i && i <= un );
673
674 while (limb == 0)
675 {
676 i++;
677 if (i == un)
678 return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
679 limb = ux ^ up[i];
680 }
681 gmp_ctz (cnt, limb);
682 return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
683 }
684
/* Return the position of the least significant one bit at or above
   position BIT.  NOTE(review): the limb count passed to
   mpn_common_scan is I itself, so the scan is expected to find a set
   bit in the starting limb (or terminate immediately) -- confirm the
   intended precondition against callers. */
mp_bitcnt_t
mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
{
  mp_size_t i;
  i = bit / GMP_LIMB_BITS;

  /* Mask off bits below BIT in the starting limb. */
  return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
			  i, ptr, i, 0);
}
694
/* Return the position of the least significant zero bit at or above
   position BIT.  Complements the starting limb and masks off bits
   below BIT; mpn_common_scan is called with ux = GMP_LIMB_MAX so
   further limbs are complemented too.  NOTE(review): as in
   mpn_scan1, I is passed as the limb count -- confirm precondition. */
mp_bitcnt_t
mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
{
  mp_size_t i;
  i = bit / GMP_LIMB_BITS;

  return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
			  i, ptr, i, GMP_LIMB_MAX);
}
704
705
706 /* MPN division interface. */
/* Compute the inverse used by the 3/2 division primitives for the
   normalized two-limb divisor <u1,u0> (u1 must have its high bit
   set).  First a 2/1 inverse of u1 is built half a limb at a time,
   then adjusted for the low divisor limb u0.  Presumably this is
   floor((B^3-1)/(B*u1+u0)) - B with B = 2^GMP_LIMB_BITS, matching
   GMP's invert_pi1 -- see the cited paper for the derivation. */
mp_limb_t
mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
{
  mp_limb_t r, p, m;
  unsigned ul, uh;
  unsigned ql, qh;

  /* First, do a 2/1 inverse. */
  /* The inverse m is defined as floor( (B^2 - 1 - u1)/u1 ), so that 0 <
   * B^2 - (B + m) u1 <= u1 */
  assert (u1 >= GMP_LIMB_HIGHBIT);

  ul = u1 & GMP_LLIMB_MASK;
  uh = u1 >> (GMP_LIMB_BITS / 2);

  /* High half of the quotient, by half-limb division. */
  qh = ~u1 / uh;
  r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;

  p = (mp_limb_t) qh * ul;
  /* Adjustment steps taken from udiv_qrnnd_c */
  if (r < p)
    {
      qh--;
      r += u1;
      if (r >= u1) /* i.e. we didn't get carry when adding to r */
	if (r < p)
	  {
	    qh--;
	    r += u1;
	  }
    }
  r -= p;

  /* Do a 3/2 division (with half limb size) */
  p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
  ql = (p >> (GMP_LIMB_BITS / 2)) + 1;

  /* By the 3/2 method, we don't need the high half limb. */
  r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;

  if (r >= (p << (GMP_LIMB_BITS / 2)))
    {
      ql--;
      r += u1;
    }
  m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
  if (r >= u1)
    {
      m++;
      r -= u1;
    }

  /* Now m is the 2/1 inverse of u1; adjust it for the full two-limb
     divisor when the low limb is nonzero. */
  if (u0 > 0)
    {
      mp_limb_t th, tl;
      r = ~r;
      r += u0;
      if (r < u0)	/* overflow: subtract u1 (at most twice) */
	{
	  m--;
	  if (r >= u1)
	    {
	      m--;
	      r -= u1;
	    }
	  r -= u1;
	}
      gmp_umul_ppmm (th, tl, u0, m);
      r += th;
      if (r < th)
	{
	  m--;
	  m -= ((r > u1) | ((r == u1) & (tl > u0)));
	}
    }

  return m;
}
785
/* Precomputed state for division by a fixed 1-, 2- or many-limb
   divisor: the shift that normalizes it, its top one or two shifted
   limbs, and the corresponding 2/1 or 3/2 inverse. */
struct gmp_div_inverse
{
  /* Normalization shift count. */
  unsigned shift;
  /* Normalized divisor (d0 unused for mpn_div_qr_1) */
  mp_limb_t d1, d0;
  /* Inverse, for 2/1 or 3/2. */
  mp_limb_t di;
};
795
/* Prepare INV for division by the single limb D > 0: record the
   normalization shift, the shifted divisor in d1, and its 2/1
   inverse. */
static void
mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
{
  unsigned shift;

  assert (d > 0);
  gmp_clz (shift, d);
  inv->shift = shift;
  inv->d1 = d << shift;
  inv->di = mpn_invert_limb (inv->d1);
}
807
/* Prepare INV for division by the two-limb divisor <D1,D0>, D1 > 0:
   normalize the divisor by the leading-zero count of D1 and compute
   its 3/2 inverse. */
static void
mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
		     mp_limb_t d1, mp_limb_t d0)
{
  unsigned shift;

  assert (d1 > 0);
  gmp_clz (shift, d1);
  inv->shift = shift;
  if (shift > 0)
    {
      /* Shift high bits of d0 into d1. */
      d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
      d0 <<= shift;
    }
  inv->d1 = d1;
  inv->d0 = d0;
  inv->di = mpn_invert_3by2 (d1, d0);
}
826
/* Prepare INV for division by {DP,DN}, dispatching on the divisor
   size.  For DN > 2 only the top two (normalized) limbs and their 3/2
   inverse are stored; note the normalization pulls bits up from
   dp[dn-3]. */
static void
mpn_div_qr_invert (struct gmp_div_inverse *inv,
		   mp_srcptr dp, mp_size_t dn)
{
  assert (dn > 0);

  if (dn == 1)
    mpn_div_qr_1_invert (inv, dp[0]);
  else if (dn == 2)
    mpn_div_qr_2_invert (inv, dp[1], dp[0]);
  else
    {
      unsigned shift;
      mp_limb_t d1, d0;

      d1 = dp[dn-1];
      d0 = dp[dn-2];
      assert (d1 > 0);
      gmp_clz (shift, d1);
      inv->shift = shift;
      if (shift > 0)
	{
	  d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
	  d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
	}
      inv->d1 = d1;
      inv->d0 = d0;
      inv->di = mpn_invert_3by2 (d1, d0);
    }
}
857
858 /* Not matching current public gmp interface, rather corresponding to
859 the sbpi1_div_* functions. */
/* Divide {NP,NN} by the single-limb divisor described by INV,
   processing limbs from high to low.  Quotient limbs are stored at QP
   (when non-NULL); the remainder is returned.  A shifted temporary
   copy of the numerator is used when the divisor needed
   normalization. */
static mp_limb_t
mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
		     const struct gmp_div_inverse *inv)
{
  mp_limb_t d, di;
  mp_limb_t r;
  mp_ptr tp = NULL;

  if (inv->shift > 0)
    {
      /* Shift the numerator to match the normalized divisor; the
	 bits shifted out seed the running remainder. */
      tp = gmp_xalloc_limbs (nn);
      r = mpn_lshift (tp, np, nn, inv->shift);
      np = tp;
    }
  else
    r = 0;

  d = inv->d1;
  di = inv->di;
  while (nn-- > 0)
    {
      mp_limb_t q;

      gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
      if (qp)
	qp[nn] = q;
    }
  if (inv->shift > 0)
    gmp_free (tp);

  /* Undo the normalization shift on the remainder. */
  return r >> inv->shift;
}
892
/* Divide {NP,NN} by the single limb D > 0; store the quotient at QP
   (when non-NULL) and return the remainder. */
static mp_limb_t
mpn_div_qr_1 (mp_ptr qp, mp_srcptr np, mp_size_t nn, mp_limb_t d)
{
  assert (d > 0);

  /* Special case for powers of two. */
  if ((d & (d-1)) == 0)
    {
      /* Remainder is just the low bits; quotient is a right shift
	 (or a plain copy when d == 1). */
      mp_limb_t r = np[0] & (d-1);
      if (qp)
	{
	  if (d <= 1)
	    mpn_copyi (qp, np, nn);
	  else
	    {
	      unsigned shift;
	      gmp_ctz (shift, d);
	      mpn_rshift (qp, np, nn, shift);
	    }
	}
      return r;
    }
  else
    {
      /* General case: build a throwaway inverse and divide. */
      struct gmp_div_inverse inv;
      mpn_div_qr_1_invert (&inv, d);
      return mpn_div_qr_1_preinv (qp, np, nn, &inv);
    }
}
922
/* Divide {NP,NN} (NN >= 2) by the two-limb divisor described by INV.
   Quotient limbs go to QP (when non-NULL); the two-limb remainder is
   stored at {RP,2}.  RP may alias NP. */
static void
mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
		     const struct gmp_div_inverse *inv)
{
  unsigned shift;
  mp_size_t i;
  mp_limb_t d1, d0, di, r1, r0;
  mp_ptr tp;

  assert (nn >= 2);
  shift = inv->shift;
  d1 = inv->d1;
  d0 = inv->d0;
  di = inv->di;

  if (shift > 0)
    {
      /* Work on a shifted copy matching the normalized divisor. */
      tp = gmp_xalloc_limbs (nn);
      r1 = mpn_lshift (tp, np, nn, shift);
      np = tp;
    }
  else
    r1 = 0;

  r0 = np[nn - 1];

  /* 3/2 division steps, one quotient limb per iteration, high to
     low. */
  i = nn - 2;
  do
    {
      mp_limb_t n0, q;
      n0 = np[i];
      gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);

      if (qp)
	qp[i] = q;
    }
  while (--i >= 0);

  if (shift > 0)
    {
      /* Undo the normalization shift on the two-limb remainder. */
      assert ((r0 << (GMP_LIMB_BITS - shift)) == 0);
      r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
      r1 >>= shift;

      gmp_free (tp);
    }

  rp[1] = r1;
  rp[0] = r0;
}
973
#if 0
/* Compiled out: convenience wrapper combining mpn_div_qr_2_invert and
   mpn_div_qr_2_preinv; currently not referenced. */
static void
mpn_div_qr_2 (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
	      mp_limb_t d1, mp_limb_t d0)
{
  struct gmp_div_inverse inv;
  assert (nn >= 2);

  mpn_div_qr_2_invert (&inv, d1, d0);
  mpn_div_qr_2_preinv (qp, rp, np, nn, &inv);
}
#endif
986
/* Schoolbook division of <N1, {NP,NN}> by the normalized divisor
   {DP,DN} with DN > 2, given DINV, the 3/2 inverse of the top two
   divisor limbs.  Quotient limbs are stored at QP (when non-NULL);
   the remainder is left in {NP,DN}.  Corresponds to GMP's
   sbpi1_div_qr. */
static void
mpn_div_qr_pi1 (mp_ptr qp,
		mp_ptr np, mp_size_t nn, mp_limb_t n1,
		mp_srcptr dp, mp_size_t dn,
		mp_limb_t dinv)
{
  mp_size_t i;

  mp_limb_t d1, d0;
  mp_limb_t cy, cy1;
  mp_limb_t q;

  assert (dn > 2);
  assert (nn >= dn);

  d1 = dp[dn - 1];
  d0 = dp[dn - 2];

  assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
  /* Iteration variable is the index of the q limb.
   *
   * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
   * by <d1, d0, dp[dn-3], ..., dp[0] >
   */

  i = nn - dn;
  do
    {
      mp_limb_t n0 = np[dn-1+i];

      if (n1 == d1 && n0 == d0)
	{
	  /* Quotient limb would overflow; it must be B-1. */
	  q = GMP_LIMB_MAX;
	  mpn_submul_1 (np+i, dp, dn, q);
	  n1 = np[dn-1+i];	/* update n1, last loop's value will now be invalid */
	}
      else
	{
	  /* Estimate the quotient limb from the top three numerator
	     limbs, then subtract q times the remaining divisor
	     limbs. */
	  gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);

	  cy = mpn_submul_1 (np + i, dp, dn-2, q);

	  /* Propagate the borrow into the two cached top limbs. */
	  cy1 = n0 < cy;
	  n0 = n0 - cy;
	  cy = n1 < cy1;
	  n1 = n1 - cy1;
	  np[dn-2+i] = n0;

	  if (cy != 0)
	    {
	      /* Quotient estimate was one too large; add the divisor
		 back and decrement q. */
	      n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
	      q--;
	    }
	}

      if (qp)
	qp[i] = q;
    }
  while (--i >= 0);

  np[dn - 1] = n1;
}
1049
/* Divide {NP,NN} by {DP,DN} using the precomputed inverse INV.
   Quotient goes to QP (when non-NULL); the remainder is left in the
   low DN limbs of NP, which are clobbered. */
static void
mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
		   mp_srcptr dp, mp_size_t dn,
		   const struct gmp_div_inverse *inv)
{
  assert (dn > 0);
  assert (nn >= dn);

  if (dn == 1)
    np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
  else if (dn == 2)
    mpn_div_qr_2_preinv (qp, np, np, nn, inv);
  else
    {
      mp_limb_t nh;
      unsigned shift;

      /* The divisor must already be normalized to match INV. */
      assert (inv->d1 == dp[dn-1]);
      assert (inv->d0 == dp[dn-2]);
      assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);

      shift = inv->shift;
      if (shift > 0)
	/* Normalize the numerator in place; NH holds the overflow. */
	nh = mpn_lshift (np, np, nn, shift);
      else
	nh = 0;

      mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);

      /* Denormalize the remainder. */
      if (shift > 0)
	gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
    }
}
1083
/* Divide {NP,NN} by {DP,DN}: quotient to QP (when non-NULL),
   remainder left in the low DN limbs of NP.  Builds a throwaway
   inverse; for DN > 2 the divisor itself must be normalized, so a
   shifted copy is made when needed. */
static void
mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
{
  struct gmp_div_inverse inv;
  mp_ptr tp = NULL;

  assert (dn > 0);
  assert (nn >= dn);

  mpn_div_qr_invert (&inv, dp, dn);
  if (dn > 2 && inv.shift > 0)
    {
      tp = gmp_xalloc_limbs (dn);
      gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
      dp = tp;
    }
  mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
  if (tp)
    gmp_free (tp);
}
1104
1105
1106 /* MPN base conversion. */
/* If B is a power of two in [2,256], return log2(B); otherwise
   return 0. */
static unsigned
mpn_base_power_of_two_p (unsigned b)
{
  unsigned exp;

  for (exp = 1; exp <= 8; exp++)
    if (b == (1U << exp))
      return exp;
  return 0;
}
1123
/* Parameters for converting to/from a non-power-of-two base one limb
   at a time. */
struct mpn_base_info
{
  /* bb is the largest power of the base which fits in one limb, and
     exp is the corresponding exponent. */
  unsigned exp;
  mp_limb_t bb;
};
1131
1132 static void
mpn_get_base_info(struct mpn_base_info * info,mp_limb_t b)1133 mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
1134 {
1135 mp_limb_t m;
1136 mp_limb_t p;
1137 unsigned exp;
1138
1139 m = GMP_LIMB_MAX / b;
1140 for (exp = 1, p = b; p <= m; exp++)
1141 p *= b;
1142
1143 info->exp = exp;
1144 info->bb = p;
1145 }
1146
/* Return the bit size of U > 0, i.e. one plus the position of its
   most significant set bit. */
static mp_bitcnt_t
mpn_limb_size_in_base_2 (mp_limb_t u)
{
  unsigned shift;

  assert (u > 0);
  gmp_clz (shift, u);
  return GMP_LIMB_BITS - shift;
}
1156
/* Convert {UP,UN} to digits in the power-of-two base 2^BITS, most
   significant digit first; returns the digit count.  UP must be
   normalized (top limb nonzero). */
static size_t
mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
{
  unsigned char mask;
  size_t sn, j;
  mp_size_t i;
  int shift;

  /* Digits needed for the exact bit size of the input. */
  sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
	+ bits - 1) / bits;

  mask = (1U << bits) - 1;

  /* Walk limbs from the low end while filling digits from the high
     end of the output. */
  for (i = 0, j = sn, shift = 0; j-- > 0;)
    {
      unsigned char digit = up[i] >> shift;

      shift += bits;

      if (shift >= GMP_LIMB_BITS && ++i < un)
	{
	  /* Digit straddles a limb boundary; pull the remaining bits
	     from the next limb. */
	  shift -= GMP_LIMB_BITS;
	  digit |= up[i] << (bits - shift);
	}
      sp[j] = digit & mask;
    }
  return sn;
}
1185
1186 /* We generate digits from the least significant end, and reverse at
1187 the end. */
/* Convert the single limb W to digits, least significant first, by
   repeated division using BINV, the prepared inverse of the
   (normalized) base.  Returns the digit count (0 for W == 0). */
static size_t
mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
		  const struct gmp_div_inverse *binv)
{
  mp_size_t i;
  for (i = 0; w > 0; i++)
    {
      mp_limb_t h, l, r;

      /* Pre-shift the dividend to match the normalized base. */
      h = w >> (GMP_LIMB_BITS - binv->shift);
      l = w << binv->shift;

      gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
      /* The remainder keeps the normalization shift; undo it. */
      assert ( (r << (GMP_LIMB_BITS - binv->shift)) == 0);
      r >>= binv->shift;

      sp[i] = r;
    }
  return i;
}
1208
/* Convert {UP,UN} (clobbered) to digits in a general base, most
   significant digit first; returns the digit count.  Splits off one
   limb-sized chunk at a time by dividing by info->bb, converts each
   chunk with mpn_limb_get_str, then reverses the digit order. */
static size_t
mpn_get_str_other (unsigned char *sp,
		   int base, const struct mpn_base_info *info,
		   mp_ptr up, mp_size_t un)
{
  struct gmp_div_inverse binv;
  size_t sn;
  size_t i;

  mpn_div_qr_1_invert (&binv, base);

  sn = 0;

  if (un > 1)
    {
      struct gmp_div_inverse bbinv;
      mpn_div_qr_1_invert (&bbinv, info->bb);

      do
	{
	  mp_limb_t w;
	  size_t done;
	  /* Remainder w holds the next info->exp digits (low ones). */
	  w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
	  un -= (up[un-1] == 0);
	  done = mpn_limb_get_str (sp + sn, w, &binv);

	  /* Zero-pad each chunk to exactly info->exp digits. */
	  for (sn += done; done < info->exp; done++)
	    sp[sn++] = 0;
	}
      while (un > 1);
    }
  /* Final (most significant) chunk, without padding. */
  sn += mpn_limb_get_str (sp + sn, up[0], &binv);

  /* Reverse order */
  for (i = 0; 2*i + 1 < sn; i++)
    {
      unsigned char t = sp[i];
      sp[i] = sp[sn - i - 1];
      sp[sn - i - 1] = t;
    }

  return sn;
}
1252
1253 size_t
mpn_get_str(unsigned char * sp,int base,mp_ptr up,mp_size_t un)1254 mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
1255 {
1256 unsigned bits;
1257
1258 assert (un > 0);
1259 assert (up[un-1] > 0);
1260
1261 bits = mpn_base_power_of_two_p (base);
1262 if (bits)
1263 return mpn_get_str_bits (sp, bits, up, un);
1264 else
1265 {
1266 struct mpn_base_info info;
1267
1268 mpn_get_base_info (&info, base);
1269 return mpn_get_str_other (sp, base, &info, up, un);
1270 }
1271 }
1272
/* Convert SN digits at SP (most significant first, 2^BITS base) to
   limbs at RP; returns the normalized limb count. */
static mp_size_t
mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
		  unsigned bits)
{
  mp_size_t rn;
  size_t j;
  unsigned shift;

  /* Consume digits from the least significant end, packing BITS bits
     at a time into the current limb. */
  for (j = sn, rn = 0, shift = 0; j-- > 0; )
    {
      if (shift == 0)
	{
	  /* Start a fresh limb with this digit. */
	  rp[rn++] = sp[j];
	  shift += bits;
	}
      else
	{
	  rp[rn-1] |= (mp_limb_t) sp[j] << shift;
	  shift += bits;
	  if (shift >= GMP_LIMB_BITS)
	    {
	      /* Digit straddled the limb boundary; spill its high
		 bits into a new limb. */
	      shift -= GMP_LIMB_BITS;
	      if (shift > 0)
		rp[rn++] = (mp_limb_t) sp[j] >> (bits - shift);
	    }
	}
    }
  rn = mpn_normalized_size (rp, rn);
  return rn;
}
1303
/* Convert SN digits at SP (most significant first, general base B)
   to limbs at RP; returns the limb count.  Digits are consumed in
   groups of info->exp, each group accumulated into one limb and
   combined into the result by multiply-by-bb and add. */
static mp_size_t
mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
		   mp_limb_t b, const struct mpn_base_info *info)
{
  mp_size_t rn;
  mp_limb_t w;
  unsigned k;
  size_t j;

  /* Size of the first (possibly short) digit group. */
  k = 1 + (sn - 1) % info->exp;

  j = 0;
  w = sp[j++];
  for (; --k > 0; )
    w = w * b + sp[j++];

  rp[0] = w;

  for (rn = (w > 0); j < sn;)
    {
      mp_limb_t cy;

      /* Accumulate the next full group of info->exp digits. */
      w = sp[j++];
      for (k = 1; k < info->exp; k++)
	w = w * b + sp[j++];

      /* result = result * bb + w */
      cy = mpn_mul_1 (rp, rp, rn, info->bb);
      cy += mpn_add_1 (rp, rp, rn, w);
      if (cy > 0)
	rp[rn++] = cy;
    }
  assert (j == sn);

  return rn;
}
1339
1340 mp_size_t
mpn_set_str(mp_ptr rp,const unsigned char * sp,size_t sn,int base)1341 mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
1342 {
1343 unsigned bits;
1344
1345 if (sn == 0)
1346 return 0;
1347
1348 bits = mpn_base_power_of_two_p (base);
1349 if (bits)
1350 return mpn_set_str_bits (rp, sp, sn, bits);
1351 else
1352 {
1353 struct mpn_base_info info;
1354
1355 mpn_get_base_info (&info, base);
1356 return mpn_set_str_other (rp, sp, sn, base, &info);
1357 }
1358 }
1359
1360
1361 /* MPZ interface */
1362 void
mpz_init(mpz_t r)1363 mpz_init (mpz_t r)
1364 {
1365 r->_mp_alloc = 1;
1366 r->_mp_size = 0;
1367 r->_mp_d = gmp_xalloc_limbs (1);
1368 }
1369
1370 /* The utility of this function is a bit limited, since many functions
1371 assigns the result variable using mpz_swap. */
1372 void
mpz_init2(mpz_t r,mp_bitcnt_t bits)1373 mpz_init2 (mpz_t r, mp_bitcnt_t bits)
1374 {
1375 mp_size_t rn;
1376
1377 bits -= (bits != 0); /* Round down, except if 0 */
1378 rn = 1 + bits / GMP_LIMB_BITS;
1379
1380 r->_mp_alloc = rn;
1381 r->_mp_size = 0;
1382 r->_mp_d = gmp_xalloc_limbs (rn);
1383 }
1384
/* Release R's limb storage.  R must be re-initialized with mpz_init
   before any further use.  */
void
mpz_clear (mpz_t r)
{
  gmp_free (r->_mp_d);
}
1390
/* Resize R's limb array to SIZE limbs (at least one).  If the stored
   value no longer fits after shrinking, R is reset to zero.  Returns
   the new limb pointer.  */
static void *
mpz_realloc (mpz_t r, mp_size_t size)
{
  size = GMP_MAX (size, 1);

  r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
  r->_mp_alloc = size;

  /* A shrinking realloc can truncate the value; clear it then.  */
  if (GMP_ABS (r->_mp_size) > size)
    r->_mp_size = 0;

  return r->_mp_d;
}
1404
/* Realloc for an mpz_t Z if it has less than N limbs; evaluates to
   Z's (possibly reallocated) limb pointer.  */
#define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc \
			  ? mpz_realloc(z,n)   \
			  : (z)->_mp_d)
1409
1410 /* MPZ assignment and basic conversions. */
1411 void
mpz_set_si(mpz_t r,signed long int x)1412 mpz_set_si (mpz_t r, signed long int x)
1413 {
1414 if (x >= 0)
1415 mpz_set_ui (r, x);
1416 else /* (x < 0) */
1417 {
1418 r->_mp_size = -1;
1419 r->_mp_d[0] = GMP_NEG_CAST (unsigned long int, x);
1420 }
1421 }
1422
1423 void
mpz_set_ui(mpz_t r,unsigned long int x)1424 mpz_set_ui (mpz_t r, unsigned long int x)
1425 {
1426 if (x > 0)
1427 {
1428 r->_mp_size = 1;
1429 r->_mp_d[0] = x;
1430 }
1431 else
1432 r->_mp_size = 0;
1433 }
1434
1435 void
mpz_set(mpz_t r,const mpz_t x)1436 mpz_set (mpz_t r, const mpz_t x)
1437 {
1438 /* Allow the NOP r == x */
1439 if (r != x)
1440 {
1441 mp_size_t n;
1442 mp_ptr rp;
1443
1444 n = GMP_ABS (x->_mp_size);
1445 rp = MPZ_REALLOC (r, n);
1446
1447 mpn_copyi (rp, x->_mp_d, n);
1448 r->_mp_size = x->_mp_size;
1449 }
1450 }
1451
/* Combined init and assignment from a signed long.  */
void
mpz_init_set_si (mpz_t r, signed long int x)
{
  mpz_init (r);
  mpz_set_si (r, x);
}

/* Combined init and assignment from an unsigned long.  */
void
mpz_init_set_ui (mpz_t r, unsigned long int x)
{
  mpz_init (r);
  mpz_set_ui (r, x);
}

/* Combined init and copy from another mpz.  */
void
mpz_init_set (mpz_t r, const mpz_t x)
{
  mpz_init (r);
  mpz_set (r, x);
}
1472
/* Return non-zero if U fits in a signed long.  A one-limb positive
   value fits when strictly below GMP_LIMB_HIGHBIT, a one-limb
   negative value when its magnitude is at most GMP_LIMB_HIGHBIT.
   NOTE(review): assumes mp_limb_t and long have the same width and a
   two's complement representation — confirm for the target.  */
int
mpz_fits_slong_p (const mpz_t u)
{
  mp_size_t us = u->_mp_size;

  if (us == 0)
    return 1;
  else if (us == 1)
    return u->_mp_d[0] < GMP_LIMB_HIGHBIT;
  else if (us == -1)
    return u->_mp_d[0] <= GMP_LIMB_HIGHBIT;
  else
    return 0;
}
1487
/* Return non-zero if U fits in an unsigned long, i.e. U is zero or a
   single non-negative limb.  `us == (us > 0)' is true exactly for
   us == 0 and us == 1.  */
int
mpz_fits_ulong_p (const mpz_t u)
{
  mp_size_t us = u->_mp_size;

  return (us == (us > 0));
}
1495
/* Return U converted to a signed long.  For values that do not fit,
   this truncates: the low limb is masked below GMP_LIMB_HIGHBIT for
   positive U, and for negative U the top bit of the negated low limb
   is forced — no error is reported.  */
long int
mpz_get_si (const mpz_t u)
{
  mp_size_t us = u->_mp_size;

  if (us > 0)
    return (long) (u->_mp_d[0] & ~GMP_LIMB_HIGHBIT);
  else if (us < 0)
    return (long) (- u->_mp_d[0] | GMP_LIMB_HIGHBIT);
  else
    return 0;
}
1508
1509 unsigned long int
mpz_get_ui(const mpz_t u)1510 mpz_get_ui (const mpz_t u)
1511 {
1512 return u->_mp_size == 0 ? 0 : u->_mp_d[0];
1513 }
1514
/* Return the number of limbs in |U|.  */
size_t
mpz_size (const mpz_t u)
{
  return GMP_ABS (u->_mp_size);
}
1520
1521 mp_limb_t
mpz_getlimbn(const mpz_t u,mp_size_t n)1522 mpz_getlimbn (const mpz_t u, mp_size_t n)
1523 {
1524 if (n >= 0 && n < GMP_ABS (u->_mp_size))
1525 return u->_mp_d[n];
1526 else
1527 return 0;
1528 }
1529
/* Resize X to hold at least N bits (never fewer than one limb).  */
void
mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
{
  mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
}
1535
1536 mp_srcptr
mpz_limbs_read(mpz_srcptr x)1537 mpz_limbs_read (mpz_srcptr x)
1538 {
1539 return x->_mp_d;;
1540 }
1541
/* Writable access to X's limbs, growing the allocation to at least N
   limbs (N must be positive).  Existing limb contents are kept.  */
mp_ptr
mpz_limbs_modify (mpz_t x, mp_size_t n)
{
  assert (n > 0);
  return MPZ_REALLOC (x, n);
}
1548
/* Writable access to X's limbs for overwriting; same behavior as
   mpz_limbs_modify here.  */
mp_ptr
mpz_limbs_write (mpz_t x, mp_size_t n)
{
  return mpz_limbs_modify (x, n);
}
1554
1555 void
mpz_limbs_finish(mpz_t x,mp_size_t xs)1556 mpz_limbs_finish (mpz_t x, mp_size_t xs)
1557 {
1558 mp_size_t xn;
1559 xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
1560 x->_mp_size = xs < 0 ? -xn : xn;
1561 }
1562
/* Initialize X as a read-only mpz referencing the caller's limbs
   {xp, |xs|}.  _mp_alloc == 0 marks X as not owning the storage.
   The cast drops const; callers must treat X as read-only.  */
mpz_srcptr
mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
{
  x->_mp_alloc = 0;
  x->_mp_d = (mp_ptr) xp;
  mpz_limbs_finish (x, xs);
  return x;
}
1571
1572
1573 /* Conversions and comparison to double. */
/* R = trunc(X).  NaN, infinity and |X| < 1 all produce 0.  */
void
mpz_set_d (mpz_t r, double x)
{
  int sign;
  mp_ptr rp;
  mp_size_t rn, i;
  double B;
  double Bi;
  mp_limb_t f;

  /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
     zero or infinity. */
  if (x != x || x == x * 0.5)
    {
      r->_mp_size = 0;
      return;
    }

  sign = x < 0.0 ;
  if (sign)
    x = - x;

  /* Magnitudes below one truncate to zero.  */
  if (x < 1.0)
    {
      r->_mp_size = 0;
      return;
    }
  /* B = 2^GMP_LIMB_BITS as a double; Bi = 1/B.  */
  B = 2.0 * (double) GMP_LIMB_HIGHBIT;
  Bi = 1.0 / B;
  /* Scale x below B, counting how many limbs the result needs.  */
  for (rn = 1; x >= B; rn++)
    x *= Bi;

  rp = MPZ_REALLOC (r, rn);

  /* Peel limbs off the most significant end, re-scaling the
     fractional remainder by B at each step.  */
  f = (mp_limb_t) x;
  x -= f;
  assert (x < 1.0);
  i = rn-1;
  rp[i] = f;
  while (--i >= 0)
    {
      x = B * x;
      f = (mp_limb_t) x;
      x -= f;
      assert (x < 1.0);
      rp[i] = f;
    }

  r->_mp_size = sign ? - rn : rn;
}
1624
/* Combined init and (truncating) assignment from a double.  */
void
mpz_init_set_d (mpz_t r, double x)
{
  mpz_init (r);
  mpz_set_d (r, x);
}
1631
1632 double
mpz_get_d(const mpz_t u)1633 mpz_get_d (const mpz_t u)
1634 {
1635 mp_size_t un;
1636 double x;
1637 double B = 2.0 * (double) GMP_LIMB_HIGHBIT;
1638
1639 un = GMP_ABS (u->_mp_size);
1640
1641 if (un == 0)
1642 return 0.0;
1643
1644 x = u->_mp_d[--un];
1645 while (un > 0)
1646 x = B*x + u->_mp_d[--un];
1647
1648 if (u->_mp_size < 0)
1649 x = -x;
1650
1651 return x;
1652 }
1653
/* Compare |X| with |D|; returns <0, 0 or >0.  */
int
mpz_cmpabs_d (const mpz_t x, double d)
{
  mp_size_t xn;
  double B, Bi;
  mp_size_t i;

  xn = x->_mp_size;
  d = GMP_ABS (d);

  if (xn != 0)
    {
      xn = GMP_ABS (xn);

      /* B = 2^GMP_LIMB_BITS as a double.  */
      B = 2.0 * (double) GMP_LIMB_HIGHBIT;
      Bi = 1.0 / B;

      /* Scale d so it can be compared with the top limb. */
      for (i = 1; i < xn; i++)
	d *= Bi;

      /* d needs more limbs than x, hence |x| < |d|.  */
      if (d >= B)
	return -1;

      /* Compare floor(d) to top limb, subtract and cancel when equal. */
      for (i = xn; i-- > 0;)
	{
	  mp_limb_t f, xl;

	  f = (mp_limb_t) d;
	  xl = x->_mp_d[i];
	  if (xl > f)
	    return 1;
	  else if (xl < f)
	    return -1;
	  /* Limbs equal so far; shift the next limb's worth of d up.  */
	  d = B * (d - f);
	}
    }
  /* x == 0: values are equal unless a non-zero part of d remains.  */
  return - (d > 0.0);
}
1694
1695 int
mpz_cmp_d(const mpz_t x,double d)1696 mpz_cmp_d (const mpz_t x, double d)
1697 {
1698 if (x->_mp_size < 0)
1699 {
1700 if (d >= 0.0)
1701 return -1;
1702 else
1703 return -mpz_cmpabs_d (x, d);
1704 }
1705 else
1706 {
1707 if (d < 0.0)
1708 return 1;
1709 else
1710 return mpz_cmpabs_d (x, d);
1711 }
1712 }
1713
1714
1715 /* MPZ comparisons and the like. */
1716 int
mpz_sgn(const mpz_t u)1717 mpz_sgn (const mpz_t u)
1718 {
1719 mp_size_t usize = u->_mp_size;
1720
1721 return (usize > 0) - (usize < 0);
1722 }
1723
/* Compare U with the signed long V; returns <0, 0 or >0.  */
int
mpz_cmp_si (const mpz_t u, long v)
{
  mp_size_t usize = u->_mp_size;

  /* A negative multi-limb u is below any long.  */
  if (usize < -1)
    return -1;
  else if (v >= 0)
    return mpz_cmp_ui (u, v);
  else if (usize >= 0)
    /* u >= 0 > v.  */
    return 1;
  else /* usize == -1 */
    {
      /* Both negative and single-limb: compare magnitudes, reversed.  */
      mp_limb_t ul = u->_mp_d[0];
      if ((mp_limb_t)GMP_NEG_CAST (unsigned long int, v) < ul)
	return -1;
      else
	return (mp_limb_t)GMP_NEG_CAST (unsigned long int, v) > ul;
    }
}
1744
1745 int
mpz_cmp_ui(const mpz_t u,unsigned long v)1746 mpz_cmp_ui (const mpz_t u, unsigned long v)
1747 {
1748 mp_size_t usize = u->_mp_size;
1749
1750 if (usize > 1)
1751 return 1;
1752 else if (usize < 0)
1753 return -1;
1754 else
1755 {
1756 mp_limb_t ul = (usize > 0) ? u->_mp_d[0] : 0;
1757 return (ul > v) - (ul < v);
1758 }
1759 }
1760
1761 int
mpz_cmp(const mpz_t a,const mpz_t b)1762 mpz_cmp (const mpz_t a, const mpz_t b)
1763 {
1764 mp_size_t asize = a->_mp_size;
1765 mp_size_t bsize = b->_mp_size;
1766
1767 if (asize != bsize)
1768 return (asize < bsize) ? -1 : 1;
1769 else if (asize >= 0)
1770 return mpn_cmp (a->_mp_d, b->_mp_d, asize);
1771 else
1772 return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
1773 }
1774
1775 int
mpz_cmpabs_ui(const mpz_t u,unsigned long v)1776 mpz_cmpabs_ui (const mpz_t u, unsigned long v)
1777 {
1778 mp_size_t un = GMP_ABS (u->_mp_size);
1779 mp_limb_t ul;
1780
1781 if (un > 1)
1782 return 1;
1783
1784 ul = (un == 1) ? u->_mp_d[0] : 0;
1785
1786 return (ul > v) - (ul < v);
1787 }
1788
/* Compare |U| with |V|; returns <0, 0 or >0.  */
int
mpz_cmpabs (const mpz_t u, const mpz_t v)
{
  return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
		   v->_mp_d, GMP_ABS (v->_mp_size));
}
1795
1796 void
mpz_abs(mpz_t r,const mpz_t u)1797 mpz_abs (mpz_t r, const mpz_t u)
1798 {
1799 if (r != u)
1800 mpz_set (r, u);
1801
1802 r->_mp_size = GMP_ABS (r->_mp_size);
1803 }
1804
1805 void
mpz_neg(mpz_t r,const mpz_t u)1806 mpz_neg (mpz_t r, const mpz_t u)
1807 {
1808 if (r != u)
1809 mpz_set (r, u);
1810
1811 r->_mp_size = -r->_mp_size;
1812 }
1813
/* Exchange the values of U and V by swapping all struct fields; no
   limb data is copied.  */
void
mpz_swap (mpz_t u, mpz_t v)
{
  MP_SIZE_T_SWAP (u->_mp_size, v->_mp_size);
  MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
  MP_PTR_SWAP (u->_mp_d, v->_mp_d);
}
1821
1822
1823 /* MPZ addition and subtraction */
1824
/* Adds to the absolute value. Returns new size, but doesn't store it. */
static mp_size_t
mpz_abs_add_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  mp_size_t an;
  mp_ptr rp;
  mp_limb_t cy;

  an = GMP_ABS (a->_mp_size);
  if (an == 0)
    {
      /* |a| == 0: the result is just b.  One limb is always
	 allocated, so no realloc is needed.  */
      r->_mp_d[0] = b;
      return b > 0;
    }

  rp = MPZ_REALLOC (r, an + 1);

  cy = mpn_add_1 (rp, a->_mp_d, an, b);
  rp[an] = cy;
  an += cy;		/* count the carry limb only when non-zero */

  return an;
}
1848
/* Subtract from the absolute value. Returns new size, (or -1 on underflow),
   but doesn't store it. */
static mp_size_t
mpz_abs_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_ptr rp = MPZ_REALLOC (r, an);

  if (an == 0)
    {
      /* 0 - b: magnitude b, negative size when b > 0.  */
      rp[0] = b;
      return -(b > 0);
    }
  else if (an == 1 && a->_mp_d[0] < b)
    {
      /* Single limb smaller than b: result is b - a with flipped sign.  */
      rp[0] = b - a->_mp_d[0];
      return -1;
    }
  else
    {
      gmp_assert_nocarry (mpn_sub_1 (rp, a->_mp_d, an, b));
      return mpn_normalized_size (rp, an);
    }
}
1873
1874 void
mpz_add_ui(mpz_t r,const mpz_t a,unsigned long b)1875 mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b)
1876 {
1877 if (a->_mp_size >= 0)
1878 r->_mp_size = mpz_abs_add_ui (r, a, b);
1879 else
1880 r->_mp_size = -mpz_abs_sub_ui (r, a, b);
1881 }
1882
1883 void
mpz_sub_ui(mpz_t r,const mpz_t a,unsigned long b)1884 mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
1885 {
1886 if (a->_mp_size < 0)
1887 r->_mp_size = -mpz_abs_add_ui (r, a, b);
1888 else
1889 r->_mp_size = mpz_abs_sub_ui (r, a, b);
1890 }
1891
1892 void
mpz_ui_sub(mpz_t r,unsigned long a,const mpz_t b)1893 mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b)
1894 {
1895 if (b->_mp_size < 0)
1896 r->_mp_size = mpz_abs_add_ui (r, b, a);
1897 else
1898 r->_mp_size = -mpz_abs_sub_ui (r, b, a);
1899 }
1900
/* |R| = |A| + |B|.  Returns the new size (not stored).  */
static mp_size_t
mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  mp_ptr rp;
  mp_limb_t cy;

  /* Make a the longer operand, as mpn_add requires.  */
  if (an < bn)
    {
      MPZ_SRCPTR_SWAP (a, b);
      MP_SIZE_T_SWAP (an, bn);
    }

  rp = MPZ_REALLOC (r, an + 1);
  cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);

  rp[an] = cy;

  return an + cy;
}
1922
/* |R| = ||A| - |B||.  Returns the new size, negated when |A| < |B|
   (not stored).  */
static mp_size_t
mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
{
  mp_size_t an = GMP_ABS (a->_mp_size);
  mp_size_t bn = GMP_ABS (b->_mp_size);
  int cmp;
  mp_ptr rp;

  /* Subtract the smaller magnitude from the larger.  */
  cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
  if (cmp > 0)
    {
      rp = MPZ_REALLOC (r, an);
      gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
      return mpn_normalized_size (rp, an);
    }
  else if (cmp < 0)
    {
      rp = MPZ_REALLOC (r, bn);
      gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
      return -mpn_normalized_size (rp, bn);
    }
  else
    return 0;
}
1947
1948 void
mpz_add(mpz_t r,const mpz_t a,const mpz_t b)1949 mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
1950 {
1951 mp_size_t rn;
1952
1953 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1954 rn = mpz_abs_add (r, a, b);
1955 else
1956 rn = mpz_abs_sub (r, a, b);
1957
1958 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
1959 }
1960
1961 void
mpz_sub(mpz_t r,const mpz_t a,const mpz_t b)1962 mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
1963 {
1964 mp_size_t rn;
1965
1966 if ( (a->_mp_size ^ b->_mp_size) >= 0)
1967 rn = mpz_abs_sub (r, a, b);
1968 else
1969 rn = mpz_abs_add (r, a, b);
1970
1971 r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
1972 }
1973
1974
1975 /* MPZ multiplication */
1976 void
mpz_mul_si(mpz_t r,const mpz_t u,long int v)1977 mpz_mul_si (mpz_t r, const mpz_t u, long int v)
1978 {
1979 if (v < 0)
1980 {
1981 mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v));
1982 mpz_neg (r, r);
1983 }
1984 else
1985 mpz_mul_ui (r, u, (unsigned long int) v);
1986 }
1987
/* R = U * V, for unsigned long V.  */
void
mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v)
{
  mp_size_t un, us;
  mp_ptr tp;
  mp_limb_t cy;

  us = u->_mp_size;

  if (us == 0 || v == 0)
    {
      r->_mp_size = 0;
      return;
    }

  un = GMP_ABS (us);

  tp = MPZ_REALLOC (r, un + 1);
  cy = mpn_mul_1 (tp, u->_mp_d, un, v);
  tp[un] = cy;

  /* Count the carry limb only when it is non-zero.  */
  un += (cy > 0);
  r->_mp_size = (us < 0) ? - un : un;
}
2012
/* R = U * V.  Computed into a temporary so that R may alias U or V.  */
void
mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
{
  int sign;
  mp_size_t un, vn, rn;
  mpz_t t;
  mp_ptr tp;

  un = u->_mp_size;
  vn = v->_mp_size;

  if (un == 0 || vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* Product is negative iff the signs differ.  */
  sign = (un ^ vn) < 0;

  un = GMP_ABS (un);
  vn = GMP_ABS (vn);

  mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);

  tp = t->_mp_d;
  /* mpn_mul requires the first operand to be the longer one.  */
  if (un >= vn)
    mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
  else
    mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);

  /* The product has un + vn limbs, or one less.  */
  rn = un + vn;
  rn -= tp[rn-1] == 0;

  t->_mp_size = sign ? - rn : rn;
  mpz_swap (r, t);
  mpz_clear (t);
}
2050
/* R = U * 2^BITS (left shift by BITS).  */
void
mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
{
  mp_size_t un, rn;
  mp_size_t limbs;
  unsigned shift;
  mp_ptr rp;

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* Split the shift into whole limbs plus a sub-limb bit count.  */
  limbs = bits / GMP_LIMB_BITS;
  shift = bits % GMP_LIMB_BITS;

  rn = un + limbs + (shift > 0);
  rp = MPZ_REALLOC (r, rn);
  if (shift > 0)
    {
      mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
      rp[rn-1] = cy;
      rn -= (cy == 0);
    }
  else
    mpn_copyd (rp + limbs, u->_mp_d, un);

  /* Zero-fill the vacated low limbs.  */
  while (limbs > 0)
    rp[--limbs] = 0;

  r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
}
2085
/* R += U * V, for unsigned long V.  */
void
mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v)
{
  mpz_t t;
  mpz_init (t);
  mpz_mul_ui (t, u, v);
  mpz_add (r, r, t);
  mpz_clear (t);
}

/* R -= U * V, for unsigned long V.  */
void
mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v)
{
  mpz_t t;
  mpz_init (t);
  mpz_mul_ui (t, u, v);
  mpz_sub (r, r, t);
  mpz_clear (t);
}

/* R += U * V.  */
void
mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v)
{
  mpz_t t;
  mpz_init (t);
  mpz_mul (t, u, v);
  mpz_add (r, r, t);
  mpz_clear (t);
}

/* R -= U * V.  */
void
mpz_submul (mpz_t r, const mpz_t u, const mpz_t v)
{
  mpz_t t;
  mpz_init (t);
  mpz_mul (t, u, v);
  mpz_sub (r, r, t);
  mpz_clear (t);
}
2125
2126
2127 /* MPZ division */
/* Rounding mode shared by the division helpers below.  */
enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
2129
/* Allows q or r to be zero. Returns 1 iff remainder is non-zero. */
static int
mpz_div_qr (mpz_t q, mpz_t r,
	    const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
{
  mp_size_t ns, ds, nn, dn, qs;
  ns = n->_mp_size;
  ds = d->_mp_size;

  if (ds == 0)
    gmp_die("mpz_div_qr: Divide by zero.");

  if (ns == 0)
    {
      if (q)
	q->_mp_size = 0;
      if (r)
	r->_mp_size = 0;
      return 0;
    }

  nn = GMP_ABS (ns);
  dn = GMP_ABS (ds);

  /* qs < 0 iff n and d have opposite signs.  */
  qs = ds ^ ns;

  if (nn < dn)
    {
      /* |n| < |d|: the truncated quotient is zero; adjust per the
	 rounding mode.  */
      if (mode == GMP_DIV_CEIL && qs >= 0)
	{
	  /* q = 1, r = n - d */
	  if (r)
	    mpz_sub (r, n, d);
	  if (q)
	    mpz_set_ui (q, 1);
	}
      else if (mode == GMP_DIV_FLOOR && qs < 0)
	{
	  /* q = -1, r = n + d */
	  if (r)
	    mpz_add (r, n, d);
	  if (q)
	    mpz_set_si (q, -1);
	}
      else
	{
	  /* q = 0, r = n */
	  if (r)
	    mpz_set (r, n);
	  if (q)
	    q->_mp_size = 0;
	}
      return 1;
    }
  else
    {
      mp_ptr np, qp;
      mp_size_t qn, rn;
      mpz_t tq, tr;

      /* Work in temporaries so q and r may alias n or d.  */
      mpz_init_set (tr, n);
      np = tr->_mp_d;

      qn = nn - dn + 1;

      if (q)
	{
	  mpz_init2 (tq, qn * GMP_LIMB_BITS);
	  qp = tq->_mp_d;
	}
      else
	qp = NULL;

      /* Truncating division of the magnitudes; the remainder is left
	 in np (i.e. in tr).  */
      mpn_div_qr (qp, np, nn, d->_mp_d, dn);

      if (qp)
	{
	  qn -= (qp[qn-1] == 0);

	  tq->_mp_size = qs < 0 ? -qn : qn;
	}
      rn = mpn_normalized_size (np, dn);
      tr->_mp_size = ns < 0 ? - rn : rn;

      /* Adjust the truncated result for floor/ceil rounding when the
	 remainder is non-zero.  */
      if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
	{
	  if (q)
	    mpz_sub_ui (tq, tq, 1);
	  if (r)
	    mpz_add (tr, tr, d);
	}
      else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
	{
	  if (q)
	    mpz_add_ui (tq, tq, 1);
	  if (r)
	    mpz_sub (tr, tr, d);
	}

      if (q)
	{
	  mpz_swap (tq, q);
	  mpz_clear (tq);
	}
      if (r)
	mpz_swap (tr, r);

      mpz_clear (tr);

      return rn != 0;
    }
}
2242
/* Quotient and remainder, quotient rounded towards +infinity.  */
void
mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
}

/* Quotient and remainder, quotient rounded towards -infinity.  */
void
mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
}

/* Quotient and remainder, quotient rounded towards zero.  */
void
mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
}

/* Ceiling quotient only.  */
void
mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
}

/* Floor quotient only.  */
void
mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
}

/* Truncating quotient only.  */
void
mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
}

/* Remainder only, for the ceiling quotient.  */
void
mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
}

/* Remainder only, for the floor quotient.  */
void
mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
}

/* Remainder only, for the truncating quotient.  */
void
mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
}
2296
/* R = N mod D, with 0 <= R < |D|: the rounding mode is chosen from
   D's sign so the remainder is always non-negative.  */
void
mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
{
  mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
}
2302
/* Q = U / 2^BIT_INDEX, rounded per MODE.  Backs the
   {c,f,t}div_q_2exp functions.  */
static void
mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
		enum mpz_div_round_mode mode)
{
  mp_size_t un, qn;
  mp_size_t limb_cnt;
  mp_ptr qp;
  int adjust;

  un = u->_mp_size;
  if (un == 0)
    {
      q->_mp_size = 0;
      return;
    }
  limb_cnt = bit_index / GMP_LIMB_BITS;
  qn = GMP_ABS (un) - limb_cnt;
  bit_index %= GMP_LIMB_BITS;

  /* Rounding away from zero is needed exactly when some shifted-out
     bit is non-zero.  */
  if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
    /* Note: Below, the final indexing at limb_cnt is valid because at
       that point we have qn > 0. */
    adjust = (qn <= 0
	      || !mpn_zero_p (u->_mp_d, limb_cnt)
	      || (u->_mp_d[limb_cnt]
		  & (((mp_limb_t) 1 << bit_index) - 1)));
  else
    adjust = 0;

  if (qn <= 0)
    qn = 0;

  else
    {
      qp = MPZ_REALLOC (q, qn);

      if (bit_index != 0)
	{
	  mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
	  qn -= qp[qn - 1] == 0;
	}
      else
	{
	  mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
	}
    }

  q->_mp_size = qn;

  /* Apply the rounding increment and the sign on the magnitude.  */
  if (adjust)
    mpz_add_ui (q, q, 1);
  if (un < 0)
    mpz_neg (q, q);
}
2357
/* R = U mod 2^BIT_INDEX, rounded per MODE.  Backs the
   {c,f,t}div_r_2exp functions.  */
static void
mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
		enum mpz_div_round_mode mode)
{
  mp_size_t us, un, rn;
  mp_ptr rp;
  mp_limb_t mask;

  us = u->_mp_size;
  if (us == 0 || bit_index == 0)
    {
      r->_mp_size = 0;
      return;
    }
  /* Limbs needed to hold bit_index bits.  */
  rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
  assert (rn > 0);

  rp = MPZ_REALLOC (r, rn);
  un = GMP_ABS (us);

  /* Mask selecting the valid bits of the top limb.  */
  mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);

  if (rn > un)
    {
      /* Quotient (with truncation) is zero, and remainder is
	 non-zero */
      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
	{
	  /* Have to negate and sign extend. */
	  mp_size_t i;
	  mp_limb_t cy;

	  /* Two's complement negation of u's limbs.  */
	  for (cy = 1, i = 0; i < un; i++)
	    {
	      mp_limb_t s = ~u->_mp_d[i] + cy;
	      cy = s < cy;
	      rp[i] = s;
	    }
	  assert (cy == 0);
	  /* Sign-extend with all-ones limbs, top limb masked.  */
	  for (; i < rn - 1; i++)
	    rp[i] = GMP_LIMB_MAX;

	  rp[rn-1] = mask;
	  us = -us;
	}
      else
	{
	  /* Just copy */
	  if (r != u)
	    mpn_copyi (rp, u->_mp_d, un);

	  rn = un;
	}
    }
  else
    {
      if (r != u)
	mpn_copyi (rp, u->_mp_d, rn - 1);

      rp[rn-1] = u->_mp_d[rn-1] & mask;

      if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
	{
	  /* If r != 0, compute 2^{bit_count} - r. */
	  mp_size_t i;

	  /* Skip low zero limbs; negation leaves them unchanged.  */
	  for (i = 0; i < rn && rp[i] == 0; i++)
	    ;
	  if (i < rn)
	    {
	      /* r > 0, need to flip sign. */
	      rp[i] = ~rp[i] + 1;
	      while (++i < rn)
		rp[i] = ~rp[i];

	      rp[rn-1] &= mask;

	      /* us is not used for anything else, so we can modify it
		 here to indicate flipped sign. */
	      us = -us;
	    }
	}
    }
  rn = mpn_normalized_size (rp, rn);
  r->_mp_size = us < 0 ? -rn : rn;
}
2444
/* R = U / 2^CNT, rounded towards +infinity.  */
void
mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
}

/* R = U / 2^CNT, rounded towards -infinity.  */
void
mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

/* R = U / 2^CNT, rounded towards zero.  */
void
mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
}

/* R = U mod 2^CNT, for the ceiling quotient.  */
void
mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
}

/* R = U mod 2^CNT, for the floor quotient.  */
void
mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
}

/* R = U mod 2^CNT, for the truncating quotient.  */
void
mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
{
  mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
}
2480
/* Q = N / D, where D is expected to divide N exactly (asserted via
   gmp_assert_nocarry on the remainder flag).  */
void
mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
{
  gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
}
2486
/* Return non-zero if D divides N (zero remainder).  */
int
mpz_divisible_p (const mpz_t n, const mpz_t d)
{
  return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
}
2492
2493 int
mpz_congruent_p(const mpz_t a,const mpz_t b,const mpz_t m)2494 mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m)
2495 {
2496 mpz_t t;
2497 int res;
2498
2499 /* a == b (mod 0) iff a == b */
2500 if (mpz_sgn (m) == 0)
2501 return (mpz_cmp (a, b) == 0);
2502
2503 mpz_init (t);
2504 mpz_sub (t, a, b);
2505 res = mpz_divisible_p (t, m);
2506 mpz_clear (t);
2507
2508 return res;
2509 }
2510
/* Single-limb counterpart of mpz_div_qr: divide N by D, storing
   quotient in Q and remainder in R (either may be NULL), rounding per
   MODE.  Returns the absolute value of the remainder.  */
static unsigned long
mpz_div_qr_ui (mpz_t q, mpz_t r,
	       const mpz_t n, unsigned long d, enum mpz_div_round_mode mode)
{
  mp_size_t ns, qn;
  mp_ptr qp;
  mp_limb_t rl;
  mp_size_t rs;

  ns = n->_mp_size;
  if (ns == 0)
    {
      if (q)
	q->_mp_size = 0;
      if (r)
	r->_mp_size = 0;
      return 0;
    }

  qn = GMP_ABS (ns);
  if (q)
    qp = MPZ_REALLOC (q, qn);
  else
    qp = NULL;

  rl = mpn_div_qr_1 (qp, n->_mp_d, qn, d);
  assert (rl < d);

  rs = rl > 0;
  rs = (ns < 0) ? -rs : rs;

  /* Floor of a negative, or ceil of a positive: round the quotient
     away from zero and complement the remainder.  */
  if (rl > 0 && ( (mode == GMP_DIV_FLOOR && ns < 0)
		  || (mode == GMP_DIV_CEIL && ns >= 0)))
    {
      if (q)
	gmp_assert_nocarry (mpn_add_1 (qp, qp, qn, 1));
      rl = d - rl;
      rs = -rs;
    }

  if (r)
    {
      /* One limb always fits: _mp_alloc >= 1 is an invariant.  */
      r->_mp_d[0] = rl;
      r->_mp_size = rs;
    }
  if (q)
    {
      qn -= (qp[qn-1] == 0);
      assert (qn == 0 || qp[qn-1] > 0);

      q->_mp_size = (ns < 0) ? - qn : qn;
    }

  return rl;
}
2566
/* Single-limb division wrappers.  Each returns the absolute value of
   the remainder.  Ceiling rounding: */
unsigned long
mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
}

/* Floor rounding.  */
unsigned long
mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
}

/* Truncating rounding.  */
unsigned long
mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
}

/* Quotient-only variants.  */
unsigned long
mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
}

unsigned long
mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
}

unsigned long
mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
}

/* Remainder-only variants (stored in R).  */
unsigned long
mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
}
unsigned long
mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}
unsigned long
mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
}

/* Remainder returned only; no mpz output.  */
unsigned long
mpz_cdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
}

unsigned long
mpz_fdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
}

unsigned long
mpz_tdiv_ui (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
}

/* R = N mod D with floor rounding, so the remainder is non-negative.  */
unsigned long
mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
}

/* Q = N / D, where D must divide N exactly (asserted).  */
void
mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d)
{
  gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
}

/* Return non-zero if D divides N.  */
int
mpz_divisible_ui_p (const mpz_t n, unsigned long d)
{
  return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
}
2654
2655
2656 /* GCD */
/* GCD of two limbs via the binary (Stein) algorithm.  Requires that
   u and v are not both zero.  */
static mp_limb_t
mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
{
  unsigned shift;

  assert ( (u | v) > 0);

  if (u == 0)
    return v;
  else if (v == 0)
    return u;

  /* Common factors of two are removed now, restored at the end.  */
  gmp_ctz (shift, u | v);

  u >>= shift;
  v >>= shift;

  /* Ensure u is odd.  */
  if ( (u & 1) == 0)
    MP_LIMB_T_SWAP (u, v);

  /* Make v odd too.  */
  while ( (v & 1) == 0)
    v >>= 1;

  /* Invariant: u and v are odd.  Subtracting the smaller from the
     larger yields an even difference whose factors of two are
     stripped before the next round.  */
  while (u != v)
    {
      if (u > v)
	{
	  u -= v;
	  do
	    u >>= 1;
	  while ( (u & 1) == 0);
	}
      else
	{
	  v -= u;
	  do
	    v >>= 1;
	  while ( (v & 1) == 0);
	}
    }
  /* Reattach the common power of two.  */
  return u << shift;
}
2699
/* gcd of |u| and a word-sized v.  Returns the gcd as an unsigned
   long (0 when both inputs are zero), and also stores it in g unless
   g is NULL. */
unsigned long
mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v)
{
  mp_size_t un;

  if (v == 0)
    {
      /* gcd(u, 0) = |u|, which may not fit a long; store only. */
      if (g)
	mpz_abs (g, u);
    }
  else
    {
      un = GMP_ABS (u->_mp_size);
      if (un != 0)
	/* Reduce u mod v to a single limb, then take the limb gcd. */
	v = mpn_gcd_11 (mpn_div_qr_1 (NULL, u->_mp_d, un, v), v);

      if (g)
	mpz_set_ui (g, v);
    }

  return v;
}
2722
/* Divide out all factors of two from r (which must be positive) and
   return how many bits were shifted off. */
static mp_bitcnt_t
mpz_make_odd (mpz_t r)
{
  mp_bitcnt_t shift;

  assert (r->_mp_size > 0);
  /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
  shift = mpn_common_scan (r->_mp_d[0], 0, r->_mp_d, 0, 0);
  mpz_tdiv_q_2exp (r, r, shift);

  return shift;
}
2735
/* g = gcd(|u|, |v|).  Binary gcd on the odd parts, with one initial
   division to balance operand sizes and a single-limb fast path for
   the tail.  The common power of two is restored at the end. */
void
mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv;
  mp_bitcnt_t uz, vz, gz;

  /* gcd(0, v) = |v|, gcd(u, 0) = |u|. */
  if (u->_mp_size == 0)
    {
      mpz_abs (g, v);
      return;
    }
  if (v->_mp_size == 0)
    {
      mpz_abs (g, u);
      return;
    }

  mpz_init (tu);
  mpz_init (tv);

  /* Work on odd parts; gz = common twos, contributed to the gcd. */
  mpz_abs (tu, u);
  uz = mpz_make_odd (tu);
  mpz_abs (tv, v);
  vz = mpz_make_odd (tv);
  gz = GMP_MIN (uz, vz);

  if (tu->_mp_size < tv->_mp_size)
    mpz_swap (tu, tv);

  /* One full division to bring the operands to similar size. */
  mpz_tdiv_r (tu, tu, tv);
  if (tu->_mp_size == 0)
    {
      mpz_swap (g, tv);
    }
  else
    for (;;)
      {
	int c;

	mpz_make_odd (tu);
	c = mpz_cmp (tu, tv);
	if (c == 0)
	  {
	    mpz_swap (g, tu);
	    break;
	  }
	if (c < 0)
	  mpz_swap (tu, tv);

	/* Single-limb tail: finish with the limb gcd. */
	if (tv->_mp_size == 1)
	  {
	    mp_limb_t vl = tv->_mp_d[0];
	    mp_limb_t ul = mpz_tdiv_ui (tu, vl);
	    mpz_set_ui (g, mpn_gcd_11 (ul, vl));
	    break;
	  }
	mpz_sub (tu, tu, tv);
      }
  mpz_clear (tu);
  mpz_clear (tv);
  /* Restore the common power of two. */
  mpz_mul_2exp (g, g, gz);
}
2798
/* Extended gcd: g = gcd(|u|, |v|) with cofactors s, t such that
   g = s u + t v.  Either s or t may be NULL.  Uses the binary
   algorithm on the odd parts, maintaining two cofactor pairs whose
   determinant is a power of two, which is divided out at the end. */
void
mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
{
  mpz_t tu, tv, s0, s1, t0, t1;
  mp_bitcnt_t uz, vz, gz;
  mp_bitcnt_t power;

  if (u->_mp_size == 0)
    {
      /* g = 0 u + sgn(v) v */
      signed long sign = mpz_sgn (v);
      mpz_abs (g, v);
      if (s)
	mpz_set_ui (s, 0);
      if (t)
	mpz_set_si (t, sign);
      return;
    }

  if (v->_mp_size == 0)
    {
      /* g = sgn(u) u + 0 v */
      signed long sign = mpz_sgn (u);
      mpz_abs (g, u);
      if (s)
	mpz_set_si (s, sign);
      if (t)
	mpz_set_ui (t, 0);
      return;
    }

  mpz_init (tu);
  mpz_init (tv);
  mpz_init (s0);
  mpz_init (s1);
  mpz_init (t0);
  mpz_init (t1);

  /* Strip powers of two; gz is the common part, kept for the gcd. */
  mpz_abs (tu, u);
  uz = mpz_make_odd (tu);
  mpz_abs (tv, v);
  vz = mpz_make_odd (tv);
  gz = GMP_MIN (uz, vz);

  uz -= gz;
  vz -= gz;

  /* Cofactors corresponding to odd gcd. gz handled later. */
  if (tu->_mp_size < tv->_mp_size)
    {
      mpz_swap (tu, tv);
      MPZ_SRCPTR_SWAP (u, v);
      MPZ_PTR_SWAP (s, t);
      MP_BITCNT_T_SWAP (uz, vz);
    }

  /* Maintain
   *
   * u = t0 tu + t1 tv
   * v = s0 tu + s1 tv
   *
   * where u and v denote the inputs with common factors of two
   * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
   *
   * 2^p tu = s1 u - t1 v
   * 2^p tv = -s0 u + t0 v
   */

  /* After initial division, tu = q tv + tu', we have
   *
   * u = 2^uz (tu' + q tv)
   * v = 2^vz tv
   *
   * or
   *
   * t0 = 2^uz, t1 = 2^uz q
   * s0 = 0, s1 = 2^vz
   */

  mpz_setbit (t0, uz);
  mpz_tdiv_qr (t1, tu, tu, tv);
  mpz_mul_2exp (t1, t1, uz);

  mpz_setbit (s1, vz);
  power = uz + vz;

  if (tu->_mp_size > 0)
    {
      mp_bitcnt_t shift;
      shift = mpz_make_odd (tu);
      mpz_mul_2exp (t0, t0, shift);
      mpz_mul_2exp (s0, s0, shift);
      power += shift;

      for (;;)
	{
	  int c;
	  c = mpz_cmp (tu, tv);
	  if (c == 0)
	    break;

	  if (c < 0)
	    {
	      /* tv = tv' + tu
	       *
	       * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
	       * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */

	      mpz_sub (tv, tv, tu);
	      mpz_add (t0, t0, t1);
	      mpz_add (s0, s0, s1);

	      shift = mpz_make_odd (tv);
	      mpz_mul_2exp (t1, t1, shift);
	      mpz_mul_2exp (s1, s1, shift);
	    }
	  else
	    {
	      mpz_sub (tu, tu, tv);
	      mpz_add (t1, t0, t1);
	      mpz_add (s1, s0, s1);

	      shift = mpz_make_odd (tu);
	      mpz_mul_2exp (t0, t0, shift);
	      mpz_mul_2exp (s0, s0, shift);
	    }
	  power += shift;
	}
    }

  /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
     cofactors. */

  mpz_mul_2exp (tv, tv, gz);
  mpz_neg (s0, s0);

  /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
     adjust cofactors, we need u / g and v / g */

  mpz_divexact (s1, v, tv);
  mpz_abs (s1, s1);
  mpz_divexact (t1, u, tv);
  mpz_abs (t1, t1);

  while (power-- > 0)
    {
      /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
      if (mpz_odd_p (s0) || mpz_odd_p (t0))
	{
	  mpz_sub (s0, s0, s1);
	  mpz_add (t0, t0, t1);
	}
      mpz_divexact_ui (s0, s0, 2);
      mpz_divexact_ui (t0, t0, 2);
    }

  /* Arrange so that |s| < |u| / 2g */
  mpz_add (s1, s0, s1);
  if (mpz_cmpabs (s0, s1) > 0)
    {
      mpz_swap (s0, s1);
      mpz_sub (t0, t0, t1);
    }
  /* Fix cofactor signs for negative inputs. */
  if (u->_mp_size < 0)
    mpz_neg (s0, s0);
  if (v->_mp_size < 0)
    mpz_neg (t0, t0);

  mpz_swap (g, tv);
  if (s)
    mpz_swap (s, s0);
  if (t)
    mpz_swap (t, t0);

  mpz_clear (tu);
  mpz_clear (tv);
  mpz_clear (s0);
  mpz_clear (s1);
  mpz_clear (t0);
  mpz_clear (t1);
}
2980
2981 void
mpz_lcm(mpz_t r,const mpz_t u,const mpz_t v)2982 mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
2983 {
2984 mpz_t g;
2985
2986 if (u->_mp_size == 0 || v->_mp_size == 0)
2987 {
2988 r->_mp_size = 0;
2989 return;
2990 }
2991
2992 mpz_init (g);
2993
2994 mpz_gcd (g, u, v);
2995 mpz_divexact (g, u, g);
2996 mpz_mul (r, g, v);
2997
2998 mpz_clear (g);
2999 mpz_abs (r, r);
3000 }
3001
3002 void
mpz_lcm_ui(mpz_t r,const mpz_t u,unsigned long v)3003 mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v)
3004 {
3005 if (v == 0 || u->_mp_size == 0)
3006 {
3007 r->_mp_size = 0;
3008 return;
3009 }
3010
3011 v /= mpz_gcd_ui (NULL, u, v);
3012 mpz_mul_ui (r, u, v);
3013
3014 mpz_abs (r, r);
3015 }
3016
/* Modular inverse: if u is invertible mod m, set r = u^-1 mod m
   (normalized to have the sign of m's residue range) and return 1;
   otherwise return 0 and leave r untouched. */
int
mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
{
  mpz_t g, tr;
  int invertible;

  /* No inverse exists for u == 0 or |m| <= 1. */
  if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
    return 0;

  mpz_init (g);
  mpz_init (tr);

  /* g = gcd(u, m) = tr * u + ? * m; invertible iff g == 1. */
  mpz_gcdext (g, tr, NULL, u, m);
  invertible = (mpz_cmp_ui (g, 1) == 0);

  if (invertible)
    {
      /* Shift a negative cofactor into the canonical residue range
	 by adding |m| (add m when m > 0, subtract when m < 0). */
      if (tr->_mp_size < 0)
	{
	  if (m->_mp_size >= 0)
	    mpz_add (tr, tr, m);
	  else
	    mpz_sub (tr, tr, m);
	}
      mpz_swap (r, tr);
    }

  mpz_clear (g);
  mpz_clear (tr);
  return invertible;
}
3048
3049
3050 /* Higher level operations (sqrt, pow and root) */
3051
3052 void
mpz_pow_ui(mpz_t r,const mpz_t b,unsigned long e)3053 mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e)
3054 {
3055 unsigned long bit;
3056 mpz_t tr;
3057 mpz_init_set_ui (tr, 1);
3058
3059 bit = GMP_ULONG_HIGHBIT;
3060 do
3061 {
3062 mpz_mul (tr, tr, tr);
3063 if (e & bit)
3064 mpz_mul (tr, tr, b);
3065 bit >>= 1;
3066 }
3067 while (bit > 0);
3068
3069 mpz_swap (r, tr);
3070 mpz_clear (tr);
3071 }
3072
3073 void
mpz_ui_pow_ui(mpz_t r,unsigned long blimb,unsigned long e)3074 mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
3075 {
3076 mpz_t b;
3077 mpz_init_set_ui (b, blimb);
3078 mpz_pow_ui (r, b, e);
3079 mpz_clear (b);
3080 }
3081
/* Modular exponentiation r = b^e mod m, by left-to-right binary
   exponentiation.  All intermediate reductions use a normalized
   (left-shifted) copy of m; the shift is reinstated only for the
   final reduction.  A negative exponent requires b to be invertible
   mod m. */
void
mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
{
  mpz_t tr;
  mpz_t base;
  mp_size_t en, mn;
  mp_srcptr mp;
  struct gmp_div_inverse minv;
  unsigned shift;
  mp_ptr tp = NULL;

  en = GMP_ABS (e->_mp_size);
  mn = GMP_ABS (m->_mp_size);
  if (mn == 0)
    gmp_die ("mpz_powm: Zero modulo.");

  if (en == 0)
    {
      /* b^0 = 1 for any b. */
      mpz_set_ui (r, 1);
      return;
    }

  mp = m->_mp_d;
  mpn_div_qr_invert (&minv, mp, mn);
  shift = minv.shift;

  if (shift > 0)
    {
      /* To avoid shifts, we do all our reductions, except the final
	 one, using a *normalized* m. */
      minv.shift = 0;

      tp = gmp_xalloc_limbs (mn);
      gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
      mp = tp;
    }

  mpz_init (base);

  if (e->_mp_size < 0)
    {
      /* Negative exponent: b^e = (b^-1)^|e| mod m. */
      if (!mpz_invert (base, b, m))
	gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
    }
  else
    {
      mp_size_t bn;
      mpz_abs (base, b);

      bn = base->_mp_size;
      if (bn >= mn)
	{
	  /* Reduce the base modulo the normalized m, in place. */
	  mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
	  bn = mn;
	}

      /* We have reduced the absolute value. Now take care of the
	 sign. Note that we get zero represented non-canonically as
	 m. */
      if (b->_mp_size < 0)
	{
	  mp_ptr bp = MPZ_REALLOC (base, mn);
	  gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
	  bn = mn;
	}
      base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
    }
  mpz_init_set_ui (tr, 1);

  /* Walk the exponent limbs from most significant downwards. */
  while (en-- > 0)
    {
      mp_limb_t w = e->_mp_d[en];
      mp_limb_t bit;

      bit = GMP_LIMB_HIGHBIT;
      do
	{
	  mpz_mul (tr, tr, tr);
	  if (w & bit)
	    mpz_mul (tr, tr, base);
	  if (tr->_mp_size > mn)
	    {
	      /* Keep the accumulator at most mn limbs. */
	      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
	      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
	    }
	  bit >>= 1;
	}
      while (bit > 0);
    }

  /* Final reduction */
  if (tr->_mp_size >= mn)
    {
      minv.shift = shift;
      mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
      tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
    }
  if (tp)
    gmp_free (tp);

  mpz_swap (r, tr);
  mpz_clear (tr);
  mpz_clear (base);
}
3186
3187 void
mpz_powm_ui(mpz_t r,const mpz_t b,unsigned long elimb,const mpz_t m)3188 mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
3189 {
3190 mpz_t e;
3191 mpz_init_set_ui (e, elimb);
3192 mpz_powm (r, b, e, m);
3193 mpz_clear (e);
3194 }
3195
/* x=trunc(y^(1/z)), r=y-x^z.  Newton's iteration for the z:th root,
   started from a power of two strictly above the root and repeated
   while the estimate keeps decreasing.  Either x or r may be NULL.
   Dies on z == 0 or on an even root of a negative y. */
void
mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
{
  int sgn;
  mpz_t t, u;

  sgn = y->_mp_size < 0;
  /* ~z has bit 0 set exactly when z is even. */
  if ((~z & sgn) != 0)
    gmp_die ("mpz_rootrem: Negative argument, with even root.");
  if (z == 0)
    gmp_die ("mpz_rootrem: Zeroth root.");

  /* |y| <= 1: the root is y itself, remainder zero. */
  if (mpz_cmpabs_ui (y, 1) <= 0) {
    if (x)
      mpz_set (x, y);
    if (r)
      r->_mp_size = 0;
    return;
  }

  mpz_init (u);
  {
    mp_bitcnt_t tb;
    /* Initial estimate t = 2^tb, above the true root. */
    tb = mpz_sizeinbase (y, 2) / z + 1;
    mpz_init2 (t, tb);
    mpz_setbit (t, tb);
  }

  if (z == 2) /* simplify sqrt loop: z-1 == 1 */
    do {
      mpz_swap (u, t);			/* u = x */
      mpz_tdiv_q (t, y, u);		/* t = y/x */
      mpz_add (t, t, u);		/* t = y/x + x */
      mpz_tdiv_q_2exp (t, t, 1);	/* x'= (y/x + x)/2 */
    } while (mpz_cmpabs (t, u) < 0);	/* |x'| < |x| */
  else /* z != 2 */ {
    mpz_t v;

    mpz_init (v);
    if (sgn)
      mpz_neg (t, t);

    do {
      mpz_swap (u, t);			/* u = x */
      mpz_pow_ui (t, u, z - 1);		/* t = x^(z-1) */
      mpz_tdiv_q (t, y, t);		/* t = y/x^(z-1) */
      mpz_mul_ui (v, u, z - 1);		/* v = x*(z-1) */
      mpz_add (t, t, v);		/* t = y/x^(z-1) + x*(z-1) */
      mpz_tdiv_q_ui (t, t, z);		/* x'=(y/x^(z-1) + x*(z-1))/z */
    } while (mpz_cmpabs (t, u) < 0);	/* |x'| < |x| */

    mpz_clear (v);
  }

  /* u holds the last decreasing estimate, i.e. the truncated root. */
  if (r) {
    mpz_pow_ui (t, u, z);
    mpz_sub (r, y, t);
  }
  if (x)
    mpz_swap (x, u);
  mpz_clear (u);
  mpz_clear (t);
}
3260
3261 int
mpz_root(mpz_t x,const mpz_t y,unsigned long z)3262 mpz_root (mpz_t x, const mpz_t y, unsigned long z)
3263 {
3264 int res;
3265 mpz_t r;
3266
3267 mpz_init (r);
3268 mpz_rootrem (x, r, y, z);
3269 res = r->_mp_size == 0;
3270 mpz_clear (r);
3271
3272 return res;
3273 }
3274
/* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL.
   Thin wrapper: the square root is the 2nd root. */
void
mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
{
  mpz_rootrem (s, r, u, 2);
}
3281
/* s = floor(sqrt(u)); the remainder is discarded. */
void
mpz_sqrt (mpz_t s, const mpz_t u)
{
  mpz_rootrem (s, NULL, u, 2);
}
3287
3288 int
mpz_perfect_square_p(const mpz_t u)3289 mpz_perfect_square_p (const mpz_t u)
3290 {
3291 if (u->_mp_size <= 0)
3292 return (u->_mp_size == 0);
3293 else
3294 return mpz_root (NULL, u, 2);
3295 }
3296
/* Test whether the n-limb number {p, n} is a perfect square.
   Requires n > 0 and a nonzero top limb. */
int
mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
{
  mpz_t t;

  assert (n > 0);
  assert (p [n-1] != 0);
  /* Wrap the limb array in a read-only mpz and test exactness of the
     square root. */
  return mpz_root (NULL, mpz_roinit_n (t, p, n), 2);
}
3306
/* mpn-level square root: sp receives floor(sqrt({p, n})), which
   occupies exactly (n+1)/2 limbs, and rp (unless NULL) the
   remainder.  Returns the limb count of the remainder.  Requires
   n > 0 and a nonzero top limb. */
mp_size_t
mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
{
  mpz_t s, r, u;
  mp_size_t res;

  assert (n > 0);
  assert (p [n-1] != 0);

  mpz_init (r);
  mpz_init (s);
  /* Read-only view of the input; no copy is made. */
  mpz_rootrem (s, r, mpz_roinit_n (u, p, n), 2);

  assert (s->_mp_size == (n+1)/2);
  mpn_copyd (sp, s->_mp_d, s->_mp_size);
  mpz_clear (s);
  res = r->_mp_size;
  if (rp)
    mpn_copyd (rp, r->_mp_d, res);
  mpz_clear (r);
  return res;
}
3329
3330 /* Combinatorics */
3331
3332 void
mpz_fac_ui(mpz_t x,unsigned long n)3333 mpz_fac_ui (mpz_t x, unsigned long n)
3334 {
3335 mpz_set_ui (x, n + (n == 0));
3336 for (;n > 2;)
3337 mpz_mul_ui (x, x, --n);
3338 }
3339
/* Binomial coefficient r = C(n, k); r = 0 when k > n. */
void
mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
{
  mpz_t t;

  /* Seed with 1 for k <= n, 0 for k > n (then the product below
     keeps r at 0). */
  mpz_set_ui (r, k <= n);

  /* Symmetry C(n, k) = C(n, n-k) shortens both loops. */
  if (k > (n >> 1))
    k = (k <= n) ? n - k : 0;

  mpz_init (t);
  /* t = k! */
  mpz_fac_ui (t, k);

  /* r = n (n-1) ... (n-k+1), the falling factorial. */
  for (; k > 0; k--)
    mpz_mul_ui (r, r, n--);

  mpz_divexact (r, r, t);
  mpz_clear (t);
}
3359
3360
3361 /* Primality testing */
/* One Miller-Rabin round.  n is the odd candidate, nm1 = n - 1 =
   2^k q with q odd, and y holds the base on entry (it is clobbered).
   Returns 1 if n passes this round (possibly prime), 0 if this base
   proves n composite. */
static int
gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y,
		 const mpz_t q, mp_bitcnt_t k)
{
  assert (k > 0);

  /* Caller must initialize y to the base. */
  mpz_powm (y, y, q, n);

  if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
    return 1;

  /* Square k-1 more times, looking for y == n - 1. */
  while (--k > 0)
    {
      mpz_powm_ui (y, y, 2, n);
      if (mpz_cmp (y, nm1) == 0)
	return 1;
      /* y == 1 means that the previous y was a non-trivial square root
	 of 1 (mod n). y == 0 means that n is a power of the base.
	 In either case, n is not prime. */
      if (mpz_cmp_ui (y, 1) <= 0)
	return 0;
    }
  return 0;
}
3387
3388 /* This product is 0xc0cfd797, and fits in 32 bits. */
3389 #define GMP_PRIME_PRODUCT \
3390 (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
3391
3392 /* Bit (p+1)/2 is set, for each odd prime <= 61 */
3393 #define GMP_PRIME_MASK 0xc96996dcUL
3394
/* Primality test on |n|: returns 2 for certainly prime, 1 for
   probably prime, 0 for composite.  Small cases are decided by table
   and trial gcd; larger ones by reps rounds of Miller-Rabin with the
   deterministic base sequence j^2 + j + 41. */
int
mpz_probab_prime_p (const mpz_t n, int reps)
{
  mpz_t nm1;
  mpz_t q;
  mpz_t y;
  mp_bitcnt_t k;
  int is_prime;
  int j;

  /* Note that we use the absolute value of n only, for compatibility
     with the real GMP. */
  if (mpz_even_p (n))
    return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;

  /* Above test excludes n == 0 */
  assert (n->_mp_size != 0);

  /* Odd n < 64: look the answer up in the prime bitmask. */
  if (mpz_cmpabs_ui (n, 64) < 0)
    return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;

  /* Divisible by a prime <= 29: composite. */
  if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
    return 0;

  /* All prime factors are >= 31. */
  if (mpz_cmpabs_ui (n, 31*31) < 0)
    return 2;

  /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
     j^2 + j + 41 using Euler's polynomial. We potentially stop early,
     if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
     30 (a[30] == 971 > 31*31 == 961). */

  mpz_init (nm1);
  mpz_init (q);
  mpz_init (y);

  /* Find q and k, where q is odd and n = 1 + 2**k * q. */
  nm1->_mp_size = mpz_abs_sub_ui (nm1, n, 1);
  k = mpz_scan1 (nm1, 0);
  mpz_tdiv_q_2exp (q, nm1, k);

  for (j = 0, is_prime = 1; is_prime & (j < reps); j++)
    {
      mpz_set_ui (y, (unsigned long) j*j+j+41);
      if (mpz_cmp (y, nm1) >= 0)
	{
	  /* Don't try any further bases. This "early" break does not affect
	     the result for any reasonable reps value (<=5000 was tested) */
	  assert (j >= 30);
	  break;
	}
      is_prime = gmp_millerrabin (n, nm1, y, q, k);
    }
  mpz_clear (nm1);
  mpz_clear (q);
  mpz_clear (y);

  return is_prime;
}
3455
3456
3457 /* Logical operations and bit manipulation. */
3458
3459 /* Numbers are treated as if represented in two's complement (and
3460 infinitely sign extended). For a negative values we get the two's
3461 complement from -x = ~x + 1, where ~ is bitwise complement.
3462 Negation transforms
3463
3464 xxxx10...0
3465
3466 into
3467
3468 yyyy10...0
3469
3470 where yyyy is the bitwise complement of xxxx. So least significant
3471 bits, up to and including the first one bit, are unchanged, and
3472 the more significant bits are all complemented.
3473
3474 To change a bit from zero to one in a negative number, subtract the
3475 corresponding power of two from the absolute value. This can never
3476 underflow. To change a bit from one to zero, add the corresponding
3477 power of two, and this might overflow. E.g., if x = -001111, the
3478 two's complement is 110001. Clearing the least significant bit, we
3479 get two's complement 110000, and -010000. */
3480
/* Return bit bit_index of d, under the two's complement view
   described above (negative values are infinitely sign extended). */
int
mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t limb_index;
  unsigned shift;
  mp_size_t ds;
  mp_size_t dn;
  mp_limb_t w;
  int bit;

  ds = d->_mp_size;
  dn = GMP_ABS (ds);
  limb_index = bit_index / GMP_LIMB_BITS;
  /* Past the stored limbs: sign extension gives 1 iff d < 0. */
  if (limb_index >= dn)
    return ds < 0;

  shift = bit_index % GMP_LIMB_BITS;
  w = d->_mp_d[limb_index];
  bit = (w >> shift) & 1;

  if (ds < 0)
    {
      /* d < 0. Check if any of the bits below is set: If so, our bit
	 must be complemented. */
      if (shift > 0 && (w << (GMP_LIMB_BITS - shift)) > 0)
	return bit ^ 1;
      while (limb_index-- > 0)
	if (d->_mp_d[limb_index] > 0)
	  return bit ^ 1;
    }
  return bit;
}
3513
/* Set bit bit_index in |d|; the bit must currently be clear in the
   absolute value.  Grows the limb array when the bit lies past the
   most significant limb.  The sign of d is preserved. */
static void
mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_limb_t bit;
  mp_ptr dp;

  dn = GMP_ABS (d->_mp_size);

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  if (limb_index >= dn)
    {
      mp_size_t i;
      /* The bit should be set outside of the end of the number.
	 We have to increase the size of the number. */
      dp = MPZ_REALLOC (d, limb_index + 1);

      dp[limb_index] = bit;
      /* Zero-fill the gap between the old top limb and the new one. */
      for (i = dn; i < limb_index; i++)
	dp[i] = 0;
      dn = limb_index + 1;
    }
  else
    {
      mp_limb_t cy;

      dp = d->_mp_d;

      /* Adding the bit may carry through the upper limbs. */
      cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
      if (cy > 0)
	{
	  dp = MPZ_REALLOC (d, dn + 1);
	  dp[dn++] = cy;
	}
    }

  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}
3554
/* Clear bit bit_index in |d|; the bit must currently be set in the
   absolute value, so the subtraction cannot borrow past the top
   limb.  At most the top limb becomes zero, shrinking the size by
   one.  The sign of d is preserved. */
static void
mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
{
  mp_size_t dn, limb_index;
  mp_ptr dp;
  mp_limb_t bit;

  dn = GMP_ABS (d->_mp_size);
  dp = d->_mp_d;

  limb_index = bit_index / GMP_LIMB_BITS;
  bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);

  assert (limb_index < dn);

  gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
				 dn - limb_index, bit));
  /* Drop the top limb if it was zeroed. */
  dn -= (dp[dn-1] == 0);
  d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
}
3575
3576 void
mpz_setbit(mpz_t d,mp_bitcnt_t bit_index)3577 mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
3578 {
3579 if (!mpz_tstbit (d, bit_index))
3580 {
3581 if (d->_mp_size >= 0)
3582 mpz_abs_add_bit (d, bit_index);
3583 else
3584 mpz_abs_sub_bit (d, bit_index);
3585 }
3586 }
3587
3588 void
mpz_clrbit(mpz_t d,mp_bitcnt_t bit_index)3589 mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
3590 {
3591 if (mpz_tstbit (d, bit_index))
3592 {
3593 if (d->_mp_size >= 0)
3594 mpz_abs_sub_bit (d, bit_index);
3595 else
3596 mpz_abs_add_bit (d, bit_index);
3597 }
3598 }
3599
3600 void
mpz_combit(mpz_t d,mp_bitcnt_t bit_index)3601 mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
3602 {
3603 if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
3604 mpz_abs_sub_bit (d, bit_index);
3605 else
3606 mpz_abs_add_bit (d, bit_index);
3607 }
3608
3609 void
mpz_com(mpz_t r,const mpz_t u)3610 mpz_com (mpz_t r, const mpz_t u)
3611 {
3612 mpz_neg (r, u);
3613 mpz_sub_ui (r, r, 1);
3614 }
3615
/* Bitwise and, r = u & v, on the two's complement view.  Negative
   operands are converted limb by limb on the fly via -x = ~x + 1
   (the uc/vc carries), and a negative result is converted back the
   same way (the rc carry). */
void
mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Keep u the longer operand. */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      r->_mp_size = 0;
      return;
    }

  /* Result is negative only when both inputs are. */
  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc & vc;

  /* ux/vx/rx are all-ones masks for negative operands. */
  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is positive, higher limbs don't matter. */
  rn = vx ? un : vn;

  rp = MPZ_REALLOC (r, rn + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = ( (ul & vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u, with v sign-extended to vx. */
  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = ( (ul & vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}
3687
/* Bitwise inclusive or, r = u | v, on the two's complement view.
   Same on-the-fly conversion scheme as mpz_and; the result is
   negative when either input is. */
void
mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, rn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Keep u the longer operand. */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc | vc;

  /* ux/vx/rx are all-ones masks for negative operands. */
  ux = -uc;
  vx = -vc;
  rx = -rc;

  /* If the smaller input is negative, by sign extension higher limbs
     don't matter. */
  rn = vx ? vn : un;

  rp = MPZ_REALLOC (r, rn + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = ( (ul | vl) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u, with v sign-extended to vx. */
  for (; i < rn; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = ( (ul | vx) ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[rn++] = rc;
  else
    rn = mpn_normalized_size (rp, rn);

  r->_mp_size = rx ? -rn : rn;
}
3760
/* Bitwise exclusive or, r = u ^ v, on the two's complement view.
   Same conversion scheme as mpz_and/mpz_ior; the result is negative
   when exactly one input is. */
void
mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_ptr up, vp, rp;

  mp_limb_t ux, vx, rx;
  mp_limb_t uc, vc, rc;
  mp_limb_t ul, vl, rl;

  un = GMP_ABS (u->_mp_size);
  vn = GMP_ABS (v->_mp_size);
  /* Keep u the longer operand. */
  if (un < vn)
    {
      MPZ_SRCPTR_SWAP (u, v);
      MP_SIZE_T_SWAP (un, vn);
    }
  if (vn == 0)
    {
      mpz_set (r, u);
      return;
    }

  uc = u->_mp_size < 0;
  vc = v->_mp_size < 0;
  rc = uc ^ vc;

  /* ux/vx/rx are all-ones masks for negative operands. */
  ux = -uc;
  vx = -vc;
  rx = -rc;

  rp = MPZ_REALLOC (r, un + rc);

  up = u->_mp_d;
  vp = v->_mp_d;

  i = 0;
  do
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ vx) + vc;
      vc = vl < vc;

      rl = (ul ^ vl ^ rx) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  while (++i < vn);
  assert (vc == 0);

  /* Remaining limbs of u: here vl = vx, so ul ^ vx ^ rx = ul ^ ux,
     since rx = ux ^ vx. */
  for (; i < un; i++)
    {
      ul = (up[i] ^ ux) + uc;
      uc = ul < uc;

      rl = (ul ^ ux) + rc;
      rc = rl < rc;
      rp[i] = rl;
    }
  if (rc)
    rp[un++] = rc;
  else
    un = mpn_normalized_size (rp, un);

  r->_mp_size = rx ? -un : un;
}
3829
3830 static unsigned
gmp_popcount_limb(mp_limb_t x)3831 gmp_popcount_limb (mp_limb_t x)
3832 {
3833 unsigned c;
3834
3835 /* Do 16 bits at a time, to avoid limb-sized constants. */
3836 for (c = 0; x > 0; x >>= 16)
3837 {
3838 unsigned w = ((x >> 1) & 0x5555) + (x & 0x5555);
3839 w = ((w >> 2) & 0x3333) + (w & 0x3333);
3840 w = ((w >> 4) & 0x0f0f) + (w & 0x0f0f);
3841 w = (w >> 8) + (w & 0x00ff);
3842 c += w;
3843 }
3844 return c;
3845 }
3846
3847 mp_bitcnt_t
mpn_popcount(mp_srcptr p,mp_size_t n)3848 mpn_popcount (mp_srcptr p, mp_size_t n)
3849 {
3850 mp_size_t i;
3851 mp_bitcnt_t c;
3852
3853 for (c = 0, i = 0; i < n; i++)
3854 c += gmp_popcount_limb (p[i]);
3855
3856 return c;
3857 }
3858
3859 mp_bitcnt_t
mpz_popcount(const mpz_t u)3860 mpz_popcount (const mpz_t u)
3861 {
3862 mp_size_t un;
3863
3864 un = u->_mp_size;
3865
3866 if (un < 0)
3867 return ~(mp_bitcnt_t) 0;
3868
3869 return mpn_popcount (u->_mp_d, un);
3870 }
3871
/* Hamming distance: the number of bit positions where the two's
   complement representations of u and v differ.  Infinite — returned
   as ~0 — when exactly one operand is negative. */
mp_bitcnt_t
mpz_hamdist (const mpz_t u, const mpz_t v)
{
  mp_size_t un, vn, i;
  mp_limb_t uc, vc, ul, vl, comp;
  mp_srcptr up, vp;
  mp_bitcnt_t c;

  un = u->_mp_size;
  vn = v->_mp_size;

  /* Opposite signs differ in infinitely many high bits. */
  if ( (un ^ vn) < 0)
    return ~(mp_bitcnt_t) 0;

  /* Both negative: complement both on the fly (comp mask + carries);
     the distance is unchanged by complementing both. */
  comp = - (uc = vc = (un < 0));
  if (uc)
    {
      assert (vn < 0);
      un = -un;
      vn = -vn;
    }

  up = u->_mp_d;
  vp = v->_mp_d;

  /* Keep u the longer operand. */
  if (un < vn)
    MPN_SRCPTR_SWAP (up, un, vp, vn);

  for (i = 0, c = 0; i < vn; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      vl = (vp[i] ^ comp) + vc;
      vc = vl < vc;

      c += gmp_popcount_limb (ul ^ vl);
    }
  assert (vc == 0);

  /* Remaining limbs of u, with v sign-extended to comp. */
  for (; i < un; i++)
    {
      ul = (up[i] ^ comp) + uc;
      uc = ul < uc;

      c += gmp_popcount_limb (ul ^ comp);
    }

  return c;
}
3922
/* Position of the first 1 bit at or after starting_bit, in the two's
   complement view of u.  Returns ~0 when no such bit exists (u >= 0
   with no set bit from starting_bit on). */
mp_bitcnt_t
mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux;

  us = u->_mp_size;
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* Past the end there's no 1 bits for u>=0, or an immediate 1 bit
     for u<0. Notice this test picks up any u==0 too. */
  if (i >= un)
    return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);

  up = u->_mp_d;
  ux = 0;
  limb = up[i];

  if (starting_bit != 0)
    {
      if (us < 0)
	{
	  /* Negate the limb at position i on the fly: carry in 1 when
	     all lower limbs are zero; ux becomes the sign-extension
	     mask for the limbs above. */
	  ux = mpn_zero_p (up, i);
	  limb = ~ limb + ux;
	  ux = - (mp_limb_t) (limb >= ux);
	}

      /* Mask to 0 all bits before starting_bit, thus ignoring them. */
      limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));
    }

  return mpn_common_scan (limb, i, up, un, ux);
}
3958
/* Position of the first 0 bit at or after starting_bit, in the two's
   complement view of u.  Returns ~0 when no such bit exists (u < 0
   with no clear bit from starting_bit on). */
mp_bitcnt_t
mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
{
  mp_ptr up;
  mp_size_t us, un, i;
  mp_limb_t limb, ux;

  us = u->_mp_size;
  /* ux is all-ones for u >= 0: scanning for a 0 in u is scanning for
     a 1 in ~u. */
  ux = - (mp_limb_t) (us >= 0);
  un = GMP_ABS (us);
  i = starting_bit / GMP_LIMB_BITS;

  /* When past end, there's an immediate 0 bit for u>=0, or no 0 bits for
     u<0. Notice this test picks up all cases of u==0 too. */
  if (i >= un)
    return (ux ? starting_bit : ~(mp_bitcnt_t) 0);

  up = u->_mp_d;
  limb = up[i] ^ ux;

  if (ux == 0)
    limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */

  /* Mask all bits before starting_bit, thus ignoring them. */
  limb &= (GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS));

  return mpn_common_scan (limb, i, up, un, ux);
}
3987
3988
3989 /* MPZ base conversion. */
3990
/* Number of digits needed to write |u| in the given base, 2 <= base
   <= 36.  Power-of-two bases are computed from the bit length; other
   bases by repeated division, so the count is exact.  Returns 1 for
   u == 0. */
size_t
mpz_sizeinbase (const mpz_t u, int base)
{
  mp_size_t un;
  mp_srcptr up;
  mp_ptr tp;
  mp_bitcnt_t bits;
  struct gmp_div_inverse bi;
  size_t ndigits;

  assert (base >= 2);
  assert (base <= 36);

  un = GMP_ABS (u->_mp_size);
  if (un == 0)
    return 1;

  up = u->_mp_d;

  bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
  switch (base)
    {
    case 2:
      return bits;
    case 4:
      return (bits + 1) / 2;
    case 8:
      return (bits + 2) / 3;
    case 16:
      return (bits + 3) / 4;
    case 32:
      return (bits + 4) / 5;
      /* FIXME: Do something more clever for the common case of base
	 10. */
    }

  /* General base: divide a scratch copy by base until it is zero,
     counting one digit per division. */
  tp = gmp_xalloc_limbs (un);
  mpn_copyi (tp, up, un);
  mpn_div_qr_1_invert (&bi, base);

  ndigits = 0;
  do
    {
      ndigits++;
      mpn_div_qr_1_preinv (tp, tp, un, &bi);
      un -= (tp[un-1] == 0);
    }
  while (un > 0);

  gmp_free (tp);
  return ndigits;
}
4043
/* Convert u to a NUL-terminated string.  base in [2, 36] gives
   lowercase digits, negative base in [-36, -2] uppercase; base 0 or
   1 (after sign removal) defaults to 10, |base| > 36 returns NULL.
   If sp is NULL a sufficiently large buffer is allocated; otherwise
   the caller's buffer is used (it is assumed big enough). */
char *
mpz_get_str (char *sp, int base, const mpz_t u)
{
  unsigned bits;
  const char *digits;
  mp_size_t un;
  size_t i, sn;

  if (base >= 0)
    {
      digits = "0123456789abcdefghijklmnopqrstuvwxyz";
    }
  else
    {
      base = -base;
      digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    }
  if (base <= 1)
    base = 10;
  if (base > 36)
    return NULL;

  /* Room for the digits plus a possible sign; +1 below for the NUL. */
  sn = 1 + mpz_sizeinbase (u, base);
  if (!sp)
    sp = gmp_xalloc (1 + sn);

  un = GMP_ABS (u->_mp_size);

  if (un == 0)
    {
      sp[0] = '0';
      sp[1] = '\0';
      return sp;
    }

  i = 0;

  if (u->_mp_size < 0)
    sp[i++] = '-';

  bits = mpn_base_power_of_two_p (base);

  if (bits)
    /* Not modified in this case. */
    sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
  else
    {
      struct mpn_base_info info;
      mp_ptr tp;

      /* General base conversion is destructive; work on a copy. */
      mpn_get_base_info (&info, base);
      tp = gmp_xalloc_limbs (un);
      mpn_copyi (tp, u->_mp_d, un);

      sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
      gmp_free (tp);
    }

  /* The helpers produce digit values; map them to characters. */
  for (; i < sn; i++)
    sp[i] = digits[(unsigned char) sp[i]];

  sp[sn] = '\0';
  return sp;
}
4108
4109 int
mpz_set_str(mpz_t r,const char * sp,int base)4110 mpz_set_str (mpz_t r, const char *sp, int base)
4111 {
4112 unsigned bits;
4113 mp_size_t rn, alloc;
4114 mp_ptr rp;
4115 size_t sn;
4116 int sign;
4117 unsigned char *dp;
4118
4119 assert (base == 0 || (base >= 2 && base <= 36));
4120
4121 while (isspace( (unsigned char) *sp))
4122 sp++;
4123
4124 sign = (*sp == '-');
4125 sp += sign;
4126
4127 if (base == 0)
4128 {
4129 if (*sp == '0')
4130 {
4131 sp++;
4132 if (*sp == 'x' || *sp == 'X')
4133 {
4134 base = 16;
4135 sp++;
4136 }
4137 else if (*sp == 'b' || *sp == 'B')
4138 {
4139 base = 2;
4140 sp++;
4141 }
4142 else
4143 base = 8;
4144 }
4145 else
4146 base = 10;
4147 }
4148
4149 sn = strlen (sp);
4150 dp = gmp_xalloc (sn + (sn == 0));
4151
4152 for (sn = 0; *sp; sp++)
4153 {
4154 unsigned digit;
4155
4156 if (isspace ((unsigned char) *sp))
4157 continue;
4158 if (*sp >= '0' && *sp <= '9')
4159 digit = *sp - '0';
4160 else if (*sp >= 'a' && *sp <= 'z')
4161 digit = *sp - 'a' + 10;
4162 else if (*sp >= 'A' && *sp <= 'Z')
4163 digit = *sp - 'A' + 10;
4164 else
4165 digit = base; /* fail */
4166
4167 if (digit >= base)
4168 {
4169 gmp_free (dp);
4170 r->_mp_size = 0;
4171 return -1;
4172 }
4173
4174 dp[sn++] = digit;
4175 }
4176
4177 bits = mpn_base_power_of_two_p (base);
4178
4179 if (bits > 0)
4180 {
4181 alloc = (sn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
4182 rp = MPZ_REALLOC (r, alloc);
4183 rn = mpn_set_str_bits (rp, dp, sn, bits);
4184 }
4185 else
4186 {
4187 struct mpn_base_info info;
4188 mpn_get_base_info (&info, base);
4189 alloc = (sn + info.exp - 1) / info.exp;
4190 rp = MPZ_REALLOC (r, alloc);
4191 rn = mpn_set_str_other (rp, dp, sn, base, &info);
4192 }
4193 assert (rn <= alloc);
4194 gmp_free (dp);
4195
4196 r->_mp_size = sign ? - rn : rn;
4197
4198 return 0;
4199 }
4200
/* Initialize R and set it from the string SP in base BASE, as
   mpz_set_str.  Returns mpz_set_str's result (0 on success, -1 on
   failure); R is initialized in either case. */
int
mpz_init_set_str (mpz_t r, const char *sp, int base)
{
  mpz_init (r);
  return mpz_set_str (r, sp, base);
}
4207
4208 size_t
mpz_out_str(FILE * stream,int base,const mpz_t x)4209 mpz_out_str (FILE *stream, int base, const mpz_t x)
4210 {
4211 char *str;
4212 size_t len;
4213
4214 str = mpz_get_str (NULL, base, x);
4215 len = strlen (str);
4216 len = fwrite (str, 1, len, stream);
4217 gmp_free (str);
4218 return len;
4219 }
4220
4221
/* Detect the host byte order: returns 1 on a big-endian host and -1
   on a little-endian one, matching the `endian' convention used by
   mpz_import and mpz_export. */
static int
gmp_detect_endian (void)
{
  int probe = 2;
  unsigned char low_addr_byte;

  /* Inspect the byte stored at the lowest address of the int. */
  memcpy (&low_addr_byte, &probe, 1);
  return 1 - low_addr_byte;
}
4229
4230 /* Import and export. Does not support nails. */
/* Set R from an array of COUNT words of SIZE bytes each at SRC.
   ORDER selects the word order (1 = most significant word first,
   -1 = least significant first); ENDIAN selects the byte order within
   each word (1 = big-endian, -1 = little-endian, 0 = host order).
   NAILS must be 0 (nails are not supported). */
void
mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
	    size_t nails, const void *src)
{
  const unsigned char *p;
  ptrdiff_t word_step;
  mp_ptr rp;
  mp_size_t rn;

  /* The current (partial) limb. */
  mp_limb_t limb;
  /* The number of bytes already copied to this limb (starting from
     the low end). */
  size_t bytes;
  /* The index where the limb should be stored, when completed. */
  mp_size_t i;

  if (nails != 0)
    gmp_die ("mpz_import: Nails not supported.");

  assert (order == 1 || order == -1);
  assert (endian >= -1 && endian <= 1);

  /* endian == 0 means host order; replace it by the detected value. */
  if (endian == 0)
    endian = gmp_detect_endian ();

  p = (unsigned char *) src;

  /* The inner loop below moves p by size bytes in the -endian
     direction per word; word_step is the extra adjustment needed so
     that p then points at the least significant byte of the next
     more-significant word. */
  word_step = (order != endian) ? 2 * size : 0;

  /* Process bytes from the least significant end, so point p at the
     least significant word. */
  if (order == 1)
    {
      p += size * (count - 1);
      word_step = - word_step;
    }

  /* And at least significant byte of that word. */
  if (endian == 1)
    p += (size - 1);

  /* Total input bytes, rounded up to whole limbs. */
  rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
  rp = MPZ_REALLOC (r, rn);

  for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
    {
      size_t j;
      for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
	{
	  /* Accumulate bytes into the current limb, low byte first. */
	  limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
	  if (bytes == sizeof(mp_limb_t))
	    {
	      rp[i++] = limb;
	      bytes = 0;
	      limb = 0;
	    }
	}
    }
  assert (i + (bytes > 0) == rn);
  /* Store the final partial limb, if any; otherwise strip high zero
     limbs to normalize the result. */
  if (limb != 0)
    rp[i++] = limb;
  else
    i = mpn_normalized_size (rp, i);

  r->_mp_size = i;
}
4298
4299 void *
mpz_export(void * r,size_t * countp,int order,size_t size,int endian,size_t nails,const mpz_t u)4300 mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
4301 size_t nails, const mpz_t u)
4302 {
4303 size_t count;
4304 mp_size_t un;
4305
4306 if (nails != 0)
4307 gmp_die ("mpz_import: Nails not supported.");
4308
4309 assert (order == 1 || order == -1);
4310 assert (endian >= -1 && endian <= 1);
4311 assert (size > 0 || u->_mp_size == 0);
4312
4313 un = u->_mp_size;
4314 count = 0;
4315 if (un != 0)
4316 {
4317 size_t k;
4318 unsigned char *p;
4319 ptrdiff_t word_step;
4320 /* The current (partial) limb. */
4321 mp_limb_t limb;
4322 /* The number of bytes left to to in this limb. */
4323 size_t bytes;
4324 /* The index where the limb was read. */
4325 mp_size_t i;
4326
4327 un = GMP_ABS (un);
4328
4329 /* Count bytes in top limb. */
4330 limb = u->_mp_d[un-1];
4331 assert (limb != 0);
4332
4333 k = 0;
4334 do {
4335 k++; limb >>= CHAR_BIT;
4336 } while (limb != 0);
4337
4338 count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;
4339
4340 if (!r)
4341 r = gmp_xalloc (count * size);
4342
4343 if (endian == 0)
4344 endian = gmp_detect_endian ();
4345
4346 p = (unsigned char *) r;
4347
4348 word_step = (order != endian) ? 2 * size : 0;
4349
4350 /* Process bytes from the least significant end, so point p at the
4351 least significant word. */
4352 if (order == 1)
4353 {
4354 p += size * (count - 1);
4355 word_step = - word_step;
4356 }
4357
4358 /* And at least significant byte of that word. */
4359 if (endian == 1)
4360 p += (size - 1);
4361
4362 for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
4363 {
4364 size_t j;
4365 for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
4366 {
4367 if (bytes == 0)
4368 {
4369 if (i < un)
4370 limb = u->_mp_d[i++];
4371 bytes = sizeof (mp_limb_t);
4372 }
4373 *p = limb;
4374 limb >>= CHAR_BIT;
4375 bytes--;
4376 }
4377 }
4378 assert (i == un);
4379 assert (k == count);
4380 }
4381
4382 if (countp)
4383 *countp = count;
4384
4385 return r;
4386 }
4387