1 /* Include file for internal GNU MP types and definitions.
2 
3    THE CONTENTS OF THIS FILE ARE FOR INTERNAL USE AND ARE ALMOST CERTAIN TO
4    BE SUBJECT TO INCOMPATIBLE CHANGES IN FUTURE GNU MP RELEASES.
5 
6 Copyright 1991, 1993-1997, 1999, 2000-2015 Free Software Foundation, Inc.
7 
8 Copyright 2009, 2013 William Hart
9 
10 
11 This file is part of the GNU MP Library.
12 
13 The GNU MP Library is free software; you can redistribute it and/or modify
14 it under the terms of the GNU Lesser General Public License as published by
15 the Free Software Foundation; either version 3 of the License, or (at your
16 option) any later version.
17 
18 The GNU MP Library is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
20 or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
21 License for more details.
22 
23 You should have received a copy of the GNU Lesser General Public License
24 along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
25 
26 
27 
28 /* __GMP_DECLSPEC must be given on any global data that will be accessed
29    from outside libmpir, meaning from the test or development programs, or
30    from libmpirxx.  Failing to do this will result in an incorrect address
31    being used for the accesses.  On functions __GMP_DECLSPEC makes calls
32    from outside libmpir more efficient, but they'll still work fine without
33    it.  */
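/* For example (an illustrative sketch, not a real MPIR declaration):

       __GMP_DECLSPEC extern const mp_limb_t  __gmp_example_table[];

   would need the qualifier if the test programs or libmpirxx read the
   table directly, whereas a table used only inside libmpir can omit it.  */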
34 
35 
36 #ifndef __GMP_IMPL_H__
37 #define __GMP_IMPL_H__
38 
39 /* limits.h is not used in general, since it's an ANSI-ism, and since on
40    solaris gcc 2.95 under -mcpu=ultrasparc in ABI=32 ends up getting wrong
41    values (the ABI=64 values).
42 
43    On Cray vector systems, however, we need the system limits.h since sizes
44    of signed and unsigned types can differ there, depending on compiler
45    options (eg. -hnofastmd), making our SHRT_MAX etc expressions fail.  For
46    reference, int can be 46 or 64 bits, whereas uint is always 64 bits; and
47    short can be 24, 32, 46 or 64 bits, and different for ushort.  */
48 
49 #if defined _WIN64
50 #include <limits.h>
51 #endif
52 
53 /* For fat.h and other fat binary stuff.
54    No need for __GMP_ATTRIBUTE_PURE or __GMP_NOTHROW, since functions
   declared this way are only used to set function pointers in __gmp_cpuvec;
   they're not called directly.  */
57 #define DECL_add_err1_n(name) \
58   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
59 #define DECL_add_err2_n(name) \
60   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t))
61 #define DECL_add_n(name) \
62   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t))
63 #define DECL_addmul_1(name) \
64   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
65 #define DECL_copyd(name) \
66   void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t))
67 #define DECL_copyi(name) \
68   void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t))
69 #define DECL_divexact_1(name) \
70   void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
71 #define DECL_divexact_by3c(name) \
72   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
73 #define DECL_divexact_byfobm1(name) \
74   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t,mp_limb_t))
75 #define DECL_divrem_1(name) \
76   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t))
77 #define DECL_divrem_2(name) \
78   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_ptr, mp_size_t, mp_srcptr))
79 #define DECL_divrem_euclidean_qr_1(name) \
80   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t))
81 #define DECL_divrem_euclidean_qr_2(name) \
82   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr))
83 #define DECL_gcd_1(name) \
84   mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t))
85 #define DECL_lshift(name) \
86   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, unsigned))
87 #define DECL_mod_1(name) \
88   mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t))
89 #define DECL_mod_34lsub1(name) \
90   mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t))
91 #define DECL_modexact_1c_odd(name) \
92   mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t))
93 #define DECL_mul_1(name) \
94   DECL_addmul_1 (name)
95 #define DECL_mul_basecase(name) \
96   void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t))
97 #define DECL_mulmid_basecase(name) \
98   void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t))
99 #define DECL_preinv_divrem_1(name) \
100   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int))
101 #define DECL_preinv_mod_1(name) \
102   mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t))
103 #define DECL_redc_1(name) \
104   void name __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
105 #define DECL_rshift(name) \
106   DECL_lshift (name)
107 #define DECL_sqr_basecase(name) \
108   void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t))
109 #define DECL_sub_err1_n(name) \
110   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
111 #define DECL_sub_err2_n(name) \
112   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t))
113 #define DECL_sub_n(name) \
114   DECL_add_n (name)
115 #define DECL_submul_1(name) \
116   DECL_addmul_1 (name)
117 #define DECL_sumdiff_n(name) \
118   mp_limb_t name __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr,mp_srcptr,mp_size_t))
119 #define DECL_nsumdiff_n(name) \
120   DECL_sumdiff_n(name)
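
/* For illustration, fat.h can use one DECL_* macro both to prototype a
   CPU-specific entrypoint and to declare the matching function pointer
   field (a sketch only; a name like add_n_core2 is hypothetical):

       DECL_add_n (__MPN(add_n_core2));

       struct cpuvec_t {
         ...
         DECL_add_n ((*add_n));
         ...
       };

       __gmp_cpuvec.add_n = __MPN(add_n_core2);

   keeping the two signatures in sync automatically.  */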
121 
122 #if ! __GMP_WITHIN_CONFIGURE
123 #include "config.h"
124 #include "gmp-mparam.h"
125 
126 /* These constants are generated by gen-fib.c header limbbits nailbits */
127 #if GMP_NUMB_BITS == 32
128 #define FIB_TABLE_LIMIT         47
129 #define FIB_TABLE_LUCNUM_LIMIT  46
130 #endif /* 32 bits */
131 #if GMP_NUMB_BITS == 64
132 #define FIB_TABLE_LIMIT         93
133 #define FIB_TABLE_LUCNUM_LIMIT  92
134 #endif /* 64 bits */
135 
/* These constants are generated by gen-bases.c header limbbits nailbits */
137 #if GMP_NUMB_BITS == 32
138 #define MP_BASES_CHARS_PER_LIMB_10      9
139 #define MP_BASES_BIG_BASE_10            CNST_LIMB(0x3b9aca00)
140 #define MP_BASES_BIG_BASE_INVERTED_10   CNST_LIMB(0x12e0be82)
141 #define MP_BASES_NORMALIZATION_STEPS_10 2
142 #endif /* 32 bits */
143 #if GMP_NUMB_BITS == 64
144 #define MP_BASES_CHARS_PER_LIMB_10      19
145 #define MP_BASES_BIG_BASE_10            CNST_LIMB(0x8ac7230489e80000)
146 #define MP_BASES_BIG_BASE_INVERTED_10   CNST_LIMB(0xd83c94fb6d2ac34a)
147 #define MP_BASES_NORMALIZATION_STEPS_10 0
148 #endif /* 64 bits */
149 
150 #if defined _LONG_LONG_LIMB
151 #if __GMP_HAVE_TOKEN_PASTE
152 #define CNST_LIMB(C) ((mp_limb_t) C##LL)
153 #else
154 #define CNST_LIMB(C) ((mp_limb_t) C/**/LL)
155 #endif
156 #else /* not _LONG_LONG_LIMB */
157 #if __GMP_HAVE_TOKEN_PASTE
158 #define CNST_LIMB(C) ((mp_limb_t) C##L)
159 #else
160 #define CNST_LIMB(C) ((mp_limb_t) C/**/L)
161 #endif
162 #endif /* _LONG_LONG_LIMB */
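
/* So for instance with long long limbs, CNST_LIMB(0x8ac7230489e80000)
   expands to ((mp_limb_t) 0x8ac7230489e80000LL), giving the constant the
   full limb width even where plain int or long is narrower.  */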
163 
/* These constants and defines are generated by gen-psqr limbbits nailbits */
165 #if GMP_LIMB_BITS == 32 && GMP_NAIL_BITS == 0
166 /* Non-zero bit indicates a quadratic residue mod 0x100.
167    This test identifies 82.81% as non-squares (212/256). */
168 static const mp_limb_t
169 sq_res_0x100[8] = {
170   CNST_LIMB(0x2030213),
171   CNST_LIMB(0x2020212),
172   CNST_LIMB(0x2020213),
173   CNST_LIMB(0x2020212),
174   CNST_LIMB(0x2030212),
175   CNST_LIMB(0x2020212),
176   CNST_LIMB(0x2020212),
177   CNST_LIMB(0x2020212),
178 };
179 
180 /* 2^24-1 = 3^2 * 5 * 7 * 13 * 17 ... */
181 #define PERFSQR_MOD_BITS  25
182 
183 /* This test identifies 95.66% as non-squares. */
184 #define PERFSQR_MOD_TEST(up, usize) \
185   do {                              \
186     mp_limb_t  r;                   \
187     PERFSQR_MOD_34 (r, up, usize);  \
188                                     \
189     /* 73.33% */                    \
190     PERFSQR_MOD_2 (r, CNST_LIMB(45), CNST_LIMB(0xfa4fa5), \
191                    CNST_LIMB(0x920), CNST_LIMB(0x1a442481)); \
192                                     \
193     /* 47.06% */                    \
194     PERFSQR_MOD_1 (r, CNST_LIMB(17), CNST_LIMB(0xf0f0f1), \
195                    CNST_LIMB(0x1a317)); \
196                                     \
197     /* 46.15% */                    \
198     PERFSQR_MOD_1 (r, CNST_LIMB(13), CNST_LIMB(0xec4ec5), \
199                    CNST_LIMB(0x9e5)); \
200                                     \
201     /* 42.86% */                    \
202     PERFSQR_MOD_1 (r, CNST_LIMB( 7), CNST_LIMB(0xdb6db7), \
203                    CNST_LIMB(0x69)); \
204   } while (0)
205 
206 /* Grand total sq_res_0x100 and PERFSQR_MOD_TEST, 99.25% non-squares. */
207 
208 /* helper for tests/mpz/t-perfsqr.c */
209 #define PERFSQR_DIVISORS  { 256, 45, 17, 13, 7, }
210 
211 #elif GMP_LIMB_BITS == 64 && GMP_NAIL_BITS == 0
212 
213 /* Non-zero bit indicates a quadratic residue mod 0x100.
214    This test identifies 82.81% as non-squares (212/256). */
215 static const mp_limb_t
216 sq_res_0x100[4] = {
217   CNST_LIMB(0x202021202030213),
218   CNST_LIMB(0x202021202020213),
219   CNST_LIMB(0x202021202030212),
220   CNST_LIMB(0x202021202020212),
221 };
222 
223 /* 2^48-1 = 3^2 * 5 * 7 * 13 * 17 * 97 ... */
224 #define PERFSQR_MOD_BITS  49
225 
226 /* This test identifies 97.81% as non-squares. */
227 #define PERFSQR_MOD_TEST(up, usize) \
228   do {                              \
229     mp_limb_t  r;                   \
230     PERFSQR_MOD_34 (r, up, usize);  \
231                                     \
232     /* 69.23% */                    \
233     PERFSQR_MOD_2 (r, CNST_LIMB(91), CNST_LIMB(0xfd2fd2fd2fd3), \
234                    CNST_LIMB(0x2191240), CNST_LIMB(0x8850a206953820e1)); \
235                                     \
236     /* 68.24% */                    \
237     PERFSQR_MOD_2 (r, CNST_LIMB(85), CNST_LIMB(0xfcfcfcfcfcfd), \
238                    CNST_LIMB(0x82158), CNST_LIMB(0x10b48c4b4206a105)); \
239                                     \
240     /* 55.56% */                    \
241     PERFSQR_MOD_1 (r, CNST_LIMB( 9), CNST_LIMB(0xe38e38e38e39), \
242                    CNST_LIMB(0x93)); \
243                                     \
244     /* 49.48% */                    \
245     PERFSQR_MOD_2 (r, CNST_LIMB(97), CNST_LIMB(0xfd5c5f02a3a1), \
246                    CNST_LIMB(0x1eb628b47), CNST_LIMB(0x6067981b8b451b5f)); \
247   } while (0)
248 
249 /* Grand total sq_res_0x100 and PERFSQR_MOD_TEST, 99.62% non-squares. */
250 
251 /* helper for tests/mpz/t-perfsqr.c */
252 #define PERFSQR_DIVISORS  { 256, 91, 85, 9, 97, }
253 
254 #else
255 #error no data available for this limb size in perfsqr.h
256 #endif
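
/* Putting the pieces together, the perfect square test proceeds roughly
   as follows (a sketch of the flow in the perfsqr code):

     1. Reject if the sq_res_0x100 bit for n % 0x100 is clear
        (82.81% of non-squares caught).
     2. Reject via PERFSQR_MOD_TEST: one reduction mod 2^24-1 or 2^48-1,
        then several cheap PERFSQR_MOD_1 and PERFSQR_MOD_2 sub-tests.
     3. Only the survivors pay for a full square root computation.

   Treating the two filters as independent gives, for the 32-bit data,
   1 - (1 - 0.8281)*(1 - 0.9566) = 99.25%, the grand total quoted above.  */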
257 
258 #if WANT_FAT_BINARY
259 #include "fat.h"
260 #endif
#endif /* ! __GMP_WITHIN_CONFIGURE */
262 
263 #if HAVE_INTTYPES_H      /* for uint_least32_t */
264 # include <inttypes.h>
265 #else
266 # ifdef HAVE_STDINT_H
267 #  include <stdint.h>
268 # endif
269 #endif
270 
271 #ifdef __cplusplus
272 #include <cstring>  /* for strlen */
273 #include <string>   /* for std::string */
274 #endif
275 
276 
277 #ifndef WANT_TMP_DEBUG  /* for TMP_ALLOC_LIMBS_2 and others */
278 #define WANT_TMP_DEBUG 0
279 #endif
280 
281 
282 /* Might search and replace _PROTO to __GMP_PROTO internally one day, to
283    avoid two names for one thing, but no hurry for that.  */
284 #define _PROTO(x)  __GMP_PROTO(x)
285 
286 /* The following tries to get a good version of alloca.  The tests are
287    adapted from autoconf AC_FUNC_ALLOCA, with a couple of additions.
   Whether this succeeds is tested by GMP_FUNC_ALLOCA, and HAVE_ALLOCA will
   be set up appropriately.
290 
291    ifndef alloca - a cpp define might already exist.
292        glibc <stdlib.h> includes <alloca.h> which uses GCC __builtin_alloca.
293        HP cc +Olibcalls adds a #define of alloca to __builtin_alloca.
294 
295    GCC __builtin_alloca - preferred whenever available.
296 
297    _AIX pragma - IBM compilers need a #pragma in "each module that needs to
298        use alloca".  Pragma indented to protect pre-ANSI cpp's.  _IBMR2 was
299        used in past versions of GMP, retained still in case it matters.
300 
301        The autoconf manual says this pragma needs to be at the start of a C
302        file, apart from comments and preprocessor directives.  Is that true?
303        xlc on aix 4.xxx doesn't seem to mind it being after prototypes etc
304        from mpir.h.
305 */
306 
307 #ifndef alloca
308 # ifdef __GNUC__
309 #  define alloca __builtin_alloca
310 # else
311 #  ifdef __DECC
312 #   define alloca(x) __ALLOCA(x)
313 #  else
314 #   ifdef _MSC_VER
315 #    include <malloc.h>
316 #    define alloca _alloca
317 #   else
318 #    if HAVE_ALLOCA_H
319 #     include <alloca.h>
320 #    else
321 #     if defined (_AIX) || defined (_IBMR2)
322  #pragma alloca
323 #     else
324        char *alloca ();
325 #     endif
326 #    endif
327 #   endif
328 #  endif
329 # endif
330 #endif
331 
332 
333 /* if not provided by gmp-mparam.h */
334 #ifndef BYTES_PER_MP_LIMB
335 #define BYTES_PER_MP_LIMB  SIZEOF_MP_LIMB_T
336 #endif
337 #ifndef BITS_PER_MP_LIMB
338 #define BITS_PER_MP_LIMB  (8 * SIZEOF_MP_LIMB_T)
339 #endif
340 
341 #define BITS_PER_ULONG   (8 * SIZEOF_UNSIGNED_LONG)
342 #ifdef HAVE_STDINT_H
343 #define BITS_PER_UINTMAX (8 * SIZEOF_UINTMAX_T)
344 #endif
345 
346 /* gmp_uint_least32_t is an unsigned integer type with at least 32 bits. */
347 #if HAVE_UINT_LEAST32_T
348 typedef uint_least32_t      gmp_uint_least32_t;
349 #else
350 #if SIZEOF_UNSIGNED_SHORT >= 4
351 typedef unsigned short      gmp_uint_least32_t;
352 #else
353 #if SIZEOF_UNSIGNED >= 4
354 typedef unsigned            gmp_uint_least32_t;
355 #else
356 typedef unsigned long       gmp_uint_least32_t;
357 #endif
358 #endif
359 #endif
360 
361 /* pre-inverse types for truncating division and modulo */
362 typedef struct {mp_limb_t inv32;} gmp_pi1_t;
363 typedef struct {mp_limb_t inv21, inv32, inv53;} gmp_pi2_t;
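
/* For the classical single-limb case the idea is as follows (a sketch for
   illustration): with B = 2^GMP_LIMB_BITS and a normalized divisor d
   (high bit set), precompute once

       v = (B*B - 1) / d - B

   after which each quotient limb can be estimated with multiplications by
   v plus small adjustments, avoiding a hardware divide per limb.  The
   gmp_pi2_t field names suggest the analogous 2/1, 3/2 and 5/3 inverses
   for two-limb divisors.  */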
364 
365 
366 
367 /* const and signed must match __gmp_const and __gmp_signed, so follow the
368    decision made for those in mpir.h.    */
369 #if ! __GMP_HAVE_CONST
370 #define const   /* empty */
371 #define signed  /* empty */
372 #endif
373 
374 /* "const" basically means a function does nothing but examine its arguments
375    and give a return value, it doesn't read or write any memory (neither
376    global nor pointed to by arguments), and has no other side-effects.  This
377    is more restrictive than "pure".  See info node "(gcc)Function
378    Attributes".  __GMP_NO_ATTRIBUTE_CONST_PURE lets tune/common.c etc turn
379    this off when trying to write timing loops.  */
380 #if HAVE_ATTRIBUTE_CONST && ! defined (__GMP_NO_ATTRIBUTE_CONST_PURE) && !( defined (__cplusplus) && defined (__sun))
381 #define ATTRIBUTE_CONST  __attribute__ ((const))
382 #else
383 #define ATTRIBUTE_CONST
384 #endif
385 
386 #if HAVE_ATTRIBUTE_NORETURN && !( defined (__cplusplus) && defined (__sun))
387 #define ATTRIBUTE_NORETURN  __attribute__ ((noreturn))
388 #else
389 #define ATTRIBUTE_NORETURN
390 #endif
391 
392 /* "malloc" means a function behaves like malloc in that the pointer it
393    returns doesn't alias anything.  */
394 #if HAVE_ATTRIBUTE_MALLOC && !( defined (__cplusplus) && defined (__sun))
395 #define ATTRIBUTE_MALLOC  __attribute__ ((malloc))
396 #else
397 #define ATTRIBUTE_MALLOC
398 #endif
399 
400 
401 #if ! HAVE_STRCHR
402 #define strchr(s,c)  index(s,c)
403 #endif
404 
405 #if ! HAVE_MEMSET
406 #define memset(p, c, n)                 \
407   do {                                  \
408     ASSERT ((n) >= 0);                  \
409     char *__memset__p = (p);            \
410     int  __i;                           \
411     for (__i = 0; __i < (n); __i++)     \
412       __memset__p[__i] = (c);           \
413   } while (0)
414 #endif
415 
416 /* va_copy is standard in C99, and gcc provides __va_copy when in strict C89
417    mode.  Falling back to a memcpy will give maximum portability, since it
418    works no matter whether va_list is a pointer, struct or array.  */
419 #if ! defined (va_copy) && defined (__va_copy)
420 #define va_copy(dst,src)  __va_copy(dst,src)
421 #endif
422 #if ! defined (va_copy)
423 #define va_copy(dst,src) \
424   do { memcpy (&(dst), &(src), sizeof (va_list)); } while (0)
425 #endif
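
/* A typical use of va_copy (an illustrative sketch):

       void print_twice (const char *fmt, va_list ap)
       {
         va_list  ap2;
         va_copy (ap2, ap);
         vfprintf (stderr, fmt, ap);
         vfprintf (stderr, fmt, ap2);
         va_end (ap2);
       }

   The copy is needed because ap cannot be traversed twice.  */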
426 
427 #if defined (__cplusplus)
428 extern "C" {
429 #endif
430 
431 
432 /* Usage: TMP_DECL;
433           TMP_MARK;
434           ptr = TMP_ALLOC (bytes);
435           TMP_FREE;
436 
437    Small allocations should use TMP_SALLOC, big allocations should use
438    TMP_BALLOC.  Allocations that might be small or big should use TMP_ALLOC.
439 
440    Functions that use just TMP_SALLOC should use TMP_SDECL, TMP_SMARK, and
441    TMP_SFREE.
442 
443    TMP_DECL just declares a variable, but might be empty and so must be last
444    in a list of variables.  TMP_MARK must be done before any TMP_ALLOC.
   TMP_ALLOC(0) is not allowed.  TMP_FREE may be omitted after a TMP_MARK
   that was never followed by a TMP_ALLOC.  */
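
/* A typical routine therefore looks like the following (an illustrative
   sketch; foo is hypothetical):

       void foo (mp_ptr rp, mp_srcptr ap, mp_size_t n)
       {
         mp_ptr  tp;
         TMP_DECL;

         TMP_MARK;
         tp = TMP_ALLOC_LIMBS (2*n);
         ...
         TMP_FREE;
       }
*/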
447 
448 /* The alignment in bytes, used for TMP_ALLOCed blocks, when alloca or
449    __gmp_allocate_func doesn't already determine it.  Currently TMP_ALLOC
450    isn't used for "double"s, so that's not in the union.  */
451 union tmp_align_t {
452   mp_limb_t  l;
453   char       *p;
454 };
455 #define __TMP_ALIGN  sizeof (union tmp_align_t)
456 
457 /* Return "a" rounded upwards to a multiple of "m", if it isn't already.
458    "a" must be an unsigned type.
459    This is designed for use with a compile-time constant "m".
460    The POW2 case is expected to be usual, and gcc 3.0 and up recognises
461    "(-(8*n))%8" or the like is always zero, which means the rounding up in
462    the WANT_TMP_NOTREENTRANT version of TMP_ALLOC below will be a noop.  */
463 #define ROUND_UP_MULTIPLE(a,m)          \
464   (POW2_P(m) ? (a) + (-(a))%(m)         \
465    : (a)+(m)-1 - (((a)+(m)-1) % (m)))
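
/* Worked examples: ROUND_UP_MULTIPLE(13,8) is 13 + (-13)%8 = 13+3 = 16
   via the POW2 branch, and ROUND_UP_MULTIPLE(13,12) is 24 - 24%12 = 24
   via the general branch.  */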
466 
467 #if defined (WANT_TMP_ALLOCA) || defined (WANT_TMP_REENTRANT)
468 struct tmp_reentrant_t {
469   struct tmp_reentrant_t  *next;
470   size_t		  size;	  /* bytes, including header */
471 };
472 __GMP_DECLSPEC void *__gmp_tmp_reentrant_alloc _PROTO ((struct tmp_reentrant_t **, size_t)) ATTRIBUTE_MALLOC;
473 __GMP_DECLSPEC void  __gmp_tmp_reentrant_free _PROTO ((struct tmp_reentrant_t *));
474 #endif
475 
476 #if WANT_TMP_ALLOCA
477 #define TMP_SDECL
478 #define TMP_DECL		struct tmp_reentrant_t *__tmp_marker
479 #define TMP_SMARK
480 #define TMP_MARK		__tmp_marker = 0
481 #define TMP_SALLOC(n)		alloca(n)
482 #define TMP_BALLOC(n)		__gmp_tmp_reentrant_alloc (&__tmp_marker, n)
483 #define TMP_ALLOC(n)							\
484   (LIKELY ((n) < 65536) ? TMP_SALLOC(n) : TMP_BALLOC(n))
485 #define TMP_SFREE
486 #define TMP_FREE							   \
487   do {									   \
488     if (UNLIKELY (__tmp_marker != 0)) __gmp_tmp_reentrant_free (__tmp_marker); \
489   } while (0)
490 #endif
491 
492 #if WANT_TMP_REENTRANT
493 #define TMP_SDECL		TMP_DECL
494 #define TMP_DECL		struct tmp_reentrant_t *__tmp_marker
495 #define TMP_SMARK		TMP_MARK
496 #define TMP_MARK		__tmp_marker = 0
497 #define TMP_SALLOC(n)		TMP_ALLOC(n)
498 #define TMP_BALLOC(n)		TMP_ALLOC(n)
499 #define TMP_ALLOC(n)		__gmp_tmp_reentrant_alloc (&__tmp_marker, n)
500 #define TMP_SFREE		TMP_FREE
501 #define TMP_FREE		__gmp_tmp_reentrant_free (__tmp_marker)
502 #endif
503 
504 #if WANT_TMP_NOTREENTRANT
505 struct tmp_marker
506 {
507   struct tmp_stack *which_chunk;
508   void *alloc_point;
509 };
510 __GMP_DECLSPEC void *__gmp_tmp_alloc _PROTO ((unsigned long)) ATTRIBUTE_MALLOC;
511 __GMP_DECLSPEC void __gmp_tmp_mark _PROTO ((struct tmp_marker *));
512 __GMP_DECLSPEC void __gmp_tmp_free _PROTO ((struct tmp_marker *));
513 #define TMP_SDECL		TMP_DECL
514 #define TMP_DECL		struct tmp_marker __tmp_marker
515 #define TMP_SMARK		TMP_MARK
516 #define TMP_MARK		__gmp_tmp_mark (&__tmp_marker)
517 #define TMP_SALLOC(n)		TMP_ALLOC(n)
518 #define TMP_BALLOC(n)		TMP_ALLOC(n)
519 #define TMP_ALLOC(n)							\
520   __gmp_tmp_alloc (ROUND_UP_MULTIPLE ((unsigned long) (n), __TMP_ALIGN))
521 #define TMP_SFREE		TMP_FREE
522 #define TMP_FREE		__gmp_tmp_free (&__tmp_marker)
523 #endif
524 
525 #if WANT_TMP_DEBUG
526 /* See tal-debug.c for some comments. */
527 struct tmp_debug_t {
528   struct tmp_debug_entry_t  *list;
529   const char                *file;
530   int                       line;
531 };
532 struct tmp_debug_entry_t {
533   struct tmp_debug_entry_t  *next;
534   char                      *block;
535   size_t                    size;
536 };
537 __GMP_DECLSPEC void  __gmp_tmp_debug_mark  _PROTO ((const char *, int, struct tmp_debug_t **,
538                                      struct tmp_debug_t *,
539                                      const char *, const char *));
540 __GMP_DECLSPEC void *__gmp_tmp_debug_alloc _PROTO ((const char *, int, int,
541                                      struct tmp_debug_t **, const char *,
542                                      size_t)) ATTRIBUTE_MALLOC;
543 __GMP_DECLSPEC void  __gmp_tmp_debug_free  _PROTO ((const char *, int, int,
544                                      struct tmp_debug_t **,
545                                      const char *, const char *));
546 #define TMP_SDECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
547 #define TMP_DECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
548 #define TMP_SMARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
549 #define TMP_MARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
550 #define TMP_SFREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
551 #define TMP_FREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
/* The marker variable is designed to provoke an uninitialized variable
553    warning from the compiler if TMP_FREE is used without a TMP_MARK.
554    __tmp_marker_inscope does the same for TMP_ALLOC.  Runtime tests pick
555    these things up too.  */
556 #define TMP_DECL_NAME(marker, marker_name)                      \
557   int marker;                                                   \
558   int __tmp_marker_inscope;                                     \
559   const char *__tmp_marker_name = marker_name;                  \
560   struct tmp_debug_t  __tmp_marker_struct;                      \
561   /* don't demand NULL, just cast a zero */                     \
562   struct tmp_debug_t  *__tmp_marker = (struct tmp_debug_t *) 0
563 #define TMP_MARK_NAME(marker, marker_name)                      \
564   do {                                                          \
565     marker = 1;                                                 \
566     __tmp_marker_inscope = 1;                                   \
567     __gmp_tmp_debug_mark  (ASSERT_FILE, ASSERT_LINE,            \
568                            &__tmp_marker, &__tmp_marker_struct, \
569                            __tmp_marker_name, marker_name);     \
570   } while (0)
571 #define TMP_SALLOC(n)		TMP_ALLOC(n)
572 #define TMP_BALLOC(n)		TMP_ALLOC(n)
573 #define TMP_ALLOC(size)                                                 \
574   __gmp_tmp_debug_alloc (ASSERT_FILE, ASSERT_LINE,                      \
575                          __tmp_marker_inscope,                          \
576                          &__tmp_marker, __tmp_marker_name, size)
577 #define TMP_FREE_NAME(marker, marker_name)                      \
578   do {                                                          \
579     __gmp_tmp_debug_free  (ASSERT_FILE, ASSERT_LINE,            \
580                            marker, &__tmp_marker,               \
581                            __tmp_marker_name, marker_name);     \
582   } while (0)
583 #endif /* WANT_TMP_DEBUG */
584 
585 
586 /* Allocating various types. */
587 #define TMP_ALLOC_TYPE(n,type)  ((type *) TMP_ALLOC ((n) * sizeof (type)))
588 #define TMP_SALLOC_TYPE(n,type) ((type *) TMP_SALLOC ((n) * sizeof (type)))
589 #define TMP_BALLOC_TYPE(n,type) ((type *) TMP_BALLOC ((n) * sizeof (type)))
590 #define TMP_ALLOC_LIMBS(n)      TMP_ALLOC_TYPE(n,mp_limb_t)
591 #define TMP_SALLOC_LIMBS(n)     TMP_SALLOC_TYPE(n,mp_limb_t)
592 #define TMP_BALLOC_LIMBS(n)     TMP_BALLOC_TYPE(n,mp_limb_t)
593 #define TMP_ALLOC_MP_PTRS(n)    TMP_ALLOC_TYPE(n,mp_ptr)
594 #define TMP_SALLOC_MP_PTRS(n)   TMP_SALLOC_TYPE(n,mp_ptr)
595 #define TMP_BALLOC_MP_PTRS(n)   TMP_BALLOC_TYPE(n,mp_ptr)
596 
597 /* It's more efficient to allocate one block than two.  This is certainly
598    true of the malloc methods, but it can even be true of alloca if that
599    involves copying a chunk of stack (various RISCs), or a call to a stack
600    bounds check (mingw).  In any case, when debugging keep separate blocks
601    so a redzoning malloc debugger can protect each individually.  */
602 #define TMP_ALLOC_LIMBS_2(xp,xsize, yp,ysize)           \
603   do {                                                  \
604     if (WANT_TMP_DEBUG)                                 \
605       {                                                 \
606         (xp) = TMP_ALLOC_LIMBS (xsize);                 \
607         (yp) = TMP_ALLOC_LIMBS (ysize);                 \
608       }                                                 \
609     else                                                \
610       {                                                 \
611         (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize));     \
612         (yp) = (xp) + (xsize);                          \
613       }                                                 \
614   } while (0)
615 
616 
617 /* From mpir.h, nicer names for internal use. */
618 #define MPN_CMP(result, xp, yp, size)  __GMPN_CMP(result, xp, yp, size)
619 #define LIKELY(cond)                   __GMP_LIKELY(cond)
620 #define UNLIKELY(cond)                 __GMP_UNLIKELY(cond)
621 
622 #define ABS(x) ((x) >= 0 ? (x) : -(x))
623 #undef MIN
624 #define MIN(l,o) ((l) < (o) ? (l) : (o))
625 #undef MAX
626 #define MAX(h,i) ((h) > (i) ? (h) : (i))
627 #define numberof(x)  (sizeof (x) / sizeof ((x)[0]))
628 
629 /* Field access macros.  */
630 #define SIZ(x) ((x)->_mp_size)
631 #define ABSIZ(x) ABS (SIZ (x))
632 #define PTR(x) ((x)->_mp_d)
633 #define LIMBS(x) ((x)->_mp_d)
634 #define EXP(x) ((x)->_mp_exp)
635 #define PREC(x) ((x)->_mp_prec)
636 #define ALLOC(x) ((x)->_mp_alloc)
637 #define NUM(x) mpq_numref(x)
638 #define DEN(x) mpq_denref(x)
639 
640 /* n-1 inverts any low zeros and the lowest one bit.  If n&(n-1) leaves zero
641    then that lowest one bit must have been the only bit set.  n==0 will
642    return true though, so avoid that.  */
643 #define POW2_P(n)  (((n) & ((n) - 1)) == 0)
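
/* For example POW2_P(16) is 16 & 15 == 0, true, while POW2_P(12) is
   12 & 11 == 8, false.  As noted, POW2_P(0) is also true.  */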
644 
645 /* This is intended for constant THRESHOLDs only, where the compiler
646    can completely fold the result.  */
647 #define LOG2C(n) \
648  (((n) >=    0x1) + ((n) >=    0x2) + ((n) >=    0x4) + ((n) >=    0x8) + \
649   ((n) >=   0x10) + ((n) >=   0x20) + ((n) >=   0x40) + ((n) >=   0x80) + \
650   ((n) >=  0x100) + ((n) >=  0x200) + ((n) >=  0x400) + ((n) >=  0x800) + \
651   ((n) >= 0x1000) + ((n) >= 0x2000) + ((n) >= 0x4000) + ((n) >= 0x8000))
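
/* For example LOG2C(7) = 3 and LOG2C(8) = 4; for 1 <= n < 2^16 this is
   the bit length floor(log2(n)) + 1.  */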
652 
653 /* The "short" defines are a bit different because shorts are promoted to
654    ints by ~ or >> etc.
655 
656    #ifndef's are used since on some systems (HP?) header files other than
   limits.h set up these defines.  We could forcibly #undef in that case, but
658    there seems no need to worry about that.  */
659 
660 #ifndef ULONG_MAX
661 #define ULONG_MAX   __GMP_ULONG_MAX
662 #endif
663 #ifndef UINT_MAX
664 #define UINT_MAX    __GMP_UINT_MAX
665 #endif
666 #ifndef USHRT_MAX
667 #define USHRT_MAX   __GMP_USHRT_MAX
668 #endif
669 #define MP_LIMB_T_MAX      (~ (mp_limb_t) 0)
670 
671 /* Must cast ULONG_MAX etc to unsigned long etc, since they might not be
672    unsigned on a K&R compiler.  In particular the HP-UX 10 bundled K&R cc
673    treats the plain decimal values in <limits.h> as signed.  */
674 #define ULONG_HIGHBIT      (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
675 #define UINT_HIGHBIT       (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
676 #define USHRT_HIGHBIT      ((unsigned short) (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1)))
677 #define GMP_LIMB_HIGHBIT  (MP_LIMB_T_MAX ^ (MP_LIMB_T_MAX >> 1))
678 #ifdef HAVE_STDINT_H
679 #define UINTMAX_HIGHBIT   (UINTMAX_MAX ^ (UINTMAX_MAX >> 1))
680 #endif
681 
682 #ifndef LONG_MIN
683 #define LONG_MIN           ((long) ULONG_HIGHBIT)
684 #endif
685 #ifndef LONG_MAX
686 #define LONG_MAX           (-(LONG_MIN+1))
687 #endif
688 
689 #ifndef INT_MIN
690 #define INT_MIN            ((int) UINT_HIGHBIT)
691 #endif
692 #ifndef INT_MAX
693 #define INT_MAX            (-(INT_MIN+1))
694 #endif
695 
696 #ifndef SHRT_MIN
697 #define SHRT_MIN           ((short) USHRT_HIGHBIT)
698 #endif
699 #ifndef SHRT_MAX
700 #define SHRT_MAX           ((short) (-(SHRT_MIN+1)))
701 #endif
702 
703 #if defined( _WIN64 )
704 #define MP_SIZE_T_MAX      _I64_MAX
705 #define MP_SIZE_T_MIN      _I64_MIN
706 #elif __GMP_MP_SIZE_T_INT
707 #define MP_SIZE_T_MAX      INT_MAX
708 #define MP_SIZE_T_MIN      INT_MIN
709 #else
710 #define MP_SIZE_T_MAX      LONG_MAX
711 #define MP_SIZE_T_MIN      LONG_MIN
712 #endif
713 
/* mp_exp_t is the same as mp_size_t, except on _WIN64 where it stays a long */
715 #if defined( _WIN64 )
716 #define MP_EXP_T_MAX   LONG_MAX
717 #define MP_EXP_T_MIN   LONG_MIN
718 #else
719 #define MP_EXP_T_MAX   MP_SIZE_T_MAX
720 #define MP_EXP_T_MIN   MP_SIZE_T_MIN
721 #endif
722 
723 #define LONG_HIGHBIT       LONG_MIN
724 #define INT_HIGHBIT        INT_MIN
725 #define SHRT_HIGHBIT       SHRT_MIN
726 
727 
728 #define GMP_NUMB_HIGHBIT  (CNST_LIMB(1) << (GMP_NUMB_BITS-1))
729 
730 #if GMP_NAIL_BITS == 0
731 #define GMP_NAIL_LOWBIT   CNST_LIMB(0)
732 #else
733 #define GMP_NAIL_LOWBIT   (CNST_LIMB(1) << GMP_NUMB_BITS)
734 #endif
735 
736 /* Swap macros. */
737 
738 #define MP_LIMB_T_SWAP(x, y)                    \
739   do {                                          \
740     mp_limb_t __mp_limb_t_swap__tmp = (x);      \
741     (x) = (y);                                  \
742     (y) = __mp_limb_t_swap__tmp;                \
743   } while (0)
744 #define MP_SIZE_T_SWAP(x, y)                    \
745   do {                                          \
746     mp_size_t __mp_size_t_swap__tmp = (x);      \
747     (x) = (y);                                  \
748     (y) = __mp_size_t_swap__tmp;                \
749   } while (0)
750 
751 #define MP_PTR_SWAP(x, y)               \
752   do {                                  \
753     mp_ptr __mp_ptr_swap__tmp = (x);    \
754     (x) = (y);                          \
755     (y) = __mp_ptr_swap__tmp;           \
756   } while (0)
757 #define MP_SRCPTR_SWAP(x, y)                    \
758   do {                                          \
759     mp_srcptr __mp_srcptr_swap__tmp = (x);      \
760     (x) = (y);                                  \
761     (y) = __mp_srcptr_swap__tmp;                \
762   } while (0)
763 
764 #define MPN_PTR_SWAP(xp,xs, yp,ys)      \
765   do {                                  \
766     MP_PTR_SWAP (xp, yp);               \
767     MP_SIZE_T_SWAP (xs, ys);            \
768   } while(0)
769 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys)   \
770   do {                                  \
771     MP_SRCPTR_SWAP (xp, yp);            \
772     MP_SIZE_T_SWAP (xs, ys);            \
773   } while(0)
774 
775 #define MPZ_PTR_SWAP(x, y)              \
776   do {                                  \
777     mpz_ptr __mpz_ptr_swap__tmp = (x);  \
778     (x) = (y);                          \
779     (y) = __mpz_ptr_swap__tmp;          \
780   } while (0)
781 #define MPZ_SRCPTR_SWAP(x, y)                   \
782   do {                                          \
783     mpz_srcptr __mpz_srcptr_swap__tmp = (x);    \
784     (x) = (y);                                  \
785     (y) = __mpz_srcptr_swap__tmp;               \
786   } while (0)
787 
788 
789 /* Enhancement: __gmp_allocate_func could have "__attribute__ ((malloc))",
790    but current gcc (3.0) doesn't seem to support that.  */
791 __GMP_DECLSPEC extern void * (*__gmp_allocate_func) __GMP_PROTO ((size_t));
792 __GMP_DECLSPEC extern void * (*__gmp_reallocate_func) __GMP_PROTO ((void *, size_t, size_t));
793 __GMP_DECLSPEC extern void   (*__gmp_free_func) __GMP_PROTO ((void *, size_t));
794 
795 __GMP_DECLSPEC void *__gmp_default_allocate _PROTO ((size_t));
796 __GMP_DECLSPEC void *__gmp_default_reallocate _PROTO ((void *, size_t, size_t));
797 __GMP_DECLSPEC void __gmp_default_free _PROTO ((void *, size_t));
798 
799 #define __GMP_ALLOCATE_FUNC_TYPE(n,type) \
800   ((type *) (*__gmp_allocate_func) ((n) * sizeof (type)))
801 #define __GMP_ALLOCATE_FUNC_LIMBS(n)   __GMP_ALLOCATE_FUNC_TYPE (n, mp_limb_t)
802 
803 #define __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, type) \
804   ((type *) (*__gmp_reallocate_func)                            \
805    (p, (old_size) * sizeof (type), (new_size) * sizeof (type)))
806 #define __GMP_REALLOCATE_FUNC_LIMBS(p, old_size, new_size) \
807   __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, mp_limb_t)
808 
809 #define __GMP_FREE_FUNC_TYPE(p,n,type) (*__gmp_free_func) (p, (n) * sizeof (type))
810 #define __GMP_FREE_FUNC_LIMBS(p,n)     __GMP_FREE_FUNC_TYPE (p, n, mp_limb_t)
811 
812 #define __GMP_REALLOCATE_FUNC_MAYBE(ptr, oldsize, newsize)      \
813   do {                                                          \
814     if ((oldsize) != (newsize))                                 \
815       (ptr) = (*__gmp_reallocate_func) (ptr, oldsize, newsize); \
816   } while (0)
817 
818 #define __GMP_REALLOCATE_FUNC_MAYBE_TYPE(ptr, oldsize, newsize, type)   \
819   do {                                                                  \
820     if ((oldsize) != (newsize))                                         \
821       (ptr) = (type *) (*__gmp_reallocate_func)                         \
822         (ptr, (oldsize) * sizeof (type), (newsize) * sizeof (type));    \
823   } while (0)
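
/* An illustrative allocation lifecycle (sizes hypothetical):

       mp_ptr  p;
       p = __GMP_ALLOCATE_FUNC_LIMBS (n);
       ...
       p = __GMP_REALLOCATE_FUNC_LIMBS (p, n, 2*n);
       ...
       __GMP_FREE_FUNC_LIMBS (p, 2*n);

   The old size is passed back on realloc and free since allocators
   installed with mp_set_memory_functions may depend on it.  */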
824 
825 
/* Dummy for non-gcc; code involving it will go dead. */
827 #if ! defined (__GNUC__) || __GNUC__ < 2
828 #define __builtin_constant_p(x)   0
829 #endif
830 
831 
832 /* In gcc 2.96 and up on i386, tail calls are optimized to jumps if the
833    stack usage is compatible.  __attribute__ ((regparm (N))) helps by
834    putting leading parameters in registers, avoiding extra stack.
835 
836    regparm cannot be used with calls going through the PLT, because the
837    binding code there may clobber the registers (%eax, %edx, %ecx) used for
838    the regparm parameters.  Calls to local (ie. static) functions could
839    still use this, if we cared to differentiate locals and globals.
840 
841    On athlon-unknown-freebsd4.9 with gcc 3.3.3, regparm cannot be used with
842    -p or -pg profiling, since that version of gcc doesn't realize the
843    .mcount calls will clobber the parameter registers.  Other systems are
844    ok, like debian with glibc 2.3.2 (mcount doesn't clobber), but we don't
845    bother to try to detect this.  regparm is only an optimization so we just
846    disable it when profiling (profiling being a slowdown anyway).  */
847 
848 #define USE_LEADING_REGPARM 0
849 
850 
851 /* Macros for altering parameter order according to regparm usage. */
852 #if USE_LEADING_REGPARM
853 #define REGPARM_2_1(a,b,x)    x,a,b
854 #define REGPARM_3_1(a,b,c,x)  x,a,b,c
855 #define REGPARM_ATTR(n) __attribute__ ((regparm (n)))
856 #else
857 #define REGPARM_2_1(a,b,x)    a,b,x
858 #define REGPARM_3_1(a,b,c,x)  a,b,c,x
859 #define REGPARM_ATTR(n)
860 #endif
861 
862 __GMP_DECLSPEC int mpir_is_likely_prime_BPSW(mp_limb_t n);
863 
864 __GMP_DECLSPEC mp_limb_t mpir_sqrt(mp_limb_t r);
865 
866 __GMP_DECLSPEC void __gmpz_aorsmul_1 _PROTO ((REGPARM_3_1 (mpz_ptr w, mpz_srcptr u, mp_limb_t v, mp_size_t sub))) REGPARM_ATTR(1);
867 #define mpz_aorsmul_1(w,u,v,sub)  __gmpz_aorsmul_1 (REGPARM_3_1 (w, u, v, sub))
868 
869 #define mpz_n_pow_ui __gmpz_n_pow_ui
870 __GMP_DECLSPEC void    mpz_n_pow_ui _PROTO ((mpz_ptr, mp_srcptr, mp_size_t, mpir_ui));
871 
872 
873 #define mpn_add_nc __MPN(add_nc)
874 __GMP_DECLSPEC mp_limb_t mpn_add_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
875 
876 #define mpn_addmul_1c __MPN(addmul_1c)
877 __GMP_DECLSPEC mp_limb_t mpn_addmul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
878 
879 #define mpn_addmul_2 __MPN(addmul_2)
880 __GMP_DECLSPEC mp_limb_t mpn_addmul_2 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
881 
882 #define mpn_addmul_3 __MPN(addmul_3)
883 __GMP_DECLSPEC mp_limb_t mpn_addmul_3 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
884 
885 #define mpn_addmul_4 __MPN(addmul_4)
886 __GMP_DECLSPEC mp_limb_t mpn_addmul_4 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
887 
888 #define mpn_addmul_5 __MPN(addmul_5)
889 __GMP_DECLSPEC mp_limb_t mpn_addmul_5 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
890 
891 #define mpn_addmul_6 __MPN(addmul_6)
892 __GMP_DECLSPEC mp_limb_t mpn_addmul_6 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
893 
894 #define mpn_addmul_7 __MPN(addmul_7)
895 __GMP_DECLSPEC mp_limb_t mpn_addmul_7 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
896 
897 #define mpn_addmul_8 __MPN(addmul_8)
898 __GMP_DECLSPEC mp_limb_t mpn_addmul_8 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
899 
900 #define mpn_addlsh_n __MPN(addlsh_n)
901 __GMP_DECLSPEC mp_limb_t mpn_addlsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t,unsigned int));
902 
903 #define mpn_sublsh_n __MPN(sublsh_n)
904 __GMP_DECLSPEC mp_limb_t mpn_sublsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t,unsigned int));
905 
906 #define mpn_addlsh_nc __MPN(addlsh_nc)
907 __GMP_DECLSPEC mp_limb_t mpn_addlsh_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t,unsigned int, mp_limb_t));
908 
909 #define mpn_sublsh_nc __MPN(sublsh_nc)
910 __GMP_DECLSPEC mp_limb_t mpn_sublsh_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t,unsigned int, mp_limb_t));
911 
912 /* mpn_rsh1add_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} + {b,n}) >> 1,
913    and returns the bit rshifted out (0 or 1).  */
914 #define mpn_rsh1add_n __MPN(rsh1add_n)
915 __GMP_DECLSPEC mp_limb_t mpn_rsh1add_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
916 
917 /* mpn_rsh1sub_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} - {b,n}) >> 1,
918    and returns the bit rshifted out (0 or 1).  If there's a borrow from the
919    subtract, it's stored as a 1 in the high bit of c[n-1], like a twos
920    complement negative.  */
921 #define mpn_rsh1sub_n __MPN(rsh1sub_n)
922 __GMP_DECLSPEC mp_limb_t mpn_rsh1sub_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
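
/* For instance with n = 1, a[0] = 5, b[0] = 3, mpn_rsh1add_n gives
   c[0] = 4 and returns 0; with b[0] = 2 it gives c[0] = 3 and returns 1
   (the shifted-out bit of the sum 7).  */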
923 
924 #if HAVE_NATIVE_mpn_lshiftc
925 #define mpn_lshiftc __MPN(lshiftc)
926 __GMP_DECLSPEC mp_limb_t mpn_lshiftc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,unsigned int));
927 #endif
928 
929 #define mpn_addadd_n __MPN(addadd_n)
930 __GMP_DECLSPEC mp_limb_t mpn_addadd_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t));
931 
932 #define mpn_addsub_n __MPN(addsub_n)
933 __GMP_DECLSPEC int mpn_addsub_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t));
934 
935 #define mpn_subadd_n __MPN(subadd_n)
936 __GMP_DECLSPEC mp_limb_t mpn_subadd_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t));
937 
938 #if HAVE_NATIVE_mpn_karaadd
939 #define mpn_karaadd __MPN(karaadd)
940 __GMP_DECLSPEC void mpn_karaadd __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t));
941 #endif
942 
943 #if HAVE_NATIVE_mpn_karasub
944 #define mpn_karasub __MPN(karasub)
945 __GMP_DECLSPEC void mpn_karasub __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t));
946 #endif
947 
948 #ifndef mpn_sumdiff_n  /* if not done with cpuvec in a fat binary */
949 #define mpn_sumdiff_n __MPN(sumdiff_n)
950 __GMP_DECLSPEC mp_limb_t mpn_sumdiff_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
951 #endif
952 
953 #ifndef mpn_nsumdiff_n
954 #define mpn_nsumdiff_n __MPN(nsumdiff_n)
955 __GMP_DECLSPEC mp_limb_t mpn_nsumdiff_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
956 #endif
957 
958 #define mpn_sumdiff_nc __MPN(sumdiff_nc)
959 __GMP_DECLSPEC mp_limb_t mpn_sumdiff_nc __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
960 
961 #define mpn_divexact_byff __MPN(divexact_byff)
962 __GMP_DECLSPEC mp_limb_t mpn_divexact_byff __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
963 
964 #ifndef mpn_divexact_byfobm1      /* if not done with cpuvec in a fat binary */
965 #define mpn_divexact_byfobm1 __MPN(divexact_byfobm1)
966 __GMP_DECLSPEC mp_limb_t mpn_divexact_byfobm1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t,mp_limb_t));
967 #endif
968 
969 #ifndef mpn_add_err1_n      /* if not done with cpuvec in a fat binary */
970 #define mpn_add_err1_n  __MPN(add_err1_n)
971 __GMP_DECLSPEC mp_limb_t mpn_add_err1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
972 #endif
973 
974 #ifndef mpn_sub_err1_n      /* if not done with cpuvec in a fat binary */
975 #define mpn_sub_err1_n  __MPN(sub_err1_n)
976 __GMP_DECLSPEC mp_limb_t mpn_sub_err1_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t);
977 #endif
978 
979 #ifndef mpn_add_err2_n      /* if not done with cpuvec in a fat binary */
980 #define mpn_add_err2_n  __MPN(add_err2_n)
981 __GMP_DECLSPEC mp_limb_t mpn_add_err2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
982 #endif
983 
984 #ifndef mpn_sub_err2_n      /* if not done with cpuvec in a fat binary */
985 #define mpn_sub_err2_n  __MPN(sub_err2_n)
986 __GMP_DECLSPEC mp_limb_t mpn_sub_err2_n (mp_ptr, mp_srcptr, mp_srcptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t);
987 #endif
988 
989 #define mpn_divrem_1c __MPN(divrem_1c)
990 __GMP_DECLSPEC mp_limb_t mpn_divrem_1c __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
991 
992 #define mpn_dump __MPN(dump)
993 __GMP_DECLSPEC void mpn_dump __GMP_PROTO ((mp_srcptr, mp_size_t));
994 
995 #define mpn_fib2_ui __MPN(fib2_ui)
996 __GMP_DECLSPEC mp_size_t mpn_fib2_ui _PROTO ((mp_ptr, mp_ptr, mpir_ui));
997 
998 /* Remap names of internal mpn functions.  */
999 #define __clz_tab               __MPN(clz_tab)
1000 #define mpn_udiv_w_sdiv		__MPN(udiv_w_sdiv)
1001 
1002 #define mpn_jacobi_base __MPN(jacobi_base)
1003 __GMP_DECLSPEC int mpn_jacobi_base _PROTO ((mp_limb_t a, mp_limb_t b, int result_bit1)) ATTRIBUTE_CONST;
1004 
1005 #define mpn_jacobi_2 __MPN(jacobi_2)
1006 __GMP_DECLSPEC int mpn_jacobi_2 _PROTO ((mp_srcptr, mp_srcptr, unsigned));
1007 
1008 #define mpn_jacobi_n __MPN(jacobi_n)
1009 __GMP_DECLSPEC int mpn_jacobi_n _PROTO ((mp_ptr, mp_ptr, mp_size_t, unsigned));
1010 
1011 #define mpn_mod_1c __MPN(mod_1c)
1012 __GMP_DECLSPEC mp_limb_t mpn_mod_1c __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)) __GMP_ATTRIBUTE_PURE;
1013 
1014 #define mpn_mul_1c __MPN(mul_1c)
1015 __GMP_DECLSPEC mp_limb_t mpn_mul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
1016 
1017 #define mpn_mul_2 __MPN(mul_2)
__GMP_DECLSPEC mp_limb_t mpn_mul_2 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
1019 
1020 #ifndef mpn_mul_basecase  /* if not done with cpuvec in a fat binary */
1021 #define mpn_mul_basecase __MPN(mul_basecase)
1022 __GMP_DECLSPEC void mpn_mul_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
1023 #endif
1024 
1025 #define mpn_mullow_n __MPN(mullow_n)
1026 __GMP_DECLSPEC void mpn_mullow_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
1027 
1028 #define mpn_mullow_n_basecase __MPN(mullow_n_basecase)
1029 __GMP_DECLSPEC void mpn_mullow_n_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
1030 
1031 #define mpn_mulhigh_n __MPN(mulhigh_n)
1032 __GMP_DECLSPEC void mpn_mulhigh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
1033 
1034 #define mpn_mullow_basecase __MPN(mullow_basecase)
1035 __GMP_DECLSPEC void mpn_mullow_basecase __GMP_PROTO ((mp_ptr, mp_srcptr,mp_size_t, mp_srcptr, mp_size_t,mp_size_t));
1036 
1037 #ifndef mpn_mulmid_basecase      /* if not done with cpuvec in a fat binary */
1038 #define mpn_mulmid_basecase __MPN(mulmid_basecase)
1039 __GMP_DECLSPEC void mpn_mulmid_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
1040 #endif
1041 
1042 #define mpn_mod_1_1 __MPN(mod_1_1)
1043 __GMP_DECLSPEC void mpn_mod_1_1 __GMP_PROTO ((mp_ptr,mp_srcptr, mp_size_t, mp_srcptr));
1044 
1045 #define mpn_mod_1_2 __MPN(mod_1_2)
1046 __GMP_DECLSPEC void mpn_mod_1_2 __GMP_PROTO ((mp_ptr,mp_srcptr, mp_size_t, mp_srcptr));
1047 
1048 #define mpn_mod_1_3 __MPN(mod_1_3)
1049 __GMP_DECLSPEC void mpn_mod_1_3 __GMP_PROTO ((mp_ptr,mp_srcptr, mp_size_t, mp_srcptr));
1050 
1051 #define mpn_mod_1_k __MPN(mod_1_k)
1052 __GMP_DECLSPEC mp_limb_t mpn_mod_1_k __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t,mp_size_t));
1053 
1054 #define mpn_mulmid __MPN(mulmid)
1055 __GMP_DECLSPEC void mpn_mulmid __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
1056 
1057 #define mpn_mulmid_n __MPN(mulmid_n)
1058 __GMP_DECLSPEC void mpn_mulmid_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
1059 
1060 #ifndef mpn_sqr_basecase  /* if not done with cpuvec in a fat binary */
1061 #define mpn_sqr_basecase __MPN(sqr_basecase)
1062 __GMP_DECLSPEC void mpn_sqr_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
1063 #endif
1064 
1065 #define mpz_trial_division __gmpz_trial_division
1066 __GMP_DECLSPEC unsigned long mpz_trial_division __GMP_PROTO ((mpz_srcptr,unsigned long, unsigned long));
1067 
1068 #define mpn_sub_nc __MPN(sub_nc)
1069 __GMP_DECLSPEC mp_limb_t mpn_sub_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
1070 
1071 #define mpn_submul_1c __MPN(submul_1c)
1072 __GMP_DECLSPEC mp_limb_t mpn_submul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
1073 
1074 #define mpn_invert_2exp __MPN(invert_2exp)
1075 __GMP_DECLSPEC void mpn_invert_2exp __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1076 
1077 #define mpn_is_invert __MPN(is_invert)
1078 __GMP_DECLSPEC int mpn_is_invert __GMP_PROTO ((mp_srcptr, mp_srcptr, mp_size_t));
1079 
1080 #define mpn_invert_trunc __MPN(invert_trunc)
1081 __GMP_DECLSPEC void mpn_invert_trunc __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr));
1082 
1083 #define   mpn_binvert __MPN(binvert)
1084 __GMP_DECLSPEC void      mpn_binvert __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1085 #define   mpn_binvert_itch __MPN(binvert_itch)
1086 __GMP_DECLSPEC mp_size_t mpn_binvert_itch __GMP_PROTO ((mp_size_t)) ATTRIBUTE_CONST;
1087 
1088 #define   mpn_powm __MPN(powm)
1089 __GMP_DECLSPEC void      mpn_powm __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1090 #define   mpn_powlo __MPN(powlo)
1091 __GMP_DECLSPEC void      mpn_powlo __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
1092 
1093 #ifndef mpn_divrem_euclidean_qr_1    /* if not done with cpuvec in a fat binary */
1094 #define mpn_divrem_euclidean_qr_1 __MPN(divrem_euclidean_qr_1)
1095 __GMP_DECLSPEC mp_limb_t mpn_divrem_euclidean_qr_1 __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t,mp_limb_t));
1096 #endif
1097 
1098 #ifndef mpn_divrem_euclidean_qr_2    /* if not done with cpuvec in a fat binary */
1099 #define mpn_divrem_euclidean_qr_2 __MPN(divrem_euclidean_qr_2)
1100 __GMP_DECLSPEC mp_limb_t mpn_divrem_euclidean_qr_2 __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t,mp_srcptr));
1101 #endif
1102 
1103 #define mpn_divrem_euclidean_r_1 __MPN(divrem_euclidean_r_1)
1104 __GMP_DECLSPEC mp_limb_t mpn_divrem_euclidean_r_1 __GMP_PROTO ((mp_srcptr, mp_size_t,mp_limb_t));
1105 
1106 #define mpn_divrem_hensel_qr_1 __MPN(divrem_hensel_qr_1)
1107 __GMP_DECLSPEC mp_limb_t mpn_divrem_hensel_qr_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t));
1108 
1109 #define mpn_divrem_hensel_qr_1_1 __MPN(divrem_hensel_qr_1_1)
1110 __GMP_DECLSPEC mp_limb_t mpn_divrem_hensel_qr_1_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t));
1111 
1112 #define mpn_divrem_hensel_qr_1_2 __MPN(divrem_hensel_qr_1_2)
1113 __GMP_DECLSPEC mp_limb_t mpn_divrem_hensel_qr_1_2 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t));
1114 
1115 #define mpn_divrem_hensel_rsh_qr_1_preinv __MPN(divrem_hensel_rsh_qr_1_preinv)
1116 __GMP_DECLSPEC mp_limb_t mpn_divrem_hensel_rsh_qr_1_preinv __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t,mp_limb_t,int));
1117 
1118 #define mpn_divrem_hensel_rsh_qr_1 __MPN(divrem_hensel_rsh_qr_1)
1119 __GMP_DECLSPEC mp_limb_t mpn_divrem_hensel_rsh_qr_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t,int));
1120 
1121 #define mpn_rsh_divrem_hensel_qr_1 __MPN(rsh_divrem_hensel_qr_1)
1122 __GMP_DECLSPEC mp_limb_t mpn_rsh_divrem_hensel_qr_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t,int,mp_limb_t));
1123 
1124 #define mpn_rsh_divrem_hensel_qr_1_1 __MPN(rsh_divrem_hensel_qr_1_1)
1125 __GMP_DECLSPEC mp_limb_t mpn_rsh_divrem_hensel_qr_1_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t,int,mp_limb_t));
1126 
1127 #define mpn_rsh_divrem_hensel_qr_1_2 __MPN(rsh_divrem_hensel_qr_1_2)
1128 __GMP_DECLSPEC mp_limb_t mpn_rsh_divrem_hensel_qr_1_2 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t,mp_limb_t,int,mp_limb_t));
1129 
1130 #define mpn_divrem_hensel_r_1 __MPN(divrem_hensel_r_1)
1131 __GMP_DECLSPEC mp_limb_t mpn_divrem_hensel_r_1 __GMP_PROTO ((mp_srcptr, mp_size_t,mp_limb_t));
1132 
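/* Fill {nn,limbs+1} with a random value for arithmetic modulo B^limbs+1:
   limbs random limbs plus a small (< 1024) top limb, negated with
   probability 1/2.  (A description inferred from the code below.)  */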
1133 #define mpir_random_fermat(nn, state, limbs) \
1134    do { mp_limb_t t; \
1135       mpn_rrandom(nn, state, limbs); \
1136       mpn_rrandom(&nn[limbs], state, 1); \
1137       nn[limbs] %= 1024; \
1138       mpn_rrandom(&t, state, 1); \
1139       if (t % 2) \
1140          nn[limbs] = -nn[limbs]; \
1141    } while (0)
1142 
1143 #define mpn_addmod_2expp1_1(r, limbs, c)                    \
1144 do {                                                        \
1145    mp_limb_t __sum = (r)[0] + (mp_limb_signed_t)(c);        \
   /* fast path: top bit of (r)[0] unchanged, so the add cannot   \
      have carried or borrowed into the higher limbs */           \
1147    if ((mp_limb_signed_t)(__sum ^ (r)[0]) >= 0)             \
1148       (r)[0] = __sum;                                       \
1149    else                                                     \
1150    {                                                        \
1151       if ((mp_limb_signed_t) (c) >= 0) mpn_add_1((r),       \
1152           (r), (limbs) + 1, (mp_limb_signed_t) (c));        \
1153       else mpn_sub_1((r), (r), (limbs) + 1,                 \
1154                          -(mp_limb_signed_t) (c));          \
1155    }                                                        \
1156 } while (0)
1157 
1158 #define mpn_mul_2expmod_2expp1 __MPN(mul_2expmod_2expp1)
1159 __GMP_DECLSPEC void mpn_mul_2expmod_2expp1 __GMP_PROTO ((mp_ptr t, mp_ptr i1, mp_size_t limbs, mp_bitcnt_t d));
1160 
1161 #define mpir_revbin __mpir_revbin
1162 __GMP_DECLSPEC mp_limb_t mpir_revbin __GMP_PROTO ((mp_limb_t in, mp_limb_t bits));
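
/* Bit reversal for FFT index permutations: e.g. mpir_revbin(3, 4) is 12,
   since 0011 reversed within 4 bits is 1100.  (A description inferred
   from its use in the FFT code.)  */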
1163 
1164 #define  mpir_fft_adjust_limbs  __mpir_fft_adjust_limbs
1165 __GMP_DECLSPEC mpir_si mpir_fft_adjust_limbs __GMP_PROTO ((mp_size_t limbs));
1166 
1167 #define mpir_fft_combine_bits __mpir_fft_combine_bits
1168 __GMP_DECLSPEC void mpir_fft_combine_bits __GMP_PROTO ((mp_ptr res, const mp_ptr * poly,
1169                 long length, mp_bitcnt_t bits, mp_size_t output_limbs, mp_size_t total_limbs));
1170 
1171 #define mpir_fft_split_bits __mpir_fft_split_bits
1172 __GMP_DECLSPEC mp_size_t mpir_fft_split_bits __GMP_PROTO ((mp_ptr * poly, mp_srcptr limbs,
1173                  mp_size_t total_limbs, mp_bitcnt_t bits, mp_size_t output_limbs));
1174 
1175 #define mpir_fft_adjust __mpir_fft_adjust
1176 __GMP_DECLSPEC void mpir_fft_adjust __GMP_PROTO ((mp_ptr r, mp_ptr i1,
1177                                      mp_size_t i, mp_size_t limbs, mp_bitcnt_t w));
1178 
1179 #define mpir_fft_adjust_sqrt2 __mpir_fft_adjust_sqrt2
1180 __GMP_DECLSPEC void mpir_fft_adjust_sqrt2 __GMP_PROTO ((mp_ptr r, mp_ptr i1,
1181                    mp_size_t i, mp_size_t limbs, mp_bitcnt_t w, mp_ptr temp));
1182 
1183 #define mpir_butterfly_lshB __mpir_butterfly_lshB
1184 __GMP_DECLSPEC void mpir_butterfly_lshB __GMP_PROTO ((mp_ptr t, mp_ptr u, mp_ptr i1,
1185                        mp_ptr i2, mp_size_t limbs, mp_size_t x, mp_size_t y));
1186 
1187 #define mpir_butterfly_rshB __mpir_butterfly_rshB
1188 __GMP_DECLSPEC void mpir_butterfly_rshB __GMP_PROTO ((mp_ptr t, mp_ptr u, mp_ptr i1,
1189                        mp_ptr i2, mp_size_t limbs, mp_size_t x, mp_size_t y));
1190 
1191 #define mpir_fermat_to_mpz __fermat_to_mpz
1192 __GMP_DECLSPEC void mpir_fermat_to_mpz __GMP_PROTO ((mpz_t m, mp_ptr i, mp_size_t limbs));
1193 
1194 #define mpir_fft_butterfly_twiddle __mpir_fft_butterfly_twiddle
1195 __GMP_DECLSPEC void mpir_fft_butterfly_twiddle __GMP_PROTO ((mp_ptr u, mp_ptr v,
1196    mp_ptr s, mp_ptr t, mp_size_t limbs, mp_bitcnt_t b1, mp_bitcnt_t b2));
1197 
1198 #define mpir_ifft_butterfly_twiddle __mpir_ifft_butterfly_twiddle
1199 __GMP_DECLSPEC void mpir_ifft_butterfly_twiddle __GMP_PROTO ((mp_ptr u, mp_ptr v,
1200    mp_ptr s, mp_ptr t, mp_size_t limbs, mp_bitcnt_t b1, mp_bitcnt_t b2));
1201 
1202 #define mpir_fft_butterfly_sqrt2 __mpir_fft_butterfly_sqrt2
1203 __GMP_DECLSPEC void mpir_fft_butterfly_sqrt2 __GMP_PROTO ((mp_ptr s, mp_ptr t,
1204                          mp_ptr i1, mp_ptr i2, mp_size_t i,
1205                                 mp_size_t limbs, mp_bitcnt_t w, mp_ptr temp));
1206 
1207 #define mpir_ifft_butterfly_sqrt2 __mpir_ifft_butterfly_sqrt2
1208 __GMP_DECLSPEC void mpir_ifft_butterfly_sqrt2 __GMP_PROTO ((mp_ptr s, mp_ptr t, mp_ptr i1,
1209    mp_ptr i2, mp_size_t i, mp_size_t limbs, mp_bitcnt_t w, mp_ptr temp));
1210 
1211 #define mpir_fft_butterfly __mpir_fft_butterfly
1212 __GMP_DECLSPEC void mpir_fft_butterfly __GMP_PROTO ((mp_ptr s, mp_ptr t, mp_ptr i1,
1213                      mp_ptr i2, mp_size_t i, mp_size_t limbs, mp_bitcnt_t w));
1214 
1215 #define mpir_ifft_butterfly __mpir_ifft_butterfly
1216 __GMP_DECLSPEC void mpir_ifft_butterfly __GMP_PROTO ((mp_ptr s, mp_ptr t, mp_ptr i1,
1217                      mp_ptr i2, mp_size_t i, mp_size_t limbs, mp_bitcnt_t w));
1218 
1219 #define mpir_fft_combine_limbs __combine_limbs
1220 __GMP_DECLSPEC void mpir_fft_combine_limbs __GMP_PROTO ((mp_ptr res, const mp_ptr * poly, long length,
1221             mp_size_t coeff_limbs, mp_size_t output_limbs, mp_size_t total_limbs));

#define mpir_fft_split_limbs __mpir_fft_split_limbs
__GMP_DECLSPEC mp_size_t mpir_fft_split_limbs __GMP_PROTO ((mp_ptr * poly, mp_srcptr limbs,
            mp_size_t total_limbs, mp_size_t coeff_limbs, mp_size_t output_limbs));

#define mpir_fft_radix2 __mpir_fft_radix2
__GMP_DECLSPEC void mpir_fft_radix2 __GMP_PROTO ((mp_ptr * ii,
                    mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2));

#define mpir_ifft_radix2 __mpir_ifft_radix2
__GMP_DECLSPEC void mpir_ifft_radix2 __GMP_PROTO ((mp_ptr * ii, mp_size_t n,
                                 mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2));

#define mpir_fft_trunc __mpir_fft_trunc
__GMP_DECLSPEC void mpir_fft_trunc __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
                               mp_ptr * t1, mp_ptr * t2, mp_size_t trunc));

#define mpir_ifft_trunc __mpir_ifft_trunc
__GMP_DECLSPEC void mpir_ifft_trunc __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
                               mp_ptr * t1, mp_ptr * t2, mp_size_t trunc));

#define mpir_fft_trunc_sqrt2 __mpir_fft_trunc_sqrt2
__GMP_DECLSPEC void mpir_fft_trunc_sqrt2 __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
            mp_ptr * t1, mp_ptr * t2, mp_ptr * temp, mp_size_t trunc));

#define mpir_ifft_trunc_sqrt2 __mpir_ifft_trunc_sqrt2
__GMP_DECLSPEC void mpir_ifft_trunc_sqrt2 __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
            mp_ptr * t1, mp_ptr * t2, mp_ptr * temp, mp_size_t trunc));

#define mpir_fft_mfa_trunc_sqrt2 __mpir_fft_mfa_trunc_sqrt2
__GMP_DECLSPEC void mpir_fft_mfa_trunc_sqrt2 __GMP_PROTO ((mp_ptr * ii, mp_size_t n,
                       mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
                                mp_ptr * temp, mp_size_t n1, mp_size_t trunc));

#define mpir_ifft_mfa_trunc_sqrt2 __mpir_ifft_mfa_trunc_sqrt2
__GMP_DECLSPEC void mpir_ifft_mfa_trunc_sqrt2 __GMP_PROTO ((mp_ptr * ii, mp_size_t n,
                      mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
                                mp_ptr * temp, mp_size_t n1, mp_size_t trunc));

#define mpir_fft_negacyclic __mpir_fft_negacyclic
__GMP_DECLSPEC void mpir_fft_negacyclic __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
                             mp_ptr * t1, mp_ptr * t2, mp_ptr * temp));

#define mpir_ifft_negacyclic __mpir_ifft_negacyclic
__GMP_DECLSPEC void mpir_ifft_negacyclic __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
                             mp_ptr * t1, mp_ptr * t2, mp_ptr * temp));

#define mpir_fft_mfa_trunc_sqrt2_outer __mpir_fft_mfa_trunc_sqrt2_outer
__GMP_DECLSPEC void mpir_fft_mfa_trunc_sqrt2_outer __GMP_PROTO ((mp_ptr * ii, mp_size_t n,
                      mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
                                mp_ptr * temp, mp_size_t n1, mp_size_t trunc));

#define mpir_ifft_mfa_trunc_sqrt2_outer __mpir_ifft_mfa_trunc_sqrt2_outer
__GMP_DECLSPEC void mpir_ifft_mfa_trunc_sqrt2_outer __GMP_PROTO ((mp_ptr * ii, mp_size_t n,
                        mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
                                mp_ptr * temp, mp_size_t n1, mp_size_t trunc));

#define mpir_fft_mfa_trunc_sqrt2_inner __mpir_fft_mfa_trunc_sqrt2_inner
__GMP_DECLSPEC void mpir_fft_mfa_trunc_sqrt2_inner __GMP_PROTO ((mp_ptr * ii, mp_ptr * jj,
            mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
                mp_ptr * temp, mp_size_t n1, mp_size_t trunc, mp_ptr tt));

#define mpir_fft_mulmod_2expp1 __mpir_fft_mulmod_2expp1
__GMP_DECLSPEC void mpir_fft_mulmod_2expp1(mp_ptr r1, mp_srcptr i1, mp_srcptr i2,
                 mp_size_t r_limbs, mp_bitcnt_t depth, mp_bitcnt_t w);

#define mpir_fft_trunc1 __mpir_fft_trunc1
__GMP_DECLSPEC void mpir_fft_trunc1 __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
                               mp_ptr * t1, mp_ptr * t2, mp_size_t trunc));

#define mpir_ifft_trunc1 __mpir_ifft_trunc1
__GMP_DECLSPEC void mpir_ifft_trunc1 __GMP_PROTO ((mp_ptr * ii, mp_size_t n, mp_bitcnt_t w,
                               mp_ptr * t1, mp_ptr * t2, mp_size_t trunc));

#define mpir_fft_radix2_twiddle __mpir_fft_radix2_twiddle
__GMP_DECLSPEC void mpir_fft_radix2_twiddle __GMP_PROTO ((mp_ptr * ii, mp_size_t is,
      mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
                            mp_size_t ws, mp_size_t r, mp_size_t c, mp_size_t rs));

#define mpir_ifft_radix2_twiddle __mpir_ifft_radix2_twiddle
__GMP_DECLSPEC void mpir_ifft_radix2_twiddle __GMP_PROTO ((mp_ptr * ii, mp_size_t is,
        mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
                            mp_size_t ws, mp_size_t r, mp_size_t c, mp_size_t rs));

#define mpir_fft_trunc1_twiddle __mpir_fft_trunc1_twiddle
__GMP_DECLSPEC void mpir_fft_trunc1_twiddle __GMP_PROTO ((mp_ptr * ii, mp_size_t is,
        mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
           mp_size_t ws, mp_size_t r, mp_size_t c, mp_size_t rs, mp_size_t trunc));

#define mpir_ifft_trunc1_twiddle __mpir_ifft_trunc1_twiddle
__GMP_DECLSPEC void mpir_ifft_trunc1_twiddle __GMP_PROTO ((mp_ptr * ii, mp_size_t is,
        mp_size_t n, mp_bitcnt_t w, mp_ptr * t1, mp_ptr * t2,
           mp_size_t ws, mp_size_t r, mp_size_t c, mp_size_t rs, mp_size_t trunc));

#define mpir_fft_naive_convolution_1 __mpir_fft_naive_convolution_1
__GMP_DECLSPEC void mpir_fft_naive_convolution_1 __GMP_PROTO ((mp_ptr r, mp_srcptr ii,
                                                     mp_srcptr jj, mp_size_t m));

#define mpn_mulmod_2expp1_basecase __MPN(mulmod_2expp1_basecase)
__GMP_DECLSPEC int mpn_mulmod_2expp1_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, int, mpir_ui, mp_ptr));

typedef __gmp_randstate_struct *gmp_randstate_ptr;
typedef const __gmp_randstate_struct *gmp_randstate_srcptr;

#define mpn_normmod_2expp1 __MPN(normmod_2expp1)
__GMP_DECLSPEC void mpn_normmod_2expp1 __GMP_PROTO ((mp_ptr t, mp_size_t limbs));

#define mpn_div_2expmod_2expp1 __MPN(div_2expmod_2expp1)
__GMP_DECLSPEC void mpn_div_2expmod_2expp1 __GMP_PROTO ((mp_ptr t, mp_srcptr i1, mp_size_t limbs, mp_bitcnt_t d));

#define mpn_mul_trunc_sqrt2 __MPN(mul_trunc_sqrt2)
__GMP_DECLSPEC void mpn_mul_trunc_sqrt2 __GMP_PROTO ((mp_ptr r1, mp_srcptr i1, mp_size_t n1,
                  mp_srcptr i2, mp_size_t n2, mp_bitcnt_t depth, mp_bitcnt_t w));

#define mpn_mul_mfa_trunc_sqrt2 __MPN(mul_mfa_trunc_sqrt2)
__GMP_DECLSPEC void mpn_mul_mfa_trunc_sqrt2 __GMP_PROTO ((mp_ptr r1, mp_srcptr i1, mp_size_t n1,
                  mp_srcptr i2, mp_size_t n2, mp_bitcnt_t depth, mp_bitcnt_t w));

/* Pseudo-random number generator function pointers structure.  */
typedef struct {
  void (*randseed_fn) __GMP_PROTO ((gmp_randstate_t rstate, mpz_srcptr seed));
  void (*randget_fn) __GMP_PROTO ((gmp_randstate_t rstate, mp_ptr dest, mpir_ui nbits));
  void (*randclear_fn) __GMP_PROTO ((gmp_randstate_t rstate));
  void (*randiset_fn) __GMP_PROTO ((gmp_randstate_ptr, gmp_randstate_srcptr));
} gmp_randfnptr_t;

/* Macro to obtain a void pointer to the function pointers structure.  */
#define RNG_FNPTR(rstate) ((rstate)->_mp_algdata._mp_lc)

/* Macro to obtain a pointer to the generator's state.
   When used as an lvalue the rvalue needs to be cast to mp_ptr.  */
#define RNG_STATE(rstate) ((rstate)->_mp_seed->_mp_d)

/* Write a given number of random bits to rp.  */
#define _gmp_rand(rp, state, bits)                              \
  do {                                                          \
    gmp_randstate_ptr  __rstate = (state);                      \
    (*((gmp_randfnptr_t *) RNG_FNPTR (__rstate))->randget_fn)   \
       (__rstate, rp, bits);                                    \
  } while (0)
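
/* Usage sketch (illustrative only, not library code; variable names here
   are hypothetical): fill a small limb buffer with random bits from an
   initialized state.

     gmp_randstate_t  rstate;
     mp_limb_t        buf[2];
     gmp_randinit_default (rstate);
     _gmp_rand (buf, rstate, 2 * GMP_NUMB_BITS);
     gmp_randclear (rstate);
*/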

__GMP_DECLSPEC void __gmp_randinit_mt_noseed __GMP_PROTO ((gmp_randstate_t));


/* __gmp_rands is the global state for the old-style random functions, and
   is also used in the test programs (hence the __GMP_DECLSPEC).

   There's no seeding here, so mpz_random etc will generate the same
   sequence every time.  This is not unlike the C library random functions
   if you don't seed them, so perhaps it's acceptable.  Digging up a seed
   from /dev/random or the like would work on many systems, but might
   encourage a false confidence, since it'd be pretty much impossible to do
   something that would work reliably everywhere.  In any case the new style
   functions are recommended to applications which care about randomness, so
   the old functions aren't too important.  */

__GMP_DECLSPEC extern char             __gmp_rands_initialized;
__GMP_DECLSPEC extern gmp_randstate_t  __gmp_rands;

#define RANDS                                       \
  ((__gmp_rands_initialized ? 0                     \
    : (__gmp_rands_initialized = 1,                 \
       __gmp_randinit_mt_noseed (__gmp_rands), 0)), \
   __gmp_rands)

/* this is used by the test programs, to free memory */
#define RANDS_CLEAR()                   \
  do {                                  \
    if (__gmp_rands_initialized)        \
      {                                 \
        __gmp_rands_initialized = 0;    \
        gmp_randclear (__gmp_rands);    \
      }                                 \
  } while (0)
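
/* Sketch of the intended pattern (illustrative only): RANDS initializes
   the shared state lazily on first use, and test programs call
   RANDS_CLEAR at exit to release it.

     mp_limb_t  r;
     _gmp_rand (&r, RANDS, GMP_NUMB_BITS);
     ...
     RANDS_CLEAR ();
*/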

#define mpn_toom42_mulmid_itch(n) (3*(n) + 64)

/* kara uses n+1 limbs of temporary space and then recurses with the balance,
   so need (n+1) + (ceil(n/2)+1) + (ceil(n/4)+1) + ...  This can be solved to
   2n + o(n).  Since n is very limited, o(n) in practice could be around 15.
   For now, assume n is arbitrarily large.  */
#define MPN_KARA_MUL_N_TSIZE(n)   (2*(n) + 2*GMP_LIMB_BITS)
#define MPN_KARA_SQR_N_TSIZE(n)   (2*(n) + 2*GMP_LIMB_BITS)
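
/* Allocation sketch (illustrative only, hypothetical variable names):
   scratch for mpn_kara_mul_n on n-limb operands, sized with the formula
   above and obtained through the TMP scheme used throughout this file.

     mp_ptr ws;
     TMP_DECL;
     TMP_MARK;
     ws = TMP_ALLOC_LIMBS (MPN_KARA_MUL_N_TSIZE (n));
     mpn_kara_mul_n (prodp, up, vp, n, ws);
     TMP_FREE;
*/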

/* toom3 uses 2n + 2n/3 + o(n) limbs of temporary space if mpn_sublsh1_n is
   unavailable, but just 2n + o(n) if mpn_sublsh1_n is available.  It is hard
   to pin down the value of o(n), since it is a complex function of
   MUL_TOOM3_THRESHOLD and n.  Normally toom3 is used between kara and fft; in
   that case o(n) will be really limited.  If toom3 is used for arbitrarily
   large operands, o(n) will be larger.  These definitions handle operands of
   up to 8956264246117233 limbs.  A single multiplication using toom3 on the
   fastest hardware currently (2003) would need 100 million years, which
   suggests that these limits are acceptable.  */
#if WANT_FFT
#if HAVE_NATIVE_mpn_sublsh1_n
#define MPN_TOOM3_MUL_N_TSIZE(n)  (2*(n) + 63)
#define MPN_TOOM3_MUL_TSIZE(n)    (3*(n) + 63)
#define MPN_TOOM3_SQR_N_TSIZE(n)  (2*(n) + 63)
#else
#define MPN_TOOM3_MUL_N_TSIZE(n)  (2*(n) + 2*(n/3) + 63)
#define MPN_TOOM3_MUL_TSIZE(n)    (3*(n) + 3*(n/3) + 63)
#define MPN_TOOM3_SQR_N_TSIZE(n)  (2*(n) + 2*(n/3) + 63)
#endif
#else /* WANT_FFT */
#if HAVE_NATIVE_mpn_sublsh1_n
#define MPN_TOOM3_MUL_N_TSIZE(n)  (2*(n) + 255)
#define MPN_TOOM3_MUL_TSIZE(n)    (3*(n) + 255)
#define MPN_TOOM3_SQR_N_TSIZE(n)  (2*(n) + 255)
#else
#define MPN_TOOM3_MUL_N_TSIZE(n)  (2*(n) + 2*(n/3) + 255)
#define MPN_TOOM3_MUL_TSIZE(n)    (3*(n) + 3*(n/3) + 255)
#define MPN_TOOM3_SQR_N_TSIZE(n)  (2*(n) + 2*(n/3) + 255)
#endif
#define MPN_TOOM3_MAX_N 285405
#endif /* WANT_FFT */

/* need 2 so that n2>=1 */
#if defined(HAVE_NATIVE_mpn_karaadd) || defined(HAVE_NATIVE_mpn_karasub)
#define MPN_KARA_MUL_N_MINSIZE    8
#define MPN_KARA_SQR_N_MINSIZE    8
#else
#define MPN_KARA_MUL_N_MINSIZE    2
#define MPN_KARA_SQR_N_MINSIZE    2
#endif

/* Need l>=1, ls>=1, and 2*ls > l (the latter for the tD MPN_INCR_U) */
#define MPN_TOOM3_MUL_N_MINSIZE   17
#define MPN_TOOM4_MUL_N_MINSIZE   32
#define MPN_TOOM8H_MUL_MINSIZE    86
#define MPN_TOOM3_SQR_N_MINSIZE   17
#define MPN_TOOM4_SQR_N_MINSIZE   32
#define MPN_TOOM8_SQR_N_MINSIZE   58
#define MPN_FFT_MUL_N_MINSIZE     64

#define mpn_sqr_diagonal __MPN(sqr_diagonal)
__GMP_DECLSPEC void mpn_sqr_diagonal _PROTO ((mp_ptr, mp_srcptr, mp_size_t));

#define mpn_kara_mul_n  __MPN(kara_mul_n)
__GMP_DECLSPEC void mpn_kara_mul_n _PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));

#define mpn_kara_sqr_n  __MPN(kara_sqr_n)
__GMP_DECLSPEC void mpn_kara_sqr_n _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));

#define mpn_toom3_mul_n  __MPN(toom3_mul_n)
__GMP_DECLSPEC void mpn_toom3_mul_n _PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));

#define mpn_toom3_mul  __MPN(toom3_mul)
__GMP_DECLSPEC void mpn_toom3_mul _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr,
                                                           mp_size_t, mp_ptr));

#define mpn_toom3_interpolate __MPN(toom3_interpolate)
__GMP_DECLSPEC void mpn_toom3_interpolate _PROTO ((mp_ptr c, mp_ptr v1, mp_ptr v2, mp_ptr vm1,
                     mp_ptr vinf, mp_size_t k, mp_size_t rr2, int sa,
                                          mp_limb_t vinf0, mp_ptr ws));

#define mpn_toom32_mul __MPN(toom32_mul)
__GMP_DECLSPEC void mpn_toom32_mul _PROTO ((mp_ptr c, mp_srcptr a, mp_size_t an, mp_srcptr b,
                                                      mp_size_t bn, mp_ptr t));

#define mpn_toom42_mul  __MPN(toom42_mul)
__GMP_DECLSPEC void mpn_toom42_mul _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));


#define mpn_toom4_mul_n  __MPN(toom4_mul_n)
__GMP_DECLSPEC void mpn_toom4_mul_n _PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));

#define mpn_toom4_mul  __MPN(toom4_mul)
__GMP_DECLSPEC void mpn_toom4_mul _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr,
                                                           mp_size_t));

#define mpn_toom53_mul __MPN(toom53_mul)
__GMP_DECLSPEC void mpn_toom53_mul _PROTO ((mp_ptr rp, mp_srcptr up, mp_size_t un,
                                           mp_srcptr vp, mp_size_t vn));

#define mpn_toom4_interpolate __MPN(toom4_interpolate)
__GMP_DECLSPEC void mpn_toom4_interpolate _PROTO ((mp_ptr rp, mp_size_t * rpn, mp_size_t sn,
               mp_ptr tp, mp_size_t s4, mp_size_t n4, mp_size_t n6,
               mp_limb_t r30));

#define mpn_toom_eval_dgr3_pm1  __MPN(toom_eval_dgr3_pm1)
__GMP_DECLSPEC int mpn_toom_eval_dgr3_pm1 _PROTO ((mp_ptr xp1, mp_ptr xm1,
            mp_srcptr xp, mp_size_t n, mp_size_t x3n, mp_ptr tp));

#define mpn_toom_eval_dgr3_pm2  __MPN(toom_eval_dgr3_pm2)
__GMP_DECLSPEC int mpn_toom_eval_dgr3_pm2 _PROTO ((mp_ptr xp2, mp_ptr xm2,
            mp_srcptr xp, mp_size_t n, mp_size_t x3n, mp_ptr tp));

#define mpn_toom_eval_pm1  __MPN(toom_eval_pm1)
__GMP_DECLSPEC int mpn_toom_eval_pm1 _PROTO ((mp_ptr xp1, mp_ptr xm1, unsigned k,
           mp_srcptr xp, mp_size_t n, mp_size_t hn, mp_ptr tp));

#define mpn_toom_eval_pm2  __MPN(toom_eval_pm2)
__GMP_DECLSPEC int mpn_toom_eval_pm2 _PROTO ((mp_ptr xp2, mp_ptr xm2, unsigned k,
           mp_srcptr xp, mp_size_t n, mp_size_t hn, mp_ptr tp));

#define mpn_toom_eval_pm2exp  __MPN(toom_eval_pm2exp)
__GMP_DECLSPEC int mpn_toom_eval_pm2exp _PROTO ((mp_ptr xp2, mp_ptr xm2, unsigned k,
              mp_srcptr xp, mp_size_t n, mp_size_t hn, unsigned shift,
              mp_ptr tp));

#define mpn_toom_eval_pm2rexp  __MPN(toom_eval_pm2rexp)
__GMP_DECLSPEC int mpn_toom_eval_pm2rexp _PROTO ((mp_ptr rp, mp_ptr rm,
              unsigned int q, mp_srcptr ap, mp_size_t n, mp_size_t t,
              unsigned int s, mp_ptr ws));

#define mpn_toom_interpolate_16pts  __MPN(toom_interpolate_16pts)
__GMP_DECLSPEC void mpn_toom_interpolate_16pts _PROTO ((mp_ptr pp, mp_ptr r1, mp_ptr r3,
                   mp_ptr r5, mp_ptr r7, mp_size_t n, mp_size_t spt,
                         int half, mp_ptr wsi));

#define mpn_toom_couple_handling  __MPN(toom_couple_handling)
__GMP_DECLSPEC void mpn_toom_couple_handling _PROTO ((mp_ptr pp, mp_size_t n, mp_ptr np,
              int nsign, mp_size_t off, int ps, int ns));

#define mpn_toom8h_mul  __MPN(toom8h_mul)
__GMP_DECLSPEC void mpn_toom8h_mul _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr,
                                                             mp_size_t));

#define mpn_toom3_sqr_n  __MPN(toom3_sqr_n)
__GMP_DECLSPEC void mpn_toom3_sqr_n _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));

#define mpn_toom4_sqr_n  __MPN(toom4_sqr_n)
__GMP_DECLSPEC void mpn_toom4_sqr_n _PROTO ((mp_ptr, mp_srcptr, mp_size_t));

#define mpn_toom8_sqr_n  __MPN(toom8_sqr_n)
__GMP_DECLSPEC void mpn_toom8_sqr_n _PROTO ((mp_ptr, mp_srcptr, mp_size_t));

#define mpn_toom42_mulmid __MPN(toom42_mulmid)
__GMP_DECLSPEC void mpn_toom42_mulmid __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));

#define mpn_mulmod_Bexpp1_fft __MPN(mulmod_Bexpp1_fft)
__GMP_DECLSPEC int mpn_mulmod_Bexpp1_fft _PROTO ((mp_ptr op, mp_size_t pl,
             mp_srcptr n, mp_size_t nl,
             mp_srcptr m, mp_size_t ml));

#define DC_DIVAPPR_Q_N_ITCH(n) ((n)*4 + 64)
#define DC_BDIV_Q_N_ITCH(n) ((n)/2 + 2)
#define DC_BDIV_QR_N_ITCH(n) (n)

/* #define mpn_tdiv_q  __MPN(tdiv_q) */
/* void mpn_tdiv_q _PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t)); */

#define mpz_divexact_gcd  __gmpz_divexact_gcd
__GMP_DECLSPEC void mpz_divexact_gcd _PROTO ((mpz_ptr q, mpz_srcptr a, mpz_srcptr d));

#define mpz_prodlimbs  __gmpz_prodlimbs
__GMP_DECLSPEC mp_size_t mpz_prodlimbs (mpz_ptr, mp_ptr, mp_size_t);

#define mpz_oddfac_1  __gmpz_oddfac_1
__GMP_DECLSPEC void mpz_oddfac_1 (mpz_ptr, mp_limb_t, unsigned);

#define mpz_inp_str_nowhite __gmpz_inp_str_nowhite
#ifdef _GMP_H_HAVE_FILE
__GMP_DECLSPEC size_t mpz_inp_str_nowhite _PROTO ((mpz_ptr x, FILE *stream, int base, int c, size_t nread));
#endif

#define mpn_divisible_p __MPN(divisible_p)
__GMP_DECLSPEC int mpn_divisible_p _PROTO ((mp_srcptr ap, mp_size_t asize,
                                 mp_srcptr dp, mp_size_t dsize)) __GMP_ATTRIBUTE_PURE;

#define mpn_rootrem __MPN(rootrem)
__GMP_DECLSPEC mp_size_t mpn_rootrem _PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));

#define mpn_rootrem_basecase __MPN(rootrem_basecase)
__GMP_DECLSPEC mp_size_t mpn_rootrem_basecase _PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));

#if ! defined (MPN_COPY_INCR) && HAVE_NATIVE_mpn_copyi
#define MPN_COPY_INCR(dst, src, size)                   \
  do {                                                  \
    ASSERT ((size) >= 0);                               \
    ASSERT (MPN_SAME_OR_INCR_P (dst, src, size));       \
    mpn_copyi (dst, src, size);                         \
  } while (0)
#endif

/* Copy N limbs from SRC to DST incrementing, N==0 allowed.  */
#if ! defined (MPN_COPY_INCR)
#define MPN_COPY_INCR(dst, src, n)                      \
  do {                                                  \
    ASSERT ((n) >= 0);                                  \
    ASSERT (MPN_SAME_OR_INCR_P (dst, src, n));          \
    if ((n) != 0)                                       \
      {                                                 \
        mp_size_t __n = (n) - 1;                        \
        mp_ptr __dst = (dst);                           \
        mp_srcptr __src = (src);                        \
        mp_limb_t __x;                                  \
        __x = *__src++;                                 \
        if (__n != 0)                                   \
          {                                             \
            do                                          \
              {                                         \
                *__dst++ = __x;                         \
                __x = *__src++;                         \
              }                                         \
            while (--__n);                              \
          }                                             \
        *__dst++ = __x;                                 \
      }                                                 \
  } while (0)
#endif

#if ! defined (MPN_COPY_DECR) && HAVE_NATIVE_mpn_copyd
#define MPN_COPY_DECR(dst, src, size)                   \
  do {                                                  \
    ASSERT ((size) >= 0);                               \
    ASSERT (MPN_SAME_OR_DECR_P (dst, src, size));       \
    mpn_copyd (dst, src, size);                         \
  } while (0)
#endif

/* Copy N limbs from SRC to DST decrementing, N==0 allowed.  */
#if ! defined (MPN_COPY_DECR)
#define MPN_COPY_DECR(dst, src, n)                      \
  do {                                                  \
    ASSERT ((n) >= 0);                                  \
    ASSERT (MPN_SAME_OR_DECR_P (dst, src, n));          \
    if ((n) != 0)                                       \
      {                                                 \
        mp_size_t __n = (n) - 1;                        \
        mp_ptr __dst = (dst) + __n;                     \
        mp_srcptr __src = (src) + __n;                  \
        mp_limb_t __x;                                  \
        __x = *__src--;                                 \
        if (__n != 0)                                   \
          {                                             \
            do                                          \
              {                                         \
                *__dst-- = __x;                         \
                __x = *__src--;                         \
              }                                         \
            while (--__n);                              \
          }                                             \
        *__dst-- = __x;                                 \
      }                                                 \
  } while (0)
#endif


#ifndef MPN_COPY
#define MPN_COPY(d,s,n)                         \
  do {                                          \
    ASSERT (MPN_SAME_OR_SEPARATE_P (d, s, n));  \
    MPN_COPY_INCR (d, s, n);                    \
  } while (0)
#endif


/* Set {dst,size} to the limbs of {src,size} in reverse order. */
#define MPN_REVERSE(dst, src, size)                     \
  do {                                                  \
    mp_ptr     __dst = (dst);                           \
    mp_size_t  __size = (size);                         \
    mp_srcptr  __src = (src) + __size - 1;              \
    mp_size_t  __i;                                     \
    ASSERT ((size) >= 0);                               \
    ASSERT (! MPN_OVERLAP_P (dst, size, src, size));    \
    for (__i = 0; __i < __size; __i++)                  \
      {                                                 \
        *__dst = *__src;                                \
        __dst++;                                        \
        __src--;                                        \
      }                                                 \
  } while (0)



/* On the x86s repe/scasl doesn't seem useful, since it takes many cycles to
   start up and would need to strip a lot of zeros before it'd be faster
   than a simple cmpl loop.  Here are some times in cycles for
   std/repe/scasl/cld and cld/repe/scasl (the latter would be for stripping
   low zeros).

                std   cld
           P5    18    16
           P6    46    38
           K6    36    13
           K7    21    20
*/
#ifndef MPN_NORMALIZE
#define MPN_NORMALIZE(DST, NLIMBS)              \
  do {                                          \
    while ((NLIMBS) > 0)                        \
      {                                         \
        if ((DST)[(NLIMBS) - 1] != 0)           \
          break;                                \
        (NLIMBS)--;                             \
      }                                         \
  } while (0)
#endif
#ifndef MPN_NORMALIZE_NOT_ZERO
#define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS)     \
  do {                                          \
    ASSERT ((NLIMBS) >= 1);                     \
    while (1)                                   \
      {                                         \
        if ((DST)[(NLIMBS) - 1] != 0)           \
          break;                                \
        (NLIMBS)--;                             \
      }                                         \
  } while (0)
#endif
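
/* Usage sketch (illustrative only, hypothetical variables): after a
   subtraction the result may have high zero limbs, so the size is
   normalized before further use.

     mp_size_t  rsize = n;
     mpn_sub_n (rp, up, vp, n);
     MPN_NORMALIZE (rp, rsize);
*/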

/* Strip least significant zero limbs from {ptr,size} by incrementing ptr
   and decrementing size.  low should be ptr[0], and will be the new ptr[0]
   on returning.  The number in {ptr,size} must be non-zero, ie. size!=0 and
   somewhere a non-zero limb.  */
#define MPN_STRIP_LOW_ZEROS_NOT_ZERO(ptr, size, low)    \
  do {                                                  \
    ASSERT ((size) >= 1);                               \
    ASSERT ((low) == (ptr)[0]);                         \
                                                        \
    while ((low) == 0)                                  \
      {                                                 \
        (size)--;                                       \
        ASSERT ((size) >= 1);                           \
        (ptr)++;                                        \
        (low) = *(ptr);                                 \
      }                                                 \
  } while (0)
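
/* Usage sketch (illustrative only): skip the low zero limbs of a known
   non-zero number, keeping low equal to the current ptr[0] throughout.

     mp_srcptr  p = ptr;
     mp_size_t  n = size;
     mp_limb_t  plow = p[0];
     MPN_STRIP_LOW_ZEROS_NOT_ZERO (p, n, plow);
     ASSERT (plow == p[0] && plow != 0);
*/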

/* Initialize X of type mpz_t with space for NLIMBS limbs.  X should be a
   temporary variable; it will be automatically cleared out at function
   return.  We use __x here to make it possible to accept both mpz_ptr and
   mpz_t arguments.  */
#define MPZ_TMP_INIT(X, NLIMBS)                                         \
  do {                                                                  \
    mpz_ptr __x = (X);                                                  \
    ASSERT ((NLIMBS) >= 1);                                             \
    __x->_mp_alloc = (NLIMBS);                                          \
    __x->_mp_d = (mp_ptr) TMP_ALLOC ((NLIMBS) * BYTES_PER_MP_LIMB);     \
  } while (0)
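
/* Usage sketch (illustrative only): a temporary mpz inside a function,
   with limbs drawn from TMP_ALLOC, so no mpz_clear is needed provided the
   value is kept within the allocated size.

     mpz_t  t;
     TMP_DECL;
     TMP_MARK;
     MPZ_TMP_INIT (t, 2*n + 1);
     ...
     TMP_FREE;
*/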


/* Realloc for an mpz_t Z if it has less than N limbs.  */
#define MPZ_REALLOC(z,n) (UNLIKELY ((n) > ALLOC(z))     \
                          ? (mp_ptr) _mpz_realloc(z,n)  \
                          : PTR(z))

#define MPZ_NEWALLOC MPZ_REALLOC

#define MPZ_EQUAL_1_P(z)  (SIZ(z)==1 && PTR(z)[0] == 1)


/* MPN_FIB2_SIZE(n) is the size in limbs required by mpn_fib2_ui for fp and
   f1p.

   From Knuth vol 1 section 1.2.8, F[n] = phi^n/sqrt(5) rounded to the
   nearest integer, where phi=(1+sqrt(5))/2 is the golden ratio.  So the
   number of bits required is n*log_2((1+sqrt(5))/2) = n*0.6942419.

   The multiplier used is 23/32=0.71875 for efficient calculation on CPUs
   without good floating point.  There's +2 for rounding up, and a further
   +2 since at the last step x limbs are doubled into a 2x+1 limb region
   whereas the actual F[2k] value might be only 2x-1 limbs.

   Note that a division is done first, since on a 32-bit system it's at
   least conceivable to go right up to n==ULONG_MAX.  (F[2^32-1] would be
   about 380Mbytes, plus temporary workspace of about 1.2Gbytes here and
   whatever a multiply of two 190Mbyte numbers takes.)

   Enhancement: When GMP_NUMB_BITS is not a power of 2 the division could be
   worked into the multiplier.  */

#define MPN_FIB2_SIZE(n) \
  ((mp_size_t) ((n) / 32 * 23 / GMP_NUMB_BITS) + 4)
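
/* For instance with GMP_NUMB_BITS==64 and n==100 this gives
   ((100/32)*23)/64 + 4 = 5 limbs, comfortably above the 2 limbs actually
   needed by F[100], which is about 69 bits.  */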


/* FIB_TABLE(n) returns the Fibonacci number F[n].  Must have n in the range
   -1 <= n <= FIB_TABLE_LIMIT.

   FIB_TABLE_LUCNUM_LIMIT is the largest n for which L[n] =
   F[n] + 2*F[n-1] fits in a limb.  */

__GMP_DECLSPEC extern const mp_limb_t __gmp_fib_table[];
#define FIB_TABLE(n)  (__gmp_fib_table[(n)+1])

extern const mp_limb_t __gmp_oddfac_table[];
extern const mp_limb_t __gmp_odd2fac_table[];
extern const unsigned char __gmp_fac2cnt_table[];
extern const mp_limb_t __gmp_limbroots_table[];

/* n^log <= GMP_NUMB_MAX, ie. a limb can store log factors, each less than
   n; the result is capped at 8, the size of __gmp_limbroots_table.  */
static inline unsigned
log_n_max (mp_limb_t n)
{
  unsigned log;
  for (log = 8; n > __gmp_limbroots_table[log - 1]; log--);
  return log;
}

#define SIEVESIZE 512		/* FIXME: Allow gmp_init_primesieve to choose */
typedef struct
{
  mpir_ui d;		   /* current index in s[] */
  mpir_ui s0;		   /* number corresponding to s[0] */
  mpir_ui sqrt_s0;	   /* misnomer for sqrt(s[SIEVESIZE-1]) */
  unsigned char s[SIEVESIZE + 1];  /* sieve table */
} gmp_primesieve_t;

#define gmp_init_primesieve __gmp_init_primesieve
__GMP_DECLSPEC void gmp_init_primesieve (gmp_primesieve_t *);

#define gmp_nextprime __gmp_nextprime
__GMP_DECLSPEC mpir_ui gmp_nextprime (gmp_primesieve_t *);

#define gmp_primesieve __gmp_primesieve
__GMP_DECLSPEC mp_limb_t gmp_primesieve (mp_ptr, mp_limb_t);
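
/* Usage sketch (illustrative only; the exact first value returned is up
   to the sieve implementation): enumerate small primes in order.

     gmp_primesieve_t  ps;
     mpir_ui           p;
     gmp_init_primesieve (&ps);
     for (p = gmp_nextprime (&ps); p < 100; p = gmp_nextprime (&ps))
       use (p);
*/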

/* For a threshold between algorithms A and B, size>=thresh is where B
   should be used.  Special value MP_SIZE_T_MAX means only ever use A, or
   value 0 means only ever use B.  The tests for these special values will
   be compile-time constants, so the compiler should be able to eliminate
   the code for the unwanted algorithm.  */

#define ABOVE_THRESHOLD(size,thresh)    \
  ((thresh) == 0                        \
   || ((thresh) != MP_SIZE_T_MAX        \
       && (size) >= (thresh)))
#define BELOW_THRESHOLD(size,thresh)  (! ABOVE_THRESHOLD (size, thresh))
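
/* Dispatch sketch (illustrative only, hypothetical variables): selecting
   between basecase and karatsuba multiplication by operand size.

     if (BELOW_THRESHOLD (n, MUL_KARATSUBA_THRESHOLD))
       mpn_mul_basecase (prodp, up, n, vp, n);
     else
       mpn_kara_mul_n (prodp, up, vp, n, ws);
*/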

/* If MUL_KARATSUBA_THRESHOLD is not already defined, define it to a
   value which is good on most machines.  */
#ifndef MUL_KARATSUBA_THRESHOLD
#define MUL_KARATSUBA_THRESHOLD 32
#endif

#ifndef SQR_KARATSUBA_THRESHOLD
#define SQR_KARATSUBA_THRESHOLD 32
#endif

/* If MUL_TOOM3_THRESHOLD is not already defined, define it to a
   value which is good on most machines.  */
#ifndef MUL_TOOM3_THRESHOLD
#define MUL_TOOM3_THRESHOLD 128
#endif

#ifndef MUL_TOOM4_THRESHOLD
#define MUL_TOOM4_THRESHOLD 300
#endif

#ifndef MULMID_TOOM42_THRESHOLD
#define MULMID_TOOM42_THRESHOLD 36
#endif

#ifndef MUL_TOOM8H_THRESHOLD
#define MUL_TOOM8H_THRESHOLD 401
#endif

#ifndef SQR_TOOM3_THRESHOLD
#define SQR_TOOM3_THRESHOLD 128
#endif

#ifndef SQR_TOOM4_THRESHOLD
#define SQR_TOOM4_THRESHOLD 300
#endif

#ifndef SQR_TOOM8_THRESHOLD
#define SQR_TOOM8_THRESHOLD 401
#endif

#ifndef MULLOW_BASECASE_THRESHOLD
#define MULLOW_BASECASE_THRESHOLD	8
#endif

#ifndef MULLOW_DC_THRESHOLD
#define MULLOW_DC_THRESHOLD	32
#endif

#ifndef MULLOW_MUL_THRESHOLD
#define MULLOW_MUL_THRESHOLD	8192
#endif

#ifndef MULHIGH_BASECASE_THRESHOLD
#define MULHIGH_BASECASE_THRESHOLD	16
#endif

#ifndef MULHIGH_DC_THRESHOLD
#define MULHIGH_DC_THRESHOLD	32
#endif

#ifndef MULHIGH_MUL_THRESHOLD
#define MULHIGH_MUL_THRESHOLD	8192
#endif

#ifndef MULMOD_2EXPM1_THRESHOLD
#define MULMOD_2EXPM1_THRESHOLD	16
#endif

#ifndef FAC_UI_THRESHOLD
#define FAC_UI_THRESHOLD	8192
#endif

#ifndef ROOTREM_THRESHOLD
#define ROOTREM_THRESHOLD	8
#endif

#ifndef DIVREM_HENSEL_QR_1_THRESHOLD
#define DIVREM_HENSEL_QR_1_THRESHOLD 8
#endif

#ifndef RSH_DIVREM_HENSEL_QR_1_THRESHOLD
#define RSH_DIVREM_HENSEL_QR_1_THRESHOLD 8
#endif

#ifndef DIVREM_EUCLID_HENSEL_THRESHOLD
#define DIVREM_EUCLID_HENSEL_THRESHOLD 32
#endif

#ifndef MOD_1_1_THRESHOLD
#define MOD_1_1_THRESHOLD	16
#endif

#ifndef MOD_1_2_THRESHOLD
#define MOD_1_2_THRESHOLD	32
#endif

#ifndef MOD_1_3_THRESHOLD
#define MOD_1_3_THRESHOLD	64
#endif

/* MUL_KARATSUBA_THRESHOLD_LIMIT is the maximum for MUL_KARATSUBA_THRESHOLD.
   In a normal build MUL_KARATSUBA_THRESHOLD is a constant and we use that.
   In a fat binary or tune program build MUL_KARATSUBA_THRESHOLD is a
   variable and a separate hard limit will have been defined.  Similarly for
   TOOM3.  */
#ifndef MUL_KARATSUBA_THRESHOLD_LIMIT
#define MUL_KARATSUBA_THRESHOLD_LIMIT  MUL_KARATSUBA_THRESHOLD
#endif
#ifndef MUL_TOOM3_THRESHOLD_LIMIT
#define MUL_TOOM3_THRESHOLD_LIMIT  MUL_TOOM3_THRESHOLD
#endif
#ifndef MUL_TOOM4_THRESHOLD_LIMIT
#define MUL_TOOM4_THRESHOLD_LIMIT  MUL_TOOM4_THRESHOLD
#endif
#ifndef MUL_TOOM8H_THRESHOLD_LIMIT
#define MUL_TOOM8H_THRESHOLD_LIMIT  MUL_TOOM8H_THRESHOLD
#endif
#ifndef MULLOW_BASECASE_THRESHOLD_LIMIT
#define MULLOW_BASECASE_THRESHOLD_LIMIT  MULLOW_BASECASE_THRESHOLD
#endif

/* SQR_BASECASE_THRESHOLD is where mpn_sqr_basecase should take over from
   mpn_mul_basecase in mpn_sqr_n.  Default is to use mpn_sqr_basecase
   always.  (Note that we certainly always want it if there's a native
   assembler mpn_sqr_basecase.)

   If it turns out that mpn_kara_sqr_n becomes faster than mpn_mul_basecase
   before mpn_sqr_basecase does, then SQR_BASECASE_THRESHOLD is the
   karatsuba threshold and SQR_KARATSUBA_THRESHOLD is 0.  This oddity arises
   more or less because SQR_KARATSUBA_THRESHOLD represents the size up to
   which mpn_sqr_basecase should be used, and that may be never.  */

#ifndef SQR_BASECASE_THRESHOLD
#define SQR_BASECASE_THRESHOLD 0
#endif

#ifndef SQR_KARATSUBA_THRESHOLD
#define SQR_KARATSUBA_THRESHOLD (2*MUL_KARATSUBA_THRESHOLD)
#endif

#ifndef SQR_TOOM3_THRESHOLD
#define SQR_TOOM3_THRESHOLD 128
#endif

#ifndef SQR_TOOM4_THRESHOLD
#define SQR_TOOM4_THRESHOLD 300
#endif

#ifndef SQR_TOOM8_THRESHOLD
#define SQR_TOOM8_THRESHOLD 400
#endif

/* See comments above about MUL_TOOM3_THRESHOLD_LIMIT.  */
#ifndef SQR_TOOM3_THRESHOLD_LIMIT
#define SQR_TOOM3_THRESHOLD_LIMIT  SQR_TOOM3_THRESHOLD
#endif

#ifndef SQR_TOOM4_THRESHOLD_LIMIT
#define SQR_TOOM4_THRESHOLD_LIMIT  SQR_TOOM4_THRESHOLD
#endif

#ifndef SQR_TOOM8_THRESHOLD_LIMIT
#define SQR_TOOM8_THRESHOLD_LIMIT  SQR_TOOM8_THRESHOLD
#endif

/* points at which fft is used for mul/sqr and mulmod_Bexp resp. */
#ifndef MUL_FFT_FULL_THRESHOLD
#define MUL_FFT_FULL_THRESHOLD   (MUL_TOOM8H_THRESHOLD * 10)
#endif
#ifndef SQR_FFT_FULL_THRESHOLD
#define SQR_FFT_FULL_THRESHOLD   (SQR_TOOM8_THRESHOLD * 10)
#endif

#ifndef MUL_FFT_THRESHOLD
#define MUL_FFT_THRESHOLD   (MUL_FFT_FULL_THRESHOLD / 2)
#endif
#ifndef SQR_FFT_THRESHOLD
#define SQR_FFT_THRESHOLD   (SQR_FFT_FULL_THRESHOLD / 2)
#endif

#ifndef FFT_MULMOD_2EXPP1_CUTOFF
#define FFT_MULMOD_2EXPP1_CUTOFF 128
#endif

#if HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2

#ifndef REDC_1_TO_REDC_2_THRESHOLD
#define REDC_1_TO_REDC_2_THRESHOLD       15
#endif
#ifndef REDC_2_TO_REDC_N_THRESHOLD
#define REDC_2_TO_REDC_N_THRESHOLD      100
#endif

#else

#ifndef REDC_1_TO_REDC_N_THRESHOLD
#define REDC_1_TO_REDC_N_THRESHOLD      100
#endif

#endif /* HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2 */

#ifndef DC_DIV_QR_THRESHOLD
#define DC_DIV_QR_THRESHOLD    (3 * MUL_KARATSUBA_THRESHOLD)
#endif

#ifndef DC_DIVAPPR_Q_N_THRESHOLD
#define DC_DIVAPPR_Q_N_THRESHOLD    (3 * MUL_KARATSUBA_THRESHOLD)
#endif

#ifndef DC_BDIV_QR_THRESHOLD
#define DC_BDIV_QR_THRESHOLD    (3 * MUL_KARATSUBA_THRESHOLD)
#endif

#ifndef DC_BDIV_Q_THRESHOLD
#define DC_BDIV_Q_THRESHOLD    (3 * MUL_KARATSUBA_THRESHOLD)
#endif

#ifndef INV_DIV_QR_THRESHOLD
#define INV_DIV_QR_THRESHOLD    (MUL_FFT_THRESHOLD/3)
#endif

#ifndef INV_DIVAPPR_Q_N_THRESHOLD
#define INV_DIVAPPR_Q_N_THRESHOLD    (MUL_FFT_THRESHOLD/3)
#endif

#ifndef DC_DIV_Q_THRESHOLD
#define DC_DIV_Q_THRESHOLD    (3 * MUL_KARATSUBA_THRESHOLD)
#endif

#ifndef INV_DIV_Q_THRESHOLD
#define INV_DIV_Q_THRESHOLD    (MUL_FFT_THRESHOLD/3)
#endif

#ifndef BINV_NEWTON_THRESHOLD
#define BINV_NEWTON_THRESHOLD           300
#endif

#ifndef DC_DIVAPPR_Q_THRESHOLD
#define DC_DIVAPPR_Q_THRESHOLD    (3 * MUL_TOOM3_THRESHOLD)
#endif

#ifndef INV_DIVAPPR_Q_THRESHOLD
#define INV_DIVAPPR_Q_THRESHOLD    (MUL_FFT_THRESHOLD/2)
#endif

#ifndef GET_STR_DC_THRESHOLD
#define GET_STR_DC_THRESHOLD             18
#endif

#ifndef GET_STR_PRECOMPUTE_THRESHOLD
#define GET_STR_PRECOMPUTE_THRESHOLD     35
#endif

#ifndef SET_STR_DC_THRESHOLD
#define SET_STR_DC_THRESHOLD            750
#endif

#ifndef SET_STR_PRECOMPUTE_THRESHOLD
#define SET_STR_PRECOMPUTE_THRESHOLD   2000
#endif

#ifndef FAC_ODD_THRESHOLD
#define FAC_ODD_THRESHOLD    35
#endif

#ifndef FAC_DSC_THRESHOLD
#define FAC_DSC_THRESHOLD   400
#endif

/* Return non-zero if xp,xsize and yp,ysize overlap.
   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
   overlap.  If both these are false, there's an overlap. */
#define MPN_OVERLAP_P(xp, xsize, yp, ysize) \
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
#define MEM_OVERLAP_P(xp, xsize, yp, ysize)     \
  (   (char *) (xp) + (xsize) > (char *) (yp)   \
   && (char *) (yp) + (ysize) > (char *) (xp))

/* Return non-zero if xp,xsize and yp,ysize are either identical or not
   overlapping.  Return zero if they're partially overlapping. */
#define MPN_SAME_OR_SEPARATE_P(xp, yp, size)    \
  MPN_SAME_OR_SEPARATE2_P(xp, size, yp, size)
#define MPN_SAME_OR_SEPARATE2_P(xp, xsize, yp, ysize)           \
  ((xp) == (yp) || ! MPN_OVERLAP_P (xp, xsize, yp, ysize))

/* Return non-zero if dst,dsize and src,ssize are either identical or
   overlapping in a way suitable for an incrementing/decrementing algorithm.
   Return zero if they're partially overlapping in an unsuitable fashion. */
#define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize)             \
  ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_INCR_P(dst, src, size)      \
  MPN_SAME_OR_INCR2_P(dst, size, src, size)
#define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize)             \
  ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_DECR_P(dst, src, size)      \
  MPN_SAME_OR_DECR2_P(dst, size, src, size)
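
/* For instance MPN_SAME_OR_INCR_P (p, p + 1, n) is non-zero, since a
   destination below the source suits an incrementing copy, whereas
   MPN_SAME_OR_INCR_P (p + 1, p, n) is zero for n >= 2 because the regions
   partially overlap the wrong way around.  */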


/* ASSERT() is a private assertion checking scheme, similar to <assert.h>.
   ASSERT() does the check only if WANT_ASSERT is selected, ASSERT_ALWAYS()
   does it always.  Generally assertions are meant for development, but
   might help when looking for a problem later too.

   Note that strings shouldn't be used within the ASSERT expression,
   eg. ASSERT(strcmp(s,"notgood")!=0), since the quotes upset the "expr"
   used in the !HAVE_STRINGIZE case (ie. K&R).  */

#ifdef __LINE__
#define ASSERT_LINE  __LINE__
#else
#define ASSERT_LINE  -1
#endif

#ifdef __FILE__
#define ASSERT_FILE  __FILE__
#else
#define ASSERT_FILE  ""
#endif

__GMP_DECLSPEC void __gmp_assert_header _PROTO ((const char *filename, int linenum));
__GMP_DECLSPEC void __gmp_assert_fail _PROTO ((const char *filename, int linenum, const char *expr)) ATTRIBUTE_NORETURN;

#if HAVE_STRINGIZE
#define ASSERT_FAIL(expr)  __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, #expr)
#else
#define ASSERT_FAIL(expr)  __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, "expr")
#endif

#define ASSERT_ALWAYS(expr)     \
  do {                          \
    if (!(expr))                \
      ASSERT_FAIL (expr);       \
  } while (0)

#if WANT_ASSERT
#define ASSERT(expr)   ASSERT_ALWAYS (expr)
#else
#define ASSERT(expr)   do {} while (0)
#endif


/* ASSERT_CARRY checks the expression is non-zero, and ASSERT_NOCARRY checks
   that it's zero.  In both cases if assertion checking is disabled the
   expression is still evaluated.  These macros are meant for use with
   routines like mpn_add_n() where the return value represents a carry or
   whatever that should or shouldn't occur in some context.  For example,
   ASSERT_NOCARRY (mpn_add_n (rp, s1p, s2p, size)); */
#if WANT_ASSERT
#define ASSERT_CARRY(expr)     ASSERT_ALWAYS ((expr) != 0)
#define ASSERT_NOCARRY(expr)   ASSERT_ALWAYS ((expr) == 0)
#else
#define ASSERT_CARRY(expr)     (expr)
#define ASSERT_NOCARRY(expr)   (expr)
#endif


/* ASSERT_CODE includes code when assertion checking is wanted.  This is the
   same as writing "#if WANT_ASSERT", but more compact.  */
#if WANT_ASSERT
#define ASSERT_CODE(expr)  expr
#else
#define ASSERT_CODE(expr)
#endif


/* Test that an mpq_t is in fully canonical form.  This can be used as
   protection on routines like mpq_equal which give wrong results on
   non-canonical inputs.  */
#if WANT_ASSERT
#define ASSERT_MPQ_CANONICAL(q)                         \
  do {                                                  \
    ASSERT (q->_mp_den._mp_size > 0);                   \
    if (q->_mp_num._mp_size == 0)                       \
      {                                                 \
        /* zero should be 0/1 */                        \
        ASSERT (mpz_cmp_ui (mpq_denref(q), 1L) == 0);   \
      }                                                 \
    else                                                \
      {                                                 \
        /* no common factors */                         \
        mpz_t  __g;                                     \
        mpz_init (__g);                                 \
        mpz_gcd (__g, mpq_numref(q), mpq_denref(q));    \
        ASSERT (mpz_cmp_ui (__g, 1) == 0);              \
        mpz_clear (__g);                                \
      }                                                 \
  } while (0)
#else
#define ASSERT_MPQ_CANONICAL(q)  do {} while (0)
#endif

/* Check that the nail parts are zero. */
#define ASSERT_ALWAYS_LIMB(limb)                \
  do {                                          \
    mp_limb_t  __nail = (limb) & GMP_NAIL_MASK; \
    ASSERT_ALWAYS (__nail == 0);                \
  } while (0)
#define ASSERT_ALWAYS_MPN(ptr, size)            \
  do {                                          \
    /* let whole loop go dead when no nails */  \
    if (GMP_NAIL_BITS != 0)                     \
      {                                         \
        mp_size_t  __i;                         \
        for (__i = 0; __i < (size); __i++)      \
          ASSERT_ALWAYS_LIMB ((ptr)[__i]);      \
      }                                         \
  } while (0)
#if WANT_ASSERT
#define ASSERT_LIMB(limb)       ASSERT_ALWAYS_LIMB (limb)
#define ASSERT_MPN(ptr, size)   ASSERT_ALWAYS_MPN (ptr, size)
#else
#define ASSERT_LIMB(limb)       do {} while (0)
#define ASSERT_MPN(ptr, size)   do {} while (0)
#endif


/* Assert that an mpn region {ptr,size} is zero, or non-zero.
   size==0 is allowed, and in that case {ptr,size} is considered to be zero.  */
#if WANT_ASSERT
#define ASSERT_MPN_ZERO_P(ptr,size)     \
  do {                                  \
    mp_size_t  __i;                     \
    ASSERT ((size) >= 0);               \
    for (__i = 0; __i < (size); __i++)  \
      ASSERT ((ptr)[__i] == 0);         \
  } while (0)
#define ASSERT_MPN_NONZERO_P(ptr,size)  \
  do {                                  \
    mp_size_t  __i;                     \
    int        __nonzero = 0;           \
    ASSERT ((size) >= 0);               \
    for (__i = 0; __i < (size); __i++)  \
      if ((ptr)[__i] != 0)              \
        {                               \
          __nonzero = 1;                \
          break;                        \
        }                               \
    ASSERT (__nonzero);                 \
  } while (0)
#else
#define ASSERT_MPN_ZERO_P(ptr,size)     do {} while (0)
#define ASSERT_MPN_NONZERO_P(ptr,size)  do {} while (0)
#endif


#if HAVE_NATIVE_mpn_com_n
#define mpn_com_n __MPN(com_n)
__GMP_DECLSPEC void mpn_com_n _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#elif !defined(mpn_com_n)
#define mpn_com_n(d,s,n)                                \
  do {                                                  \
    mp_ptr     __d = (d);                               \
    mp_srcptr  __s = (s);                               \
    mp_size_t  __n = (n);                               \
    ASSERT (__n >= 1);                                  \
    ASSERT (MPN_SAME_OR_SEPARATE_P (__d, __s, __n));    \
    do                                                  \
      *__d++ = (~ *__s++) & GMP_NUMB_MASK;              \
    while (--__n);                                      \
  } while (0)
#endif

#define MPN_LOGOPS_N_INLINE(d, s1, s2, n, operation)    \
  do {                                                  \
    mp_ptr     __d = (d);                               \
    mp_srcptr  __s1 = (s1);                             \
    mp_srcptr  __s2 = (s2);                             \
    mp_size_t  __n = (n);                               \
    ASSERT (__n >= 1);                                  \
    ASSERT (MPN_SAME_OR_SEPARATE_P (__d, __s1, __n));   \
    ASSERT (MPN_SAME_OR_SEPARATE_P (__d, __s2, __n));   \
    do                                                  \
      operation;                                        \
    while (--__n);                                      \
  } while (0)

#if !HAVE_NATIVE_mpn_and_n
#undef mpn_and_n
#define mpn_and_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = *__s1++ & *__s2++)
#endif

#if !HAVE_NATIVE_mpn_andn_n
#undef mpn_andn_n
#define mpn_andn_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = *__s1++ & ~*__s2++)
#endif

#if !HAVE_NATIVE_mpn_nand_n
#undef mpn_nand_n
#define mpn_nand_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = ~(*__s1++ & *__s2++) & GMP_NUMB_MASK)
#endif

#if !HAVE_NATIVE_mpn_ior_n
#undef mpn_ior_n
#define mpn_ior_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = *__s1++ | *__s2++)
#endif

#if !HAVE_NATIVE_mpn_iorn_n
#undef mpn_iorn_n
#define mpn_iorn_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = (*__s1++ | ~*__s2++) & GMP_NUMB_MASK)
#endif

#if !HAVE_NATIVE_mpn_nior_n
#undef mpn_nior_n
#define mpn_nior_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = ~(*__s1++ | *__s2++) & GMP_NUMB_MASK)
#endif

#if !HAVE_NATIVE_mpn_xor_n
#undef mpn_xor_n
#define mpn_xor_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = *__s1++ ^ *__s2++)
#endif

#if !HAVE_NATIVE_mpn_xnor_n
#undef mpn_xnor_n
#define mpn_xnor_n(d, s1, s2, n) \
  MPN_LOGOPS_N_INLINE (d, s1, s2, n, *__d++ = ~(*__s1++ ^ *__s2++) & GMP_NUMB_MASK)
#endif

#if HAVE_NATIVE_mpn_not
#define mpn_not __MPN(not)
__GMP_DECLSPEC void mpn_not _PROTO ((mp_ptr, mp_size_t));
#else
#define mpn_not(__xp,__n) mpn_com((__xp),(__xp),(__n))
#endif

#if HAVE_NATIVE_mpn_double
#define mpn_double __MPN(double)
__GMP_DECLSPEC mp_limb_t mpn_double _PROTO ((mp_ptr, mp_size_t));
#else
#define mpn_double(__xp,__n) mpn_lshift1((__xp),(__xp),(__n))
#endif

#if HAVE_NATIVE_mpn_half
#define mpn_half __MPN(half)
__GMP_DECLSPEC mp_limb_t mpn_half _PROTO ((mp_ptr, mp_size_t));
#else
#define mpn_half(__xp,__n) mpn_rshift1((__xp),(__xp),(__n))
#endif

#if HAVE_NATIVE_mpn_lshift1
#define mpn_lshift1 __MPN(lshift1)
__GMP_DECLSPEC mp_limb_t mpn_lshift1 _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#else
#define mpn_lshift1(__xp,__yp,__n) mpn_lshift((__xp),(__yp),(__n),1)
#endif

#if HAVE_NATIVE_mpn_rshift1
#define mpn_rshift1 __MPN(rshift1)
__GMP_DECLSPEC mp_limb_t mpn_rshift1 _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#else
#define mpn_rshift1(__xp,__yp,__n) mpn_rshift((__xp),(__yp),(__n),1)
#endif

#if HAVE_NATIVE_mpn_lshift2
#define mpn_lshift2 __MPN(lshift2)
__GMP_DECLSPEC mp_limb_t mpn_lshift2 _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#else
#define mpn_lshift2(__xp,__yp,__n) mpn_lshift((__xp),(__yp),(__n),2)
#endif

#if HAVE_NATIVE_mpn_rshift2
#define mpn_rshift2 __MPN(rshift2)
__GMP_DECLSPEC mp_limb_t mpn_rshift2 _PROTO ((mp_ptr, mp_srcptr, mp_size_t));
#else
#define mpn_rshift2(__xp,__yp,__n) mpn_rshift((__xp),(__yp),(__n),2)
#endif

#if HAVE_NATIVE_mpn_addlsh1_n
#define mpn_addlsh1_n __MPN(addlsh1_n)
__GMP_DECLSPEC mp_limb_t mpn_addlsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
#elif HAVE_NATIVE_mpn_addlsh_n
#define mpn_addlsh1_n(__xp,__yp,__zp,__n) mpn_addlsh_n((__xp),(__yp),(__zp),(__n),1)
#define HAVE_NATIVE_mpn_addlsh1_n 1
#endif

#if HAVE_NATIVE_mpn_sublsh1_n
#define mpn_sublsh1_n __MPN(sublsh1_n)
__GMP_DECLSPEC mp_limb_t mpn_sublsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
#elif HAVE_NATIVE_mpn_sublsh_n
#define mpn_sublsh1_n(__xp,__yp,__zp,__n) mpn_sublsh_n((__xp),(__yp),(__zp),(__n),1)
#define HAVE_NATIVE_mpn_sublsh1_n 1
#endif

#if HAVE_NATIVE_mpn_inclsh_n
#define mpn_inclsh_n __MPN(inclsh_n)
__GMP_DECLSPEC mp_limb_t mpn_inclsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, unsigned int));
#elif HAVE_NATIVE_mpn_addlsh_n
#define mpn_inclsh_n(__xp,__yp,__n,__c) mpn_addlsh_n((__xp),(__xp),(__yp),(__n),(__c))
#define HAVE_NATIVE_mpn_inclsh_n 1
#endif

#if HAVE_NATIVE_mpn_declsh_n
#define mpn_declsh_n __MPN(declsh_n)
__GMP_DECLSPEC mp_limb_t mpn_declsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, unsigned int));
#elif HAVE_NATIVE_mpn_sublsh_n
#define mpn_declsh_n(__xp,__yp,__n,__c) mpn_sublsh_n((__xp),(__xp),(__yp),(__n),(__c))
#define HAVE_NATIVE_mpn_declsh_n 1
#endif

#if HAVE_NATIVE_mpn_store
#define mpn_store __MPN(store)
__GMP_DECLSPEC mp_limb_t mpn_store _PROTO ((mp_ptr, mp_size_t, mp_limb_t));
#else
#define mpn_store(dst, n, val)                  \
  do {                                          \
    ASSERT ((n) >= 0);                          \
    if ((n) != 0)                               \
      {                                         \
        mp_ptr __dst = (dst);                   \
        mp_size_t __n = (n);                    \
        do                                      \
          *__dst++ = val;                       \
        while (--__n);                          \
      }                                         \
  } while (0)
#endif

#define MPN_ZERO(dst,n) mpn_store(dst,n,0)

/* ADDC_LIMB sets w=x+y and cout to 0 or 1 for a carry from that addition. */
#if GMP_NAIL_BITS == 0
#define ADDC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __x = (x);               \
    mp_limb_t  __y = (y);               \
    mp_limb_t  __w = __x + __y;         \
    (w) = __w;                          \
    (cout) = __w < __x;                 \
  } while (0)
#else
#define ADDC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __w;                     \
    ASSERT_LIMB (x);                    \
    ASSERT_LIMB (y);                    \
    __w = (x) + (y);                    \
    (w) = __w & GMP_NUMB_MASK;          \
    (cout) = __w >> GMP_NUMB_BITS;      \
  } while (0)
#endif

/* SUBC_LIMB sets w=x-y and cout to 0 or 1 for a borrow from that
   subtract.  */
#if GMP_NAIL_BITS == 0
#define SUBC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __x = (x);               \
    mp_limb_t  __y = (y);               \
    mp_limb_t  __w = __x - __y;         \
    (w) = __w;                          \
    (cout) = __w > __x;                 \
  } while (0)
#else
#define SUBC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __w = (x) - (y);         \
    (w) = __w & GMP_NUMB_MASK;          \
    (cout) = __w >> (GMP_LIMB_BITS-1);  \
  } while (0)
#endif
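
/* Usage sketch (illustrative only): split x+y into a low limb and a carry
   bit, so that x + y == carry*2^GMP_NUMB_BITS + sum.

     mp_limb_t  sum, carry;
     ADDC_LIMB (carry, sum, x, y);
*/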


/* MPN_INCR_U does {ptr,size} += n, MPN_DECR_U does {ptr,size} -= n, both
   expecting no carry (or borrow) from that.

   The size parameter is only for the benefit of assertion checking.  In a
   normal build it's unused and the carry/borrow is just propagated as far
   as it needs to go.

   On random data, usually only one or two limbs of {ptr,size} get updated,
   so there's no need for any sophisticated looping, just something compact
   and sensible.

   FIXME: Switch all code from mpn_{incr,decr}_u to MPN_{INCR,DECR}_U,
   declaring their operand sizes, then remove the former.  This is purely
   for the benefit of assertion checking.  */

#if GMP_NAIL_BITS == 0
#ifndef mpn_incr_u
#define mpn_incr_u(p,incr)                              \
  do {                                                  \
    mp_limb_t __x;                                      \
    mp_ptr __p = (p);                                   \
    if (__builtin_constant_p (incr) && (incr) == 1)     \
      {                                                 \
        while (++(*(__p++)) == 0)                       \
          ;                                             \
      }                                                 \
    else                                                \
      {                                                 \
        __x = *__p + (incr);                            \
        *__p = __x;                                     \
        if (__x < (incr))                               \
          while (++(*(++__p)) == 0)                     \
            ;                                           \
      }                                                 \
  } while (0)
#endif
#ifndef mpn_decr_u
#define mpn_decr_u(p,incr)                              \
  do {                                                  \
    mp_limb_t __x;                                      \
    mp_ptr __p = (p);                                   \
    if (__builtin_constant_p (incr) && (incr) == 1)     \
      {                                                 \
        while ((*(__p++))-- == 0)                       \
          ;                                             \
      }                                                 \
    else                                                \
      {                                                 \
        __x = *__p;                                     \
        *__p = __x - (incr);                            \
        if (__x < (incr))                               \
          while ((*(++__p))-- == 0)                     \
            ;                                           \
      }                                                 \
  } while (0)
#endif
#endif

#if GMP_NAIL_BITS >= 1
#ifndef mpn_incr_u
#define mpn_incr_u(p,incr)                              \
  do {                                                  \
    mp_limb_t __x;                                      \
    mp_ptr __p = (p);                                   \
    if (__builtin_constant_p (incr) && (incr) == 1)     \
      {                                                 \
        do                                              \
          {                                             \
            __x = (*__p + 1) & GMP_NUMB_MASK;           \
            *__p++ = __x;                               \
          }                                             \
        while (__x == 0);                               \
      }                                                 \
    else                                                \
      {                                                 \
        __x = (*__p + (incr));                          \
        *__p++ = __x & GMP_NUMB_MASK;                   \
        if (__x >> GMP_NUMB_BITS != 0)                  \
          {                                             \
            do                                          \
              {                                         \
                __x = (*__p + 1) & GMP_NUMB_MASK;       \
                *__p++ = __x;                           \
              }                                         \
            while (__x == 0);                           \
          }                                             \
      }                                                 \
  } while (0)
#endif
#ifndef mpn_decr_u
#define mpn_decr_u(p,incr)                              \
  do {                                                  \
    mp_limb_t __x;                                      \
    mp_ptr __p = (p);                                   \
    if (__builtin_constant_p (incr) && (incr) == 1)     \
      {                                                 \
        do                                              \
          {                                             \
            __x = *__p;                                 \
            *__p++ = (__x - 1) & GMP_NUMB_MASK;         \
          }                                             \
        while (__x == 0);                               \
      }                                                 \
    else                                                \
      {                                                 \
        __x = *__p - (incr);                            \
        *__p++ = __x & GMP_NUMB_MASK;                   \
        if (__x >> GMP_NUMB_BITS != 0)                  \
          {                                             \
            do                                          \
              {                                         \
                __x = *__p;                             \
                *__p++ = (__x - 1) & GMP_NUMB_MASK;     \
              }                                         \
            while (__x == 0);                           \
          }                                             \
      }                                                 \
  } while (0)
#endif
#endif
2631 
#ifndef MPN_INCR_U
#if WANT_ASSERT
#define MPN_INCR_U(ptr, size, n)                        \
  do {                                                  \
    ASSERT ((size) >= 1);                               \
    ASSERT_NOCARRY (mpn_add_1 (ptr, ptr, size, n));     \
  } while (0)
#else
#define MPN_INCR_U(ptr, size, n)   mpn_incr_u (ptr, n)
#endif
#endif

#ifndef MPN_DECR_U
#if WANT_ASSERT
#define MPN_DECR_U(ptr, size, n)                        \
  do {                                                  \
    ASSERT ((size) >= 1);                               \
    ASSERT_NOCARRY (mpn_sub_1 (ptr, ptr, size, n));     \
  } while (0)
#else
#define MPN_DECR_U(ptr, size, n)   mpn_decr_u (ptr, n)
#endif
#endif
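
/* Illustrative sketch (not part of the library): adding 1 to the two-limb
   value {p,2} = {GMP_NUMB_MAX, 7} with MPN_INCR_U.  The low limb wraps to
   zero and the carry propagates into the second limb, giving {0, 8}; the
   size argument only feeds the assertion checking described above.  */
#if 0
  mp_limb_t  p[2];
  p[0] = GMP_NUMB_MAX;
  p[1] = 7;
  MPN_INCR_U (p, 2, 1);         /* now p[0] == 0 and p[1] == 8 */
#endif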


/* Structure for conversion between internal binary format and
   strings in base 2..36.  */
struct bases
{
  /* Number of digits in the conversion base that always fits in an
     mp_limb_t.  For example, for base 10 on a machine where an mp_limb_t
     has 32 bits this is 9, since 10**9 is the largest number that fits
     into an mp_limb_t.  */
  int chars_per_limb;

  /* log(2)/log(conversion_base) */
  double chars_per_bit_exactly;

  /* base**chars_per_limb, i.e. the biggest number that fits a word, built by
     factors of base.  Exception: For 2, 4, 8, etc, big_base is log2(base),
     i.e. the number of bits used to represent each digit in the base.  */
  mp_limb_t big_base;

  /* A BITS_PER_MP_LIMB bit approximation to 1/big_base, represented as a
     fixed-point number.  Instead of dividing by big_base an application can
     choose to multiply by big_base_inverted.  */
  mp_limb_t big_base_inverted;
};

#define mp_bases __MPN(bases)
#define __mp_bases __MPN(bases)
__GMP_DECLSPEC extern const struct bases mp_bases[257];
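
/* Illustrative sketch (not part of the library): converting decimal digits
   one limb-sized chunk at a time.  chars_per_limb digits are accumulated
   into a limb, so each full chunk costs only one multiply by big_base
   (10^chars_per_limb for base 10) when combined into the result.  */
#if 0
  const char *str = "1234567890123456789012345";
  int  i, cpl = mp_bases[10].chars_per_limb;    /* eg. 9 with 32-bit limbs */
  mp_limb_t  chunk = 0;
  for (i = 0; i < cpl; i++)                     /* first cpl digits of str */
    chunk = chunk * 10 + (str[i] - '0');
  /* chunk < mp_bases[10].big_base, by the definition of chars_per_limb */
#endif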

/* The following are exposed for the benefit of MPIR.Net. */

__GMP_DECLSPEC extern const unsigned char __gmp_digit_value_tab[480];

/* This struct is used to pass local state between the functions that make up
   the raw i/o interfaces mpz_inp_raw and mpz_out_raw.  Previously monolithic,
   they were split into several calls for the benefit of MPIR.Net.  The
   separation changed neither the contract nor the implementation; it merely
   divided the routines into several steps, so that MPIR.Net can consume the
   raw format processing code while substituting its own file I/O.  */
typedef struct
{
    char* allocated;
    size_t allocatedSize;
    char* written;
    size_t writtenSize;
} __mpir_out_struct;
typedef __mpir_out_struct mpir_out_struct[1];
typedef __mpir_out_struct *mpir_out_ptr;
/* Part of mpz_inp_raw that decodes the input size and allocates appropriate
   memory.  Does not need _GMP_H_HAVE_FILE.  Also used by MPIR.Net. */
__GMP_DECLSPEC void mpz_inp_raw_p __GMP_PROTO ((mpz_ptr x, unsigned char* csize_bytes, mpir_out_ptr out));
/* Part of mpz_inp_raw that reconstitutes limb data from the raw format.
   Does not need _GMP_H_HAVE_FILE.  Also used by MPIR.Net. */
__GMP_DECLSPEC void mpz_inp_raw_m __GMP_PROTO ((mpz_ptr x, mpir_out_ptr out));
/* Part of mpz_out_raw that performs raw output into memory in preparation
   for writing out to a file.  Does not need _GMP_H_HAVE_FILE.  Also used by
   MPIR.Net. */
__GMP_DECLSPEC void mpz_out_raw_m __GMP_PROTO((mpir_out_ptr, mpz_srcptr));

/* End of MPIR.Net consumables */

/* For power of 2 bases this is exact.  For other bases the result is either
   exact or one too big.

   To be exact always it'd be necessary to examine all the limbs of the
   operand, since numbers like 100..000 and 99...999 generally differ only
   in the lowest limb.  It'd be possible to examine just a couple of high
   limbs to increase the probability of being exact, but that doesn't seem
   worth bothering with.  */

#define MPN_SIZEINBASE(result, ptr, size, base)                         \
  do {                                                                  \
    int       __lb_base, __cnt;                                         \
    size_t __totbits;                                                   \
                                                                        \
    ASSERT ((size) >= 0);                                               \
    ASSERT ((base) >= 2);                                               \
    ASSERT ((base) < numberof (mp_bases));                              \
                                                                        \
    /* Special case for X == 0.  */                                     \
    if ((size) == 0)                                                    \
      (result) = 1;                                                     \
    else                                                                \
      {                                                                 \
        /* Calculate the total number of significant bits of X.  */     \
        count_leading_zeros (__cnt, (ptr)[(size)-1]);                   \
        __totbits = (size_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS);\
                                                                        \
        if (POW2_P (base))                                              \
          {                                                             \
            __lb_base = mp_bases[base].big_base;                        \
            (result) = (__totbits + __lb_base - 1) / __lb_base;         \
          }                                                             \
        else                                                            \
          (result) = (size_t)                                           \
            (__totbits * mp_bases[base].chars_per_bit_exactly) + 1;     \
      }                                                                 \
  } while (0)
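
/* Illustrative sketch (not part of the library): bounding the buffer needed
   for a base conversion, with xp,xn standing in for an initialized mpn.
   Since the result is exact or one too big, ndigits chars always suffice
   for the digits (add one for a sign or NUL if the caller wants them).  */
#if 0
  size_t  ndigits;
  MPN_SIZEINBASE (ndigits, xp, xn, 10);
#endif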

#define MPN_SIZEINBASE_2EXP(result, ptr, size, base2exp)                \
  do {                                                                  \
    int          __cnt;                                                 \
    mp_bitcnt_t  __totbits;                                             \
    ASSERT ((size) > 0);                                                \
    ASSERT ((ptr)[(size)-1] != 0);                                      \
    count_leading_zeros (__cnt, (ptr)[(size)-1]);                       \
    __totbits = (mp_bitcnt_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS);\
    (result) = (__totbits + (base2exp)-1) / (base2exp);                 \
  } while (0)

/* eliminate mp_bases lookups for base==16 */
#define MPN_SIZEINBASE_16(result, ptr, size)                            \
  do {                                                                  \
    int       __cnt;                                                    \
    mp_size_t __totbits;                                                \
                                                                        \
    ASSERT ((size) >= 0);                                               \
                                                                        \
    /* Special case for X == 0.  */                                     \
    if ((size) == 0)                                                    \
      (result) = 1;                                                     \
    else                                                                \
      {                                                                 \
        /* Calculate the total number of significant bits of X.  */     \
        count_leading_zeros (__cnt, (ptr)[(size)-1]);                   \
        __totbits = (size_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS);\
        (result) = (__totbits + 4 - 1) / 4;                             \
      }                                                                 \
  } while (0)

/* bit count to limb count, rounding up */
#define BITS_TO_LIMBS(n)  (((n) + (GMP_NUMB_BITS - 1)) / GMP_NUMB_BITS)

/* MPN_SET_UI sets an mpn (ptr, cnt) to given ui.  MPZ_FAKE_UI creates fake
   mpz_t from ui.  The zp argument must have room for LIMBS_PER_UI limbs
   in both cases (LIMBS_PER_UI is also defined here.) */
#if BITS_PER_UI <= GMP_NUMB_BITS /* need one limb per ulong */

#define LIMBS_PER_UI 1
#define MPN_SET_UI(zp, zn, u)   \
  (zp)[0] = (u);                \
  (zn) = ((zp)[0] != 0);
#define MPZ_FAKE_UI(z, zp, u)   \
  (zp)[0] = (u);                \
  PTR (z) = (zp);               \
  SIZ (z) = ((zp)[0] != 0);     \
  ASSERT_CODE (ALLOC (z) = 1);

#else /* need two limbs per ulong */

#define LIMBS_PER_UI 2
#define MPN_SET_UI(zp, zn, u)                          \
  (zp)[0] = (u) & GMP_NUMB_MASK;                       \
  (zp)[1] = (u) >> GMP_NUMB_BITS;                      \
  (zn) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0);
#define MPZ_FAKE_UI(z, zp, u)                          \
  (zp)[0] = (u) & GMP_NUMB_MASK;                       \
  (zp)[1] = (u) >> GMP_NUMB_BITS;                      \
  SIZ (z) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0); \
  PTR (z) = (zp);                                      \
  ASSERT_CODE (ALLOC (z) = 2);

#endif

/* LIMB_HIGHBIT_TO_MASK(n) examines the high bit of a limb value and turns 1
   or 0 there into a limb 0xFF..FF or 0 respectively.

   On most CPUs this is just an arithmetic right shift by GMP_LIMB_BITS-1,
   but C99 doesn't guarantee signed right shifts are arithmetic, so we have
   a little compile-time test and a fallback to a "? :" form.  The latter is
   necessary for instance on Cray vector systems.

   Recent versions of gcc (eg. 3.3) will in fact optimize a "? :" like this
   to an arithmetic right shift anyway, but it's good to get the desired
   shift on past versions too (in particular since an important use of
   LIMB_HIGHBIT_TO_MASK is in udiv_qrnnd_preinv).  */

#define LIMB_HIGHBIT_TO_MASK(n)                                 \
  (((mp_limb_signed_t) -1 >> 1) < 0                             \
   ? (mp_limb_signed_t) (n) >> (GMP_LIMB_BITS - 1)              \
   : (n) & GMP_LIMB_HIGHBIT ? MP_LIMB_T_MAX : CNST_LIMB(0))
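
/* Illustrative sketch (not part of the library): the resulting mask selects
   between two values without a branch, as in the conditional addback of a
   divisor in udiv_qrnnd_preinv2 below.  */
#if 0
  mp_limb_t  n = GMP_LIMB_HIGHBIT;           /* high bit set */
  mp_limb_t  m = LIMB_HIGHBIT_TO_MASK (n);   /* m == MP_LIMB_T_MAX */
  mp_limb_t  d = 123, x = 456;
  mp_limb_t  r = x + (m & d);                /* adds d only when the bit was set */
#endif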


/* Use a library function for invert_limb, if available. */
#define mpn_invert_limb  __MPN(invert_limb)
mp_limb_t mpn_invert_limb _PROTO ((mp_limb_t)) ATTRIBUTE_CONST;
#if ! defined (invert_limb) && HAVE_NATIVE_mpn_invert_limb
#define invert_limb(invxl,xl)           \
  do {                                  \
    (invxl) = mpn_invert_limb (xl);     \
  } while (0)
#endif

#ifndef invert_limb
#define invert_limb(invxl,xl)                             \
  do {                                                    \
    mp_limb_t dummy;                                      \
    ASSERT ((xl) != 0);                                   \
    udiv_qrnnd (invxl, dummy, ~(xl), ~CNST_LIMB(0), xl);  \
  } while (0)
#endif
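
/* Illustrative note (a sketch, not part of the library): for a divisor d
   with its high bit set, the udiv_qrnnd above computes
   floor ((B*B - 1) / d) - B with B = 2^GMP_LIMB_BITS, since the numerator
   ~d,~0 is B*B - 1 - d*B.  The resulting (d,dinv) pair can then be reused
   across many udiv_qrnnd_preinv calls.  */
#if 0
  mp_limb_t  d = GMP_LIMB_HIGHBIT | 123;   /* any normalized divisor */
  mp_limb_t  dinv;
  invert_limb (dinv, d);
#endif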

#define mpir_invert_pi1(dinv, d1, d0)                                   \
  do {                                                                  \
    mp_limb_t _v, _p, _t1, _t0, _mask;                                  \
    invert_limb (_v, d1);                                               \
    _p = (d1) * _v;                                                     \
    _p += (d0);                                                         \
    if (_p < (d0))                                                      \
      {                                                                 \
        _v--;                                                           \
        _mask = -(mp_limb_t) (_p >= (d1));                              \
        _p -= (d1);                                                     \
        _v += _mask;                                                    \
        _p -= _mask & (d1);                                             \
      }                                                                 \
    umul_ppmm (_t1, _t0, d0, _v);                                       \
    _p += _t1;                                                          \
    if (_p < _t1)                                                       \
      {                                                                 \
        _v--;                                                           \
        if (UNLIKELY (_p >= (d1)))                                      \
          {                                                             \
            if (_p > (d1) || _t0 >= (d0))                               \
              _v--;                                                     \
          }                                                             \
      }                                                                 \
    dinv = _v;                                                          \
  } while (0)

/* For compatibility with GMP only */
#define invert_pi1(dinv, d1, d0)                        \
   mpir_invert_pi1((dinv).inv32, d1, d0)

/* Compute the quotient and remainder for n / d.  Requires d
   >= B^2 / 2 and n < d B.  di is the inverse

     floor ((B^3 - 1) / (d0 + d1 B)) - B.

   NOTE: Output variables are updated multiple times.  Only some inputs
   and outputs may overlap.
*/
#define udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)               \
  do {                                                                  \
    mp_limb_t _q0, _t1, _t0;                                            \
    umul_ppmm ((q), _q0, (n2), (dinv));                                 \
    add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));                        \
                                                                        \
    /* Compute the two most significant limbs of n - q'd */             \
    (r1) = (n1) - (d1) * (q);                                           \
    sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0));                    \
    umul_ppmm (_t1, _t0, (d0), (q));                                    \
    sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);                      \
    (q)++;                                                              \
                                                                        \
    /* Conditionally adjust q and the remainders */                     \
    if ((r1) >= _q0)                                                    \
      {                                                                 \
        (q)--;                                                          \
        add_ssaaaa ((r1), (r0), (r1), (r0), (d1), (d0));                \
      }                                                                 \
    if (UNLIKELY ((r1) >= (d1)))                                        \
      {                                                                 \
        if ((r1) > (d1) || (r0) >= (d0))                                \
          {                                                             \
            (q)++;                                                      \
            sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));            \
          }                                                             \
      }                                                                 \
  } while (0)
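
/* Illustrative sketch (not part of the library): one step of schoolbook
   division by a normalized two-limb divisor {d0,d1} (high bit of d1 set).
   Here d1,d0,n2,n1,n0 stand in for limbs supplied by the caller; the two
   high limbs n2,n1, taken as a two-limb number, must be less than {d0,d1}
   so the quotient fits one limb.  */
#if 0
  mp_limb_t  q, r1, r0, dinv;
  mpir_invert_pi1 (dinv, d1, d0);   /* once per divisor */
  udiv_qr_3by2 (q, r1, r0, n2, n1, n0, d1, d0, dinv);
  /* n2*B^2 + n1*B + n0 == q*(d1*B + d0) + r1*B + r0, {r0,r1} < {d0,d1} */
#endif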

#ifndef udiv_qrnnd_preinv
#define udiv_qrnnd_preinv udiv_qrnnd_preinv2
#endif

/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
   limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
   If this would yield overflow, DI should be the largest possible number
   (i.e., only ones).  For correct operation, the most significant bit of D
   has to be set.  Put the quotient in Q and the remainder in R.  */
#define udiv_qrnnd_preinv1(q, r, nh, nl, d, di)                         \
  do {                                                                  \
    mp_limb_t _q, _ql, _r;                                              \
    mp_limb_t _xh, _xl;                                                 \
    ASSERT ((d) != 0);                                                  \
    umul_ppmm (_q, _ql, (nh), (di));                                    \
    _q += (nh); /* Compensate, di is 2**GMP_LIMB_BITS too small */      \
    umul_ppmm (_xh, _xl, _q, (d));                                      \
    sub_ddmmss (_xh, _r, (nh), (nl), _xh, _xl);                         \
    if (_xh != 0)                                                       \
      {                                                                 \
        sub_ddmmss (_xh, _r, _xh, _r, 0, (d));                          \
        _q += 1;                                                        \
        if (_xh != 0)                                                   \
          {                                                             \
            _r -= (d);                                                  \
            _q += 1;                                                    \
          }                                                             \
      }                                                                 \
    if (_r >= (d))                                                      \
      {                                                                 \
        _r -= (d);                                                      \
        _q += 1;                                                        \
      }                                                                 \
    (r) = _r;                                                           \
    (q) = _q;                                                           \
  } while (0)

/* Like udiv_qrnnd_preinv, but branch-free. */
#define udiv_qrnnd_preinv2(q, r, nh, nl, d, di)                         \
  do {                                                                  \
    mp_limb_t _n2, _n10, _nmask, _nadj, _q1;                            \
    mp_limb_t _xh, _xl;                                                 \
    _n2 = (nh);                                                         \
    _n10 = (nl);                                                        \
    _nmask = LIMB_HIGHBIT_TO_MASK (_n10);                               \
    _nadj = _n10 + (_nmask & (d));                                      \
    umul_ppmm (_xh, _xl, di, _n2 - _nmask);                             \
    add_ssaaaa (_xh, _xl, _xh, _xl, _n2, _nadj);                        \
    _q1 = ~_xh;                                                         \
    umul_ppmm (_xh, _xl, _q1, d);                                       \
    add_ssaaaa (_xh, _xl, _xh, _xl, nh, nl);                            \
    _xh -= (d);                                 /* xh = 0 or -1 */      \
    (r) = _xl + ((d) & _xh);                                            \
    (q) = _xh - _q1;                                                    \
  } while (0)
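
/* Illustrative sketch (not part of the library): replacing a hardware
   divide with a precomputed inverse.  Requires the high bit of d set and
   nh < d, the same conditions as udiv_qrnnd.  */
#if 0
  mp_limb_t  d = GMP_LIMB_HIGHBIT | 1;       /* normalized divisor */
  mp_limb_t  nh = 123, nl = 456;             /* numerator, needs nh < d */
  mp_limb_t  di, q, r;
  invert_limb (di, d);                       /* once per divisor */
  udiv_qrnnd_preinv (q, r, nh, nl, d, di);   /* per two-limb chunk */
#endif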

/* Like udiv_qrnnd_preinv2, but for any value D.  DNORM is D shifted left
   so that its most significant bit is set.  LGUP is ceil(log2(D)).  */
#define udiv_qrnnd_preinv2gen(q, r, nh, nl, d, di, dnorm, lgup)         \
  do {                                                                  \
    mp_limb_t _n2, _n10, _nmask, _nadj, _q1;                            \
    mp_limb_t _xh, _xl;                                                 \
    _n2 = ((nh) << (BITS_PER_MP_LIMB - (lgup))) + ((nl) >> 1 >> ((lgup) - 1));\
    _n10 = (nl) << (BITS_PER_MP_LIMB - (lgup));                         \
    _nmask = LIMB_HIGHBIT_TO_MASK (_n10);                               \
    _nadj = _n10 + (_nmask & (dnorm));                                  \
    umul_ppmm (_xh, _xl, di, _n2 - _nmask);                             \
    add_ssaaaa (_xh, _xl, _xh, _xl, _n2, _nadj);                        \
    _q1 = ~_xh;                                                         \
    umul_ppmm (_xh, _xl, _q1, d);                                       \
    add_ssaaaa (_xh, _xl, _xh, _xl, nh, nl);                            \
    _xh -= (d);                                                         \
    (r) = _xl + ((d) & _xh);                                            \
    (q) = _xh - _q1;                                                    \
  } while (0)


#ifndef mpn_preinv_divrem_1  /* if not done with cpuvec in a fat binary */
#define mpn_preinv_divrem_1  __MPN(preinv_divrem_1)
__GMP_DECLSPEC mp_limb_t mpn_preinv_divrem_1 _PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int));
#endif


/* USE_PREINV_DIVREM_1 is whether to use mpn_preinv_divrem_1, as opposed to
   the plain mpn_divrem_1.  Likewise USE_PREINV_MOD_1 chooses between
   mpn_preinv_mod_1 and plain mpn_mod_1.  The default for both is yes, since
   the few CISC chips where preinv is not good have defines saying so.  */
#ifndef USE_PREINV_DIVREM_1
#define USE_PREINV_DIVREM_1   1
#endif
#ifndef USE_PREINV_MOD_1
#define USE_PREINV_MOD_1   1
#endif

#if USE_PREINV_DIVREM_1
#define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift)    \
  mpn_preinv_divrem_1 (qp, xsize, ap, size, d, dinv, shift)
#else
#define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift)    \
  mpn_divrem_1 (qp, xsize, ap, size, d)
#endif

#if USE_PREINV_MOD_1
#define MPN_MOD_OR_PREINV_MOD_1(src,size,divisor,inverse)       \
  mpn_preinv_mod_1 (src, size, divisor, inverse)
#else
#define MPN_MOD_OR_PREINV_MOD_1(src,size,divisor,inverse)       \
  mpn_mod_1 (src, size, divisor)
#endif


#ifndef mpn_mod_34lsub1  /* if not done with cpuvec in a fat binary */
#define mpn_mod_34lsub1 __MPN(mod_34lsub1)
__GMP_DECLSPEC mp_limb_t mpn_mod_34lsub1 _PROTO ((mp_srcptr, mp_size_t)) __GMP_ATTRIBUTE_PURE;
#endif


/* DIVEXACT_1_THRESHOLD is at what size to use mpn_divexact_1, as opposed to
   plain mpn_divrem_1.  Likewise MODEXACT_1_ODD_THRESHOLD for
   mpn_modexact_1_odd against plain mpn_mod_1.  On most CPUs divexact and
   modexact are faster at all sizes, so the defaults are 0.  Those CPUs
   where this is not right have a tuned threshold.  */
#ifndef DIVEXACT_1_THRESHOLD
#define DIVEXACT_1_THRESHOLD  0
#endif
#ifndef MODEXACT_1_ODD_THRESHOLD
#define MODEXACT_1_ODD_THRESHOLD  0
#endif

#ifndef mpn_divexact_1  /* if not done with cpuvec in a fat binary */
#define mpn_divexact_1 __MPN(divexact_1)
__GMP_DECLSPEC void mpn_divexact_1 _PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
#endif

#define MPN_DIVREM_OR_DIVEXACT_1(dst, src, size, divisor)                     \
  do {                                                                        \
    if (BELOW_THRESHOLD (size, DIVEXACT_1_THRESHOLD))                         \
      ASSERT_NOCARRY (mpn_divrem_1 (dst, (mp_size_t) 0, src, size, divisor)); \
    else                                                                      \
      {                                                                       \
        ASSERT (mpn_mod_1 (src, size, divisor) == 0);                         \
        mpn_divexact_1 (dst, src, size, divisor);                             \
      }                                                                       \
  } while (0)

#ifndef mpn_modexact_1c_odd  /* if not done with cpuvec in a fat binary */
#define mpn_modexact_1c_odd  __MPN(modexact_1c_odd)
__GMP_DECLSPEC mp_limb_t mpn_modexact_1c_odd _PROTO ((mp_srcptr src, mp_size_t size,
                                       mp_limb_t divisor, mp_limb_t c)) __GMP_ATTRIBUTE_PURE;
#endif

#if HAVE_NATIVE_mpn_modexact_1_odd
#define mpn_modexact_1_odd   __MPN(modexact_1_odd)
__GMP_DECLSPEC mp_limb_t mpn_modexact_1_odd _PROTO ((mp_srcptr src, mp_size_t size,
                                      mp_limb_t divisor)) __GMP_ATTRIBUTE_PURE;
#else
#define mpn_modexact_1_odd(src,size,divisor) \
  mpn_modexact_1c_odd (src, size, divisor, CNST_LIMB(0))
#endif

#define MPN_MOD_OR_MODEXACT_1_ODD(src,size,divisor)             \
  (ABOVE_THRESHOLD (size, MODEXACT_1_ODD_THRESHOLD)             \
   ? mpn_modexact_1_odd (src, size, divisor)                    \
   : mpn_mod_1 (src, size, divisor))


/* modlimb_invert() sets inv to the multiplicative inverse of n modulo
   2^GMP_NUMB_BITS, ie. satisfying inv*n == 1 mod 2^GMP_NUMB_BITS.
   n must be odd (otherwise such an inverse doesn't exist).

   This is not to be confused with invert_limb(), which is completely
   different.

   The table lookup gives an inverse with the low 8 bits valid, and each
   multiply step doubles the number of bits.  See Jebelean "An algorithm for
   exact division" end of section 4 (reference in gmp.texi).

   Possible enhancement: Could use UHWtype until the last step, if half-size
   multiplies are faster (might help under _LONG_LONG_LIMB).

   Alternative: As noted in Granlund and Montgomery "Division by Invariant
   Integers using Multiplication" (reference in gmp.texi), n itself gives a
   3-bit inverse immediately, and could be used instead of a table lookup.
   A 4-bit inverse can be obtained effectively from xoring bits 1 and 2 into
   bit 3, for instance with (((n + 2) & 4) << 1) ^ n.  */

#define modlimb_invert_table  __gmp_modlimb_invert_table
__GMP_DECLSPEC extern const unsigned char  modlimb_invert_table[128];

#define modlimb_invert(inv,n)                                           \
  do {                                                                  \
    mp_limb_t  __n = (n);                                               \
    mp_limb_t  __inv;                                                   \
    ASSERT ((__n & 1) == 1);                                            \
                                                                        \
    __inv = modlimb_invert_table[(__n/2) & 0x7F]; /*  8 */              \
    if (GMP_NUMB_BITS > 8)   __inv = 2 * __inv - __inv * __inv * __n;   \
    if (GMP_NUMB_BITS > 16)  __inv = 2 * __inv - __inv * __inv * __n;   \
    if (GMP_NUMB_BITS > 32)  __inv = 2 * __inv - __inv * __inv * __n;   \
                                                                        \
    if (GMP_NUMB_BITS > 64)                                             \
      {                                                                 \
        int  __invbits = 64;                                            \
        do {                                                            \
          __inv = 2 * __inv - __inv * __inv * __n;                      \
          __invbits *= 2;                                               \
        } while (__invbits < GMP_NUMB_BITS);                            \
      }                                                                 \
                                                                        \
    ASSERT ((__inv * __n & GMP_NUMB_MASK) == 1);                        \
    (inv) = __inv & GMP_NUMB_MASK;                                      \
  } while (0)
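
/* Illustrative note (a sketch, not part of the library): why each step
   doubles the valid bits.  If inv*n == 1 + k*2^b, then for
   inv' = 2*inv - inv*inv*n we get inv'*n == 1 - k^2*2^(2b), so inv' is
   valid to 2b bits.  */
#if 0
  mp_limb_t  inv;
  modlimb_invert (inv, CNST_LIMB(3));
  /* inv == MODLIMB_INVERSE_3 and (inv * 3) & GMP_NUMB_MASK == 1 */
#endif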

/* Multiplicative inverse of 3, modulo 2^GMP_NUMB_BITS.
   Eg. 0xAAAAAAAB for 32 bits, 0xAAAAAAAAAAAAAAAB for 64 bits.
   GMP_NUMB_MAX/3*2+1 is right when GMP_NUMB_BITS is even, but when it's odd
   we need to start from GMP_NUMB_MAX>>1. */
#define MODLIMB_INVERSE_3 (((GMP_NUMB_MAX >> (GMP_NUMB_BITS % 2)) / 3) * 2 + 1)

/* ceil(GMP_NUMB_MAX/3) and ceil(2*GMP_NUMB_MAX/3).
   These expressions work because GMP_NUMB_MAX%3 != 0 for all GMP_NUMB_BITS. */
#define GMP_NUMB_CEIL_MAX_DIV3   (GMP_NUMB_MAX / 3 + 1)
#define GMP_NUMB_CEIL_2MAX_DIV3  ((GMP_NUMB_MAX>>1) / 3 + 1 + GMP_NUMB_HIGHBIT)
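
/* Illustrative sketch (not part of the library): when a limb is known to be
   an exact multiple of 3, dividing by 3 is just one multiply by
   MODLIMB_INVERSE_3 modulo 2^GMP_NUMB_BITS; this is the idea behind
   mpn_divexact_by3c.  */
#if 0
  mp_limb_t  a = 3 * CNST_LIMB(12345);
  mp_limb_t  q = (a * MODLIMB_INVERSE_3) & GMP_NUMB_MASK;   /* q == 12345 */
#endif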


/* Set r to -a mod d.  a>=d is allowed.  Can give r>d.  All should be limbs.

   It's not clear whether this is the best way to do this calculation.
   Anything congruent to -a would be fine for the one limb congruence
   tests.  */

#define NEG_MOD(r, a, d)                                                \
  do {                                                                  \
    ASSERT ((d) != 0);                                                  \
    ASSERT_LIMB (a);                                                    \
    ASSERT_LIMB (d);                                                    \
                                                                        \
    if ((a) <= (d))                                                     \
      {                                                                 \
        /* small a is reasonably likely */                              \
        (r) = (d) - (a);                                                \
      }                                                                 \
    else                                                                \
      {                                                                 \
        unsigned   __twos;                                              \
        mp_limb_t  __dnorm;                                             \
        count_leading_zeros (__twos, d);                                \
        __twos -= GMP_NAIL_BITS;                                        \
        __dnorm = (d) << __twos;                                        \
        (r) = ((a) <= __dnorm ? __dnorm : 2*__dnorm) - (a);             \
      }                                                                 \
                                                                        \
    ASSERT_LIMB (r);                                                    \
  } while (0)

/* A bit mask of all the least significant zero bits of n, or -1 if n==0. */
#define LOW_ZEROS_MASK(n)  (((n) & -(n)) - 1)


/* ULONG_PARITY sets "p" to 1 if there's an odd number of 1 bits in "n", or
   to 0 if there's an even number.  "n" should be an unsigned long and "p"
   an int.  */

#if ! defined (ULONG_PARITY)
#define ULONG_PARITY(p, n)                                              \
  do {                                                                  \
    unsigned long  __n = (n);                                           \
    int  __p = 0;                                                       \
    do                                                                  \
      {                                                                 \
        __p ^= 0x96696996L >> (__n & 0x1F);                             \
        __n >>= 5;                                                      \
      }                                                                 \
    while (__n != 0);                                                   \
                                                                        \
    (p) = __p & 1;                                                      \
  } while (0)
#endif
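
/* Illustrative note (a sketch, not part of the library): bit i of the
   constant 0x96696996 is the parity of i, so each 5-bit chunk of __n
   indexes one bit of a 32-entry table held in a register, and the XORs
   accumulate the per-chunk parities.  */
#if 0
  int  p;
  ULONG_PARITY (p, 0x96UL);   /* 0x96 has four 1 bits, so p == 0 */
#endif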

#if ! defined (BSWAP_LIMB)
#if BITS_PER_MP_LIMB == 8
#define BSWAP_LIMB(dst, src)            \
  do { (dst) = (src); } while (0)
#endif
#if BITS_PER_MP_LIMB == 16
#define BSWAP_LIMB(dst, src)                    \
  do {                                          \
    (dst) = ((src) << 8) + ((src) >> 8);        \
  } while (0)
#endif
#if BITS_PER_MP_LIMB == 32
#define BSWAP_LIMB(dst, src)    \
  do {                          \
    (dst) =                     \
      ((src) << 24)             \
      + (((src) & 0xFF00) << 8) \
      + (((src) >> 8) & 0xFF00) \
      + ((src) >> 24);          \
  } while (0)
#endif
#if BITS_PER_MP_LIMB == 64
#define BSWAP_LIMB(dst, src)            \
  do {                                  \
    (dst) =                             \
      ((src) << 56)                     \
      + (((src) & 0xFF00) << 40)        \
      + (((src) & 0xFF0000) << 24)      \
      + (((src) & 0xFF000000) << 8)     \
      + (((src) >> 8) & 0xFF000000)     \
      + (((src) >> 24) & 0xFF0000)      \
      + (((src) >> 40) & 0xFF00)        \
      + ((src) >> 56);                  \
  } while (0)
#endif
#endif

#if ! defined (BSWAP_LIMB)
#define BSWAP_LIMB(dst, src)                            \
  do {                                                  \
    mp_limb_t  __bswapl_src = (src);                    \
    mp_limb_t  __dst = 0;                               \
    int        __i;                                     \
    for (__i = 0; __i < BYTES_PER_MP_LIMB; __i++)       \
      {                                                 \
        __dst = (__dst << 8) | (__bswapl_src & 0xFF);   \
        __bswapl_src >>= 8;                             \
      }                                                 \
    (dst) = __dst;                                      \
  } while (0)
#endif

#if ! defined (BSWAP_LIMB_FETCH)
#define BSWAP_LIMB_FETCH(limb, src)  BSWAP_LIMB (limb, *(src))
#endif

#if ! defined (BSWAP_LIMB_STORE)
#define BSWAP_LIMB_STORE(dst, limb)  BSWAP_LIMB (*(dst), limb)
#endif


/* Byte swap limbs from {src,size} and store at {dst,size}. */
#define MPN_BSWAP(dst, src, size)                       \
  do {                                                  \
    mp_ptr     __dst = (dst);                           \
    mp_srcptr  __src = (src);                           \
    mp_size_t  __size = (size);                         \
    mp_size_t  __i;                                     \
    ASSERT ((size) >= 0);                               \
    ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size));   \
    for (__i = 0; __i < __size; __i++)                  \
      {                                                 \
        BSWAP_LIMB_FETCH (*__dst, __src);               \
        __dst++;                                        \
        __src++;                                        \
      }                                                 \
  } while (0)

/* Byte swap limbs from {src,size} and store in reverse order at {dst,size}. */
#define MPN_BSWAP_REVERSE(dst, src, size)               \
  do {                                                  \
    mp_ptr     __dst = (dst);                           \
    mp_size_t  __size = (size);                         \
    mp_srcptr  __src = (src) + __size - 1;              \
    mp_size_t  __i;                                     \
    ASSERT ((size) >= 0);                               \
    ASSERT (! MPN_OVERLAP_P (dst, size, src, size));    \
    for (__i = 0; __i < __size; __i++)                  \
      {                                                 \
        BSWAP_LIMB_FETCH (*__dst, __src);               \
        __dst++;                                        \
        __src--;                                        \
      }                                                 \
  } while (0)

/* Cool population count of an mp_limb_t.
   You have to figure out how this works; we won't tell you!

   The constants could also be expressed as:
     0x55... = [2^N / 3]     = [(2^N-1)/3]
     0x33... = [2^N / 5]     = [(2^N-1)/5]
     0x0f... = [2^N / 17]    = [(2^N-1)/17]
     (N is GMP_LIMB_BITS, [] denotes truncation.) */

#if ! defined (popc_limb) && GMP_LIMB_BITS == 8
#define popc_limb(result, input)                                        \
  do {                                                                  \
    mp_limb_t  __x = (input);                                           \
    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;                                \
    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);     \
    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;                        \
    (result) = __x & 0xff;                                              \
  } while (0)
#endif

#if ! defined (popc_limb) && GMP_LIMB_BITS == 16
#define popc_limb(result, input)                                        \
  do {                                                                  \
    mp_limb_t  __x = (input);                                           \
    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;                                \
    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);     \
    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;                        \
    if (GMP_LIMB_BITS > 8)                                              \
      __x = ((__x >> 8) + __x);                                         \
    (result) = __x & 0xff;                                              \
  } while (0)
#endif

#if ! defined (popc_limb) && GMP_LIMB_BITS == 32
#define popc_limb(result, input)                                        \
  do {                                                                  \
    mp_limb_t  __x = (input);                                           \
    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;                                \
    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);     \
    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;                        \
    if (GMP_LIMB_BITS > 8)                                              \
      __x = ((__x >> 8) + __x);                                         \
    if (GMP_LIMB_BITS > 16)                                             \
      __x = ((__x >> 16) + __x);                                        \
    (result) = __x & 0xff;                                              \
  } while (0)
#endif

#if ! defined (popc_limb) && GMP_LIMB_BITS == 64
#define popc_limb(result, input)                                        \
  do {                                                                  \
    mp_limb_t  __x = (input);                                           \
    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;                                \
    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);     \
    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;                        \
    if (GMP_LIMB_BITS > 8)                                              \
      __x = ((__x >> 8) + __x);                                         \
    if (GMP_LIMB_BITS > 16)                                             \
      __x = ((__x >> 16) + __x);                                        \
    if (GMP_LIMB_BITS > 32)                                             \
      __x = ((__x >> 32) + __x);                                        \
    (result) = __x & 0xff;                                              \
  } while (0)
#endif
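
/* Illustrative note (a sketch, not part of the library): the first step
   leaves each 2-bit field holding the count of bits set in it, the second
   sums adjacent fields into 4-bit counts, the third folds those into byte
   counts, and the remaining shift-adds sum the bytes; the total always
   fits the final & 0xff.  */
#if 0
  mp_limb_t  c;
  popc_limb (c, CNST_LIMB(0xF0F0));   /* c == 8 */
#endif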


/* Define stuff for longlong.h.  */
#if HAVE_ATTRIBUTE_MODE && defined (__GNUC__)
typedef unsigned int UQItype    __attribute__ ((mode (QI)));
typedef          int SItype     __attribute__ ((mode (SI)));
typedef unsigned int USItype    __attribute__ ((mode (SI)));
typedef          int DItype     __attribute__ ((mode (DI)));
typedef unsigned int UDItype    __attribute__ ((mode (DI)));
#else
typedef unsigned char UQItype;
typedef          long SItype;
typedef unsigned long USItype;
#if HAVE_LONG_LONG
typedef long long int DItype;
typedef unsigned long long int UDItype;
#else /* Assume `long' gives us a wide enough type.  Needed for hppa2.0w.  */
typedef long int DItype;
typedef unsigned long int UDItype;
#endif
#endif

typedef mp_limb_t UWtype;
typedef unsigned int UHWtype;
#define W_TYPE_SIZE BITS_PER_MP_LIMB

/* Define ieee_double_extract

   Bit field packing is "implementation defined" according to C99, which
   leaves us at the compiler's mercy here.  For some systems packing is
   defined in the ABI (eg. x86).  In any case so far it seems universal that
   little endian systems pack from low to high, and big endian from high to
   low within the given type.

   Within the fields we rely on the integer endianness being the same as the
   float endianness; this is true everywhere we know of and it'd be a fairly
   strange system that did anything else.  */

#if HAVE_DOUBLE_IEEE_LITTLE_SWAPPED
#define _GMP_IEEE_FLOATS 1
union ieee_double_extract
{
  struct
    {
      gmp_uint_least32_t manh:20;
      gmp_uint_least32_t exp:11;
      gmp_uint_least32_t sig:1;
      gmp_uint_least32_t manl:32;
    } s;
  double d;
};
#endif

#if HAVE_DOUBLE_IEEE_LITTLE_ENDIAN
#define _GMP_IEEE_FLOATS 1
union ieee_double_extract
{
  struct
    {
      gmp_uint_least32_t manl:32;
      gmp_uint_least32_t manh:20;
      gmp_uint_least32_t exp:11;
      gmp_uint_least32_t sig:1;
    } s;
  double d;
};
#endif

#if HAVE_DOUBLE_IEEE_BIG_ENDIAN
#define _GMP_IEEE_FLOATS 1
union ieee_double_extract
{
  struct
    {
      gmp_uint_least32_t sig:1;
      gmp_uint_least32_t exp:11;
      gmp_uint_least32_t manh:20;
      gmp_uint_least32_t manl:32;
    } s;
  double d;
};
#endif


/* Use (4.0 * ...) instead of (2.0 * ...) to work around buggy compilers
   that don't convert ulong->double correctly (eg. SunOS 4 native cc).  */
#define MP_BASE_AS_DOUBLE (4.0 * ((mp_limb_t) 1 << (GMP_NUMB_BITS - 2)))
/* Maximum number of limbs it will take to store any `double'.
   We assume doubles have 53 mantissa bits.  */
#define LIMBS_PER_DOUBLE ((53 + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS + 1)

__GMP_DECLSPEC int __gmp_extract_double _PROTO ((mp_ptr, double));

#define mpn_get_d __gmpn_get_d
__GMP_DECLSPEC double mpn_get_d __GMP_PROTO ((mp_srcptr, mp_size_t, mp_size_t, long)) __GMP_ATTRIBUTE_PURE;


/* DOUBLE_NAN_INF_ACTION executes code a_nan if x is a NaN, or executes
   a_inf if x is an infinity.  Both are considered unlikely values, for
   branch prediction.  */

#if _GMP_IEEE_FLOATS
#define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf)  \
  do {                                          \
    union ieee_double_extract  u;               \
    u.d = (x);                                  \
    if (UNLIKELY (u.s.exp == 0x7FF))            \
      {                                         \
        if (u.s.manl == 0 && u.s.manh == 0)     \
          { a_inf; }                            \
        else                                    \
          { a_nan; }                            \
      }                                         \
  } while (0)
#endif

#ifndef DOUBLE_NAN_INF_ACTION
/* Unknown format, try something generic.
   NaN should be "unordered", so x!=x.
   Inf should be bigger than DBL_MAX.  */
#define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf)                  \
  do {                                                          \
    {                                                           \
      if (UNLIKELY ((x) != (x)))                                \
        { a_nan; }                                              \
      else if (UNLIKELY ((x) > DBL_MAX || (x) < -DBL_MAX))      \
        { a_inf; }                                              \
    }                                                           \
  } while (0)
#endif

__GMP_DECLSPEC extern int __gmp_junk;
__GMP_DECLSPEC extern const int __gmp_0;
__GMP_DECLSPEC void __gmp_exception _PROTO ((int)) ATTRIBUTE_NORETURN;
__GMP_DECLSPEC void __gmp_divide_by_zero _PROTO ((void)) ATTRIBUTE_NORETURN;
__GMP_DECLSPEC void __gmp_sqrt_of_negative _PROTO ((void)) ATTRIBUTE_NORETURN;
__GMP_DECLSPEC void __gmp_invalid_operation _PROTO ((void)) ATTRIBUTE_NORETURN;
#define GMP_ERROR(code)   __gmp_exception (code)
#define DIVIDE_BY_ZERO    __gmp_divide_by_zero ()
#define SQRT_OF_NEGATIVE  __gmp_sqrt_of_negative ()


/* Stuff used by mpn/generic/perfsqr.c and mpz/prime_p.c */
#if GMP_NUMB_BITS == 2
#define PP 0x3                                  /* 3 */
#define PP_FIRST_OMITTED 5
#endif
#if GMP_NUMB_BITS == 4
#define PP 0xF                                  /* 3 x 5 */
#define PP_FIRST_OMITTED 7
#endif
#if GMP_NUMB_BITS == 8
#define PP 0x69                                 /* 3 x 5 x 7 */
#define PP_FIRST_OMITTED 11
#endif
#if GMP_NUMB_BITS == 16
#define PP 0x3AA7                               /* 3 x 5 x 7 x 11 x 13 */
#define PP_FIRST_OMITTED 17
#endif
#if GMP_NUMB_BITS == 32
#define PP 0xC0CFD797L                          /* 3 x 5 x 7 x 11 x ... x 29 */
#define PP_INVERTED 0x53E5645CL
#define PP_FIRST_OMITTED 31
#endif
#if GMP_NUMB_BITS == 64
#define PP CNST_LIMB(0xE221F97C30E94E1D)        /* 3 x 5 x 7 x 11 x ... x 53 */
#define PP_INVERTED CNST_LIMB(0x21CFE6CFC938B36B)
#define PP_FIRST_OMITTED 59
#endif
#ifndef PP_FIRST_OMITTED
#define PP_FIRST_OMITTED 3
#endif

/* BIT1 means a result value in bit 1 (second least significant bit), with a
   zero bit representing +1 and a one bit representing -1.  Bits other than
   bit 1 are garbage.  These are meant to be kept in "int"s, and casts are
   used to ensure the expressions are "int"s even if a and/or b might be
   other types.

   JACOBI_TWOS_U_BIT1 and JACOBI_RECIP_UU_BIT1 are used in mpn_jacobi_base
   and their speed is important.  Expressions are used rather than
   conditionals to accumulate sign changes, which effectively means XORs
   instead of conditional JUMPs. */

/* (a/0), with a signed; is 1 if a=+/-1, 0 otherwise */
#define JACOBI_S0(a)   (((a) == 1) | ((a) == -1))

/* (a/0), with a unsigned; is 1 if a=+/-1, 0 otherwise */
#define JACOBI_U0(a)   ((a) == 1)

/* (a/0), with a given by low and size;
   is 1 if a=+/-1, 0 otherwise */
#define JACOBI_LS0(alow,asize) \
  (((asize) == 1 || (asize) == -1) && (alow) == 1)

/* (a/0), with a an mpz_t;
   fetch of low limb always valid, even if size is zero */
#define JACOBI_Z0(a)   JACOBI_LS0 (PTR(a)[0], SIZ(a))

/* (0/b), with b unsigned; is 1 if b=1, 0 otherwise */
#define JACOBI_0U(b)   ((b) == 1)

/* (0/b), with b signed; is 1 if b=+/-1, 0 otherwise */
#define JACOBI_0S(b)   ((b) == 1 || (b) == -1)

/* (0/b), with b given by low and size; is 1 if b=+/-1, 0 otherwise */
#define JACOBI_0LS(blow,bsize) \
  (((bsize) == 1 || (bsize) == -1) && (blow) == 1)

/* Convert a bit1 to +1 or -1. */
#define JACOBI_BIT1_TO_PN(result_bit1) \
  (1 - ((int) (result_bit1) & 2))
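
/* Illustrative sketch (not part of the library): in the bit 1 convention,
   any int with bit 1 clear denotes +1 and any int with bit 1 set denotes
   -1, so the conversions below hold whatever the garbage bits are.  */
#if 0
  int  pn0 = JACOBI_BIT1_TO_PN (0);   /* pn0 == +1 */
  int  pn2 = JACOBI_BIT1_TO_PN (2);   /* pn2 == -1 */
#endif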

/* (2/b), with b unsigned and odd;
   is (-1)^((b^2-1)/8) which is 1 if b==1,7mod8 or -1 if b==3,5mod8 and
   hence obtained from (b>>1)^b */
#define JACOBI_TWO_U_BIT1(b) \
  ((int) (((b) >> 1) ^ (b)))

/* (2/b)^twos, with b unsigned and odd */
#define JACOBI_TWOS_U_BIT1(twos, b) \
  ((int) ((twos) << 1) & JACOBI_TWO_U_BIT1 (b))

/* (2/b)^twos, with b unsigned and odd */
#define JACOBI_TWOS_U(twos, b) \
  (JACOBI_BIT1_TO_PN (JACOBI_TWOS_U_BIT1 (twos, b)))

/* (-1/b), with b odd (signed or unsigned);
   is (-1)^((b-1)/2) */
#define JACOBI_N1B_BIT1(b) \
  ((int) (b))

/* (a/b) effect due to sign of a: signed/unsigned, b odd;
   is (-1/b) if a<0, or +1 if a>=0 */
#define JACOBI_ASGN_SU_BIT1(a, b) \
  ((((a) < 0) << 1) & JACOBI_N1B_BIT1(b))

/* (a/b) effect due to sign of b: signed/signed;
   is -1 if a and b both negative, +1 otherwise */
#define JACOBI_BSGN_SS_BIT1(a, b) \
  ((((a)<0) & ((b)<0)) << 1)

/* (a/b) effect due to sign of b: signed/mpz;
   is -1 if a and b both negative, +1 otherwise */
#define JACOBI_BSGN_SZ_BIT1(a, b) \
  JACOBI_BSGN_SS_BIT1 (a, SIZ(b))

/* (a/b) effect due to sign of b: mpz/signed;
   is -1 if a and b both negative, +1 otherwise */
#define JACOBI_BSGN_ZS_BIT1(a, b) \
  JACOBI_BSGN_SZ_BIT1 (b, a)

/* (a/b) reciprocity to switch to (b/a), a,b both unsigned and odd;
   is (-1)^((a-1)*(b-1)/4), which means +1 if either a,b==1mod4, or -1 if
   both a,b==3mod4, achieved in bit 1 by a&b.  No ASSERT()s about a,b odd
   because this is used in a couple of places with only bit 1 of a or b
   valid. */
#define JACOBI_RECIP_UU_BIT1(a, b) \
  ((int) ((a) & (b)))

/* Strip low zero limbs from {b_ptr,b_size} by incrementing b_ptr and
   decrementing b_size.  b_low should be b_ptr[0] on entry, and will be
   updated for the new b_ptr.  result_bit1 is updated according to the
   factors of 2 stripped, as per (a/2).  */
#define JACOBI_STRIP_LOW_ZEROS(result_bit1, a, b_ptr, b_size, b_low)    \
  do {                                                                  \
    ASSERT ((b_size) >= 1);                                             \
    ASSERT ((b_low) == (b_ptr)[0]);                                     \
                                                                        \
    while (UNLIKELY ((b_low) == 0))                                     \
      {                                                                 \
        (b_size)--;                                                     \
        ASSERT ((b_size) >= 1);                                         \
        (b_ptr)++;                                                      \
        (b_low) = *(b_ptr);                                             \
                                                                        \
        ASSERT (((a) & 1) != 0);                                        \
        if ((GMP_NUMB_BITS % 2) == 1)                                   \
          (result_bit1) ^= JACOBI_TWO_U_BIT1(a);                        \
      }                                                                 \
  } while (0)

/* Set a_rem to {a_ptr,a_size} reduced modulo b, either using mod_1 or
   modexact_1_odd, but in either case leaving a_rem<b.  b must be odd and
   unsigned.  modexact_1_odd effectively calculates -a mod b, and
   result_bit1 is adjusted for the factor of -1.

   The way mpn_modexact_1_odd sometimes bases its remainder on a_size and
   sometimes on a_size-1 means if GMP_NUMB_BITS is odd we can't know what
   factor to introduce into result_bit1, so for that case use mpn_mod_1
   unconditionally.

   FIXME: mpn_modexact_1_odd is more efficient, so some way to get it used
   for odd GMP_NUMB_BITS would be good.  Perhaps it could mung its result,
   or not skip a divide step, or something. */

#define JACOBI_MOD_OR_MODEXACT_1_ODD(result_bit1, a_rem, a_ptr, a_size, b) \
  do {                                                                     \
    mp_srcptr  __a_ptr  = (a_ptr);                                         \
    mp_size_t  __a_size = (a_size);                                        \
    mp_limb_t  __b      = (b);                                             \
                                                                           \
    ASSERT (__a_size >= 1);                                                \
    ASSERT (__b & 1);                                                      \
                                                                           \
    if ((GMP_NUMB_BITS % 2) != 0                                           \
        || BELOW_THRESHOLD (__a_size, MODEXACT_1_ODD_THRESHOLD))           \
      {                                                                    \
        (a_rem) = mpn_mod_1 (__a_ptr, __a_size, __b);                      \
      }                                                                    \
    else                                                                   \
      {                                                                    \
        (result_bit1) ^= JACOBI_N1B_BIT1 (__b);                            \
        (a_rem) = mpn_modexact_1_odd (__a_ptr, __a_size, __b);             \
      }                                                                    \
  } while (0)

/* State for the Jacobi computation using Lehmer. */
#define jacobi_table __gmp_jacobi_table
__GMP_DECLSPEC extern const unsigned char jacobi_table[208];

/* Bit layout for the initial state. b must be odd.

      3  2  1  0
   +--+--+--+--+
   |a1|a0|b1| s|
   +--+--+--+--+

 */
static inline unsigned
mpn_jacobi_init (unsigned a, unsigned b, unsigned s)
{
  ASSERT (b & 1);
  ASSERT (s <= 1);
  return ((a & 3) << 2) + (b & 2) + s;
}

static inline int
mpn_jacobi_finish (unsigned bits)
{
  /* (a, b) = (1,0) or (0,1) */
  ASSERT ((bits & 14) == 0);

  return 1 - 2 * (bits & 1);
}

static inline unsigned
mpn_jacobi_update (unsigned bits, unsigned denominator, unsigned q)
{
  /* FIXME: Could halve table size by not including the e bit in the
   * index, and instead xor when updating. Then the lookup would be
   * like
   *
   *   bits ^= table[((bits & 30) << 2) + (denominator << 2) + q];
   */

  ASSERT (bits < 26);
  ASSERT (denominator < 2);
  ASSERT (q < 4);

  /* For almost all calls, denominator is constant and quite often q
     is constant too.  So use addition rather than or, so the compiler
     can put the constant part into the offset of an indexed
     addressing instruction.

     With constant denominator, the below table lookup is compiled to

       C Constant q = 1, constant denominator = 1
       movzbl table+5(%eax,8), %eax

     or

       C q in %edx, constant denominator = 1
       movzbl table+4(%edx,%eax,8), %eax

     One could maintain the state preshifted 3 bits, to save a shift
     here, but at least on x86, that's no real saving.
  */
  return bits = jacobi_table[(bits << 3) + (denominator << 2) + q];
}
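
/* Illustrative call pattern (a sketch only; a, b, q, den and done stand in
   for values produced by a gcd reduction loop): the state is seeded with
   mpn_jacobi_init, updated once per reduction step with the two low bits
   of the quotient and an index saying which operand was the denominator,
   and converted to +/-1 with mpn_jacobi_finish once the loop terminates
   with (a,b) = (1,0) or (0,1).  */
#if 0
  unsigned  bits = mpn_jacobi_init (a, b, 0);     /* requires b odd */
  while (!done)
    bits = mpn_jacobi_update (bits, den, q & 3);  /* den 0 or 1 */
  int  result = mpn_jacobi_finish (bits);         /* +1 or -1 */
#endif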


/* Matrix multiplication */
#define   mpn_matrix22_mul __MPN(matrix22_mul)
__GMP_DECLSPEC void      mpn_matrix22_mul __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));
#define   mpn_matrix22_mul_strassen __MPN(matrix22_mul_strassen)
__GMP_DECLSPEC void      mpn_matrix22_mul_strassen __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));
#define   mpn_matrix22_mul_itch __MPN(matrix22_mul_itch)
__GMP_DECLSPEC mp_size_t mpn_matrix22_mul_itch __GMP_PROTO ((mp_size_t, mp_size_t));

#ifndef MATRIX22_STRASSEN_THRESHOLD
#define MATRIX22_STRASSEN_THRESHOLD 30
#endif

/* HGCD definitions */

/* Extract one numb, shifting count bits left
    ________  ________
   |___xh___||___xl___|
      |____r____|
   >count <

   The count includes any nail bits, so it should work fine if count
   is computed using count_leading_zeros.  If GMP_NAIL_BITS > 0, all of
   xh, xl and r include nail bits.  Must have 0 < count < GMP_LIMB_BITS.

   FIXME: Omit masking with GMP_NUMB_MASK, and let callers do that for
   those calls where the count high bits of xh may be non-zero.
*/

#define MPN_EXTRACT_NUMB(count, xh, xl)                         \
  ((((xh) << ((count) - GMP_NAIL_BITS)) & GMP_NUMB_MASK) |      \
   ((xl) >> (GMP_LIMB_BITS - (count))))
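
/* Illustrative sketch (not part of the library): with GMP_NAIL_BITS == 0,
   pulling the top GMP_LIMB_BITS significant bits of an mpn out of its two
   most significant limbs, as the hgcd code does.  xp,xn stand in for an
   initialized mpn with xn >= 2 and a nonzero high limb; cnt == 0 must be
   special-cased since MPN_EXTRACT_NUMB needs 0 < count.  */
#if 0
  int        cnt;
  mp_limb_t  top;
  count_leading_zeros (cnt, xp[xn - 1]);
  top = (cnt == 0 ? xp[xn - 1]
                  : MPN_EXTRACT_NUMB (cnt, xp[xn - 1], xp[xn - 2]));
#endif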


/* The non-negative matrix M = (u, u'; v, v') keeps track of the
   reduction (a;b) = M (alpha; beta) where alpha, beta are smaller
   than a, b.  The determinant must always be one, so that M has an
   inverse (v', -u'; -v, u).  Elements always fit in GMP_NUMB_BITS - 1
   bits.  */
struct hgcd_matrix1
{
  mp_limb_t u[2][2];
};

#define mpn_hgcd2 __MPN (hgcd2)
__GMP_DECLSPEC int mpn_hgcd2 (mp_limb_t, mp_limb_t, mp_limb_t, mp_limb_t, struct hgcd_matrix1 *);

#define mpn_hgcd_mul_matrix1_vector __MPN (hgcd_mul_matrix1_vector)
__GMP_DECLSPEC mp_size_t mpn_hgcd_mul_matrix1_vector (const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t);

#define mpn_matrix22_mul1_inverse_vector __MPN (matrix22_mul1_inverse_vector)
__GMP_DECLSPEC mp_size_t mpn_matrix22_mul1_inverse_vector (const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t);

#define mpn_hgcd2_jacobi __MPN (hgcd2_jacobi)
__GMP_DECLSPEC int mpn_hgcd2_jacobi (mp_limb_t, mp_limb_t, mp_limb_t, mp_limb_t, struct hgcd_matrix1 *, unsigned *);

struct hgcd_matrix
{
  mp_size_t alloc;              /* for sanity checking only */
  mp_size_t n;
  mp_ptr p[2][2];
};

#define MPN_HGCD_MATRIX_INIT_ITCH(n) (4 * ((n+1)/2 + 1))

#define mpn_hgcd_matrix_init __MPN (hgcd_matrix_init)
__GMP_DECLSPEC void mpn_hgcd_matrix_init (struct hgcd_matrix *, mp_size_t, mp_ptr);

#define mpn_hgcd_matrix_update_q __MPN (hgcd_matrix_update_q)
__GMP_DECLSPEC void mpn_hgcd_matrix_update_q (struct hgcd_matrix *, mp_srcptr, mp_size_t, unsigned, mp_ptr);

#define mpn_hgcd_matrix_mul_1 __MPN (hgcd_matrix_mul_1)
__GMP_DECLSPEC void mpn_hgcd_matrix_mul_1 (struct hgcd_matrix *, const struct hgcd_matrix1 *, mp_ptr);

#define mpn_hgcd_matrix_mul __MPN (hgcd_matrix_mul)
__GMP_DECLSPEC void mpn_hgcd_matrix_mul (struct hgcd_matrix *, const struct hgcd_matrix *, mp_ptr);

#define mpn_hgcd_matrix_adjust __MPN (hgcd_matrix_adjust)
__GMP_DECLSPEC mp_size_t mpn_hgcd_matrix_adjust (const struct hgcd_matrix *, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr);

#define mpn_hgcd_step __MPN(hgcd_step)
__GMP_DECLSPEC mp_size_t mpn_hgcd_step (mp_size_t, mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);

#define mpn_hgcd_reduce __MPN(hgcd_reduce)
__GMP_DECLSPEC mp_size_t mpn_hgcd_reduce (struct hgcd_matrix *, mp_ptr, mp_ptr, mp_size_t, mp_size_t, mp_ptr);

#define mpn_hgcd_reduce_itch __MPN(hgcd_reduce_itch)
__GMP_DECLSPEC mp_size_t mpn_hgcd_reduce_itch (mp_size_t, mp_size_t);

#define mpn_hgcd_itch __MPN (hgcd_itch)
__GMP_DECLSPEC mp_size_t mpn_hgcd_itch (mp_size_t);

#define mpn_hgcd __MPN (hgcd)
__GMP_DECLSPEC mp_size_t mpn_hgcd (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);

#define mpn_hgcd_appr_itch __MPN (hgcd_appr_itch)
__GMP_DECLSPEC mp_size_t mpn_hgcd_appr_itch (mp_size_t);

#define mpn_hgcd_appr __MPN (hgcd_appr)
__GMP_DECLSPEC int mpn_hgcd_appr (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr);

#define mpn_hgcd_jacobi __MPN (hgcd_jacobi)
__GMP_DECLSPEC mp_size_t mpn_hgcd_jacobi (mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, unsigned *, mp_ptr);

typedef void gcd_subdiv_step_hook(void *, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, int);

/* Needs storage for the quotient */
#define MPN_GCD_SUBDIV_STEP_ITCH(n) (n)

#define mpn_gcd_subdiv_step __MPN(gcd_subdiv_step)
__GMP_DECLSPEC mp_size_t mpn_gcd_subdiv_step (mp_ptr, mp_ptr, mp_size_t, mp_size_t, gcd_subdiv_step_hook *, void *, mp_ptr);

struct gcdext_ctx
{
  /* Result parameters. */
  mp_ptr gp;
  mp_size_t gn;
  mp_ptr up;
  mp_size_t *usize;

  /* Cofactors updated in each step. */
  mp_size_t un;
  mp_ptr u0, u1, tp;
};

#define mpn_gcdext_hook __MPN (gcdext_hook)
gcd_subdiv_step_hook mpn_gcdext_hook;

#define MPN_GCDEXT_LEHMER_N_ITCH(n) (4*(n) + 3)

#define mpn_gcdext_lehmer_n __MPN(gcdext_lehmer_n)
__GMP_DECLSPEC mp_size_t mpn_gcdext_lehmer_n (mp_ptr, mp_ptr, mp_size_t *, mp_ptr, mp_ptr, mp_size_t, mp_ptr);

/* 4*(an + 1) + 4*(bn + 1) + an */
#define MPN_GCDEXT_LEHMER_ITCH(an, bn) (5*(an) + 4*(bn) + 8)

#ifndef HGCD_THRESHOLD
#define HGCD_THRESHOLD 400
#endif

#ifndef HGCD_APPR_THRESHOLD
#define HGCD_APPR_THRESHOLD 400
#endif

#ifndef HGCD_REDUCE_THRESHOLD
#define HGCD_REDUCE_THRESHOLD 1000
#endif

#ifndef GCD_DC_THRESHOLD
#define GCD_DC_THRESHOLD 1000
#endif

#ifndef GCDEXT_DC_THRESHOLD
#define GCDEXT_DC_THRESHOLD 600
#endif

#define mpn_mulmod_bnm1 __MPN(mulmod_bnm1)
__GMP_DECLSPEC void mpn_mulmod_bnm1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);

#define mpn_mulmod_bnm1_next_size(x) \
   ((x) <= 2*FFT_MULMOD_2EXPP1_CUTOFF ? (x) : 2*mpir_fft_adjust_limbs(((x) + 1)/2))

3899 static inline mp_size_t
mpn_mulmod_bnm1_itch(mp_size_t rn,mp_size_t an,mp_size_t bn)3900 mpn_mulmod_bnm1_itch (mp_size_t rn, mp_size_t an, mp_size_t bn) {
3901   return 5*rn + 220;
3902 }
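
/* Usage sketch (illustrative only): multiply {ap,an} and {bp,bn} modulo
   B^rn - 1, with rn chosen by mpn_mulmod_bnm1_next_size and scratch sized
   by mpn_mulmod_bnm1_itch.  The operands are assumed to fit the chosen rn.

     mp_size_t rn = mpn_mulmod_bnm1_next_size (n);
     mp_ptr tp = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (rn, an, bn));
     mpn_mulmod_bnm1 (rp, rn, ap, an, bp, bn, tp);
*/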

/* Definitions for mpn_set_str and mpn_get_str */
struct powers
{
  mp_ptr p;			/* actual power value */
  mp_size_t n;			/* # of limbs at p */
  mp_size_t shift;		/* weight of lowest limb, in limb base B */
  size_t digits_in_base;	/* number of corresponding digits */
  int base;
};
typedef struct powers powers_t;
#define mpn_dc_set_str_powtab_alloc(n) ((n) + GMP_LIMB_BITS)
#define mpn_dc_set_str_itch(n) ((n) + GMP_LIMB_BITS)
#define mpn_dc_get_str_powtab_alloc(n) ((n) + 2 * GMP_LIMB_BITS)
#define mpn_dc_get_str_itch(n) ((n) + GMP_LIMB_BITS)

#define   mpn_dc_set_str __MPN(dc_set_str)
__GMP_DECLSPEC mp_size_t mpn_dc_set_str __GMP_PROTO ((mp_ptr, const unsigned char *, size_t, const powers_t *, mp_ptr));
#define   mpn_bc_set_str __MPN(bc_set_str)
__GMP_DECLSPEC mp_size_t mpn_bc_set_str __GMP_PROTO ((mp_ptr, const unsigned char *, size_t, int));
#define   mpn_set_str_compute_powtab __MPN(set_str_compute_powtab)
__GMP_DECLSPEC void      mpn_set_str_compute_powtab __GMP_PROTO ((powers_t *, mp_ptr, mp_size_t, int));
#define mpn_pre_set_str __MPN(pre_set_str)
__GMP_DECLSPEC void mpn_pre_set_str __GMP_PROTO ((mp_ptr wp, unsigned char *str, size_t str_len, powers_t *powtab, mp_ptr tp));


void _tc4_add(mp_ptr rp, mp_size_t * rn, mp_srcptr r1, mp_size_t r1n, mp_srcptr r2, mp_size_t r2n);

void tc4_add(mp_ptr rp, mp_size_t * rn, mp_srcptr r1, mp_size_t r1n, mp_srcptr r2, mp_size_t r2n);

void _tc4_add_unsigned(mp_ptr rp, mp_size_t * rn, mp_srcptr r1, mp_size_t r1n, mp_srcptr r2, mp_size_t r2n);

void tc4_add_unsigned(mp_ptr rp, mp_size_t * rn, mp_srcptr r1, mp_size_t r1n, mp_srcptr r2, mp_size_t r2n);

void tc4_sub(mp_ptr rp, mp_size_t * rn, mp_srcptr r1, mp_size_t r1n, mp_srcptr r2, mp_size_t r2n);

void tc4_lshift(mp_ptr rp, mp_size_t * rn, mp_srcptr xp, mp_size_t xn, mp_size_t bits);

void tc4_rshift_inplace(mp_ptr rp, mp_size_t * rn, mp_size_t bits);

void tc4_addlsh1_unsigned(mp_ptr rp, mp_size_t * rn, mp_srcptr xp, mp_size_t xn);

void tc4_divexact_ui(mp_ptr rp, mp_size_t * rn, mp_ptr x, mp_size_t xn, mp_limb_t c);

void tc4_divexact_by3(mp_ptr rp, mp_size_t * rn, mp_ptr x, mp_size_t xn);

void tc4_divexact_by15(mp_ptr rp, mp_size_t * rn, mp_ptr x, mp_size_t xn);

void tc4_addmul_1(mp_ptr wp, mp_size_t * wn, mp_srcptr xp, mp_size_t xn, mp_limb_t y);

void tc4_submul_1(mp_ptr wp, mp_size_t * wn, mp_srcptr x, mp_size_t xn, mp_limb_t y);

void tc4_copy (mp_ptr yp, mp_size_t * yn, mp_size_t offset, mp_srcptr xp, mp_size_t xn);

void __divappr_helper(mp_ptr qp, mp_ptr np, mp_srcptr dp, mp_size_t qn);

/* __GMPF_BITS_TO_PREC applies a minimum 53 bits, rounds upwards to a whole
   limb and adds an extra limb.  __GMPF_PREC_TO_BITS drops that extra limb,
   hence giving back the user's size in bits rounded up.  Notice that
   converting prec->bits->prec gives an unchanged value.  */
#define __GMPF_BITS_TO_PREC(n)						\
  ((mp_size_t) ((__GMP_MAX (53, n) + 2 * GMP_NUMB_BITS - 1) / GMP_NUMB_BITS))
#define __GMPF_PREC_TO_BITS(n) \
  ((mp_bitcnt_t) (n) * GMP_NUMB_BITS - GMP_NUMB_BITS)
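
/* Worked example, assuming GMP_NUMB_BITS == 64: __GMPF_BITS_TO_PREC(53)
   = (53 + 127) / 64 = 2 limbs, and __GMPF_PREC_TO_BITS(2) = 2*64 - 64 = 64
   bits, i.e. the requested 53 bits rounded up to a whole limb.  And for any
   prec p >= 2, __GMPF_BITS_TO_PREC(__GMPF_PREC_TO_BITS(p))
   = ((p-1)*64 + 127) / 64 = p, the unchanged round trip noted above.  */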

__GMP_DECLSPEC extern mp_size_t __gmp_default_fp_limb_precision;


/* Set n to the number of significant digits that an mpf with the given
   _mp_prec field can hold, in the given base.  This is a rounded-up value,
   designed to ensure there are enough digits to reproduce all the
   guaranteed part of the value.

   There are prec many limbs, but the high limb might be just "1", so ignore
   it and count only prec-1 limbs into chars.  +1 rounds that upwards, and a
   further +1 is because the limbs usually won't fall on digit boundaries.

   FIXME: If base is a power of 2 and the bits per digit divides
   BITS_PER_MP_LIMB then the +2 is unnecessary.  This is always the case for
   base==2, and for base==16 with the current 32 or 64 bit limb sizes. */

#define MPF_SIGNIFICANT_DIGITS(n, base, prec)                           \
  do {                                                                  \
    ASSERT (base >= 2 && base < numberof (mp_bases));                   \
    (n) = 2 + (size_t) ((((size_t) (prec) - 1) * GMP_NUMB_BITS)         \
                        * mp_bases[(base)].chars_per_bit_exactly);      \
  } while (0)
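
/* Worked example (illustrative): with 64-bit limbs, base == 10 and
   prec == 3, chars_per_bit_exactly is log10(2) ~= 0.30103, giving
   n = 2 + (size_t) (2 * 64 * 0.30103) = 2 + 38 = 40 digits for the
   128 guaranteed bits.  */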


/* Decimal point string, from the current C locale.  Needs <langinfo.h> for
   nl_langinfo and constants, preferably with _GNU_SOURCE defined to get
   DECIMAL_POINT from glibc, and needs <locale.h> for localeconv, each under
   their respective #if HAVE_FOO_H.

   GLIBC recommends nl_langinfo since fetching just one facet can be faster
   than localeconv fetching them all. */

/* DECIMAL_POINT seems to need _GNU_SOURCE defined to get it from glibc. */
#if HAVE_NL_LANGINFO && defined (DECIMAL_POINT)
#define GMP_DECIMAL_POINT  (nl_langinfo (DECIMAL_POINT))
#endif
/* RADIXCHAR is deprecated, but still present in unix98. */
#if HAVE_NL_LANGINFO && defined (RADIXCHAR) && ! defined (GMP_DECIMAL_POINT)
#define GMP_DECIMAL_POINT  (nl_langinfo (RADIXCHAR))
#endif
/* localeconv is slower since it returns all locale stuff */
#if HAVE_LOCALECONV && ! defined (GMP_DECIMAL_POINT)
#define GMP_DECIMAL_POINT  (localeconv()->decimal_point)
#endif
#if ! defined (GMP_DECIMAL_POINT)
#define GMP_DECIMAL_POINT  (".")
#endif


#define DOPRNT_CONV_FIXED        1
#define DOPRNT_CONV_SCIENTIFIC   2
#define DOPRNT_CONV_GENERAL      3

#define DOPRNT_JUSTIFY_NONE      0
#define DOPRNT_JUSTIFY_LEFT      1
#define DOPRNT_JUSTIFY_RIGHT     2
#define DOPRNT_JUSTIFY_INTERNAL  3

#define DOPRNT_SHOWBASE_YES      1
#define DOPRNT_SHOWBASE_NO       2
#define DOPRNT_SHOWBASE_NONZERO  3

struct doprnt_params_t {
  int         base;          /* negative for upper case */
  int         conv;          /* choices above */
  const char  *expfmt;       /* exponent format */
  int         exptimes4;     /* exponent multiply by 4 */
  char        fill;          /* character */
  int         justify;       /* choices above */
  int         prec;          /* prec field, or -1 for all digits */
  int         showbase;      /* choices above */
  int         showpoint;     /* if radix point always shown */
  int         showtrailing;  /* if trailing zeros wanted */
  char        sign;          /* '+', ' ', or '\0' */
  int         width;         /* width field */
};
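
/* Illustrative sketch only (the field choices are assumptions, not a recipe
   from the library): parameters roughly in the spirit of a printf-style
   "%+12.3f" fixed-point conversion.

     struct doprnt_params_t  p;
     p.base = 10;
     p.conv = DOPRNT_CONV_FIXED;
     p.expfmt = NULL;
     p.exptimes4 = 0;
     p.fill = ' ';
     p.justify = DOPRNT_JUSTIFY_RIGHT;
     p.prec = 3;
     p.showbase = DOPRNT_SHOWBASE_NO;
     p.showpoint = 0;
     p.showtrailing = 1;
     p.sign = '+';
     p.width = 12;
*/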

#if _GMP_H_HAVE_VA_LIST

typedef int (*doprnt_format_t) _PROTO ((void *data, const char *fmt, va_list ap));
typedef int (*doprnt_memory_t) _PROTO ((void *data, const char *str, size_t len));
typedef int (*doprnt_reps_t) _PROTO ((void *data, int c, int reps));
typedef int (*doprnt_final_t) _PROTO ((void *data));

struct doprnt_funs_t {
  doprnt_format_t  format;
  doprnt_memory_t  memory;
  doprnt_reps_t    reps;
  doprnt_final_t   final;   /* NULL if not required */
};

extern const struct doprnt_funs_t  __gmp_fprintf_funs;
extern const struct doprnt_funs_t  __gmp_sprintf_funs;
extern const struct doprnt_funs_t  __gmp_snprintf_funs;
extern const struct doprnt_funs_t  __gmp_obstack_printf_funs;
extern const struct doprnt_funs_t  __gmp_ostream_funs;

/* "buf" is a __gmp_allocate_func block of "alloc" many bytes.  The first
   "size" of these have been written.  "alloc > size" is maintained, so
   there's room to store a '\0' at the end.  "result" is where the
   application wants the final block pointer.  */
struct gmp_asprintf_t {
  char    **result;
  char    *buf;
  size_t  size;
  size_t  alloc;
};

#define GMP_ASPRINTF_T_INIT(d, output)                          \
  do {                                                          \
    (d).result = (output);                                      \
    (d).alloc = 256;                                            \
    (d).buf = (char *) (*__gmp_allocate_func) ((d).alloc);      \
    (d).size = 0;                                               \
  } while (0)

/* If a realloc is necessary, use twice the size actually required, so as to
   avoid repeated small reallocs.  */
#define GMP_ASPRINTF_T_NEED(d, n)                                       \
  do {                                                                  \
    size_t  alloc, newsize, newalloc;                                   \
    ASSERT ((d)->alloc >= (d)->size + 1);                               \
                                                                        \
    alloc = (d)->alloc;                                                 \
    newsize = (d)->size + (n);                                          \
    if (alloc <= newsize)                                               \
      {                                                                 \
        newalloc = 2*newsize;                                           \
        (d)->alloc = newalloc;                                          \
        (d)->buf = __GMP_REALLOCATE_FUNC_TYPE ((d)->buf,                \
                                               alloc, newalloc, char);  \
      }                                                                 \
  } while (0)

__GMP_DECLSPEC int __gmp_asprintf_memory _PROTO ((struct gmp_asprintf_t *d, const char *str, size_t len));
__GMP_DECLSPEC int __gmp_asprintf_reps _PROTO ((struct gmp_asprintf_t *d, int c, int reps));
__GMP_DECLSPEC int __gmp_asprintf_final _PROTO ((struct gmp_asprintf_t *d));
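
/* Usage sketch (illustrative only) of the asprintf output flow: initialize,
   append pieces, then finalize, after which *result points at the
   '\0'-terminated __gmp_allocate_func block.

     struct gmp_asprintf_t  d;
     char  *result;
     GMP_ASPRINTF_T_INIT (d, &result);
     __gmp_asprintf_memory (&d, "x = ", 4);
     __gmp_asprintf_reps (&d, '0', 3);    // append "000"
     __gmp_asprintf_final (&d);
*/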

/* buf is where to write the next output, and size is how much space is left
   there.  If the application passed size==0 then that's what we'll have
   here, and nothing at all should be written.  */
struct gmp_snprintf_t {
  char    *buf;
  size_t  size;
};

/* Add the bytes printed by the call to the total retval, or bail out on an
   error.  */
#define DOPRNT_ACCUMULATE(call) \
  do {                          \
    int  __ret;                 \
    __ret = call;               \
    if (__ret == -1)            \
      goto error;               \
    retval += __ret;            \
  } while (0)
#define DOPRNT_ACCUMULATE_FUN(fun, params)      \
  do {                                          \
    ASSERT ((fun) != NULL);                     \
    DOPRNT_ACCUMULATE ((*(fun)) params);        \
  } while (0)

#define DOPRNT_FORMAT(fmt, ap)                          \
  DOPRNT_ACCUMULATE_FUN (funs->format, (data, fmt, ap))
#define DOPRNT_MEMORY(ptr, len)                                 \
  DOPRNT_ACCUMULATE_FUN (funs->memory, (data, ptr, len))
#define DOPRNT_REPS(c, n)                               \
  DOPRNT_ACCUMULATE_FUN (funs->reps, (data, c, n))

#define DOPRNT_STRING(str)      DOPRNT_MEMORY (str, strlen (str))

#define DOPRNT_REPS_MAYBE(c, n) \
  do {                          \
    if ((n) != 0)               \
      DOPRNT_REPS (c, n);       \
  } while (0)
#define DOPRNT_MEMORY_MAYBE(ptr, len)   \
  do {                                  \
    if ((len) != 0)                     \
      DOPRNT_MEMORY (ptr, len);         \
  } while (0)
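
/* The DOPRNT macros above assume locals named funs, data and retval plus an
   error label in the enclosing function.  A minimal sketch of that
   environment (example_output is a hypothetical name):

     static int
     example_output (const struct doprnt_funs_t *funs, void *data)
     {
       int  retval = 0;
       DOPRNT_STRING ("hello");
       DOPRNT_REPS_MAYBE (' ', 3);
       return retval;
     error:
       return -1;
     }
*/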

__GMP_DECLSPEC int __gmp_doprnt _PROTO ((const struct doprnt_funs_t *, void *, const char *, va_list));
__GMP_DECLSPEC int __gmp_doprnt_integer _PROTO ((const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *));

#define __gmp_doprnt_mpf __gmp_doprnt_mpf2
__GMP_DECLSPEC int __gmp_doprnt_mpf _PROTO ((const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *, mpf_srcptr));

__GMP_DECLSPEC int __gmp_replacement_vsnprintf _PROTO ((char *, size_t, const char *, va_list));
#endif /* _GMP_H_HAVE_VA_LIST */


typedef int (*gmp_doscan_scan_t)  _PROTO ((void *, const char *, ...));
typedef void *(*gmp_doscan_step_t) _PROTO ((void *, int));
typedef int (*gmp_doscan_get_t)   _PROTO ((void *));
typedef int (*gmp_doscan_unget_t) _PROTO ((int, void *));

struct gmp_doscan_funs_t {
  gmp_doscan_scan_t   scan;
  gmp_doscan_step_t   step;
  gmp_doscan_get_t    get;
  gmp_doscan_unget_t  unget;
};
extern const struct gmp_doscan_funs_t  __gmp_fscanf_funs;
extern const struct gmp_doscan_funs_t  __gmp_sscanf_funs;

#if _GMP_H_HAVE_VA_LIST
__GMP_DECLSPEC int __gmp_doscan _PROTO ((const struct gmp_doscan_funs_t *, void *,
                          const char *, va_list));
#endif


/* For testing and debugging.  */
#define MPZ_CHECK_FORMAT(z)					\
  do {                                                          \
    ASSERT_ALWAYS (SIZ(z) == 0 || PTR(z)[ABSIZ(z) - 1] != 0);	\
    ASSERT_ALWAYS (ALLOC(z) >= ABSIZ(z));			\
    ASSERT_ALWAYS_MPN (PTR(z), ABSIZ(z));                       \
  } while (0)

#define MPQ_CHECK_FORMAT(q)                             \
  do {                                                  \
    MPZ_CHECK_FORMAT (mpq_numref (q));                  \
    MPZ_CHECK_FORMAT (mpq_denref (q));                  \
    ASSERT_ALWAYS (SIZ(mpq_denref(q)) >= 1);            \
                                                        \
    if (SIZ(mpq_numref(q)) == 0)                        \
      {                                                 \
        /* should have zero as 0/1 */                   \
        ASSERT_ALWAYS (SIZ(mpq_denref(q)) == 1          \
                       && PTR(mpq_denref(q))[0] == 1);  \
      }                                                 \
    else                                                \
      {                                                 \
        /* should have no common factors */             \
        mpz_t  g;                                       \
        mpz_init (g);                                   \
        mpz_gcd (g, mpq_numref(q), mpq_denref(q));      \
        ASSERT_ALWAYS (mpz_cmp_ui (g, 1) == 0);         \
        mpz_clear (g);                                  \
      }                                                 \
  } while (0)

#define MPF_CHECK_FORMAT(f)                             \
  do {                                                  \
    ASSERT_ALWAYS (PREC(f) >= __GMPF_BITS_TO_PREC(53)); \
    ASSERT_ALWAYS (ABSIZ(f) <= PREC(f)+1);              \
    if (SIZ(f) == 0)                                    \
      ASSERT_ALWAYS (EXP(f) == 0);                      \
    if (SIZ(f) != 0)                                    \
      ASSERT_ALWAYS (PTR(f)[ABSIZ(f) - 1] != 0);        \
  } while (0)
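
/* E.g. in a test program (sketch, for freshly computed z, q and f):

     MPZ_CHECK_FORMAT (z);   // no high zero limb, alloc >= size
     MPQ_CHECK_FORMAT (q);   // den >= 1, zero stored as 0/1, gcd == 1
     MPF_CHECK_FORMAT (f);
*/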


#define MPZ_PROVOKE_REALLOC(z)					\
  do { ALLOC(z) = ABSIZ(z); } while (0)


/* Enhancement: The "mod" and "gcd_1" functions below could have
   __GMP_ATTRIBUTE_PURE, but currently (gcc 3.3) that's not supported on
   function pointers, only actual functions.  It probably doesn't make much
   difference to the gmp code, since hopefully we arrange calls so there's
   no great need for the compiler to move things around.  */

#if WANT_FAT_BINARY
/* NOTE: The function pointers in this struct are also in CPUVEC_FUNCS_LIST
   in mpn/x86/x86-defs.m4 and in mpn/x86_64/x86_64-defs.m4.  Be sure to
   update them there when changing here.  */
struct cpuvec_t {
  DECL_add_err1_n      ((*add_err1_n));
  DECL_add_err2_n      ((*add_err2_n));
  DECL_add_n           ((*add_n));
  DECL_addmul_1        ((*addmul_1));
  DECL_copyd           ((*copyd));
  DECL_copyi           ((*copyi));
  DECL_divexact_1      ((*divexact_1));
  DECL_divexact_by3c   ((*divexact_by3c));
  DECL_divexact_byfobm1 ((*divexact_byfobm1));
  DECL_divrem_1        ((*divrem_1));
  DECL_divrem_2        ((*divrem_2));
  DECL_divrem_euclidean_qr_1 ((*divrem_euclidean_qr_1));
  DECL_divrem_euclidean_qr_2 ((*divrem_euclidean_qr_2));
  DECL_gcd_1           ((*gcd_1));
  DECL_lshift          ((*lshift));
  DECL_mod_1           ((*mod_1));
  DECL_mod_34lsub1     ((*mod_34lsub1));
  DECL_modexact_1c_odd ((*modexact_1c_odd));
  DECL_mul_1           ((*mul_1));
  DECL_mul_basecase    ((*mul_basecase));
  DECL_mulmid_basecase ((*mulmid_basecase));
  DECL_preinv_divrem_1 ((*preinv_divrem_1));
  DECL_preinv_mod_1    ((*preinv_mod_1));
  DECL_redc_1          ((*redc_1));
  DECL_rshift          ((*rshift));
  DECL_sqr_basecase    ((*sqr_basecase));
  DECL_sub_err1_n      ((*sub_err1_n));
  DECL_sub_err2_n      ((*sub_err2_n));
  DECL_sub_n           ((*sub_n));
  DECL_submul_1        ((*submul_1));
  DECL_sumdiff_n       ((*sumdiff_n));

  int                  initialized;
  mp_size_t            mul_karatsuba_threshold;
  mp_size_t            mul_toom3_threshold;
  mp_size_t            sqr_karatsuba_threshold;
  mp_size_t            sqr_toom3_threshold;
};
__GMP_DECLSPEC extern struct cpuvec_t __gmpn_cpuvec;
#endif /* WANT_FAT_BINARY */

__GMP_DECLSPEC void __gmpn_cpuvec_init __GMP_PROTO ((void));

/* Get a threshold "field" from __gmpn_cpuvec, running __gmpn_cpuvec_init()
   if that hasn't yet been done (to establish the right values).  */
#define CPUVEC_THRESHOLD(field)                                               \
  ((LIKELY (__gmpn_cpuvec.initialized) ? 0 : (__gmpn_cpuvec_init (), 0)),     \
   __gmpn_cpuvec.field)
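
/* For example, tuned code can fetch the current machine's threshold with

     mp_size_t thresh = CPUVEC_THRESHOLD (mul_karatsuba_threshold);

   which runs __gmpn_cpuvec_init the first time through.  */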


static inline int
mpn_zero_p (mp_srcptr ap, mp_size_t n)
{
  mp_size_t i;
  for (i = n - 1; i >= 0; i--)
    {
      if (ap[i] != 0)
	return 0;
    }
  return 1;
}

#if TUNE_PROGRAM_BUILD
/* Some extras wanted when recompiling some .c files for use by the tune
   program.  Not part of a normal build.

   It's necessary to keep these thresholds as #defines (just to an
   identically named variable), since various defaults are established based
   on #ifdef in the .c files.  For some this is not so (the defaults are
   instead established above), but all are done this way for consistency. */

#undef  MUL_KARATSUBA_THRESHOLD
#define MUL_KARATSUBA_THRESHOLD      mul_karatsuba_threshold
extern mp_size_t                     mul_karatsuba_threshold;

#undef  MUL_TOOM3_THRESHOLD
#define MUL_TOOM3_THRESHOLD          mul_toom3_threshold
extern mp_size_t                     mul_toom3_threshold;

#undef  MUL_TOOM4_THRESHOLD
#define MUL_TOOM4_THRESHOLD          mul_toom4_threshold
extern mp_size_t                     mul_toom4_threshold;

#undef  MUL_TOOM8H_THRESHOLD
#define MUL_TOOM8H_THRESHOLD         mul_toom8h_threshold
extern mp_size_t                     mul_toom8h_threshold;

#undef  MUL_FFT_THRESHOLD
#define MUL_FFT_THRESHOLD            mul_fft_threshold
extern mp_size_t                     mul_fft_threshold;

#undef  MUL_FFT_FULL_THRESHOLD
#define MUL_FFT_FULL_THRESHOLD       mul_fft_full_threshold
extern mp_size_t                     mul_fft_full_threshold;

/* A native mpn_sqr_basecase is not tuned and SQR_BASECASE_THRESHOLD should
   remain as zero (always use it). */
#if ! HAVE_NATIVE_mpn_sqr_basecase
#undef  SQR_BASECASE_THRESHOLD
#define SQR_BASECASE_THRESHOLD       sqr_basecase_threshold
extern mp_size_t                     sqr_basecase_threshold;
#endif

#if TUNE_PROGRAM_BUILD_SQR
#undef  SQR_KARATSUBA_THRESHOLD
#define SQR_KARATSUBA_THRESHOLD      SQR_KARATSUBA_MAX_GENERIC
#else
#undef  SQR_KARATSUBA_THRESHOLD
#define SQR_KARATSUBA_THRESHOLD      sqr_karatsuba_threshold
extern mp_size_t                     sqr_karatsuba_threshold;
#endif

#undef  SQR_TOOM3_THRESHOLD
#define SQR_TOOM3_THRESHOLD          sqr_toom3_threshold
extern mp_size_t                     sqr_toom3_threshold;

#undef  SQR_TOOM4_THRESHOLD
#define SQR_TOOM4_THRESHOLD          sqr_toom4_threshold
extern mp_size_t                     sqr_toom4_threshold;

#undef  SQR_TOOM8_THRESHOLD
#define SQR_TOOM8_THRESHOLD          sqr_toom8_threshold
extern mp_size_t                     sqr_toom8_threshold;

#undef  SQR_FFT_THRESHOLD
#define SQR_FFT_THRESHOLD            sqr_fft_threshold
extern mp_size_t                     sqr_fft_threshold;

#undef  SQR_FFT_FULL_THRESHOLD
#define SQR_FFT_FULL_THRESHOLD       sqr_fft_full_threshold
extern mp_size_t                     sqr_fft_full_threshold;

#undef  MULLOW_BASECASE_THRESHOLD
#define MULLOW_BASECASE_THRESHOLD    mullow_basecase_threshold
extern mp_size_t                     mullow_basecase_threshold;

#undef  MULLOW_DC_THRESHOLD
#define MULLOW_DC_THRESHOLD          mullow_dc_threshold
extern mp_size_t                     mullow_dc_threshold;

#undef  MULLOW_MUL_THRESHOLD
#define MULLOW_MUL_THRESHOLD         mullow_mul_threshold
extern mp_size_t                     mullow_mul_threshold;

#undef  MULMID_TOOM42_THRESHOLD
#define MULMID_TOOM42_THRESHOLD      mulmid_toom42_threshold
extern mp_size_t                     mulmid_toom42_threshold;

#undef  MULHIGH_BASECASE_THRESHOLD
#define MULHIGH_BASECASE_THRESHOLD   mulhigh_basecase_threshold
extern mp_size_t                     mulhigh_basecase_threshold;

#undef  MULHIGH_DC_THRESHOLD
#define MULHIGH_DC_THRESHOLD         mulhigh_dc_threshold
extern mp_size_t                     mulhigh_dc_threshold;

#undef  MULHIGH_MUL_THRESHOLD
#define MULHIGH_MUL_THRESHOLD        mulhigh_mul_threshold
extern mp_size_t                     mulhigh_mul_threshold;

#undef  MULMOD_2EXPM1_THRESHOLD
#define MULMOD_2EXPM1_THRESHOLD      mulmod_2expm1_threshold
extern mp_size_t                     mulmod_2expm1_threshold;

#if ! UDIV_PREINV_ALWAYS
#undef  DIV_SB_PREINV_THRESHOLD
#define DIV_SB_PREINV_THRESHOLD      div_sb_preinv_threshold
extern mp_size_t                     div_sb_preinv_threshold;
#endif

#undef  DC_DIV_QR_THRESHOLD
#define DC_DIV_QR_THRESHOLD          dc_div_qr_threshold
extern mp_size_t                     dc_div_qr_threshold;

#undef  DC_BDIV_QR_THRESHOLD
#define DC_BDIV_QR_THRESHOLD         dc_bdiv_qr_threshold
extern mp_size_t                     dc_bdiv_qr_threshold;

#undef  DC_BDIV_Q_THRESHOLD
#define DC_BDIV_Q_THRESHOLD          dc_bdiv_q_threshold
extern mp_size_t                     dc_bdiv_q_threshold;

#undef  INV_DIV_QR_THRESHOLD
#define INV_DIV_QR_THRESHOLD         inv_div_qr_threshold
extern mp_size_t                     inv_div_qr_threshold;

#undef  INV_DIVAPPR_Q_N_THRESHOLD
#define INV_DIVAPPR_Q_N_THRESHOLD    inv_divappr_q_n_threshold
extern mp_size_t                     inv_divappr_q_n_threshold;

#undef  DC_DIV_Q_THRESHOLD
#define DC_DIV_Q_THRESHOLD           dc_div_q_threshold
extern mp_size_t                     dc_div_q_threshold;

#undef  INV_DIV_Q_THRESHOLD
#define INV_DIV_Q_THRESHOLD          inv_div_q_threshold
extern mp_size_t                     inv_div_q_threshold;

#undef  BINV_NEWTON_THRESHOLD
#define BINV_NEWTON_THRESHOLD        binv_newton_threshold
extern mp_size_t                     binv_newton_threshold;

#undef  DC_DIVAPPR_Q_THRESHOLD
#define DC_DIVAPPR_Q_THRESHOLD       dc_divappr_q_threshold
extern mp_size_t                     dc_divappr_q_threshold;

#undef  INV_DIVAPPR_Q_THRESHOLD
#define INV_DIVAPPR_Q_THRESHOLD      inv_divappr_q_threshold
extern mp_size_t                     inv_divappr_q_threshold;

#undef  ROOTREM_THRESHOLD
#define ROOTREM_THRESHOLD            rootrem_threshold
extern mp_size_t                     rootrem_threshold;

#undef  DIVREM_HENSEL_QR_1_THRESHOLD
#define DIVREM_HENSEL_QR_1_THRESHOLD divrem_hensel_qr_1_threshold
extern mp_size_t                     divrem_hensel_qr_1_threshold;

#undef  RSH_DIVREM_HENSEL_QR_1_THRESHOLD
#define RSH_DIVREM_HENSEL_QR_1_THRESHOLD rsh_divrem_hensel_qr_1_threshold
extern mp_size_t                     rsh_divrem_hensel_qr_1_threshold;

#undef  DIVREM_EUCLID_HENSEL_THRESHOLD
#define DIVREM_EUCLID_HENSEL_THRESHOLD divrem_euclid_hensel_threshold
extern mp_size_t                     divrem_euclid_hensel_threshold;

#undef  MOD_1_1_THRESHOLD
#define MOD_1_1_THRESHOLD            mod_1_1_threshold
extern mp_size_t                     mod_1_1_threshold;

#undef  MOD_1_2_THRESHOLD
#define MOD_1_2_THRESHOLD            mod_1_2_threshold
extern mp_size_t                     mod_1_2_threshold;

#undef  MOD_1_3_THRESHOLD
#define MOD_1_3_THRESHOLD            mod_1_3_threshold
extern mp_size_t                     mod_1_3_threshold;

#undef  REDC_1_TO_REDC_2_THRESHOLD
#define REDC_1_TO_REDC_2_THRESHOLD   redc_1_to_redc_2_threshold
extern mp_size_t                     redc_1_to_redc_2_threshold;

#undef  REDC_2_TO_REDC_N_THRESHOLD
#define REDC_2_TO_REDC_N_THRESHOLD   redc_2_to_redc_n_threshold
extern mp_size_t                     redc_2_to_redc_n_threshold;

#undef  REDC_1_TO_REDC_N_THRESHOLD
#define REDC_1_TO_REDC_N_THRESHOLD   redc_1_to_redc_n_threshold
extern mp_size_t                     redc_1_to_redc_n_threshold;

#undef  MATRIX22_STRASSEN_THRESHOLD
#define MATRIX22_STRASSEN_THRESHOLD  matrix22_strassen_threshold
extern mp_size_t                     matrix22_strassen_threshold;

#undef  HGCD_THRESHOLD
#define HGCD_THRESHOLD               hgcd_threshold
extern mp_size_t                     hgcd_threshold;

#undef  HGCD_APPR_THRESHOLD
#define HGCD_APPR_THRESHOLD          hgcd_appr_threshold
extern mp_size_t                     hgcd_appr_threshold;

#undef  HGCD_REDUCE_THRESHOLD
#define HGCD_REDUCE_THRESHOLD        hgcd_reduce_threshold
extern mp_size_t                     hgcd_reduce_threshold;

#undef  GCD_DC_THRESHOLD
#define GCD_DC_THRESHOLD             gcd_dc_threshold
extern mp_size_t                     gcd_dc_threshold;

#undef  GCDEXT_DC_THRESHOLD
#define GCDEXT_DC_THRESHOLD          gcdext_dc_threshold
extern mp_size_t                     gcdext_dc_threshold;

#undef  DIVREM_1_NORM_THRESHOLD
#define DIVREM_1_NORM_THRESHOLD      divrem_1_norm_threshold
extern mp_size_t                     divrem_1_norm_threshold;

#undef  DIVREM_1_UNNORM_THRESHOLD
#define DIVREM_1_UNNORM_THRESHOLD    divrem_1_unnorm_threshold
extern mp_size_t                     divrem_1_unnorm_threshold;

#undef  MOD_1_NORM_THRESHOLD
#define MOD_1_NORM_THRESHOLD         mod_1_norm_threshold
extern mp_size_t                     mod_1_norm_threshold;

#undef  MOD_1_UNNORM_THRESHOLD
#define MOD_1_UNNORM_THRESHOLD       mod_1_unnorm_threshold
extern mp_size_t                     mod_1_unnorm_threshold;

#if ! UDIV_PREINV_ALWAYS
#undef  DIVREM_2_THRESHOLD
#define DIVREM_2_THRESHOLD           divrem_2_threshold
extern mp_size_t                     divrem_2_threshold;
#endif

#undef  GET_STR_DC_THRESHOLD
#define GET_STR_DC_THRESHOLD         get_str_dc_threshold
extern mp_size_t                     get_str_dc_threshold;

#undef  GET_STR_PRECOMPUTE_THRESHOLD
#define GET_STR_PRECOMPUTE_THRESHOLD get_str_precompute_threshold
extern mp_size_t                     get_str_precompute_threshold;

#undef  SET_STR_DC_THRESHOLD
#define SET_STR_DC_THRESHOLD         set_str_dc_threshold
extern mp_size_t                     set_str_dc_threshold;

#undef  SET_STR_PRECOMPUTE_THRESHOLD
#define SET_STR_PRECOMPUTE_THRESHOLD set_str_precompute_threshold
extern mp_size_t                     set_str_precompute_threshold;

#undef  FAC_ODD_THRESHOLD
#define FAC_ODD_THRESHOLD            fac_odd_threshold
extern mp_size_t                     fac_odd_threshold;

#undef  FAC_DSC_THRESHOLD
#define FAC_DSC_THRESHOLD            fac_dsc_threshold
extern mp_size_t                     fac_dsc_threshold;

/* Sizes the tune program tests up to, used in a couple of recompilations. */
#undef  MUL_KARATSUBA_THRESHOLD_LIMIT
#undef  MUL_TOOM3_THRESHOLD_LIMIT
#undef  MUL_TOOM4_THRESHOLD_LIMIT
#undef  MUL_TOOM8H_THRESHOLD_LIMIT
#undef  MULLOW_BASECASE_THRESHOLD_LIMIT
#undef  SQR_TOOM3_THRESHOLD_LIMIT
#undef  SQR_TOOM4_THRESHOLD_LIMIT
#undef  SQR_TOOM8_THRESHOLD_LIMIT
#define SQR_KARATSUBA_MAX_GENERIC       200
#define MUL_KARATSUBA_THRESHOLD_LIMIT   700
#define MUL_TOOM3_THRESHOLD_LIMIT       700
#define MUL_TOOM4_THRESHOLD_LIMIT       1000
#define MUL_TOOM8H_THRESHOLD_LIMIT      2000
#define MULLOW_BASECASE_THRESHOLD_LIMIT 200
#define SQR_TOOM3_THRESHOLD_LIMIT       400
#define SQR_TOOM4_THRESHOLD_LIMIT       1000
#define SQR_TOOM8_THRESHOLD_LIMIT       2000
#define GET_STR_THRESHOLD_LIMIT         150
#define FAC_DSC_THRESHOLD_LIMIT         2048

#endif /* TUNE_PROGRAM_BUILD */

#if defined (__cplusplus)
}
#endif


#ifdef __cplusplus

/* A little helper for a null-terminated __gmp_allocate_func string.
   The destructor ensures it's freed even if an exception is thrown.
   The len field is needed by the destructor, and can be used by anyone else
   to avoid a second strlen pass over the data.

   Since our input is a C string, using strlen is correct.  Perhaps it'd be
   more C++-ish style to use std::char_traits<char>::length, but char_traits
   isn't available in gcc 2.95.4.  */

class gmp_allocated_string {
 public:
  char *str;
  size_t len;
  gmp_allocated_string(char *arg)
  {
    str = arg;
    len = std::strlen (str);
  }
  ~gmp_allocated_string()
  {
    (*__gmp_free_func) (str, len+1);
  }
};
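
/* For instance (sketch): mpz_get_str with a NULL buffer allocates via
   __gmp_allocate_func, so the wrapper frees the result automatically,
   here assuming some std::ostream o and mpz_t z in scope:

     gmp_allocated_string  t (mpz_get_str (NULL, 10, z));
     o.write (t.str, t.len);
*/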

std::istream &__gmpz_operator_in_nowhite (std::istream &, mpz_ptr, char);
int __gmp_istream_set_base (std::istream &, char &, bool &, bool &);
void __gmp_istream_set_digits (std::string &, std::istream &, char &, bool &, int);
void __gmp_doprnt_params_from_ios (struct doprnt_params_t *p, std::ios &o);
std::ostream& __gmp_doprnt_integer_ostream (std::ostream &o, struct doprnt_params_t *p, char *s);
extern const struct doprnt_funs_t  __gmp_asprintf_funs_noformat;

#endif /* __cplusplus */

#endif /* __GMP_IMPL_H__ */