

#ifndef NTL_ZZ__H
#define NTL_ZZ__H




/********************************************************

   LIP INTERFACE

   The class ZZ implements signed, arbitrary length integers.

**********************************************************/


#include <NTL/lip.h>
#include <NTL/tools.h>
#include <NTL/vector.h>
#include <NTL/SmartPtr.h>
#include <NTL/sp_arith.h>

NTL_OPEN_NNS


class ZZ_p; // forward declaration
class ZZX;

class ZZ {
public:
typedef ZZ_p residue_type;
typedef ZZX poly_type;



class Deleter {
public:
   static void apply(_ntl_gbigint p) { _ntl_gfree(p); }
};

WrappedPtr<_ntl_gbigint_body, Deleter> rep;
// This is currently public for "emergency" situations
// May be private in future versions.

ZZ() { }


explicit ZZ(long a) { *this = a; }

ZZ(INIT_SIZE_TYPE, long k)
// initial value is 0, but space is pre-allocated so that numbers
// x with x.size() <= k can be stored without re-allocation.
// Call with ZZ(INIT_SIZE, k).
// The purpose for the INIT_SIZE argument is to prevent automatic
// type conversion from long to ZZ, which would be tempting, but wrong.


{
   _ntl_gsetlength(&rep, k);
}
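
// Illustrative usage sketch (not part of the original header): pre-allocating
// space so that later assignments do not trigger re-allocation.  SetSize is
// the member declared further down in this class; assumes <NTL/ZZ.h> is
// included and the NTL namespace is in scope.
//
//    ZZ x;
//    x.SetSize(10);         // reserve space for 10 digits; x is still 0
//    ZZ y(INIT_SIZE, 10);   // same effect, via the constructor above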

ZZ(const ZZ& a)
// initial value is a.

{
   _ntl_gcopy(a.rep, &rep);
}


ZZ(INIT_VAL_TYPE, long a)  { _ntl_gintoz(a, &rep); }
ZZ(INIT_VAL_TYPE, int a)  { _ntl_gintoz(a, &rep); }

ZZ(INIT_VAL_TYPE, unsigned long a)  { _ntl_guintoz(a, &rep); }
ZZ(INIT_VAL_TYPE, unsigned int a)  { _ntl_guintoz((unsigned long) a, &rep); }

inline ZZ(INIT_VAL_TYPE, const char *);
inline ZZ(INIT_VAL_TYPE, float);
inline ZZ(INIT_VAL_TYPE, double);


ZZ& operator=(const ZZ& a) { _ntl_gcopy(a.rep, &rep); return *this; }

ZZ& operator=(long a) { _ntl_gintoz(a, &rep); return *this; }


void kill()
// force the space held by this ZZ to be released.
// The value then becomes 0.

{ rep.kill(); }


void swap(ZZ& x)
{ _ntl_gswap(&rep, &x.rep); }

bool pinned() const
{
   return _ntl_PINNED(rep);
}


#if (NTL_CXX_STANDARD >= 2011 && !defined(NTL_DISABLE_MOVE))

ZZ(ZZ&& a) NTL_FAKE_NOEXCEPT
{
   *this = std::move(a);
}

ZZ& operator=(ZZ&& a) NTL_FAKE_NOEXCEPT
{
   if (pinned() || a.pinned()) {
      _ntl_gcopy(a.rep, &rep);
   }
   else {
      rep.move(a.rep);
   }

   return *this;
}


#endif


void SetSize(long k)
// pre-allocates space for k-digit numbers (base 2^NTL_ZZ_NBITS);
// does not change the value.

{ _ntl_gsetlength(&rep, k); }

long size() const
// returns the number of (NTL_ZZ_NBITS-bit) digits of |*this|; the size of 0 is 0.
   { return _ntl_gsize(rep); }

long null() const
// test if rep is null
   { return !rep; }

long MaxAlloc() const
// returns max allocation request, possibly rounded up a bit...
   { return _ntl_gmaxalloc(rep); }


long SinglePrecision() const
   { return _ntl_gsptest(rep); }

// tests if less than NTL_SP_BOUND in absolute value

long WideSinglePrecision() const
   { return _ntl_gwsptest(rep); }

// tests if less than NTL_WSP_BOUND in absolute value

static const ZZ& zero();

ZZ(ZZ& x, INIT_TRANS_TYPE) { rep.swap(x.rep); }
// used to cheaply hand off memory management of return value,
// without copying, assuming compiler implements the
// "return value optimization".  This is probably obsolete by
// now, as modern compilers can and should optimize
// the copy constructor in the situations where this is used.
// This should only be used for simple, local variables
// that are not subject to special memory management.


// mainly for internal consumption by ZZWatcher

void KillBig() { if (MaxAlloc() > NTL_RELEASE_THRESH) kill(); }


long validate() { return _ntl_gvalidate(rep); }

};


NTL_DECLARE_RELOCATABLE((ZZ*))


class ZZWatcher {
public:
   ZZ& watched;
   explicit
   ZZWatcher(ZZ& _watched) : watched(_watched) {}

   ~ZZWatcher() { watched.KillBig(); }
};

#define NTL_ZZRegister(x) NTL_TLS_LOCAL(ZZ, x); ZZWatcher _WATCHER__ ## x(x)




const ZZ& ZZ_expo(long e);


inline void clear(ZZ& x)
// x = 0

   { _ntl_gzero(&x.rep); }

inline void set(ZZ& x)
// x = 1

   { _ntl_gone(&x.rep); }


inline void swap(ZZ& x, ZZ& y)
// swap the values of x and y (swaps pointers only)

   { x.swap(y); }


inline double log(const ZZ& a)
   { return _ntl_glog(a.rep); }




/**********************************************************

   Conversion routines.

***********************************************************/



inline void conv(ZZ& x, const ZZ& a) { x = a; }
inline ZZ to_ZZ(const ZZ& a) { return a; }

inline void conv(ZZ& x, long a) { _ntl_gintoz(a, &x.rep); }
inline ZZ to_ZZ(long a) { return ZZ(INIT_VAL, a); }

inline void conv(ZZ& x, int a) { _ntl_gintoz(long(a), &x.rep); }
inline ZZ to_ZZ(int a) { return ZZ(INIT_VAL, a); }

inline void conv(ZZ& x, unsigned long a) { _ntl_guintoz(a, &x.rep); }
inline ZZ to_ZZ(unsigned long a) { return ZZ(INIT_VAL, a); }

inline void conv(ZZ& x, unsigned int a) { _ntl_guintoz((unsigned long)(a), &x.rep); }
inline ZZ to_ZZ(unsigned int a) { return ZZ(INIT_VAL, a); }

inline ZZ::ZZ(INIT_VAL_TYPE, const char *s)  { conv(*this, s); }
inline ZZ to_ZZ(const char *s) { return ZZ(INIT_VAL, s); }

inline void conv(ZZ& x, double a) { _ntl_gdoubtoz(a, &x.rep); }
inline ZZ::ZZ(INIT_VAL_TYPE, double a)  { conv(*this, a); }
inline ZZ to_ZZ(double a) { return ZZ(INIT_VAL, a); }

inline void conv(ZZ& x, float a) { _ntl_gdoubtoz(double(a), &x.rep); }
inline ZZ::ZZ(INIT_VAL_TYPE, float a)  { conv(*this, a); }
inline ZZ to_ZZ(float a) { return ZZ(INIT_VAL, a); }

inline void conv(long& x, const ZZ& a) { x = _ntl_gtoint(a.rep); }
inline long to_long(const ZZ& a)  { return _ntl_gtoint(a.rep); }

inline void conv(int& x, const ZZ& a)
   { unsigned int res = (unsigned int) _ntl_gtouint(a.rep);
     x = NTL_UINT_TO_INT(res); }

inline int to_int(const ZZ& a)
   { unsigned int res = (unsigned int) _ntl_gtouint(a.rep);
     return NTL_UINT_TO_INT(res); }

inline void conv(unsigned long& x, const ZZ& a) { x = _ntl_gtouint(a.rep); }
inline unsigned long to_ulong(const ZZ& a)  { return _ntl_gtouint(a.rep); }

inline void conv(unsigned int& x, const ZZ& a)
   { x = (unsigned int)(_ntl_gtouint(a.rep)); }
inline unsigned int to_uint(const ZZ& a)
   { return (unsigned int)(_ntl_gtouint(a.rep)); }

inline void conv(double& x, const ZZ& a) { x = _ntl_gdoub(a.rep); }
inline double to_double(const ZZ& a) { return _ntl_gdoub(a.rep); }

inline void conv(float& x, const ZZ& a) { x = float(_ntl_gdoub(a.rep)); }
inline float to_float(const ZZ& a) { return float(_ntl_gdoub(a.rep)); }

inline void ZZFromBytes(ZZ& x, const unsigned char *p, long n)
   { _ntl_gfrombytes(&x.rep, p, n); }

inline ZZ ZZFromBytes(const unsigned char *p, long n)
   { ZZ x; ZZFromBytes(x, p, n); NTL_OPT_RETURN(ZZ, x); }

inline void BytesFromZZ(unsigned char *p, const ZZ& a, long n)
   { _ntl_gbytesfromz(p, a.rep, n); }
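
// Illustrative usage sketch (not part of the original header): round-tripping
// a non-negative ZZ through its little-endian byte representation.  NumBytes
// is declared further down in this header.
//
//    ZZ a = to_ZZ("123456789123456789");
//    long n = NumBytes(a);
//    Vec<unsigned char> buf;
//    buf.SetLength(n);
//    BytesFromZZ(buf.elts(), a, n);      // buf[0..n-1] = |a|, low byte first
//    ZZ b = ZZFromBytes(buf.elts(), n);  // b == a (the sign is dropped)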




// ****** comparisons


inline long sign(const ZZ& a)
// returns the sign of a (-1, 0, or 1).

   { return _ntl_gsign(a.rep); }


inline long compare(const ZZ& a, const ZZ& b)
// returns the sign of a-b (-1, 0, or 1).

{
   return _ntl_gcompare(a.rep, b.rep);
}

inline long IsZero(const ZZ& a)
// zero test

   { return _ntl_giszero(a.rep); }


inline long IsOne(const ZZ& a)
   { return _ntl_gisone(a.rep); }
// test for 1


/* the usual comparison operators */

inline long operator==(const ZZ& a, const ZZ& b)
  { return _ntl_gcompare(a.rep, b.rep) == 0; }
inline long operator!=(const ZZ& a, const ZZ& b)
  { return _ntl_gcompare(a.rep, b.rep) != 0; }
inline long operator<(const ZZ& a, const ZZ& b)
  { return _ntl_gcompare(a.rep, b.rep) < 0; }
inline long operator>(const ZZ& a, const ZZ& b)
  { return _ntl_gcompare(a.rep, b.rep) > 0; }
inline long operator<=(const ZZ& a, const ZZ& b)
  { return _ntl_gcompare(a.rep, b.rep) <= 0; }
inline long operator>=(const ZZ& a, const ZZ& b)
  { return _ntl_gcompare(a.rep, b.rep) >= 0; }

/* single-precision versions of the above */

inline long compare(const ZZ& a, long b) { return _ntl_gscompare(a.rep, b); }
inline long compare(long a, const ZZ& b) { return -_ntl_gscompare(b.rep, a); }

inline long operator==(const ZZ& a, long b) { return _ntl_gscompare(a.rep, b) == 0; }
inline long operator!=(const ZZ& a, long b) { return _ntl_gscompare(a.rep, b) != 0; }
inline long operator<(const ZZ& a, long b) { return _ntl_gscompare(a.rep, b) < 0; }
inline long operator>(const ZZ& a, long b) { return _ntl_gscompare(a.rep, b) > 0; }
inline long operator<=(const ZZ& a, long b) { return _ntl_gscompare(a.rep, b) <= 0; }
inline long operator>=(const ZZ& a, long b) { return _ntl_gscompare(a.rep, b) >= 0; }


inline long operator==(long a, const ZZ& b) { return b == a; }
inline long operator!=(long a, const ZZ& b) { return b != a; }
inline long operator<(long a, const ZZ& b) { return b > a; }
inline long operator>(long a, const ZZ& b) { return b < a; }
inline long operator<=(long a, const ZZ& b) { return b >= a; }
inline long operator>=(long a, const ZZ& b) { return b <= a; }

/**************************************************

                 Addition

**************************************************/


inline void add(ZZ& x, const ZZ& a, const ZZ& b)
// x = a + b

   { _ntl_gadd(a.rep, b.rep, &x.rep); }

inline void sub(ZZ& x, const ZZ& a, const ZZ& b)
// x = a - b

   { _ntl_gsub(a.rep, b.rep, &x.rep); }

inline void SubPos(ZZ& x, const ZZ& a, const ZZ& b)
// x = a - b;  assumes a >= b >= 0.

   { _ntl_gsubpos(a.rep, b.rep, &x.rep); }

inline void negate(ZZ& x, const ZZ& a)
// x = -a

   { _ntl_gcopy(a.rep, &x.rep); _ntl_gnegate(&x.rep); }

inline void abs(ZZ& x, const ZZ& a)
// x = |a|
{ _ntl_gcopy(a.rep, &x.rep); _ntl_gabs(&x.rep); }


/* single-precision versions of the above */

inline void add(ZZ& x, const ZZ& a, long b)
   { _ntl_gsadd(a.rep, b, &x.rep); }

inline void add(ZZ& x, long a, const ZZ& b) { add(x, b, a); }


inline void sub(ZZ& x, const ZZ& a, long b)
   { _ntl_gssub(a.rep, b, &x.rep); }

void sub(ZZ& x, long a, const ZZ& b);
// defined in ZZ.cpp

/* operator/function notation */

inline ZZ operator+(const ZZ& a, const ZZ& b)
  { ZZ x; add(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator+(const ZZ& a, long b)
  { ZZ x; add(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator+(long  a, const ZZ& b)
  { ZZ x; add(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator-(const ZZ& a, const ZZ& b)
  { ZZ x; sub(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator-(const ZZ& a, long b)
  { ZZ x; sub(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator-(long  a, const ZZ& b)
  { ZZ x; sub(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator-(const ZZ& a)
  { ZZ x; negate(x, a); NTL_OPT_RETURN(ZZ, x); }

inline ZZ abs(const ZZ& a)
  { ZZ x; abs(x, a); NTL_OPT_RETURN(ZZ, x); }

/* op= notation */

inline ZZ& operator+=(ZZ& x, const ZZ& a)
  { add(x, x, a); return x; }

inline ZZ& operator+=(ZZ& x, long a)
  { add(x, x, a); return x; }

inline ZZ& operator-=(ZZ& x, const ZZ& a)
  { sub(x, x, a); return x; }

inline ZZ& operator-=(ZZ& x, long a)
  { sub(x, x, a); return x; }

/* inc/dec */

inline ZZ& operator++(ZZ& x) { add(x, x, 1); return x; }

inline void operator++(ZZ& x, int) { add(x, x, 1); }

inline ZZ& operator--(ZZ& x) { add(x, x, -1); return x; }

inline void operator--(ZZ& x, int) { add(x, x, -1); }



/*******************************************************

                 Multiplication.

********************************************************/


inline void mul(ZZ& x, const ZZ& a, const ZZ& b)
// x = a * b

   { _ntl_gmul(a.rep, b.rep, &x.rep); }


inline void sqr(ZZ& x, const ZZ& a)
// x = a*a

   { _ntl_gsq(a.rep, &x.rep); }

inline ZZ sqr(const ZZ& a)
   { ZZ x; sqr(x, a); NTL_OPT_RETURN(ZZ, x); }


/* single-precision versions */

inline void mul(ZZ& x, const ZZ& a, long b)
   { _ntl_gsmul(a.rep, b, &x.rep); }

inline void mul(ZZ& x, long a, const ZZ& b)
    { mul(x, b, a); }

/* operator notation */

inline ZZ operator*(const ZZ& a, const ZZ& b)
  { ZZ x; mul(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator*(const ZZ& a, long b)
  { ZZ x; mul(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator*(long a, const ZZ& b)
  { ZZ x; mul(x, a, b); NTL_OPT_RETURN(ZZ, x); }

/* op= notation */

inline ZZ& operator*=(ZZ& x, const ZZ& a)
  { mul(x, x, a); return x; }

inline ZZ& operator*=(ZZ& x, long a)
  { mul(x, x, a); return x; }

// x += a*b

inline void
MulAddTo(ZZ& x, const ZZ& a, long b)
{
   _ntl_gsaddmul(a.rep, b, &x.rep);
}

inline void
MulAddTo(ZZ& x, const ZZ& a, const ZZ& b)
{
   _ntl_gaddmul(a.rep, b.rep, &x.rep);
}

// x -= a*b

inline void
MulSubFrom(ZZ& x, const ZZ& a, long b)
{
   _ntl_gssubmul(a.rep, b, &x.rep);
}

inline void
MulSubFrom(ZZ& x, const ZZ& a, const ZZ& b)
{
   _ntl_gsubmul(a.rep, b.rep, &x.rep);
}



// Special routines for implementing CRT in ZZ_pX arithmetic
// These are verbose, but fairly boilerplate



class ZZ_CRTStructAdapter;
class ZZ_RemStructAdapter;

class ZZ_TmpVecAdapter {
public:
   UniquePtr<_ntl_tmp_vec> rep;

   inline void fetch(const ZZ_CRTStructAdapter&);
   inline void fetch(ZZ_CRTStructAdapter&);
   inline void fetch(const ZZ_RemStructAdapter&);
};


class ZZ_CRTStructAdapter {
public:
   UniquePtr<_ntl_crt_struct> rep;

   void init(long n, const ZZ& p, long (*primes)(long))
   {
      rep.reset(_ntl_crt_struct_build(n, p.rep, primes));
   }

   void insert(long i, const ZZ& m)
   {
       rep->insert(i, m.rep);
   }

   void eval(ZZ& t, const long *a, ZZ_TmpVecAdapter& tmp_vec) const
   {
      rep->eval(&t.rep, a, tmp_vec.rep.get());
   }

   bool special() const
   {
      return rep->special();
   }
};


class ZZ_RemStructAdapter {
public:
   UniquePtr<_ntl_rem_struct> rep;

   void init(long n, const ZZ& p, long (*primes)(long))
   {
      rep.reset(_ntl_rem_struct_build(n, p.rep, primes));
   }

   void eval(long *x, const ZZ& a, ZZ_TmpVecAdapter& tmp_vec) const
   {
      rep->eval(x, a.rep, tmp_vec.rep.get());
   }
};


inline void ZZ_TmpVecAdapter::fetch(const ZZ_CRTStructAdapter& crt_struct)
{
   rep.reset(crt_struct.rep->fetch());
}

inline void ZZ_TmpVecAdapter::fetch(ZZ_CRTStructAdapter& crt_struct)
{
   rep.reset(crt_struct.rep->extract()); // EXTRACT!!
}


inline void ZZ_TmpVecAdapter::fetch(const ZZ_RemStructAdapter& rem_struct)
{
   rep.reset(rem_struct.rep->fetch());
}


// montgomery
class ZZ_ReduceStructAdapter {
public:
   UniquePtr<_ntl_reduce_struct> rep;

   void init(const ZZ& p, const ZZ& excess)
   {
      rep.reset(_ntl_reduce_struct_build(p.rep, excess.rep));
   }

   void eval(ZZ& x, ZZ& a) const
   {
      rep->eval(&x.rep, &a.rep);
   }

   void adjust(ZZ& x) const
   {
      rep->adjust(&x.rep);
   }
};



/*******************************************************

                    Division

*******************************************************/


inline void DivRem(ZZ& q, ZZ& r, const ZZ& a, const ZZ& b)
// q = [a/b], r = a - b*q
// |r| < |b|, and if r != 0, sign(r) = sign(b)

   { _ntl_gdiv(a.rep, b.rep, &q.rep, &r.rep); }
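
// Illustrative sketch (not part of the original header) of the sign
// convention above: for b > 0 the remainder is always non-negative.
//
//    ZZ q, r;
//    DivRem(q, r, to_ZZ(-7), to_ZZ(3));
//    // q == -3, r == 2, and -7 == 3*(-3) + 2, with 0 <= r < 3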



inline void div(ZZ& q, const ZZ& a, const ZZ& b)
// q = a/b

   { _ntl_gdiv(a.rep, b.rep, &q.rep, 0); }

inline void rem(ZZ& r, const ZZ& a, const ZZ& b)
// r = a%b

   { _ntl_gmod(a.rep, b.rep, &r.rep); }


inline void QuickRem(ZZ& r, const ZZ& b)
// r = r%b
// assumes b > 0 and r >= 0
// division is performed in place and may cause r to be re-allocated.

   { _ntl_gquickmod(&r.rep, b.rep); }

long divide(ZZ& q, const ZZ& a, const ZZ& b);
// if b | a, sets q = a/b and returns 1; otherwise returns 0.

long divide(const ZZ& a, const ZZ& b);
// if b | a, returns 1; otherwise returns 0.


/* non-standard single-precision versions */

inline long DivRem(ZZ& q, const ZZ& a, long b)
   { return _ntl_gsdiv(a.rep, b, &q.rep); }

inline long rem(const ZZ& a, long b)
   { return _ntl_gsmod(a.rep, b); }


/* single precision versions */

inline void div(ZZ& q, const ZZ& a, long b)
   { (void) _ntl_gsdiv(a.rep, b, &q.rep); }


long divide(ZZ& q, const ZZ& a, long b);
// if b | a, sets q = a/b and returns 1; otherwise returns 0.

long divide(const ZZ& a, long b);
// if b | a, returns 1; otherwise returns 0.


inline ZZ operator/(const ZZ& a, const ZZ& b)
   { ZZ x; div(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator/(const ZZ& a, long b)
   { ZZ x; div(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator%(const ZZ& a, const ZZ& b)
   { ZZ x; rem(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline long operator%(const ZZ& a, long b)
   { return rem(a, b); }

inline ZZ& operator/=(ZZ& x, const ZZ& b)
   { div(x, x, b); return x; }

inline ZZ& operator/=(ZZ& x, long b)
   { div(x, x, b); return x; }

inline ZZ& operator%=(ZZ& x, const ZZ& b)
   { rem(x, x, b); return x; }


// preconditioned single-precision variant
// not documented for now...




struct sp_ZZ_reduce_struct_policy {

   static
   void deleter(_ntl_general_rem_one_struct *pinfo)
   {
      _ntl_general_rem_one_struct_delete(pinfo);
   }

};

struct sp_ZZ_reduce_struct {
   long p;
   UniquePtr<_ntl_general_rem_one_struct,sp_ZZ_reduce_struct_policy> pinfo;

   sp_ZZ_reduce_struct() : p(0) { }

   void build(long _p)
   {
      pinfo.reset(_ntl_general_rem_one_struct_build(_p));
      p = _p;
   }

   long rem(const ZZ& a) const
   {
      return _ntl_general_rem_one_struct_apply(a.rep, p, pinfo.get());
   }
};
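
// Illustrative sketch (not part of the original header, and this struct is
// explicitly undocumented, so treat it as an internal detail): precomputing
// data for repeated reductions of non-negative big integers modulo a fixed
// single-precision p.
//
//    ZZ a = to_ZZ("1000000000000");
//    sp_ZZ_reduce_struct red;
//    red.build(17);              // precompute tables for p = 17
//    long r = red.rem(a);        // same result as rem(a, 17)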


// special-purpose routines for accumulating CRT-like summations
// Not documented for now.


// Allocates sz+2 limbs and zeros them all out.
// x is not normalized.
inline
void QuickAccumBegin(ZZ& x, long sz)
{
   _ntl_quick_accum_begin(&x.rep, sz);
}

// x += y*b.
// Assumes y >= 0 and that 0 <= b < NTL_SP_BOUND.
// Caller must assure that x does not exceed sz+2 limbs.
// x remains unnormalized.
inline
void QuickAccumMulAdd(ZZ& x, const ZZ& y, long b)
{
   _ntl_quick_accum_muladd(x.rep, y.rep, b);
}

// renormalizes x.
inline
void QuickAccumEnd(ZZ& x)
{
   _ntl_quick_accum_end(x.rep);
}


/**********************************************************

                        GCD's

***********************************************************/


inline void GCD(ZZ& d, const ZZ& a, const ZZ& b)
// d = gcd(a, b)

   { _ntl_ggcd(a.rep, b.rep, &d.rep); }

inline ZZ GCD(const ZZ& a, const ZZ& b)
   { ZZ x; GCD(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline void GCD_alt(ZZ& d, const ZZ& a, const ZZ& b)
// d = gcd(a, b)

   { _ntl_ggcd_alt(a.rep, b.rep, &d.rep); }


inline void XGCD(ZZ& d, ZZ& s, ZZ& t, const ZZ& a, const ZZ& b)
//  d = gcd(a, b) = a*s + b*t;

   { _ntl_gexteucl(a.rep, &s.rep, b.rep, &t.rep, &d.rep); }

// single-precision versions
long GCD(long a, long b);

void XGCD(long& d, long& s, long& t, long a, long b);

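// Illustrative sketch (not part of the original header): the extended GCD
// identity d == a*s + b*t can be checked directly.
//
//    ZZ d, s, t;
//    ZZ a = to_ZZ(240), b = to_ZZ(46);
//    XGCD(d, s, t, a, b);
//    // d == 2, and a*s + b*t == d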





/************************************************************

                      Bit Operations

*************************************************************/


inline void LeftShift(ZZ& x, const ZZ& a, long k)
// x = (a << k), k < 0 => RightShift

   { _ntl_glshift(a.rep, k, &x.rep); }

inline ZZ LeftShift(const ZZ& a, long k)
   { ZZ x; LeftShift(x, a, k); NTL_OPT_RETURN(ZZ, x); }


inline void RightShift(ZZ& x, const ZZ& a, long k)
// x = (a >> k), k < 0 => LeftShift

   { _ntl_grshift(a.rep, k, &x.rep); }

inline ZZ RightShift(const ZZ& a, long k)
   { ZZ x; RightShift(x, a, k); NTL_OPT_RETURN(ZZ, x); }

#ifndef NTL_TRANSITION

inline ZZ operator>>(const ZZ& a, long n)
   { ZZ x; RightShift(x, a, n); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator<<(const ZZ& a, long n)
   { ZZ x; LeftShift(x, a, n); NTL_OPT_RETURN(ZZ, x); }

inline ZZ& operator<<=(ZZ& x, long n)
   { LeftShift(x, x, n); return x; }

inline ZZ& operator>>=(ZZ& x, long n)
   { RightShift(x, x, n); return x; }

#endif
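
// Illustrative sketch (not part of the original header): shifts and bit
// queries, using the shift operators above and the NumBits/bit/trunc_ZZ
// routines declared just below.
//
//    ZZ x = to_ZZ(1);
//    x <<= 100;                 // x == 2^100
//    long nb = NumBits(x);      // nb == 101
//    long b  = bit(x, 100);     // b == 1
//    ZZ lo = trunc_ZZ(x, 64);   // low 64 bits of |x|, here 0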


inline long MakeOdd(ZZ& x)
// removes factors of 2 from x, returns the number of 2's removed
// returns 0 if x == 0

   { return _ntl_gmakeodd(&x.rep); }

inline long NumTwos(const ZZ& x)
// returns max e such that 2^e divides x if x != 0, and returns 0 if x == 0.

   { return _ntl_gnumtwos(x.rep); }


inline long IsOdd(const ZZ& a)
// returns 1 if a is odd, otherwise 0

   { return _ntl_godd(a.rep); }


inline long NumBits(const ZZ& a)
// returns the number of bits in |a|; NumBits(0) = 0
   { return _ntl_g2log(a.rep); }



inline long bit(const ZZ& a, long k)
// returns bit k of a, 0 being the low-order bit

   { return _ntl_gbit(a.rep, k); }

#ifndef NTL_GMP_LIP

// only defined for the "classic" long integer package, for backward
// compatibility.

inline long digit(const ZZ& a, long k)
   { return _ntl_gdigit(a.rep, k); }

#endif

// returns k-th digit of |a|, 0 being the low-order digit.

inline void trunc(ZZ& x, const ZZ& a, long k)
// puts k low order bits of |a| into x

   { _ntl_glowbits(a.rep, k, &x.rep); }

inline ZZ trunc_ZZ(const ZZ& a, long k)
   { ZZ x; trunc(x, a, k); NTL_OPT_RETURN(ZZ, x); }

inline long trunc_long(const ZZ& a, long k)
// returns k low order bits of |a|

   { return _ntl_gslowbits(a.rep, k); }

inline long SetBit(ZZ& x, long p)
// returns original value of p-th bit of |x|, and replaces
// p-th bit of x by 1 if it was zero;
// error if p < 0

   { return _ntl_gsetbit(&x.rep, p); }

inline long SwitchBit(ZZ& x, long p)
// returns original value of p-th bit of |x|, and switches
// the value of p-th bit of x;
// p starts counting at 0;
//   error if p < 0

   { return _ntl_gswitchbit(&x.rep, p); }

inline long weight(long a)
// returns Hamming weight of |a|

   { return _ntl_gweights(a); }

inline long weight(const ZZ& a)
// returns Hamming weight of |a|

   { return _ntl_gweight(a.rep); }

inline void bit_and(ZZ& x, const ZZ& a, const ZZ& b)
// x = |a| AND |b|

   { _ntl_gand(a.rep, b.rep, &x.rep); }

void bit_and(ZZ& x, const ZZ& a, long b);
inline void bit_and(ZZ& x, long a, const ZZ& b)
   { bit_and(x, b, a); }


inline void bit_or(ZZ& x, const ZZ& a, const ZZ& b)
// x = |a| OR |b|

   { _ntl_gor(a.rep, b.rep, &x.rep); }

void bit_or(ZZ& x, const ZZ& a, long b);
inline void bit_or(ZZ& x, long a, const ZZ& b)
   { bit_or(x, b, a); }

inline void bit_xor(ZZ& x, const ZZ& a, const ZZ& b)
// x = |a| XOR |b|

   { _ntl_gxor(a.rep, b.rep, &x.rep); }

void bit_xor(ZZ& x, const ZZ& a, long b);
inline void bit_xor(ZZ& x, long a, const ZZ& b)
   { bit_xor(x, b, a); }


inline ZZ operator&(const ZZ& a, const ZZ& b)
   { ZZ x; bit_and(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator&(const ZZ& a, long b)
   { ZZ x; bit_and(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator&(long a, const ZZ& b)
   { ZZ x; bit_and(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator|(const ZZ& a, const ZZ& b)
   { ZZ x; bit_or(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator|(const ZZ& a, long b)
   { ZZ x; bit_or(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator|(long a, const ZZ& b)
   { ZZ x; bit_or(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator^(const ZZ& a, const ZZ& b)
   { ZZ x; bit_xor(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator^(const ZZ& a, long b)
   { ZZ x; bit_xor(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ operator^(long a, const ZZ& b)
   { ZZ x; bit_xor(x, a, b); NTL_OPT_RETURN(ZZ, x); }

inline ZZ& operator&=(ZZ& x, const ZZ& b)
   { bit_and(x, x, b); return x; }

inline ZZ& operator&=(ZZ& x, long b)
   { bit_and(x, x, b); return x; }

inline ZZ& operator|=(ZZ& x, const ZZ& b)
   { bit_or(x, x, b); return x; }

inline ZZ& operator|=(ZZ& x, long b)
   { bit_or(x, x, b); return x; }

inline ZZ& operator^=(ZZ& x, const ZZ& b)
   { bit_xor(x, x, b); return x; }

inline ZZ& operator^=(ZZ& x, long b)
   { bit_xor(x, x, b); return x; }



inline
long NumBits(long a)
   { return _ntl_g2logs(a); }

long bit(long a, long k);

long NextPowerOfTwo(long m);
// returns least nonnegative k such that 2^k >= m

inline
long NumBytes(const ZZ& a)
   { return (NumBits(a)+7)/8; }

inline
long NumBytes(long a)
   { return (NumBits(a)+7)/8; }



/***********************************************************

          Some specialized routines

************************************************************/


inline long ZZ_BlockConstructAlloc(ZZ& x, long d, long n)
   { return _ntl_gblock_construct_alloc(&x.rep, d, n); }

inline void ZZ_BlockConstructSet(ZZ& x, ZZ& y, long i)
   { _ntl_gblock_construct_set(x.rep, &y.rep, i); }

inline long ZZ_BlockDestroy(ZZ& x)
   { return _ntl_gblock_destroy(x.rep);  }

inline long ZZ_storage(long d)
   { return _ntl_gblock_storage(d); }

inline long ZZ_RoundCorrection(const ZZ& a, long k, long residual)
   { return _ntl_ground_correction(a.rep, k, residual); }


/***********************************************************

                  Pseudo-random Numbers

************************************************************/


// ================ NEW PRG STUFF =================


// Low-level key-derivation


void DeriveKey(unsigned char *key, long klen,
               const unsigned char *data, long dlen);



// Low-level chacha stuff

#define NTL_PRG_KEYLEN (32)


struct RandomStream_impl;

RandomStream_impl *
RandomStream_impl_build(const unsigned char *key);

RandomStream_impl *
RandomStream_impl_build(const RandomStream_impl&);

void
RandomStream_impl_copy(RandomStream_impl&, const RandomStream_impl&);

const unsigned char *
RandomStream_impl_get_buf(const RandomStream_impl&);

long
RandomStream_impl_get_buf_len(const RandomStream_impl&);

long
RandomStream_impl_get_bytes(RandomStream_impl& impl, unsigned char *res,
   long n, long pos);

void RandomStream_impl_set_nonce(RandomStream_impl& impl, unsigned long nonce);

void
RandomStream_impl_delete(RandomStream_impl*);

struct
RandomStream_impl_deleter {
   static void deleter(RandomStream_impl* p) { RandomStream_impl_delete(p); }
};


class RandomStream {
private:

   long pos;
   const unsigned char *buf;
   long buf_len;

   UniquePtr<RandomStream_impl,RandomStream_impl_deleter> impl;

public:

   explicit
   RandomStream(const unsigned char *key) {
      impl.reset(RandomStream_impl_build(key));
      pos = buf_len = RandomStream_impl_get_buf_len(*impl);
      buf = RandomStream_impl_get_buf(*impl);
   }

   RandomStream(const RandomStream& other) {
      impl.reset(RandomStream_impl_build(*other.impl));
      pos = other.pos;
      buf_len = other.buf_len;
      buf = RandomStream_impl_get_buf(*impl);
   }

   RandomStream& operator=(const RandomStream& other) {
      RandomStream_impl_copy(*impl, *other.impl);
      pos = other.pos;
      buf_len = other.buf_len;
      buf = RandomStream_impl_get_buf(*impl);
      return *this;
   }

   void get(unsigned char *res, long n)
   {
      // optimize short reads
      if (n > 0 && n <= buf_len-pos) {
#if 1
         std::memcpy(&res[0], &buf[pos], n);
         // NOTE: memcpy is undefined if res == null.
         // That's one reason we don't take this branch for n == 0,
         // which is an unlikely case anyway.
#else
         for (long i = 0; i < n; i++) {
            res[i] = buf[pos+i];
         }
#endif
         pos += n;
      }
      else {
         pos = RandomStream_impl_get_bytes(*impl, res, n, pos);
      }
   }

   // FIXME: document this? Not sure if I want to
   // commit to this interface, as it is somewhat ChaCha-specific
   void set_nonce(unsigned long nonce)
   {
      RandomStream_impl_set_nonce(*impl, nonce);
      pos = buf_len;
   }

};

// this is the number of bits we can pass through the set_nonce
// interface
#if (NTL_BITS_PER_LONG > 64)
#define NTL_BITS_PER_NONCE (64)
#else
#define NTL_BITS_PER_NONCE NTL_BITS_PER_LONG
#endif
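
// Illustrative sketch (not part of the original header): deriving a PRG key
// from application data and pulling raw bytes from a RandomStream, using only
// the routines declared above.
//
//    unsigned char key[NTL_PRG_KEYLEN];
//    const unsigned char seed[4] = { 1, 2, 3, 4 };
//    DeriveKey(key, NTL_PRG_KEYLEN, seed, 4);
//
//    RandomStream str(key);
//    unsigned char buf[16];
//    str.get(buf, 16);          // 16 pseudo-random bytes
//    str.set_nonce(7);          // switch to an independent stream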



// ============ old stuff: for testing  ============

class old_RandomStream {
private:
   _ntl_uint32 state[16];
   unsigned char buf[64];
   long pos;

   void do_get(unsigned char *res, long n);

public:
   explicit
   old_RandomStream(const unsigned char *key);

   // No default constructor
   // default copy and assignment

   void get(unsigned char *res, long n)
   {
      // optimize short reads
      if (n >= 0 && n <= 64-pos) {
         long i;
         for (i = 0; i < n; i++) {
            res[i] = buf[pos+i];
         }
         pos += n;
      }
      else {
         do_get(res, n);
      }
   }

};



RandomStream& GetCurrentRandomStream();
// get a reference to the current random stream --
// if SetSeed has not been called, it is called with
// a default value (which should be unique to each
// process/thread)


void SetSeed(const ZZ& s);
void SetSeed(const unsigned char *data, long dlen);
void SetSeed(const RandomStream& s);
// initialize the random number generator;
// in the first two versions, a PRG key is derived from
// the data using DeriveKey.

// RAII for saving/restoring current state of PRG

class RandomStreamPush {
private:
   RandomStream saved;

   RandomStreamPush(const RandomStreamPush&); // disable
   void operator=(const RandomStreamPush&); // disable

public:
   RandomStreamPush() : saved(GetCurrentRandomStream()) { }
   ~RandomStreamPush() { SetSeed(saved); }

};



void RandomBnd(ZZ& x, const ZZ& n);
// x = "random number" in the range 0..n-1, or 0  if n <= 0

inline ZZ RandomBnd(const ZZ& n)
   { ZZ x; RandomBnd(x, n); NTL_OPT_RETURN(ZZ, x); }


void RandomLen(ZZ& x, long NumBits);
// x = "random number" with precisely NumBits bits.


inline ZZ RandomLen_ZZ(long NumBits)
   { ZZ x; RandomLen(x, NumBits); NTL_OPT_RETURN(ZZ, x); }


void RandomBits(ZZ& x, long NumBits);
// x = "random number", 0 <= x < 2^NumBits

inline ZZ RandomBits_ZZ(long NumBits)
   { ZZ x; RandomBits(x, NumBits); NTL_OPT_RETURN(ZZ, x); }


// single-precision versions of the above

long RandomBnd(long n);
inline void RandomBnd(long& x, long n) { x = RandomBnd(n); }

long RandomLen_long(long l);
inline void RandomLen(long& x, long l) { x = RandomLen_long(l); }

long RandomBits_long(long l);
inline void RandomBits(long& x, long l) { x = RandomBits_long(l); }
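
// Illustrative sketch (not part of the original header): seeding the PRG and
// drawing a few random values with the routines above.
//
//    SetSeed(to_ZZ(42));            // deterministic seed (tests/demos only)
//    ZZ a = RandomBnd(to_ZZ("1000000000000000000"));  // 0 <= a < 10^18
//    ZZ b = RandomLen_ZZ(256);      // exactly 256 bits
//    long c = RandomBnd(100L);      // 0 <= c < 100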


// specialty routines

unsigned long RandomWord();
unsigned long RandomBits_ulong(long l);

// helper class to make generating small random numbers faster
// FIXME: add documentation?
struct RandomBndGenerator {

   long p;
   long nb;
   unsigned long mask;

   RandomStream *str;

   RandomBndGenerator() : p(0) { }

   explicit
   RandomBndGenerator(long _p) : p(0) { build(_p); }

   void build(long _p)
   {
      if (_p <= 1) LogicError("RandomBndGenerator::init: bad args");

      if (!p) {
         str = &GetCurrentRandomStream();
      }

      p = _p;
      long l = NumBits(p-1);
      nb = (l+7)/8;
      mask = (1UL << l)-1UL;
   }

   long next()
   {
      unsigned char buf[NTL_BITS_PER_LONG/8];
      long tmp;

      do {
         str->get(buf, nb);

         unsigned long word = 0;
         for (long i = nb-1; i >= 0; i--) word = (word << 8) | buf[i];

         tmp = long(word & mask);
      } while (tmp >= p);

      return tmp;
   }
};


inline void VectorRandomBnd(long k, long* x, long n)
{
   if (k <= 0) return;
   if (n <= 1) {
      for (long i = 0; i < k; i++) x[i] = 0;
   }
   else {
      RandomBndGenerator gen(n);
      for (long i = 0; i < k; i++) x[i] = gen.next();
   }
}


void VectorRandomWord(long k, unsigned long* x);


/**********************************************************

             Incremental Chinese Remaindering

***********************************************************/

long CRT(ZZ& a, ZZ& p, const ZZ& A, const ZZ& P);
long CRT(ZZ& a, ZZ& p, long A, long P);
// 0 <= A < P, gcd(p, P) = 1;
// computes b such that b = a mod p, b = A mod P,
//   and -p*P/2 < b <= p*P/2;
// sets a = b, p = p*P, and returns 1 if a's value
//   has changed, otherwise 0
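
// Illustrative sketch (not part of the original header): the usual incremental
// CRT loop, which maintains a balanced residue a modulo the growing product p
// of several small primes.  PrimeSeq is declared later in this header; the
// residue A is whatever value the application computed mod q.
//
//    ZZ a = to_ZZ(0), p = to_ZZ(1);
//    PrimeSeq ps;
//    for (long i = 0; i < 10; i++) {
//       long q = ps.next();                      // next small prime
//       long A = 0 /* residue of the target value mod q */;
//       CRT(a, p, A, q);                         // now a is correct mod p, and p *= q
//    }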

inline long CRTInRange(const ZZ& gg, const ZZ& aa)
   { return _ntl_gcrtinrange(gg.rep, aa.rep); }

// an auxiliary routine used by newer CRT routines to maintain
// backward compatibility.

// test if a > 0 and -a/2 < g <= a/2
// this is "hand crafted" so as not to waste too much time
// in the CRT routines.


/**********************************************************

                  Rational Reconstruction

***********************************************************/

inline
long ReconstructRational(ZZ& a, ZZ& b, const ZZ& u, const ZZ& m,
                         const ZZ& a_bound, const ZZ& b_bound)
{
   return _ntl_gxxratrecon(u.rep, m.rep, a_bound.rep, b_bound.rep, &a.rep, &b.rep);

}




/************************************************************

                      Primality Testing

*************************************************************/


void GenPrime(ZZ& n, long l, long err = 80);
inline ZZ GenPrime_ZZ(long l, long err = 80)
{ ZZ x; GenPrime(x, l, err); NTL_OPT_RETURN(ZZ, x); }

long GenPrime_long(long l, long err = 80);
// This generates a random prime n of length l so that the
// probability of erroneously returning a composite is bounded by 2^(-err).

void GenGermainPrime(ZZ& n, long l, long err = 80);
inline ZZ GenGermainPrime_ZZ(long l, long err = 80)
{ ZZ x; GenGermainPrime(x, l, err); NTL_OPT_RETURN(ZZ, x); }

long GenGermainPrime_long(long l, long err = 80);
// This generates a random Germain prime n of length l so that the
// probability of erroneously returning a composite is bounded by 2^(-err).

long ProbPrime(const ZZ& n, long NumTrials = 10);
// tests if n is prime;  performs a little trial division,
// followed by a single-precision MillerWitness test, followed by
// up to NumTrials general MillerWitness tests.

long MillerWitness(const ZZ& n, const ZZ& w);
// Tests if w is a witness to compositeness a la Miller.
// Assumption: n is odd and positive, 0 <= w < n.

void RandomPrime(ZZ& n, long l, long NumTrials=10);
// n = random l-bit prime

inline ZZ RandomPrime_ZZ(long l, long NumTrials=10)
   { ZZ x; RandomPrime(x, l, NumTrials); NTL_OPT_RETURN(ZZ, x); }

void NextPrime(ZZ& n, const ZZ& m, long NumTrials=10);
// n = smallest prime >= m.

inline ZZ NextPrime(const ZZ& m, long NumTrials=10)
   { ZZ x; NextPrime(x, m, NumTrials); NTL_OPT_RETURN(ZZ, x); }

// single-precision versions

long ProbPrime(long n, long NumTrials = 10);


long RandomPrime_long(long l, long NumTrials=10);

long NextPrime(long l, long NumTrials=10);
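
// Illustrative sketch (not part of the original header): generating and
// testing primes with the routines above.
//
//    ZZ p = GenPrime_ZZ(256);      // 256-bit prime, error prob. <= 2^(-80)
//    long ok = ProbPrime(p);       // == 1 (with overwhelming probability)
//    ZZ q = NextPrime(p + 1);      // smallest prime >= p + 1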


/************************************************************

                      Exponentiation

*************************************************************/

inline void power(ZZ& x, const ZZ& a, long e)
   { _ntl_gexp(a.rep, e, &x.rep); }

inline ZZ power(const ZZ& a, long e)
   { ZZ x; power(x, a, e); NTL_OPT_RETURN(ZZ, x); }

inline void power(ZZ& x, long a, long e)
   {  _ntl_gexps(a, e, &x.rep); }

inline ZZ power_ZZ(long a, long e)
   { ZZ x; power(x, a, e); NTL_OPT_RETURN(ZZ, x); }

long power_long(long a, long e);

void power2(ZZ& x, long e);

inline ZZ power2_ZZ(long e)
   { ZZ x; power2(x, e); NTL_OPT_RETURN(ZZ, x); }





/*************************************************************

                       Square Roots

**************************************************************/




inline void SqrRoot(ZZ& x, const ZZ& a)
// x = [a^{1/2}], a >= 0

{
   _ntl_gsqrt(a.rep, &x.rep);
}

inline ZZ SqrRoot(const ZZ& a)
   { ZZ x; SqrRoot(x, a); NTL_OPT_RETURN(ZZ, x); }


inline long SqrRoot(long a) { return _ntl_gsqrts(a); }
// single-precision version



/***************************************************************

                      Modular Arithmetic

***************************************************************/

// The following routines perform arithmetic mod n, n positive.
// All args (other than exponents) are assumed to be in the range 0..n-1.



inline void AddMod(ZZ& x, const ZZ& a, const ZZ& b, const ZZ& n)
// x = (a+b)%n
   { _ntl_gaddmod(a.rep, b.rep, n.rep, &x.rep); }


inline ZZ AddMod(const ZZ& a, const ZZ& b, const ZZ& n)
   { ZZ x; AddMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

inline void SubMod(ZZ& x, const ZZ& a, const ZZ& b, const ZZ& n)
// x = (a-b)%n

   { _ntl_gsubmod(a.rep, b.rep, n.rep, &x.rep); }

inline ZZ SubMod(const ZZ& a, const ZZ& b, const ZZ& n)
   { ZZ x; SubMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

inline void NegateMod(ZZ& x, const ZZ& a, const ZZ& n)
// x = -a % n

   { _ntl_gsubmod(0, a.rep, n.rep, &x.rep); }

inline ZZ NegateMod(const ZZ& a, const ZZ& n)
   { ZZ x; NegateMod(x, a, n); NTL_OPT_RETURN(ZZ, x); }

void AddMod(ZZ& x, const ZZ& a, long b, const ZZ& n);
inline ZZ AddMod(const ZZ& a, long b, const ZZ& n)
   { ZZ x; AddMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

inline void AddMod(ZZ& x, long a, const ZZ& b, const ZZ& n)
   { AddMod(x, b, a, n); }
inline ZZ AddMod(long a, const ZZ& b, const ZZ& n)
   { ZZ x; AddMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

void SubMod(ZZ& x, const ZZ& a, long b, const ZZ& n);
inline ZZ SubMod(const ZZ& a, long b, const ZZ& n)
   { ZZ x; SubMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

void SubMod(ZZ& x, long a, const ZZ& b, const ZZ& n);
inline ZZ SubMod(long a, const ZZ& b, const ZZ& n)
   { ZZ x; SubMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

inline void MulMod(ZZ& x, const ZZ& a, const ZZ& b, const ZZ& n)
// x = (a*b)%n

   { _ntl_gmulmod(a.rep, b.rep, n.rep, &x.rep); }

inline ZZ MulMod(const ZZ& a, const ZZ& b, const ZZ& n)
   { ZZ x; MulMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

inline void MulMod(ZZ& x, const ZZ& a, long b, const ZZ& n)
// x = (a*b)%n

   { _ntl_gsmulmod(a.rep, b, n.rep, &x.rep); }

inline ZZ MulMod(const ZZ& a, long b, const ZZ& n)
   { ZZ x; MulMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }

inline void MulMod(ZZ& x, long a, const ZZ& b, const ZZ& n)
   { MulMod(x, b, a, n); }

inline ZZ MulMod(long a, const ZZ& b, const ZZ& n)
   { ZZ x; MulMod(x, a, b, n); NTL_OPT_RETURN(ZZ, x); }


inline void SqrMod(ZZ& x, const ZZ& a, const ZZ& n)
// x = a^2 % n

   { _ntl_gsqmod(a.rep, n.rep, &x.rep); }

inline ZZ SqrMod(const ZZ& a, const ZZ& n)
   {  ZZ x; SqrMod(x, a, n); NTL_OPT_RETURN(ZZ, x); }

void InvMod(ZZ& x, const ZZ& a, const ZZ& n);
// defined in ZZ.c in terms of InvModStatus

inline ZZ InvMod(const ZZ& a, const ZZ& n)
   {  ZZ x; InvMod(x, a, n); NTL_OPT_RETURN(ZZ, x); }


inline long InvModStatus(ZZ& x, const ZZ& a, const ZZ& n)
// if gcd(a,n) = 1, then ReturnValue = 0, x = a^{-1} mod n
// otherwise, ReturnValue = 1, x = gcd(a, n)

  { return _ntl_ginv(a.rep, n.rep, &x.rep); }


void PowerMod(ZZ& x, const ZZ& a, const ZZ& e, const ZZ& n);
// defined in ZZ.c in terms of LowLevelPowerMod

inline void LowLevelPowerMod(ZZ& x, const ZZ& a, const ZZ& e, const ZZ& n)
   { _ntl_gpowermod(a.rep, e.rep, n.rep, &x.rep); }

inline ZZ PowerMod(const ZZ& a, const ZZ& e, const ZZ& n)
   { ZZ x; PowerMod(x, a, e, n); NTL_OPT_RETURN(ZZ, x); }

inline void PowerMod(ZZ& x, const ZZ& a, long e, const ZZ& n)
   { PowerMod(x, a, ZZ_expo(e), n); }

inline ZZ PowerMod(const ZZ& a, long e, const ZZ& n)
   { ZZ x; PowerMod(x, a, e, n); NTL_OPT_RETURN(ZZ, x); }
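
// Illustrative sketch (not part of the original header): modular arithmetic
// with the routines above; all residues are kept in the range 0..n-1.
//
//    ZZ n = GenPrime_ZZ(128);
//    ZZ a = RandomBnd(n);
//    ZZ b = RandomBnd(n);
//    ZZ c = MulMod(a, b, n);         // (a*b) % n
//    ZZ d = PowerMod(a, n - 2, n);   // a^(n-2) mod n
//    // for prime n and a != 0, d == InvMod(a, n) by Fermat's little theorem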





/*************************************************************

   Jacobi symbol and modular square roots

**************************************************************/


long Jacobi(const ZZ& a, const ZZ& n);
//  compute Jacobi symbol of a and n;
//  assumes 0 <= a < n, n odd

void SqrRootMod(ZZ& x, const ZZ& a, const ZZ& n);
//  computes square root of a mod n;
//  assumes n is an odd prime, and that a is a square mod n

inline ZZ SqrRootMod(const ZZ& a, const ZZ& n)
   { ZZ x; SqrRootMod(x, a, n); NTL_OPT_RETURN(ZZ, x); }




/*************************************************************


                    Small Prime Generation


*************************************************************/


// primes are generated in sequence, starting at 2,
// and up until (2*NTL_PRIME_BND+1)^2, which is less than NTL_SP_BOUND.

#if (NTL_SP_NBITS > 30)
#define NTL_PRIME_BND ((1L << 14) - 1)
#else
#define NTL_PRIME_BND ((1L << (NTL_SP_NBITS/2-1)) - 1)
#endif


class PrimeSeq {

const char *movesieve;
Vec<char> movesieve_mem;
long pindex;
long pshift;
long exhausted;

public:

PrimeSeq();

long next();
// returns next prime in the sequence.
// returns 0 if list of small primes is exhausted.

void reset(long b);
// resets generator so that the next prime in the sequence
// is the smallest prime >= b.

private:

PrimeSeq(const PrimeSeq&);        // disabled
void operator=(const PrimeSeq&);  // disabled

// auxiliary routines

void start();
void shift(long);

};
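
// Illustrative sketch (not part of the original header): summing the small
// primes below 1000 with a PrimeSeq.
//
//    PrimeSeq s;
//    long sum = 0;
//    long p = s.next();          // 2, 3, 5, ...
//    while (p && p < 1000) {
//       sum += p;
//       p = s.next();            // returns 0 once the list is exhausted
//    }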




/**************************************************************

                      Input/Output

***************************************************************/

NTL_SNS istream& operator>>(NTL_SNS istream& s, ZZ& x);
NTL_SNS ostream& operator<<(NTL_SNS ostream& s, const ZZ& a);




// Some additional SP arithmetic routines, not defined in sp_arith.h


long InvMod(long a, long n);
// computes a^{-1} mod n.  Error is raised if undefined.

long InvModStatus(long& x, long a, long n);
// if gcd(a,n) = 1, then ReturnValue = 0, x = a^{-1} mod n
// otherwise, ReturnValue = 1, x = gcd(a, n)

long PowerMod(long a, long e, long n);
// computes a^e mod n, e >= 0


// Error handling

#ifdef NTL_EXCEPTIONS

class InvModErrorObject : public ArithmeticErrorObject {
private:
   SmartPtr<ZZ> a_ptr;
   SmartPtr<ZZ> n_ptr;
public:
   InvModErrorObject(const char *s, const ZZ& a, const ZZ& n)
      : ArithmeticErrorObject(s) , a_ptr(MakeSmart<ZZ>(a)),
        n_ptr(MakeSmart<ZZ>(n)) { }

   const ZZ& get_a() const { return *a_ptr; }
   const ZZ& get_n() const { return *n_ptr; }
};

#else

// We need this alt definition to keep pre-C++11
// compilers happy (NTL_EXCEPTIONS should only be used
// with C++11 compilers).

class InvModErrorObject : public ArithmeticErrorObject {
public:
   InvModErrorObject(const char *s, const ZZ& a, const ZZ& n)
      : ArithmeticErrorObject(s) { }

   const ZZ& get_a() const { return ZZ::zero(); }
   const ZZ& get_n() const { return ZZ::zero(); }
};

#endif


void InvModError(const char *s, const ZZ& a, const ZZ& n);

#ifdef NTL_PROVIDES_SS_LIP_IMPL

inline void
LeftRotate_lip_impl(ZZ& a, const ZZ& b, long e, const ZZ& p, long n, ZZ& scratch)
// Compute a = b * 2^e mod p, where p = 2^n+1. 0<=e<n and 0<b<p
// a may not alias p
// scratch may not alias a, b, or p
{
   _ntl_leftrotate(&a.rep, &b.rep, e, p.rep, n, &scratch.rep);
}

inline void
SS_AddMod_lip_impl(ZZ& x, const ZZ& a, const ZZ& b, const ZZ& p, long n)
// x = a + b mod p, where p = 2^n+1,  a, b in [0, p).
// x may not alias p.
{
   _ntl_ss_addmod(&x.rep, &a.rep, &b.rep, p.rep, n);
}

inline void
SS_SubMod_lip_impl(ZZ& x, const ZZ& a, const ZZ& b, const ZZ& p, long n)
// x = a - b mod p, where p = 2^n+1,  a, b in [0, p).
// x may not alias b or p.
{
   _ntl_ss_submod(&x.rep, &a.rep, &b.rep, p.rep, n);
}

#endif





NTL_CLOSE_NNS


#endif
