1 /* Operations with very long integers.
2    Copyright (C) 2012-2021 Free Software Foundation, Inc.
3    Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "selftest.h"
27 
28 
29 #define HOST_BITS_PER_HALF_WIDE_INT 32
30 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
31 # define HOST_HALF_WIDE_INT long
32 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
33 # define HOST_HALF_WIDE_INT int
34 #else
35 #error Please add support for HOST_HALF_WIDE_INT
36 #endif
37 
38 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
39 /* Do not include longlong.h when compiler is clang-based. See PR61146.  */
40 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
41 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
42 typedef unsigned HOST_WIDE_INT UWtype;
43 typedef unsigned int UQItype __attribute__ ((mode (QI)));
44 typedef unsigned int USItype __attribute__ ((mode (SI)));
45 typedef unsigned int UDItype __attribute__ ((mode (DI)));
46 #if W_TYPE_SIZE == 32
47 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
48 #else
49 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
50 #endif
51 #include "longlong.h"
52 #endif
53 
54 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
55 
56 /*
57  * Internal utilities.
58  */
59 
60 /* Quantities to deal with values that hold half of a wide int.  Used
61    in multiply and divide.  */
62 #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
63 
64 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65 #define BLOCKS_NEEDED(PREC) \
66   (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
67 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
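
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper spelling out what the block macros above evaluate to,
   assuming 64-bit HOST_WIDE_INTs.  */

static void ATTRIBUTE_UNUSED
example_block_macros (void)
{
  if (HOST_BITS_PER_WIDE_INT != 64)
    return;

  /* Bit counts round up to whole blocks; a zero precision still needs
     one block for the value 0.  */
  gcc_checking_assert (BLOCKS_NEEDED (1) == 1);
  gcc_checking_assert (BLOCKS_NEEDED (64) == 1);
  gcc_checking_assert (BLOCKS_NEEDED (65) == 2);
  gcc_checking_assert (BLOCKS_NEEDED (0) == 1);

  /* SIGN_MASK smears the sign bit of a block.  */
  gcc_checking_assert (SIGN_MASK (HOST_WIDE_INT_M1) == -1);
  gcc_checking_assert (SIGN_MASK (1) == 0);
}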
68 
69 /* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
70    based on the top existing bit of VAL.  */
71 
72 static unsigned HOST_WIDE_INT
73 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
74 {
75   return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
76 }
77 
78 /* Convert the integer in VAL to canonical form, returning its new length.
79    LEN is the number of blocks currently in VAL and PRECISION is the number
80    of bits in the integer it represents.
81 
82    This function only changes the representation, not the value.  */
83 static unsigned int
84 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
85 {
86   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
87   HOST_WIDE_INT top;
88   int i;
89 
90   if (len > blocks_needed)
91     len = blocks_needed;
92 
93   if (len == 1)
94     return len;
95 
96   top = val[len - 1];
97   if (len * HOST_BITS_PER_WIDE_INT > precision)
98     val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
99   if (top != 0 && top != (HOST_WIDE_INT)-1)
100     return len;
101 
102   /* At this point we know that the top is either 0 or -1.  Find the
103      first block that is not a copy of this.  */
104   for (i = len - 2; i >= 0; i--)
105     {
106       HOST_WIDE_INT x = val[i];
107       if (x != top)
108 	{
109 	  if (SIGN_MASK (x) == top)
110 	    return i + 1;
111 
112 	  /* We need an extra block because the top bit of block I does
113 	     not match the extension.  */
114 	  return i + 2;
115 	}
116     }
117 
118   /* The number is 0 or -1.  */
119   return 1;
120 }
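
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper showing how canonize compresses redundant sign blocks.
   It assumes 64-bit HOST_WIDE_INTs and a 128-bit precision.  */

static void ATTRIBUTE_UNUSED
example_canonize (void)
{
  if (HOST_BITS_PER_WIDE_INT != 64)
    return;

  /* -1 stored as two blocks of all ones compresses to a single block.  */
  HOST_WIDE_INT a[2] = { -1, -1 };
  gcc_checking_assert (canonize (a, 2, 128) == 1);

  /* A value whose top block does not match the sign extension of the
     block below it keeps both blocks.  */
  HOST_WIDE_INT b[2] = { -1, 0 };
  gcc_checking_assert (canonize (b, 2, 128) == 2);
}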
121 
122 /* VAL[0] is the unsigned result of an operation.  Canonize it by adding
123    another 0 block if needed, and return number of blocks needed.  */
124 
125 static inline unsigned int
126 canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
127 {
128   if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
129     {
130       val[1] = 0;
131       return 2;
132     }
133   return 1;
134 }
135 
136 /*
137  * Conversion routines in and out of wide_int.
138  */
139 
140 /* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
141    result for an integer with precision PRECISION.  Return the length
142    of VAL (after any canonization).  */
143 unsigned int
144 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
145 		unsigned int xlen, unsigned int precision, bool need_canon)
146 {
147   for (unsigned i = 0; i < xlen; i++)
148     val[i] = xval[i];
149   return need_canon ? canonize (val, xlen, precision) : xlen;
150 }
151 
152 /* Construct a wide int from a buffer of length LEN.  BUFFER will be
153    read according to byte endianness and word endianness of the target.
154    Only the lower BUFFER_LEN bytes of the result are set; the remaining
155    high bytes are cleared.  */
156 wide_int
157 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
158 {
159   unsigned int precision = buffer_len * BITS_PER_UNIT;
160   wide_int result = wide_int::create (precision);
161   unsigned int words = buffer_len / UNITS_PER_WORD;
162 
163   /* We have to clear all the bits ourselves, as we merely or in values
164      below.  */
165   unsigned int len = BLOCKS_NEEDED (precision);
166   HOST_WIDE_INT *val = result.write_val ();
167   for (unsigned int i = 0; i < len; ++i)
168     val[i] = 0;
169 
170   for (unsigned int byte = 0; byte < buffer_len; byte++)
171     {
172       unsigned int offset;
173       unsigned int index;
174       unsigned int bitpos = byte * BITS_PER_UNIT;
175       unsigned HOST_WIDE_INT value;
176 
177       if (buffer_len > UNITS_PER_WORD)
178 	{
179 	  unsigned int word = byte / UNITS_PER_WORD;
180 
181 	  if (WORDS_BIG_ENDIAN)
182 	    word = (words - 1) - word;
183 
184 	  offset = word * UNITS_PER_WORD;
185 
186 	  if (BYTES_BIG_ENDIAN)
187 	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
188 	  else
189 	    offset += byte % UNITS_PER_WORD;
190 	}
191       else
192 	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
193 
194       value = (unsigned HOST_WIDE_INT) buffer[offset];
195 
196       index = bitpos / HOST_BITS_PER_WIDE_INT;
197       val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
198     }
199 
200   result.set_len (canonize (val, len, precision));
201 
202   return result;
203 }
204 
205 /* Sets RESULT from X, the sign is taken according to SGN.  */
206 void
207 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
208 {
209   int len = x.get_len ();
210   const HOST_WIDE_INT *v = x.get_val ();
211   int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
212 
213   if (wi::neg_p (x, sgn))
214     {
215       /* We use ones' complement to avoid the 0x80..0 edge case that
216 	 negation won't work on.  */
217       HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
218       for (int i = 0; i < len; i++)
219 	t[i] = ~v[i];
220       if (excess > 0)
221 	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
222       mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
223       mpz_com (result, result);
224     }
225   else if (excess > 0)
226     {
227       HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
228       for (int i = 0; i < len - 1; i++)
229 	t[i] = v[i];
230       t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
231       mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
232     }
233   else if (excess < 0 && wi::neg_p (x))
234     {
235       int extra
236 	= (-excess + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
237       HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
238       for (int i = 0; i < len; i++)
239 	t[i] = v[i];
240       for (int i = 0; i < extra; i++)
241 	t[len + i] = -1;
242       excess = (-excess) % HOST_BITS_PER_WIDE_INT;
243       if (excess)
244 	t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
245       mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
246     }
247   else
248     mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
249 }
250 
251 /* Returns X converted to TYPE.  If WRAP is true, then out-of-range
252    values of VAL will be wrapped; otherwise, they will be set to the
253    appropriate minimum or maximum TYPE bound.  */
254 wide_int
255 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
256 {
257   size_t count, numb;
258   unsigned int prec = TYPE_PRECISION (type);
259   wide_int res = wide_int::create (prec);
260 
261   if (!wrap)
262     {
263       mpz_t min, max;
264 
265       mpz_init (min);
266       mpz_init (max);
267       get_type_static_bounds (type, min, max);
268 
269       if (mpz_cmp (x, min) < 0)
270 	mpz_set (x, min);
271       else if (mpz_cmp (x, max) > 0)
272 	mpz_set (x, max);
273 
274       mpz_clear (min);
275       mpz_clear (max);
276     }
277 
278   /* Determine the number of unsigned HOST_WIDE_INTs that are required
279      for representing the absolute value.  The code to calculate count is
280      extracted from the GMP manual, section "Integer Import and Export":
281      http://gmplib.org/manual/Integer-Import-and-Export.html  */
282   numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
283   count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
284   HOST_WIDE_INT *val = res.write_val ();
285   /* Read the absolute value.
286 
287      Write directly to the wide_int storage if possible, otherwise leave
288      GMP to allocate the memory for us.  It might be slightly more efficient
289      to use mpz_tdiv_r_2exp for the latter case, but the situation is
290      pathological and it seems safer to operate on the original mpz value
291      in all cases.  */
292   void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
293 			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
294   if (count < 1)
295     {
296       val[0] = 0;
297       count = 1;
298     }
299   count = MIN (count, BLOCKS_NEEDED (prec));
300   if (valres != val)
301     {
302       memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
303       free (valres);
304     }
305   /* Zero-extend the absolute value to PREC bits.  */
306   if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
307     val[count++] = 0;
308   else
309     count = canonize (val, count, prec);
310   res.set_len (count);
311 
312   if (mpz_sgn (x) < 0)
313     res = -res;
314 
315   return res;
316 }
317 
318 /*
319  * Largest and smallest values in a mode.
320  */
321 
322 /* Return the largest SGNed number that is representable in PRECISION bits.
323 
324    TODO: There is still code from the double_int era that tries to
325    make up for the fact that double_ints could not represent the
326    min and max values of all types.  This code should be removed
327    because the min and max values can always be represented in
328    wide_ints and int-csts.  */
329 wide_int
330 wi::max_value (unsigned int precision, signop sgn)
331 {
332   gcc_checking_assert (precision != 0);
333   if (sgn == UNSIGNED)
334     /* The unsigned max is just all ones.  */
335     return shwi (-1, precision);
336   else
337     /* The signed max is all ones except the top bit.  This must be
338        explicitly represented.  */
339     return mask (precision - 1, false, precision);
340 }
341 
342 /* Return the smallest SGNed number that is representable in PRECISION bits.  */
343 wide_int
344 wi::min_value (unsigned int precision, signop sgn)
345 {
346   gcc_checking_assert (precision != 0);
347   if (sgn == UNSIGNED)
348     return uhwi (0, precision);
349   else
350     /* The signed min is all zeros except the top bit.  This must be
351        explicitly represented.  */
352     return wi::set_bit_in_zero (precision - 1, precision);
353 }
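
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused check restating the bounds above for an 8-bit precision.  It
   relies on equality being the one comparison operator wide_int provides
   (see the comment before the comparison routines below).  */

static void ATTRIBUTE_UNUSED
example_bounds_8bit (void)
{
  gcc_checking_assert (wi::max_value (8, SIGNED) == 127);
  gcc_checking_assert (wi::min_value (8, SIGNED) == -128);
}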
354 
355 /*
356  * Public utilities.
357  */
358 
359 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
360    signedness SGN, to an integer that has PRECISION bits.  Store the blocks
361    in VAL and return the number of blocks used.
362 
363    This function can handle both extension (PRECISION > XPRECISION)
364    and truncation (PRECISION < XPRECISION).  */
365 unsigned int
366 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
367 		   unsigned int xlen, unsigned int xprecision,
368 		   unsigned int precision, signop sgn)
369 {
370   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
371   unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
372   for (unsigned i = 0; i < len; i++)
373     val[i] = xval[i];
374 
375   if (precision > xprecision)
376     {
377       unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
378 
379       /* Expanding.  */
380       if (sgn == UNSIGNED)
381 	{
382 	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
383 	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
384 	  else if (val[len - 1] < 0)
385 	    {
386 	      while (len < BLOCKS_NEEDED (xprecision))
387 		val[len++] = -1;
388 	      if (small_xprecision)
389 		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
390 	      else
391 		val[len++] = 0;
392 	    }
393 	}
394       else
395 	{
396 	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
397 	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
398 	}
399     }
400   len = canonize (val, len, precision);
401 
402   return len;
403 }
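
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper showing force_to_size extending the 8-bit pattern 0xff
   (canonically stored as -1) to 16 bits under both signednesses.  */

static void ATTRIBUTE_UNUSED
example_force_to_size (void)
{
  HOST_WIDE_INT x[1] = { -1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];

  /* Zero extension to 16 bits gives 255.  */
  unsigned int len = wi::force_to_size (r, x, 1, 8, 16, UNSIGNED);
  gcc_checking_assert (len == 1 && r[0] == 0xff);

  /* Sign extension to 16 bits keeps -1.  */
  len = wi::force_to_size (r, x, 1, 8, 16, SIGNED);
  gcc_checking_assert (len == 1 && r[0] == -1);
}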
404 
405 /* This function hides the fact that we cannot rely on the bits beyond
406    the precision.  This issue comes up in the relational comparisons
407    where we do allow comparisons of values of different precisions.  */
408 static inline HOST_WIDE_INT
409 selt (const HOST_WIDE_INT *a, unsigned int len,
410       unsigned int blocks_needed, unsigned int small_prec,
411       unsigned int index, signop sgn)
412 {
413   HOST_WIDE_INT val;
414   if (index < len)
415     val = a[index];
416   else if (index < blocks_needed || sgn == SIGNED)
417     /* Signed or within the precision.  */
418     val = SIGN_MASK (a[len - 1]);
419   else
420     /* Unsigned extension beyond the precision. */
421     val = 0;
422 
423   if (small_prec && index == blocks_needed - 1)
424     return (sgn == SIGNED
425 	    ? sext_hwi (val, small_prec)
426 	    : zext_hwi (val, small_prec));
427   else
428     return val;
429 }
430 
431 /* Find the highest bit represented in a wide int.  This will in
432    general have the same value as the sign bit.  */
433 static inline HOST_WIDE_INT
434 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
435 {
436   int excess = len * HOST_BITS_PER_WIDE_INT - prec;
437   unsigned HOST_WIDE_INT val = a[len - 1];
438   if (excess > 0)
439     val <<= excess;
440   return val >> (HOST_BITS_PER_WIDE_INT - 1);
441 }
442 
443 /*
444  * Comparisons, note that only equality is an operator.  The other
445  * comparisons cannot be operators since they are inherently signed or
446  * unsigned and C++ has no such operators.
447  */
448 
449 /* Return true if OP0 == OP1.  */
450 bool
451 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
452 		const HOST_WIDE_INT *op1, unsigned int op1len,
453 		unsigned int prec)
454 {
455   int l0 = op0len - 1;
456   unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
457 
458   if (op0len != op1len)
459     return false;
460 
461   if (op0len == BLOCKS_NEEDED (prec) && small_prec)
462     {
463       /* It does not matter if we zext or sext here, we just have to
464 	 do both the same way.  */
465       if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
466 	return false;
467       l0--;
468     }
469 
470   while (l0 >= 0)
471     if (op0[l0] != op1[l0])
472       return false;
473     else
474       l0--;
475 
476   return true;
477 }
478 
479 /* Return true if OP0 < OP1 using signed comparisons.  */
480 bool
481 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
482 		 unsigned int precision,
483 		 const HOST_WIDE_INT *op1, unsigned int op1len)
484 {
485   HOST_WIDE_INT s0, s1;
486   unsigned HOST_WIDE_INT u0, u1;
487   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
488   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
489   int l = MAX (op0len - 1, op1len - 1);
490 
491   /* Only the top block is compared as signed.  The rest are unsigned
492      comparisons.  */
493   s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
494   s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
495   if (s0 < s1)
496     return true;
497   if (s0 > s1)
498     return false;
499 
500   l--;
501   while (l >= 0)
502     {
503       u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
504       u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
505 
506       if (u0 < u1)
507 	return true;
508       if (u0 > u1)
509 	return false;
510       l--;
511     }
512 
513   return false;
514 }
515 
516 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
517    signed compares.  */
518 int
519 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
520 		unsigned int precision,
521 		const HOST_WIDE_INT *op1, unsigned int op1len)
522 {
523   HOST_WIDE_INT s0, s1;
524   unsigned HOST_WIDE_INT u0, u1;
525   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
526   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
527   int l = MAX (op0len - 1, op1len - 1);
528 
529   /* Only the top block is compared as signed.  The rest are unsigned
530      comparisons.  */
531   s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
532   s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
533   if (s0 < s1)
534     return -1;
535   if (s0 > s1)
536     return 1;
537 
538   l--;
539   while (l >= 0)
540     {
541       u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
542       u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
543 
544       if (u0 < u1)
545 	return -1;
546       if (u0 > u1)
547 	return 1;
548       l--;
549     }
550 
551   return 0;
552 }
553 
554 /* Return true if OP0 < OP1 using unsigned comparisons.  */
555 bool
556 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
557 		 unsigned int precision,
558 		 const HOST_WIDE_INT *op1, unsigned int op1len)
559 {
560   unsigned HOST_WIDE_INT x0;
561   unsigned HOST_WIDE_INT x1;
562   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
563   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
564   int l = MAX (op0len - 1, op1len - 1);
565 
566   while (l >= 0)
567     {
568       x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
569       x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
570       if (x0 < x1)
571 	return true;
572       if (x0 > x1)
573 	return false;
574       l--;
575     }
576 
577   return false;
578 }
579 
580 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
581    unsigned compares.  */
582 int
583 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
584 		unsigned int precision,
585 		const HOST_WIDE_INT *op1, unsigned int op1len)
586 {
587   unsigned HOST_WIDE_INT x0;
588   unsigned HOST_WIDE_INT x1;
589   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
590   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
591   int l = MAX (op0len - 1, op1len - 1);
592 
593   while (l >= 0)
594     {
595       x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
596       x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
597       if (x0 < x1)
598 	return -1;
599       if (x0 > x1)
600 	return 1;
601       l--;
602     }
603 
604   return 0;
605 }
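
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper contrasting the signed and unsigned comparison routines
   above on the 8-bit pattern 0xff, which is -1 signed but 255 unsigned.  */

static void ATTRIBUTE_UNUSED
example_signed_vs_unsigned_compare (void)
{
  HOST_WIDE_INT a[1] = { -1 };
  HOST_WIDE_INT b[1] = { 1 };

  gcc_checking_assert (wi::cmps_large (a, 1, 8, b, 1) < 0);
  gcc_checking_assert (wi::cmpu_large (a, 1, 8, b, 1) > 0);
}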
606 
607 /*
608  * Extension.
609  */
610 
611 /* Sign-extend the number represented by XVAL and XLEN into VAL,
612    starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
613    and VAL have PRECISION bits.  */
614 unsigned int
615 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
616 		unsigned int xlen, unsigned int precision, unsigned int offset)
617 {
618   unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
619   /* Extending beyond the precision is a no-op.  If we have only stored
620      OFFSET bits or fewer, the rest are already signs.  */
621   if (offset >= precision || len >= xlen)
622     {
623       for (unsigned i = 0; i < xlen; ++i)
624 	val[i] = xval[i];
625       return xlen;
626     }
627   unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
628   for (unsigned int i = 0; i < len; i++)
629     val[i] = xval[i];
630   if (suboffset > 0)
631     {
632       val[len] = sext_hwi (xval[len], suboffset);
633       len += 1;
634     }
635   return canonize (val, len, precision);
636 }
637 
638 /* Zero-extend the number represented by XVAL and XLEN into VAL,
639    starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
640    and VAL have PRECISION bits.  */
641 unsigned int
642 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
643 		unsigned int xlen, unsigned int precision, unsigned int offset)
644 {
645   unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
646   /* Extending beyond the precision is a no-op.  If we have only stored
647      OFFSET bits or fewer, and the upper stored bit is zero, then there
648      is nothing to do.  */
649   if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
650     {
651       for (unsigned i = 0; i < xlen; ++i)
652 	val[i] = xval[i];
653       return xlen;
654     }
655   unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
656   for (unsigned int i = 0; i < len; i++)
657     val[i] = i < xlen ? xval[i] : -1;
658   if (suboffset > 0)
659     val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
660   else
661     val[len] = 0;
662   return canonize (val, len + 1, precision);
663 }
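
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper treating bit 7 of a 16-bit value as a sign bit and
   extending from offset 8 with both routines above.  */

static void ATTRIBUTE_UNUSED
example_extension (void)
{
  HOST_WIDE_INT x[1] = { 0x80 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];

  /* Sign extension from offset 8 turns 0x80 into -0x80 (0xff80).  */
  unsigned int len = wi::sext_large (r, x, 1, 16, 8);
  gcc_checking_assert (len == 1 && r[0] == -0x80);

  /* Zero extension from the same offset leaves the value alone.  */
  len = wi::zext_large (r, x, 1, 16, 8);
  gcc_checking_assert (len == 1 && r[0] == 0x80);
}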
664 
665 /*
666  * Masking, inserting, shifting, rotating.
667  */
668 
669 /* Insert WIDTH bits from Y into X starting at START.  */
670 wide_int
671 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
672 	    unsigned int width)
673 {
674   wide_int result;
675   wide_int mask;
676   wide_int tmp;
677 
678   unsigned int precision = x.get_precision ();
679   if (start >= precision)
680     return x;
681 
682   gcc_checking_assert (precision >= width);
683 
684   if (start + width >= precision)
685     width = precision - start;
686 
687   mask = wi::shifted_mask (start, width, false, precision);
688   tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
689   result = tmp & mask;
690 
691   tmp = wi::bit_and_not (x, mask);
692   result = result | tmp;
693 
694   return result;
695 }
696 
697 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
698    Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
699    bits.  */
700 unsigned int
701 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
702 		   unsigned int xlen, unsigned int precision, unsigned int bit)
703 {
704   unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
705   unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
706 
707   if (block + 1 >= xlen)
708     {
709       /* The operation either affects the last current block or needs
710 	 a new block.  */
711       unsigned int len = block + 1;
712       for (unsigned int i = 0; i < len; i++)
713 	val[i] = safe_uhwi (xval, xlen, i);
714       val[block] |= HOST_WIDE_INT_1U << subbit;
715 
716       /* If the bit we just set is at the msb of the block, make sure
717 	 that any higher bits are zeros.  */
718       if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
719 	{
720 	  val[len++] = 0;
721 	  return len;
722 	}
723       return canonize (val, len, precision);
724     }
725   else
726     {
727       for (unsigned int i = 0; i < xlen; i++)
728 	val[i] = xval[i];
729       val[block] |= HOST_WIDE_INT_1U << subbit;
730       return canonize (val, xlen, precision);
731     }
732 }
733 
734 /* bswap THIS.  */
735 wide_int
736 wide_int_storage::bswap () const
737 {
738   wide_int result = wide_int::create (precision);
739   unsigned int i, s;
740   unsigned int len = BLOCKS_NEEDED (precision);
741   unsigned int xlen = get_len ();
742   const HOST_WIDE_INT *xval = get_val ();
743   HOST_WIDE_INT *val = result.write_val ();
744 
745   /* This is not a well defined operation if the precision is not a
746      multiple of 8.  */
747   gcc_assert ((precision & 0x7) == 0);
748 
749   for (i = 0; i < len; i++)
750     val[i] = 0;
751 
752   /* Only swap the bytes that are not the padding.  */
753   for (s = 0; s < precision; s += 8)
754     {
755       unsigned int d = precision - s - 8;
756       unsigned HOST_WIDE_INT byte;
757 
758       unsigned int block = s / HOST_BITS_PER_WIDE_INT;
759       unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
760 
761       byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
762 
763       block = d / HOST_BITS_PER_WIDE_INT;
764       offset = d & (HOST_BITS_PER_WIDE_INT - 1);
765 
766       val[block] |= byte << offset;
767     }
768 
769   result.set_len (canonize (val, len, precision));
770   return result;
771 }
772 
773 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
774    above that up to PREC are zeros.  The result is inverted if NEGATE
775    is true.  Return the number of blocks in VAL.  */
776 unsigned int
777 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
778 	  unsigned int prec)
779 {
780   if (width >= prec)
781     {
782       val[0] = negate ? 0 : -1;
783       return 1;
784     }
785   else if (width == 0)
786     {
787       val[0] = negate ? -1 : 0;
788       return 1;
789     }
790 
791   unsigned int i = 0;
792   while (i < width / HOST_BITS_PER_WIDE_INT)
793     val[i++] = negate ? 0 : -1;
794 
795   unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
796   if (shift != 0)
797     {
798       HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
799       val[i++] = negate ? ~last : last;
800     }
801   else
802     val[i++] = negate ? -1 : 0;
803 
804   return i;
805 }
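
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper showing the masks produced for width 4 in an 8-bit
   precision; the inverted mask is -16 in the canonical sign-extended
   representation.  */

static void ATTRIBUTE_UNUSED
example_mask (void)
{
  HOST_WIDE_INT v[WIDE_INT_MAX_ELTS];

  unsigned int len = wi::mask (v, 4, false, 8);
  gcc_checking_assert (len == 1 && v[0] == 0xf);

  len = wi::mask (v, 4, true, 8);
  gcc_checking_assert (len == 1 && v[0] == -16);
}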
806 
807 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
808    bits are ones, and the bits above that up to PREC are zeros.  The result
809    is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
810 unsigned int
811 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
812 		  bool negate, unsigned int prec)
813 {
814   if (start >= prec || width == 0)
815     {
816       val[0] = negate ? -1 : 0;
817       return 1;
818     }
819 
820   if (width > prec - start)
821     width = prec - start;
822   unsigned int end = start + width;
823 
824   unsigned int i = 0;
825   while (i < start / HOST_BITS_PER_WIDE_INT)
826     val[i++] = negate ? -1 : 0;
827 
828   unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
829   if (shift)
830     {
831       HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
832       shift += width;
833       if (shift < HOST_BITS_PER_WIDE_INT)
834 	{
835 	  /* case 000111000 */
836 	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
837 	  val[i++] = negate ? ~block : block;
838 	  return i;
839 	}
840       else
841 	/* ...111000 */
842 	val[i++] = negate ? block : ~block;
843     }
844 
845   while (i < end / HOST_BITS_PER_WIDE_INT)
846     /* 1111111 */
847     val[i++] = negate ? 0 : -1;
848 
849   shift = end & (HOST_BITS_PER_WIDE_INT - 1);
850   if (shift != 0)
851     {
852       /* 000011111 */
853       HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
854       val[i++] = negate ? ~block : block;
855     }
856   else if (end < prec)
857     val[i++] = negate ? -1 : 0;
858 
859   return i;
860 }
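
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper showing the "000111000" case above: four one bits
   starting at bit 4 of a 16-bit value give 0x00f0.  */

static void ATTRIBUTE_UNUSED
example_shifted_mask (void)
{
  HOST_WIDE_INT v[WIDE_INT_MAX_ELTS];

  unsigned int len = wi::shifted_mask (v, 4, 4, false, 16);
  gcc_checking_assert (len == 1 && v[0] == 0xf0);
}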
861 
862 /*
863  * logical operations.
864  */
865 
866 /* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
867 unsigned int
868 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
869 	       unsigned int op0len, const HOST_WIDE_INT *op1,
870 	       unsigned int op1len, unsigned int prec)
871 {
872   int l0 = op0len - 1;
873   int l1 = op1len - 1;
874   bool need_canon = true;
875 
876   unsigned int len = MAX (op0len, op1len);
877   if (l0 > l1)
878     {
879       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
880       if (op1mask == 0)
881 	{
882 	  l0 = l1;
883 	  len = l1 + 1;
884 	}
885       else
886 	{
887 	  need_canon = false;
888 	  while (l0 > l1)
889 	    {
890 	      val[l0] = op0[l0];
891 	      l0--;
892 	    }
893 	}
894     }
895   else if (l1 > l0)
896     {
897       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
898       if (op0mask == 0)
899 	len = l0 + 1;
900       else
901 	{
902 	  need_canon = false;
903 	  while (l1 > l0)
904 	    {
905 	      val[l1] = op1[l1];
906 	      l1--;
907 	    }
908 	}
909     }
910 
911   while (l0 >= 0)
912     {
913       val[l0] = op0[l0] & op1[l0];
914       l0--;
915     }
916 
917   if (need_canon)
918     len = canonize (val, len, prec);
919 
920   return len;
921 }
922 
923 /* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
924 unsigned int
925 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
926 		   unsigned int op0len, const HOST_WIDE_INT *op1,
927 		   unsigned int op1len, unsigned int prec)
928 {
929   wide_int result;
930   int l0 = op0len - 1;
931   int l1 = op1len - 1;
932   bool need_canon = true;
933 
934   unsigned int len = MAX (op0len, op1len);
935   if (l0 > l1)
936     {
937       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
938       if (op1mask != 0)
939 	{
940 	  l0 = l1;
941 	  len = l1 + 1;
942 	}
943       else
944 	{
945 	  need_canon = false;
946 	  while (l0 > l1)
947 	    {
948 	      val[l0] = op0[l0];
949 	      l0--;
950 	    }
951 	}
952     }
953   else if (l1 > l0)
954     {
955       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
956       if (op0mask == 0)
957 	len = l0 + 1;
958       else
959 	{
960 	  need_canon = false;
961 	  while (l1 > l0)
962 	    {
963 	      val[l1] = ~op1[l1];
964 	      l1--;
965 	    }
966 	}
967     }
968 
969   while (l0 >= 0)
970     {
971       val[l0] = op0[l0] & ~op1[l0];
972       l0--;
973     }
974 
975   if (need_canon)
976     len = canonize (val, len, prec);
977 
978   return len;
979 }
980 
981 /* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
982 unsigned int
983 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
984 	      unsigned int op0len, const HOST_WIDE_INT *op1,
985 	      unsigned int op1len, unsigned int prec)
986 {
987   wide_int result;
988   int l0 = op0len - 1;
989   int l1 = op1len - 1;
990   bool need_canon = true;
991 
992   unsigned int len = MAX (op0len, op1len);
993   if (l0 > l1)
994     {
995       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
996       if (op1mask != 0)
997 	{
998 	  l0 = l1;
999 	  len = l1 + 1;
1000 	}
1001       else
1002 	{
1003 	  need_canon = false;
1004 	  while (l0 > l1)
1005 	    {
1006 	      val[l0] = op0[l0];
1007 	      l0--;
1008 	    }
1009 	}
1010     }
1011   else if (l1 > l0)
1012     {
1013       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1014       if (op0mask != 0)
1015 	len = l0 + 1;
1016       else
1017 	{
1018 	  need_canon = false;
1019 	  while (l1 > l0)
1020 	    {
1021 	      val[l1] = op1[l1];
1022 	      l1--;
1023 	    }
1024 	}
1025     }
1026 
1027   while (l0 >= 0)
1028     {
1029       val[l0] = op0[l0] | op1[l0];
1030       l0--;
1031     }
1032 
1033   if (need_canon)
1034     len = canonize (val, len, prec);
1035 
1036   return len;
1037 }
1038 
1039 /* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
1040 unsigned int
1041 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1042 		  unsigned int op0len, const HOST_WIDE_INT *op1,
1043 		  unsigned int op1len, unsigned int prec)
1044 {
1045   wide_int result;
1046   int l0 = op0len - 1;
1047   int l1 = op1len - 1;
1048   bool need_canon = true;
1049 
1050   unsigned int len = MAX (op0len, op1len);
1051   if (l0 > l1)
1052     {
1053       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1054       if (op1mask == 0)
1055 	{
1056 	  l0 = l1;
1057 	  len = l1 + 1;
1058 	}
1059       else
1060 	{
1061 	  need_canon = false;
1062 	  while (l0 > l1)
1063 	    {
1064 	      val[l0] = op0[l0];
1065 	      l0--;
1066 	    }
1067 	}
1068     }
1069   else if (l1 > l0)
1070     {
1071       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1072       if (op0mask != 0)
1073 	len = l0 + 1;
1074       else
1075 	{
1076 	  need_canon = false;
1077 	  while (l1 > l0)
1078 	    {
1079 	      val[l1] = ~op1[l1];
1080 	      l1--;
1081 	    }
1082 	}
1083     }
1084 
1085   while (l0 >= 0)
1086     {
1087       val[l0] = op0[l0] | ~op1[l0];
1088       l0--;
1089     }
1090 
1091   if (need_canon)
1092     len = canonize (val, len, prec);
1093 
1094   return len;
1095 }
1096 
1097 /* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
1098 unsigned int
1099 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1100 	       unsigned int op0len, const HOST_WIDE_INT *op1,
1101 	       unsigned int op1len, unsigned int prec)
1102 {
1103   wide_int result;
1104   int l0 = op0len - 1;
1105   int l1 = op1len - 1;
1106 
1107   unsigned int len = MAX (op0len, op1len);
1108   if (l0 > l1)
1109     {
1110       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1111       while (l0 > l1)
1112 	{
1113 	  val[l0] = op0[l0] ^ op1mask;
1114 	  l0--;
1115 	}
1116     }
1117 
1118   if (l1 > l0)
1119     {
1120       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1121       while (l1 > l0)
1122 	{
1123 	  val[l1] = op0mask ^ op1[l1];
1124 	  l1--;
1125 	}
1126     }
1127 
1128   while (l0 >= 0)
1129     {
1130       val[l0] = op0[l0] ^ op1[l0];
1131       l0--;
1132     }
1133 
1134   return canonize (val, len, prec);
1135 }
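
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper showing how the logical routines implicitly sign-extend
   the shorter operand.  OP0 is -1 in one block; OP1 is a two-block value
   with only the low bit of the upper block set.  */

static void ATTRIBUTE_UNUSED
example_logical_sign_extension (void)
{
  HOST_WIDE_INT op0[1] = { -1 };
  HOST_WIDE_INT op1[2] = { 0, 1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];

  /* ANDing with all ones keeps OP1 and both of its blocks.  */
  unsigned int len = wi::and_large (r, op0, 1, op1, 2, 128);
  gcc_checking_assert (len == 2 && r[0] == 0 && r[1] == 1);

  /* ORing with all ones collapses back to the single block -1.  */
  len = wi::or_large (r, op0, 1, op1, 2, 128);
  gcc_checking_assert (len == 1 && r[0] == -1);
}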
1136 
1137 /*
1138  * math
1139  */
1140 
1141 /* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
1142    whether the result overflows when OP0 and OP1 are treated as having
1143    signedness SGN.  Return the number of blocks in VAL.  */
1144 unsigned int
1145 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1146 	       unsigned int op0len, const HOST_WIDE_INT *op1,
1147 	       unsigned int op1len, unsigned int prec,
1148 	       signop sgn, wi::overflow_type *overflow)
1149 {
1150   unsigned HOST_WIDE_INT o0 = 0;
1151   unsigned HOST_WIDE_INT o1 = 0;
1152   unsigned HOST_WIDE_INT x = 0;
1153   unsigned HOST_WIDE_INT carry = 0;
1154   unsigned HOST_WIDE_INT old_carry = 0;
1155   unsigned HOST_WIDE_INT mask0, mask1;
1156   unsigned int i;
1157 
1158   unsigned int len = MAX (op0len, op1len);
1159   mask0 = -top_bit_of (op0, op0len, prec);
1160   mask1 = -top_bit_of (op1, op1len, prec);
1161   /* Add all of the explicitly defined elements.  */
1162 
1163   for (i = 0; i < len; i++)
1164     {
1165       o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1166       o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1167       x = o0 + o1 + carry;
1168       val[i] = x;
1169       old_carry = carry;
1170       carry = carry == 0 ? x < o0 : x <= o0;
1171     }
1172 
1173   if (len * HOST_BITS_PER_WIDE_INT < prec)
1174     {
1175       val[len] = mask0 + mask1 + carry;
1176       len++;
1177       if (overflow)
1178 	*overflow
1179 	  = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1180     }
1181   else if (overflow)
1182     {
1183       unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1184       if (sgn == SIGNED)
1185 	{
1186 	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1187 	  if ((HOST_WIDE_INT) (x << shift) < 0)
1188 	    {
1189 	      if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
1190 		*overflow = wi::OVF_UNDERFLOW;
1191 	      else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
1192 		*overflow = wi::OVF_OVERFLOW;
1193 	      else
1194 		*overflow = wi::OVF_NONE;
1195 	    }
1196 	  else
1197 	    *overflow = wi::OVF_NONE;
1198 	}
1199       else
1200 	{
1201 	  /* Put the MSBs of X and O0 in the top of the HWI.  */
1202 	  x <<= shift;
1203 	  o0 <<= shift;
1204 	  if (old_carry)
1205 	    *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1206 	  else
1207 	    *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1208 	}
1209     }
1210 
1211   return canonize (val, len, prec);
1212 }
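
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper showing the unsigned overflow report above: 0xff + 1
   in 8 bits wraps to 0.  */

static void ATTRIBUTE_UNUSED
example_add_overflow (void)
{
  HOST_WIDE_INT a[1] = { -1 };	/* 0xff in the canonical representation.  */
  HOST_WIDE_INT b[1] = { 1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  wi::overflow_type ovf;

  unsigned int len = wi::add_large (r, a, 1, b, 1, 8, UNSIGNED, &ovf);
  gcc_checking_assert (len == 1 && r[0] == 0 && ovf == wi::OVF_OVERFLOW);
}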
1213 
1214 /* Subroutines of the multiplication and division operations.  Unpack
1215    the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1216    HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
1217    uncompressing the top bit of INPUT[IN_LEN - 1].  */
1218 static void
1219 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1220 	   unsigned int in_len, unsigned int out_len,
1221 	   unsigned int prec, signop sgn)
1222 {
1223   unsigned int i;
1224   unsigned int j = 0;
1225   unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1226   unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1227   HOST_WIDE_INT mask;
1228 
1229   if (sgn == SIGNED)
1230     {
1231       mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1232       mask &= HALF_INT_MASK;
1233     }
1234   else
1235     mask = 0;
1236 
1237   for (i = 0; i < blocks_needed - 1; i++)
1238     {
1239       HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1240       result[j++] = x;
1241       result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1242     }
1243 
1244   HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1245   if (small_prec)
1246     {
1247       if (sgn == SIGNED)
1248 	x = sext_hwi (x, small_prec);
1249       else
1250 	x = zext_hwi (x, small_prec);
1251     }
1252   result[j++] = x;
1253   result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1254 
1255   /* Smear the sign bit.  */
1256   while (j < out_len)
1257     result[j++] = mask;
1258 }
1259 
1260 /* The inverse of wi_unpack.  IN_LEN is the number of input
1261    blocks and PRECISION is the precision of the result.  Return the
1262    number of blocks in the canonicalized result.  */
1263 static unsigned int
1264 wi_pack (HOST_WIDE_INT *result,
1265 	 const unsigned HOST_HALF_WIDE_INT *input,
1266 	 unsigned int in_len, unsigned int precision)
1267 {
1268   unsigned int i = 0;
1269   unsigned int j = 0;
1270   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1271 
1272   while (i + 1 < in_len)
1273     {
1274       result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1275 		     | ((unsigned HOST_WIDE_INT) input[i + 1]
1276 			<< HOST_BITS_PER_HALF_WIDE_INT));
1277       i += 2;
1278     }
1279 
1280   /* Handle the case where in_len is odd.   For this we zero extend.  */
1281   if (in_len & 1)
1282     result[j++] = (unsigned HOST_WIDE_INT) input[i];
1283   else if (j < blocks_needed)
1284     result[j++] = 0;
1285   return canonize (result, j, precision);
1286 }
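
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper round-tripping one block through wi_unpack and wi_pack.
   It assumes 64-bit HOST_WIDE_INTs, so each block splits into two
   32-bit digits.  */

static void ATTRIBUTE_UNUSED
example_unpack_pack (void)
{
  if (HOST_BITS_PER_WIDE_INT != 64)
    return;

  HOST_WIDE_INT x[1] = { (HOST_WIDE_INT) 0x123456789abcdef0ULL };
  unsigned HOST_HALF_WIDE_INT halves[2];
  HOST_WIDE_INT back[WIDE_INT_MAX_ELTS];

  /* Splitting the block yields the low and high 32-bit digits.  */
  wi_unpack (halves, x, 1, 2, 64, UNSIGNED);
  gcc_checking_assert (halves[0] == 0x9abcdef0 && halves[1] == 0x12345678);

  /* Packing the digits again reproduces the original block.  */
  unsigned int len = wi_pack (back, halves, 2, 64);
  gcc_checking_assert (len == 1 && back[0] == x[0]);
}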
1287 
1288 /* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
1289    result is returned.
1290 
1291    If HIGH is not set, throw away the upper half after the check is
1292    made to see if it overflows.  Unfortunately there is no better way
1293    to check for overflow than to do this.  If OVERFLOW is nonnull,
1294    record in *OVERFLOW whether the result overflowed.  SGN controls
1295    the signedness and is used to check overflow or if HIGH is set.
1296 
1297    NOTE: Overflow type for signed overflow is not yet implemented.  */
1298 unsigned int
1299 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1300 		  unsigned int op1len, const HOST_WIDE_INT *op2val,
1301 		  unsigned int op2len, unsigned int prec, signop sgn,
1302 		  wi::overflow_type *overflow, bool high)
1303 {
1304   unsigned HOST_WIDE_INT o0, o1, k, t;
1305   unsigned int i;
1306   unsigned int j;
1307   unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1308   unsigned int half_blocks_needed = blocks_needed * 2;
1309   /* The sizes here are scaled to support a 2x largest mode by 2x
1310      largest mode yielding a 4x largest mode result.  This is what is
1311      needed by vpn.  */
1312 
1313   unsigned HOST_HALF_WIDE_INT
1314     u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1315   unsigned HOST_HALF_WIDE_INT
1316     v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1317   /* The '2' in 'R' is because we are internally doing a full
1318      multiply.  */
1319   unsigned HOST_HALF_WIDE_INT
1320     r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1321   HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1322 
1323   /* If the top level routine did not really pass in an overflow, then
1324      just make sure that we never attempt to set it.  */
1325   bool needs_overflow = (overflow != 0);
1326   if (needs_overflow)
1327     *overflow = wi::OVF_NONE;
1328 
1329   wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1330   wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1331 
1332   /* This is a surprisingly common case, so do it first.  */
1333   if (op1 == 0 || op2 == 0)
1334     {
1335       val[0] = 0;
1336       return 1;
1337     }
1338 
1339 #ifdef umul_ppmm
1340   if (sgn == UNSIGNED)
1341     {
1342       /* If the inputs are single HWIs and the output has room for at
1343 	 least two HWIs, we can use umul_ppmm directly.  */
1344       if (prec >= HOST_BITS_PER_WIDE_INT * 2
1345 	  && wi::fits_uhwi_p (op1)
1346 	  && wi::fits_uhwi_p (op2))
1347 	{
1348 	  /* This case never overflows.  */
1349 	  if (high)
1350 	    {
1351 	      val[0] = 0;
1352 	      return 1;
1353 	    }
1354 	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1355 	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1356 	    {
1357 	      val[2] = 0;
1358 	      return 3;
1359 	    }
1360 	  return 1 + (val[1] != 0 || val[0] < 0);
1361 	}
1362       /* Likewise if the output is a full single HWI, except that the
1363 	 upper HWI of the result is only used for determining overflow.
1364 	 (We handle this case inline when overflow isn't needed.)  */
1365       else if (prec == HOST_BITS_PER_WIDE_INT)
1366 	{
1367 	  unsigned HOST_WIDE_INT upper;
1368 	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1369 	  if (needs_overflow)
1370 	    /* Unsigned overflow can only be +OVERFLOW.  */
1371 	    *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1372 	  if (high)
1373 	    val[0] = upper;
1374 	  return 1;
1375 	}
1376     }
1377 #endif
1378 
1379   /* Handle multiplications by 1.  */
1380   if (op1 == 1)
1381     {
1382       if (high)
1383 	{
1384 	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1385 	  return 1;
1386 	}
1387       for (i = 0; i < op2len; i++)
1388 	val[i] = op2val[i];
1389       return op2len;
1390     }
1391   if (op2 == 1)
1392     {
1393       if (high)
1394 	{
1395 	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1396 	  return 1;
1397 	}
1398       for (i = 0; i < op1len; i++)
1399 	val[i] = op1val[i];
1400       return op1len;
1401     }
1402 
1403   /* If we need to check for overflow, we can only do half wide
1404      multiplies quickly because we need to look at the top bits to
1405      check for the overflow.  */
1406   if ((high || needs_overflow)
1407       && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1408     {
1409       unsigned HOST_WIDE_INT r;
1410 
1411       if (sgn == SIGNED)
1412 	{
1413 	  o0 = op1.to_shwi ();
1414 	  o1 = op2.to_shwi ();
1415 	}
1416       else
1417 	{
1418 	  o0 = op1.to_uhwi ();
1419 	  o1 = op2.to_uhwi ();
1420 	}
1421 
1422       r = o0 * o1;
1423       if (needs_overflow)
1424 	{
1425 	  if (sgn == SIGNED)
1426 	    {
1427 	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1428 		/* FIXME: Signed overflow type is not implemented yet.  */
1429 		*overflow = OVF_UNKNOWN;
1430 	    }
1431 	  else
1432 	    {
1433 	      if ((r >> prec) != 0)
1434 		/* Unsigned overflow can only be +OVERFLOW.  */
1435 		*overflow = OVF_OVERFLOW;
1436 	    }
1437 	}
1438       val[0] = high ? r >> prec : r;
1439       return 1;
1440     }
1441 
1442   /* We do unsigned mul and then correct it.  */
1443   wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1444   wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1445 
1446   /* The 2 is for a full mult.  */
1447   memset (r, 0, half_blocks_needed * 2
1448 	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1449 
1450   for (j = 0; j < half_blocks_needed; j++)
1451     {
1452       k = 0;
1453       for (i = 0; i < half_blocks_needed; i++)
1454 	{
1455 	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1456 	       + r[i + j] + k);
1457 	  r[i + j] = t & HALF_INT_MASK;
1458 	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1459 	}
1460       r[j + half_blocks_needed] = k;
1461     }
1462 
1463   /* We did unsigned math above.  For signed we must adjust the
1464      product (assuming we need to see that).  */
1465   if (sgn == SIGNED && (high || needs_overflow))
1466     {
1467       unsigned HOST_WIDE_INT b;
1468       if (wi::neg_p (op1))
1469 	{
1470 	  b = 0;
1471 	  for (i = 0; i < half_blocks_needed; i++)
1472 	    {
1473 	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1474 		- (unsigned HOST_WIDE_INT)v[i] - b;
1475 	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
1476 	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1477 	    }
1478 	}
1479       if (wi::neg_p (op2))
1480 	{
1481 	  b = 0;
1482 	  for (i = 0; i < half_blocks_needed; i++)
1483 	    {
1484 	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1485 		- (unsigned HOST_WIDE_INT)u[i] - b;
1486 	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
1487 	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1488 	    }
1489 	}
1490     }
1491 
1492   if (needs_overflow)
1493     {
1494       HOST_WIDE_INT top;
1495 
1496       /* For unsigned, overflow is true if any of the top bits are set.
1497 	 For signed, overflow is true if any of the top bits are not equal
1498 	 to the sign bit.  */
1499       if (sgn == UNSIGNED)
1500 	top = 0;
1501       else
1502 	{
1503 	  top = r[(half_blocks_needed) - 1];
1504 	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1505 	  top &= mask;
1506 	}
1507 
1508       for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1509 	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1510 	  /* FIXME: Signed overflow type is not implemented yet.  */
1511 	  *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
1512     }
1513 
1514   int r_offset = high ? half_blocks_needed : 0;
1515   return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1516 }
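
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper exercising the half-wide overflow path above: 16 * 16
   does not fit in 8 bits, so the low bits of the product are zero and
   unsigned overflow is reported.  */

static void ATTRIBUTE_UNUSED
example_mul_overflow (void)
{
  HOST_WIDE_INT a[1] = { 16 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  wi::overflow_type ovf;

  unsigned int len = wi::mul_internal (r, a, 1, a, 1, 8, UNSIGNED,
				       &ovf, false);
  gcc_checking_assert (len == 1
		       && (r[0] & 0xff) == 0
		       && ovf == wi::OVF_OVERFLOW);
}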
1517 
1518 /* Compute the population count of X.  */
1519 int
1520 wi::popcount (const wide_int_ref &x)
1521 {
1522   unsigned int i;
1523   int count;
1524 
1525   /* The high order block is special if it is the last block and the
1526      precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
1527      have to clear out any ones above the precision before doing
1528      popcount on this block.  */
1529   count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1530   unsigned int stop = x.len;
1531   if (count < 0)
1532     {
1533       count = popcount_hwi (x.uhigh () << -count);
1534       stop -= 1;
1535     }
1536   else
1537     {
1538       if (x.sign_mask () >= 0)
1539 	count = 0;
1540     }
1541 
1542   for (i = 0; i < stop; ++i)
1543     count += popcount_hwi (x.val[i]);
1544 
1545   return count;
1546 }
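
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused check pairing popcount with the wide_int form of wi::mask
   (declared in wide-int.h): five one bits in a 32-bit value have
   popcount 5, and their inverse within the precision has popcount 27.  */

static void ATTRIBUTE_UNUSED
example_popcount (void)
{
  gcc_checking_assert (wi::popcount (wi::mask (5, false, 32)) == 5);
  gcc_checking_assert (wi::popcount (wi::mask (5, true, 32)) == 27);
}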
1547 
1548 /* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
1549    whether the result overflows when OP0 and OP1 are treated as having
1550    signedness SGN.  Return the number of blocks in VAL.  */
1551 unsigned int
1552 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1553 	       unsigned int op0len, const HOST_WIDE_INT *op1,
1554 	       unsigned int op1len, unsigned int prec,
1555 	       signop sgn, wi::overflow_type *overflow)
1556 {
1557   unsigned HOST_WIDE_INT o0 = 0;
1558   unsigned HOST_WIDE_INT o1 = 0;
1559   unsigned HOST_WIDE_INT x = 0;
1560   /* We implement subtraction as an in place negate and add.  Negation
1561      is just inversion and add 1, so we can do the add of 1 by just
1562      starting the borrow in of the first element at 1.  */
1563   unsigned HOST_WIDE_INT borrow = 0;
1564   unsigned HOST_WIDE_INT old_borrow = 0;
1565 
1566   unsigned HOST_WIDE_INT mask0, mask1;
1567   unsigned int i;
1568 
1569   unsigned int len = MAX (op0len, op1len);
1570   mask0 = -top_bit_of (op0, op0len, prec);
1571   mask1 = -top_bit_of (op1, op1len, prec);
1572 
1573   /* Subtract all of the explicitly defined elements.  */
1574   for (i = 0; i < len; i++)
1575     {
1576       o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1577       o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1578       x = o0 - o1 - borrow;
1579       val[i] = x;
1580       old_borrow = borrow;
1581       borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1582     }
1583 
1584   if (len * HOST_BITS_PER_WIDE_INT < prec)
1585     {
1586       val[len] = mask0 - mask1 - borrow;
1587       len++;
1588       if (overflow)
1589 	*overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
1590     }
1591   else if (overflow)
1592     {
1593       unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1594       if (sgn == SIGNED)
1595 	{
1596 	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1597 	  if ((HOST_WIDE_INT) (x << shift) < 0)
1598 	    {
1599 	      if (o0 > o1)
1600 		*overflow = OVF_UNDERFLOW;
1601 	      else if (o0 < o1)
1602 		*overflow = OVF_OVERFLOW;
1603 	      else
1604 		*overflow = OVF_NONE;
1605 	    }
1606 	  else
1607 	    *overflow = OVF_NONE;
1608 	}
1609       else
1610 	{
1611 	  /* Put the MSBs of X and O0 in the top of the HWI.  */
1612 	  x <<= shift;
1613 	  o0 <<= shift;
1614 	  if (old_borrow)
1615 	    *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
1616 	  else
1617 	    *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
1618 	}
1619     }
1620 
1621   return canonize (val, len, prec);
1622 }
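
/* Illustrative sketch, not part of the original file: a hypothetical,
   unused helper showing the borrow-based underflow report above: 0 - 1
   in 8 unsigned bits wraps to 0xff (canonically stored as -1).  */

static void ATTRIBUTE_UNUSED
example_sub_underflow (void)
{
  HOST_WIDE_INT a[1] = { 0 };
  HOST_WIDE_INT b[1] = { 1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  wi::overflow_type ovf;

  unsigned int len = wi::sub_large (r, a, 1, b, 1, 8, UNSIGNED, &ovf);
  gcc_checking_assert (len == 1 && r[0] == -1 && ovf == wi::OVF_UNDERFLOW);
}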
1623 
1624 
1625 /*
1626  * Division and Mod
1627  */
1628 
1629 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
1630    algorithm is a small modification of the algorithm in Hacker's
1631    Delight by Warren, which itself is a small modification of Knuth's
1632    algorithm.  M is the number of significant elements of U; however,
1633    there needs to be at least one extra element of B_DIVIDEND
1634    allocated.  N is the number of elements of B_DIVISOR.  */
1635 static void
1636 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1637 		   unsigned HOST_HALF_WIDE_INT *b_remainder,
1638 		   unsigned HOST_HALF_WIDE_INT *b_dividend,
1639 		   unsigned HOST_HALF_WIDE_INT *b_divisor,
1640 		   int m, int n)
1641 {
1642   /* The "digits" are of type HOST_HALF_WIDE_INT, half the size of a
1643      HOST_WIDE_INT, and are stored in the lower bits of each word.  This
1644      algorithm should work properly on both 32 and 64 bit
1645      machines.  */
1646   unsigned HOST_WIDE_INT b
1647     = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1648   unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
1649   unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
1650   unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
1651   HOST_WIDE_INT t, k;
1652   int i, j, s;
1653 
1654   /* Single digit divisor.  */
1655   if (n == 1)
1656     {
1657       k = 0;
1658       for (j = m - 1; j >= 0; j--)
1659 	{
1660 	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1661 	  k = ((k * b + b_dividend[j])
1662 	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
1663 		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
1664 	}
1665       b_remainder[0] = k;
1666       return;
1667     }
1668 
1669   s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1670 
1671   if (s)
1672     {
1673       /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
1674 	 algorithm, we can overwrite b_dividend and b_divisor, so we do
1675 	 that.  */
1676       for (i = n - 1; i > 0; i--)
1677 	b_divisor[i] = (b_divisor[i] << s)
1678 	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1679       b_divisor[0] = b_divisor[0] << s;
1680 
1681       b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1682       for (i = m - 1; i > 0; i--)
1683 	b_dividend[i] = (b_dividend[i] << s)
1684 	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1685       b_dividend[0] = b_dividend[0] << s;
1686     }
1687 
1688   /* Main loop.  */
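  /* These are steps D2-D7 of Knuth's Algorithm D: for each quotient
     position, estimate a digit from the top two remaining dividend
     digits, then multiply-and-subtract and correct the rare
     overestimate.  */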
1689   for (j = m - n; j >= 0; j--)
1690     {
1691       qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1692       rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1693     again:
1694       if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1695 	{
1696 	  qhat -= 1;
1697 	  rhat += b_divisor[n-1];
1698 	  if (rhat < b)
1699 	    goto again;
1700 	}
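      /* After the adjustment above, QHAT is either exact or one too
	 large; the add-back code following the multiply-and-subtract
	 handles the latter case.  */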
1701 
1702       /* Multiply and subtract.  */
1703       k = 0;
1704       for (i = 0; i < n; i++)
1705 	{
1706 	  p = qhat * b_divisor[i];
1707 	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1708 	  b_dividend[i + j] = t;
1709 	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1710 	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1711 	}
1712       t = b_dividend[j+n] - k;
1713       b_dividend[j+n] = t;
1714 
1715       b_quotient[j] = qhat;
1716       if (t < 0)
1717 	{
1718 	  b_quotient[j] -= 1;
1719 	  k = 0;
1720 	  for (i = 0; i < n; i++)
1721 	    {
1722 	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1723 	      b_dividend[i+j] = t;
1724 	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1725 	    }
1726 	  b_dividend[j+n] += k;
1727 	}
1728     }
1729   if (s)
1730     for (i = 0; i < n; i++)
1731       b_remainder[i] = (b_dividend[i] >> s)
1732 	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1733   else
1734     for (i = 0; i < n; i++)
1735       b_remainder[i] = b_dividend[i];
1736 }
1737 
1738 
1739 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1740    the result.  If QUOTIENT is nonnull, store the value of the quotient
1741    there and return the number of blocks in it.  The return value is
1742    not defined otherwise.  If REMAINDER is nonnull, store the value
1743    of the remainder there and store the number of blocks in
1744    *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
1745    the division overflowed.  */
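/* A minimal illustrative use, reached through the wi:: wrappers rather
   than by calling this routine directly (sketch only):

     wide_int r;
     wide_int q = wi::divmod_trunc (x, y, SIGNED, &r);

   computes the truncated quotient and the remainder in one call.  */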
1746 unsigned int
1747 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1748 		     HOST_WIDE_INT *remainder,
1749 		     const HOST_WIDE_INT *dividend_val,
1750 		     unsigned int dividend_len, unsigned int dividend_prec,
1751 		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1752 		     unsigned int divisor_prec, signop sgn,
1753 		     wi::overflow_type *oflow)
1754 {
1755   unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1756   unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1757   unsigned HOST_HALF_WIDE_INT
1758     b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1759   unsigned HOST_HALF_WIDE_INT
1760     b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1761   unsigned HOST_HALF_WIDE_INT
1762     b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1763   unsigned HOST_HALF_WIDE_INT
1764     b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1765   unsigned int m, n;
1766   bool dividend_neg = false;
1767   bool divisor_neg = false;
1768   bool overflow = false;
1769   wide_int neg_dividend, neg_divisor;
1770 
1771   wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1772 					   dividend_prec);
1773   wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1774 					  divisor_prec);
1775   if (divisor == 0)
1776     overflow = true;
1777 
1778   /* The smallest signed number / -1 causes overflow.  The dividend_len
1779      check is for speed rather than correctness.  */
1780   if (sgn == SIGNED
1781       && dividend_len == BLOCKS_NEEDED (dividend_prec)
1782       && divisor == -1
1783       && wi::only_sign_bit_p (dividend))
1784     overflow = true;
1785 
1786   /* Handle the overflow cases.  Viewed as an unsigned value, the quotient of
1787      (signed min / -1) has the same representation as the original dividend.
1788      We have traditionally made division by zero act as division by one,
1789      so there too we use the original dividend.  */
1790   if (overflow)
1791     {
1792       if (remainder)
1793 	{
1794 	  *remainder_len = 1;
1795 	  remainder[0] = 0;
1796 	}
1797       if (oflow)
1798 	*oflow = OVF_OVERFLOW;
1799       if (quotient)
1800 	for (unsigned int i = 0; i < dividend_len; ++i)
1801 	  quotient[i] = dividend_val[i];
1802       return dividend_len;
1803     }
1804 
1805   if (oflow)
1806     *oflow = OVF_NONE;
1807 
1808   /* Do it on the host if you can.  */
1809   if (sgn == SIGNED
1810       && wi::fits_shwi_p (dividend)
1811       && wi::fits_shwi_p (divisor))
1812     {
1813       HOST_WIDE_INT o0 = dividend.to_shwi ();
1814       HOST_WIDE_INT o1 = divisor.to_shwi ();
1815 
1816       if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1817 	{
1818 	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1819 	  if (quotient)
1820 	    {
1821 	      quotient[0] = HOST_WIDE_INT_MIN;
1822 	      quotient[1] = 0;
1823 	    }
1824 	  if (remainder)
1825 	    {
1826 	      remainder[0] = 0;
1827 	      *remainder_len = 1;
1828 	    }
1829 	  return 2;
1830 	}
1831       else
1832 	{
1833 	  if (quotient)
1834 	    quotient[0] = o0 / o1;
1835 	  if (remainder)
1836 	    {
1837 	      remainder[0] = o0 % o1;
1838 	      *remainder_len = 1;
1839 	    }
1840 	  return 1;
1841 	}
1842     }
1843 
1844   if (sgn == UNSIGNED
1845       && wi::fits_uhwi_p (dividend)
1846       && wi::fits_uhwi_p (divisor))
1847     {
1848       unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1849       unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1850       unsigned int quotient_len = 1;
1851 
1852       if (quotient)
1853 	{
1854 	  quotient[0] = o0 / o1;
1855 	  quotient_len = canonize_uhwi (quotient, dividend_prec);
1856 	}
1857       if (remainder)
1858 	{
1859 	  remainder[0] = o0 % o1;
1860 	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
1861 	}
1862       return quotient_len;
1863     }
1864 
1865   /* Make the divisor and dividend positive and remember what we
1866      did.  */
1867   if (sgn == SIGNED)
1868     {
1869       if (wi::neg_p (dividend))
1870 	{
1871 	  neg_dividend = -dividend;
1872 	  dividend = neg_dividend;
1873 	  dividend_neg = true;
1874 	}
1875       if (wi::neg_p (divisor))
1876 	{
1877 	  neg_divisor = -divisor;
1878 	  divisor = neg_divisor;
1879 	  divisor_neg = true;
1880 	}
1881     }
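  /* From here on, work on the absolute values: unpack both operands into
     arrays of half-word digits (the base used by divmod_internal_2) and
     trim M and N to the number of significant digits.  */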
1882 
1883   wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1884 	     dividend_blocks_needed, dividend_prec, sgn);
1885   wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1886 	     divisor_blocks_needed, divisor_prec, sgn);
1887 
1888   m = dividend_blocks_needed;
1889   b_dividend[m] = 0;
1890   while (m > 1 && b_dividend[m - 1] == 0)
1891     m--;
1892 
1893   n = divisor_blocks_needed;
1894   while (n > 1 && b_divisor[n - 1] == 0)
1895     n--;
1896 
1897   memset (b_quotient, 0, sizeof (b_quotient));
1898 
1899   divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1900 
1901   unsigned int quotient_len = 0;
1902   if (quotient)
1903     {
1904       quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1905       /* The quotient is negative if exactly one of the divisor or dividend
1906 	 is negative.  */
1907       if (dividend_neg != divisor_neg)
1908 	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1909 				      quotient_len, dividend_prec,
1910 				      UNSIGNED, 0);
1911     }
1912 
1913   if (remainder)
1914     {
1915       *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1916       /* The remainder is always the same sign as the dividend.  */
1917       if (dividend_neg)
1918 	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1919 					*remainder_len, dividend_prec,
1920 					UNSIGNED, 0);
1921     }
1922 
1923   return quotient_len;
1924 }
1925 
1926 /*
1927  * Shifting, rotating and extraction.
1928  */
1929 
1930 /* Left shift XVAL by SHIFT and store the result in VAL.  Return the
1931    number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
1932 unsigned int
1933 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1934 		  unsigned int xlen, unsigned int precision,
1935 		  unsigned int shift)
1936 {
1937   /* Split the shift into a whole-block shift and a subblock shift.  */
1938   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1939   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
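  /* For example, with 64-bit blocks a shift of 70 gives SKIP == 1 and
     SMALL_SHIFT == 6.  */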
1940 
1941   /* The whole-block shift fills with zeros.  */
1942   unsigned int len = BLOCKS_NEEDED (precision);
1943   for (unsigned int i = 0; i < skip; ++i)
1944     val[i] = 0;
1945 
1946   /* It's easier to handle the simple block case specially.  */
1947   if (small_shift == 0)
1948     for (unsigned int i = skip; i < len; ++i)
1949       val[i] = safe_uhwi (xval, xlen, i - skip);
1950   else
1951     {
1952       /* The first unfilled output block is a left shift of the first
1953 	 block in XVAL.  The other output blocks contain bits from two
1954 	 consecutive input blocks.  */
1955       unsigned HOST_WIDE_INT carry = 0;
1956       for (unsigned int i = skip; i < len; ++i)
1957 	{
1958 	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1959 	  val[i] = (x << small_shift) | carry;
1960 	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1961 	}
1962     }
1963   return canonize (val, len, precision);
1964 }
1965 
1966 /* Right shift XVAL by SHIFT and store the result in VAL.  Return the
1967    number of blocks in VAL.  The input has XPRECISION bits and the
1968    output has XPRECISION - SHIFT bits.  */
1969 static unsigned int
1970 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1971 		     unsigned int xlen, unsigned int xprecision,
1972 		     unsigned int shift)
1973 {
1974   /* Split the shift into a whole-block shift and a subblock shift.  */
1975   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1976   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1977 
1978   /* Work out how many blocks are needed to store the significant bits
1979      (excluding the upper zeros or signs).  */
1980   unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1981 
1982   /* It's easier to handle the simple block case specially.  */
1983   if (small_shift == 0)
1984     for (unsigned int i = 0; i < len; ++i)
1985       val[i] = safe_uhwi (xval, xlen, i + skip);
1986   else
1987     {
1988       /* Each output block but the last is a combination of two input blocks.
1989 	 The last block is a right shift of the last block in XVAL.  */
1990       unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1991       for (unsigned int i = 0; i < len; ++i)
1992 	{
1993 	  val[i] = curr >> small_shift;
1994 	  curr = safe_uhwi (xval, xlen, i + skip + 1);
1995 	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1996 	}
1997     }
1998   return len;
1999 }
2000 
2001 /* Logically right shift XVAL by SHIFT and store the result in VAL.
2002    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
2003    VAL has PRECISION bits.  */
2004 unsigned int
2005 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2006 		   unsigned int xlen, unsigned int xprecision,
2007 		   unsigned int precision, unsigned int shift)
2008 {
2009   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
2010 
2011   /* The value we just created has precision XPRECISION - SHIFT.
2012      Zero-extend it to wider precisions.  */
2013   if (precision > xprecision - shift)
2014     {
2015       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2016       if (small_prec)
2017 	val[len - 1] = zext_hwi (val[len - 1], small_prec);
2018       else if (val[len - 1] < 0)
2019 	{
2020 	  /* Add a new block with a zero. */
2021 	  val[len++] = 0;
2022 	  return len;
2023 	}
2024     }
2025   return canonize (val, len, precision);
2026 }
2027 
2028 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
2029    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
2030    VAL has PRECISION bits.  */
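/* For example, at a 16-bit precision, shifting 0x8000 right by 4 gives
   0x0800 under the logical shift (lrshift_large) but 0xf800 here.  */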
2031 unsigned int
2032 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2033 		   unsigned int xlen, unsigned int xprecision,
2034 		   unsigned int precision, unsigned int shift)
2035 {
2036   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
2037 
2038   /* The value we just created has precision XPRECISION - SHIFT.
2039      Sign-extend it to wider types.  */
2040   if (precision > xprecision - shift)
2041     {
2042       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2043       if (small_prec)
2044 	val[len - 1] = sext_hwi (val[len - 1], small_prec);
2045     }
2046   return canonize (val, len, precision);
2047 }
2048 
2049 /* Return the number of leading (upper) zeros in X.  */
2050 int
2051 wi::clz (const wide_int_ref &x)
2052 {
2053   /* Calculate how many bits there are above the highest represented block.  */
2054   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2055 
2056   unsigned HOST_WIDE_INT high = x.uhigh ();
2057   if (count < 0)
2058     /* The upper -COUNT bits of HIGH are not part of the value.
2059        Clear them out.  */
2060     high = (high << -count) >> -count;
2061   else if (x.sign_mask () < 0)
2062     /* The upper bit is set, so there are no leading zeros.  */
2063     return 0;
2064 
2065   /* We don't need to look below HIGH.  Either HIGH is nonzero,
2066      or the top bit of the block below is nonzero; clz_hwi is
2067      HOST_BITS_PER_WIDE_INT in the latter case.  */
2068   return count + clz_hwi (high);
2069 }
2070 
2071 /* Return the number of redundant sign bits in X.  (That is, the number
2072    of bits immediately below the sign bit that have the same value as
2073    the sign bit.)  */
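/* For example, at an 8-bit precision, clrsb of 0xf0 (-16) is 3: the three
   bits immediately below the sign bit are copies of it, the fourth is not.  */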
2074 int
2075 wi::clrsb (const wide_int_ref &x)
2076 {
2077   /* Calculate how many bits there are above the highest represented block.  */
2078   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2079 
2080   unsigned HOST_WIDE_INT high = x.uhigh ();
2081   unsigned HOST_WIDE_INT mask = -1;
2082   if (count < 0)
2083     {
2084       /* The upper -COUNT bits of HIGH are not part of the value.
2085 	 Clear them from both MASK and HIGH.  */
2086       mask >>= -count;
2087       high &= mask;
2088     }
2089 
2090   /* If the top bit is 1, count the number of leading 1s.  If the top
2091      bit is zero, count the number of leading zeros.  */
2092   if (high > mask / 2)
2093     high ^= mask;
2094 
2095   /* There are no sign bits below the top block, so we don't need to look
2096      beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2097      HIGH is 0.  */
2098   return count + clz_hwi (high) - 1;
2099 }
2100 
2101 /* Return the number of trailing (lower) zeros in X.  */
2102 int
2103 wi::ctz (const wide_int_ref &x)
2104 {
2105   if (x.len == 1 && x.ulow () == 0)
2106     return x.precision;
2107 
2108   /* Having dealt with the zero case, there must be a block with a
2109      nonzero bit.  We don't care about the bits above the first 1.  */
2110   unsigned int i = 0;
2111   while (x.val[i] == 0)
2112     ++i;
2113   return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2114 }
2115 
2116 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2117    return -1.  */
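/* For example, exact_log2 of 0x100 is 8, while exact_log2 of 0x180 is -1.  */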
2118 int
2119 wi::exact_log2 (const wide_int_ref &x)
2120 {
2121   /* Reject cases where there are implicit -1 blocks above HIGH.  */
2122   if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2123     return -1;
2124 
2125   /* Set CRUX to the index of the entry that should be nonzero.
2126      If the top block is zero then the next lowest block (if any)
2127      must have the high bit set.  */
2128   unsigned int crux = x.len - 1;
2129   if (crux > 0 && x.val[crux] == 0)
2130     crux -= 1;
2131 
2132   /* Check that all lower blocks are zero.  */
2133   for (unsigned int i = 0; i < crux; ++i)
2134     if (x.val[i] != 0)
2135       return -1;
2136 
2137   /* Get a zero-extended form of block CRUX.  */
2138   unsigned HOST_WIDE_INT hwi = x.val[crux];
2139   if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2140     hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2141 
2142   /* Now it's down to whether HWI is a power of 2.  */
2143   int res = ::exact_log2 (hwi);
2144   if (res >= 0)
2145     res += crux * HOST_BITS_PER_WIDE_INT;
2146   return res;
2147 }
2148 
2149 /* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
2150 int
2151 wi::floor_log2 (const wide_int_ref &x)
2152 {
2153   return x.precision - 1 - clz (x);
2154 }
2155 
2156 /* Return the index of the first (lowest) set bit in X, counting from 1.
2157    Return 0 if X is 0.  */
2158 int
2159 wi::ffs (const wide_int_ref &x)
2160 {
2161   return eq_p (x, 0) ? 0 : ctz (x) + 1;
2162 }
2163 
2164 /* Return true if sign-extending X to have precision PRECISION would give
2165    the minimum signed value at that precision.  */
2166 bool
2167 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2168 {
2169   return ctz (x) + 1 == int (precision);
2170 }
2171 
2172 /* Return true if X represents the minimum signed value.  */
2173 bool
2174 wi::only_sign_bit_p (const wide_int_ref &x)
2175 {
2176   return only_sign_bit_p (x, x.precision);
2177 }
2178 
2179 /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
2180    down to the previous value that has no bits set outside MASK.
2181    This rounding wraps for signed values if VAL is negative and
2182    the top bit of MASK is clear.
2183 
2184    For example, round_down_for_mask (6, 0xf1) would give 1 and
2185    round_down_for_mask (24, 0xf1) would give 17.  */
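/* Working through the second example: 24 is 0b11000, so EXTRA_BITS is
   0b01000, LOWER_MASK is 0b01111, and the result is
   (24 & 0xf1) | (0xf1 & 0xf) = 16 | 1 = 17.  */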
2186 
2187 wide_int
2188 wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
2189 {
2190   /* Get the bits in VAL that are outside the mask.  */
2191   wide_int extra_bits = wi::bit_and_not (val, mask);
2192   if (extra_bits == 0)
2193     return val;
2194 
2195   /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2196      below that bit.  */
2197   unsigned int precision = val.get_precision ();
2198   wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
2199 				  false, precision);
2200 
2201   /* Clear the bits that aren't in MASK, but ensure that all bits
2202      in MASK below the top cleared bit are set.  */
2203   return (val & mask) | (mask & lower_mask);
2204 }
2205 
2206 /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
2207    up to the next value that has no bits set outside MASK.  The rounding
2208    wraps if there are no suitable values greater than VAL.
2209 
2210    For example, round_up_for_mask (6, 0xf1) would give 16 and
2211    round_up_for_mask (24, 0xf1) would give 32.  */
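/* Working through the second example: for 24 (0b11000) and mask 0xf1,
   EXTRA_BITS is 8, UPPER_MASK becomes 0xf0, TMP is 0xe0, and
   (val | tmp) & -tmp yields 0x20 = 32.  */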
2212 
2213 wide_int
2214 wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
2215 {
2216   /* Get the bits in VAL that are outside the mask.  */
2217   wide_int extra_bits = wi::bit_and_not (val, mask);
2218   if (extra_bits == 0)
2219     return val;
2220 
2221   /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
2222   unsigned int precision = val.get_precision ();
2223   wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
2224 				  true, precision);
2225 
2226   /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
2227   upper_mask &= mask;
2228 
2229   /* Conceptually we need to:
2230 
2231      - clear bits of VAL outside UPPER_MASK
2232      - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2233      - propagate the carry through the bits of VAL in UPPER_MASK
2234 
2235      If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2236      reaches that bit and the process leaves all lower bits clear.
2237      If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
2238   wide_int tmp = wi::bit_and_not (upper_mask, val);
2239 
2240   return (val | tmp) & -tmp;
2241 }
2242 
2243 /* Compute the modular multiplicative inverse of A modulo B
2244    using extended Euclid's algorithm.  Assumes A and B are coprime,
2245    and that A and B have the same precision.  */
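/* For example, mod_inv (3, 11) is 4, since 3 * 4 = 12 == 1 (mod 11).  */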
2246 wide_int
2247 wi::mod_inv (const wide_int &a, const wide_int &b)
2248 {
2249   /* Verify the assumption.  */
2250   gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
2251 
2252   unsigned int p = a.get_precision () + 1;
2253   gcc_checking_assert (b.get_precision () + 1 == p);
2254   wide_int c = wide_int::from (a, p, UNSIGNED);
2255   wide_int d = wide_int::from (b, p, UNSIGNED);
2256   wide_int x0 = wide_int::from (0, p, UNSIGNED);
2257   wide_int x1 = wide_int::from (1, p, UNSIGNED);
2258 
2259   if (wi::eq_p (b, 1))
2260     return wide_int::from (1, p, UNSIGNED);
2261 
2262   while (wi::gt_p (c, 1, UNSIGNED))
2263     {
2264       wide_int t = d;
2265       wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
2266       c = t;
2267       wide_int s = x0;
2268       x0 = wi::sub (x1, wi::mul (q, x0));
2269       x1 = s;
2270     }
2271   if (wi::lt_p (x1, 0, SIGNED))
2272     x1 += d;
2273   return x1;
2274 }
2275 
2276 /*
2277  * Private utilities.
2278  */
2279 
2280 void gt_ggc_mx (widest_int *) { }
2281 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2282 void gt_pch_nx (widest_int *) { }
2283 
2284 template void wide_int::dump () const;
2285 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2286 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2287 template void offset_int::dump () const;
2288 template void widest_int::dump () const;
2289 
2290 /* We could add all the above ::dump variants here, but wide_int and
2291    widest_int should handle the common cases.  Besides, you can always
2292    call the dump method directly.  */
2293 
2294 DEBUG_FUNCTION void
2295 debug (const wide_int &ref)
2296 {
2297   ref.dump ();
2298 }
2299 
2300 DEBUG_FUNCTION void
2301 debug (const wide_int *ptr)
2302 {
2303   if (ptr)
2304     debug (*ptr);
2305   else
2306     fprintf (stderr, "<nil>\n");
2307 }
2308 
2309 DEBUG_FUNCTION void
2310 debug (const widest_int &ref)
2311 {
2312   ref.dump ();
2313 }
2314 
2315 DEBUG_FUNCTION void
2316 debug (const widest_int *ptr)
2317 {
2318   if (ptr)
2319     debug (*ptr);
2320   else
2321     fprintf (stderr, "<nil>\n");
2322 }
2323 
2324 #if CHECKING_P
2325 
2326 namespace selftest {
2327 
2328 /* Selftests for wide ints.  We run these multiple times, once per type.  */
2329 
2330 /* Helper function for building a test value.  */
2331 
2332 template <class VALUE_TYPE>
2333 static VALUE_TYPE
2334 from_int (int i);
2335 
2336 /* Specializations of the from_int helper for each wide-int type.  */
2337 
2338 /* Specialization for VALUE_TYPE == wide_int.  */
2339 
2340 template <>
2341 wide_int
2342 from_int (int i)
2343 {
2344   return wi::shwi (i, 32);
2345 }
2346 
2347 /* Specialization for VALUE_TYPE == offset_int.  */
2348 
2349 template <>
2350 offset_int
2351 from_int (int i)
2352 {
2353   return offset_int (i);
2354 }
2355 
2356 /* Specialization for VALUE_TYPE == widest_int.  */
2357 
2358 template <>
2359 widest_int
2360 from_int (int i)
2361 {
2362   return widest_int (i);
2363 }
2364 
2365 /* Verify that print_dec (WI, ..., SGN) gives the expected string
2366    representation (using base 10).  */
2367 
2368 static void
2369 assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
2370 {
2371   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2372   print_dec (wi, buf, sgn);
2373   ASSERT_STREQ (expected, buf);
2374 }
2375 
2376 /* Likewise for base 16.  */
2377 
2378 static void
2379 assert_hexeq (const char *expected, const wide_int_ref &wi)
2380 {
2381   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2382   print_hex (wi, buf);
2383   ASSERT_STREQ (expected, buf);
2384 }
2385 
2386 /* Test cases.  */
2387 
2388 /* Verify that print_dec and print_hex work for VALUE_TYPE.  */
2389 
2390 template <class VALUE_TYPE>
2391 static void
2392 test_printing ()
2393 {
2394   VALUE_TYPE a = from_int<VALUE_TYPE> (42);
2395   assert_deceq ("42", a, SIGNED);
2396   assert_hexeq ("0x2a", a);
2397   assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2398   assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2399   assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
2400   if (WIDE_INT_MAX_PRECISION > 128)
2401     {
2402       assert_hexeq ("0x20000000000000000fffffffffffffffe",
2403 		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2404       assert_hexeq ("0x200000000000004000123456789abcdef",
2405 		    wi::lshift (1, 129) + wi::lshift (1, 74)
2406 		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
2407     }
2408 }
2409 
2410 /* Verify that various operations, unary and binary, work correctly for
2411    VALUE_TYPE, using both function syntax and overloaded
2412    operators.  */
2413 
2414 template <class VALUE_TYPE>
2415 static void
2416 test_ops ()
2417 {
2418   VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2419   VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2420 
2421   /* Using functions.  */
2422   assert_deceq ("-7", wi::neg (a), SIGNED);
2423   assert_deceq ("10", wi::add (a, b), SIGNED);
2424   assert_deceq ("4", wi::sub (a, b), SIGNED);
2425   assert_deceq ("-4", wi::sub (b, a), SIGNED);
2426   assert_deceq ("21", wi::mul (a, b), SIGNED);
2427 
2428   /* Using operators.  */
2429   assert_deceq ("-7", -a, SIGNED);
2430   assert_deceq ("10", a + b, SIGNED);
2431   assert_deceq ("4", a - b, SIGNED);
2432   assert_deceq ("-4", b - a, SIGNED);
2433   assert_deceq ("21", a * b, SIGNED);
2434 }
2435 
2436 /* Verify that various comparisons work correctly for VALUE_TYPE.  */
2437 
2438 template <class VALUE_TYPE>
2439 static void
2440 test_comparisons ()
2441 {
2442   VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2443   VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2444 
2445   /* == */
2446   ASSERT_TRUE (wi::eq_p (a, a));
2447   ASSERT_FALSE (wi::eq_p (a, b));
2448 
2449   /* != */
2450   ASSERT_TRUE (wi::ne_p (a, b));
2451   ASSERT_FALSE (wi::ne_p (a, a));
2452 
2453   /* < */
2454   ASSERT_FALSE (wi::lts_p (a, a));
2455   ASSERT_FALSE (wi::lts_p (a, b));
2456   ASSERT_TRUE (wi::lts_p (b, a));
2457 
2458   /* <= */
2459   ASSERT_TRUE (wi::les_p (a, a));
2460   ASSERT_FALSE (wi::les_p (a, b));
2461   ASSERT_TRUE (wi::les_p (b, a));
2462 
2463   /* > */
2464   ASSERT_FALSE (wi::gts_p (a, a));
2465   ASSERT_TRUE (wi::gts_p (a, b));
2466   ASSERT_FALSE (wi::gts_p (b, a));
2467 
2468   /* >= */
2469   ASSERT_TRUE (wi::ges_p (a, a));
2470   ASSERT_TRUE (wi::ges_p (a, b));
2471   ASSERT_FALSE (wi::ges_p (b, a));
2472 
2473   /* comparison */
2474   ASSERT_EQ (-1, wi::cmps (b, a));
2475   ASSERT_EQ (0, wi::cmps (a, a));
2476   ASSERT_EQ (1, wi::cmps (a, b));
2477 }
2478 
2479 /* Run all of the selftests, using the given VALUE_TYPE.  */
2480 
2481 template <class VALUE_TYPE>
2482 static void run_all_wide_int_tests ()
2483 {
2484   test_printing <VALUE_TYPE> ();
2485   test_ops <VALUE_TYPE> ();
2486   test_comparisons <VALUE_TYPE> ();
2487 }
2488 
2489 /* Test overflow conditions.  */
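/* For each precision, adding 1 to (unsigned max - offset) should wrap
   exactly when OFFSET is 0, and subtracting a larger unsigned value from
   a smaller one should report overflow; the asserts below check both the
   wrapped values and the overflow flags.  */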
2490 
2491 static void
2492 test_overflow ()
2493 {
2494   static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
2495   static int offsets[] = { 16, 1, 0 };
2496   for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
2497     for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
2498       {
2499 	int prec = precs[i];
2500 	int offset = offsets[j];
2501 	wi::overflow_type overflow;
2502 	wide_int sum, diff;
2503 
2504 	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
2505 		       UNSIGNED, &overflow);
2506 	ASSERT_EQ (sum, -offset);
2507 	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2508 
2509 	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
2510 		       UNSIGNED, &overflow);
2511 	ASSERT_EQ (sum, -offset);
2512 	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2513 
2514 	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2515 			wi::max_value (prec, UNSIGNED),
2516 			UNSIGNED, &overflow);
2517 	ASSERT_EQ (diff, -offset);
2518 	ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
2519 
2520 	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2521 			wi::max_value (prec, UNSIGNED) - 1,
2522 			UNSIGNED, &overflow);
2523 	ASSERT_EQ (diff, 1 - offset);
2524 	ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
2525     }
2526 }
2527 
2528 /* Test the round_{down,up}_for_mask functions.  */
2529 
2530 static void
2531 test_round_for_mask ()
2532 {
2533   unsigned int prec = 18;
2534   ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
2535 					  wi::shwi (0xf1, prec)));
2536   ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
2537 					wi::shwi (0xf1, prec)));
2538 
2539   ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
2540 					 wi::shwi (0xf1, prec)));
2541   ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
2542 					wi::shwi (0xf1, prec)));
2543 
2544   ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
2545 					  wi::shwi (0xf1, prec)));
2546   ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
2547 					wi::shwi (0xf1, prec)));
2548 
2549   ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
2550 					     wi::shwi (0x111, prec)));
2551   ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
2552 					   wi::shwi (0x111, prec)));
2553 
2554   ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
2555 					   wi::shwi (0xfc, prec)));
2556   ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
2557 					 wi::shwi (0xfc, prec)));
2558 
2559   ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
2560 					     wi::shwi (0xabc, prec)));
2561   ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
2562 					   wi::shwi (0xabc, prec)));
2563 
2564   ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
2565 					     wi::shwi (0xabc, prec)));
2566   ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
2567 				       wi::shwi (0xabc, prec)));
2568 
2569   ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
2570 					     wi::shwi (0xabc, prec)));
2571   ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
2572 				       wi::shwi (0xabc, prec)));
2573 }
2574 
2575 /* Run all of the selftests within this file, for all value types.  */
2576 
2577 void
2578 wide_int_cc_tests ()
2579 {
2580   run_all_wide_int_tests <wide_int> ();
2581   run_all_wide_int_tests <offset_int> ();
2582   run_all_wide_int_tests <widest_int> ();
2583   test_overflow ();
2584   test_round_for_mask ();
2585 }
2586 
2587 } // namespace selftest
2588 #endif /* CHECKING_P */
2589