1 /* Operations with very long integers.
2    Copyright (C) 2012-2019 Free Software Foundation, Inc.
3    Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by the
9 Free Software Foundation; either version 3, or (at your option) any
10 later version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "selftest.h"
27 
28 
29 #define HOST_BITS_PER_HALF_WIDE_INT 32
30 #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
31 # define HOST_HALF_WIDE_INT long
32 #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
33 # define HOST_HALF_WIDE_INT int
34 #else
35 #error Please add support for HOST_HALF_WIDE_INT
36 #endif
37 
38 #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
39 /* Do not include longlong.h when the compiler is clang-based.  See PR61146.  */
40 #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
41 typedef unsigned HOST_HALF_WIDE_INT UHWtype;
42 typedef unsigned HOST_WIDE_INT UWtype;
43 typedef unsigned int UQItype __attribute__ ((mode (QI)));
44 typedef unsigned int USItype __attribute__ ((mode (SI)));
45 typedef unsigned int UDItype __attribute__ ((mode (DI)));
46 #if W_TYPE_SIZE == 32
47 typedef unsigned int UDWtype __attribute__ ((mode (DI)));
48 #else
49 typedef unsigned int UDWtype __attribute__ ((mode (TI)));
50 #endif
51 #include "longlong.h"
52 #endif
53 
54 static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
55 
56 /*
57  * Internal utilities.
58  */
59 
60 /* Quantities to deal with values that hold half of a wide int.  Used
61    in multiply and divide.  */
62 #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
63 
64 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65 #define BLOCKS_NEEDED(PREC) \
66   (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
67 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
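
/* For illustration (assuming a 64-bit HOST_WIDE_INT, the common
   configuration): BLOCKS_NEEDED (0) == 1, BLOCKS_NEEDED (1) == 1,
   BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2 and
   BLOCKS_NEEDED (128) == 2, while SIGN_MASK (X) is -1 when the top bit
   of X is set and 0 otherwise.  */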
68 
69 /* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
70    based on the top existing bit of VAL.  */
71 
72 static unsigned HOST_WIDE_INT
73 safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
74 {
75   return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
76 }
77 
78 /* Convert the integer in VAL to canonical form, returning its new length.
79    LEN is the number of blocks currently in VAL and PRECISION is the number
80    of bits in the integer it represents.
81 
82    This function only changes the representation, not the value.  */
83 static unsigned int
84 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
85 {
86   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
87   HOST_WIDE_INT top;
88   int i;
89 
90   if (len > blocks_needed)
91     len = blocks_needed;
92 
93   if (len == 1)
94     return len;
95 
96   top = val[len - 1];
97   if (len * HOST_BITS_PER_WIDE_INT > precision)
98     val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
99   if (top != 0 && top != (HOST_WIDE_INT)-1)
100     return len;
101 
102   /* At this point we know that the top is either 0 or -1.  Find the
103      first block that is not a copy of this.  */
104   for (i = len - 2; i >= 0; i--)
105     {
106       HOST_WIDE_INT x = val[i];
107       if (x != top)
108 	{
109 	  if (SIGN_MASK (x) == top)
110 	    return i + 1;
111 
112 	  /* We need an extra block because the top bit of block i does
113 	     not match the extension.  */
114 	  return i + 2;
115 	}
116     }
117 
118   /* The number is 0 or -1.  */
119   return 1;
120 }
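
/* As a worked example (assuming a 64-bit HOST_WIDE_INT): with PRECISION
   128, the blocks { 5, 0 } canonize to the single block { 5 }, because
   dropping the zero block does not change the sign-extended value.  The
   blocks { -1, 0 }, i.e. the unsigned value 0xffff...ffff, must keep both
   blocks, since a lone -1 block would read back as the value -1.  The
   blocks { -1, -1 } canonize to the single block { -1 }.  */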
121 
122 /* VAL[0] is the unsigned result of an operation.  Canonize it by adding
123    another 0 block if needed, and return number of blocks needed.  */
124 
125 static inline unsigned int
126 canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
127 {
128   if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
129     {
130       val[1] = 0;
131       return 2;
132     }
133   return 1;
134 }
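
/* For example (assuming a 64-bit HOST_WIDE_INT): if VAL[0] holds the
   unsigned result 0xffffffffffffffff and PRECISION is 128, its top bit is
   set, so a zero block is appended and the length becomes 2; with
   PRECISION 64 the length stays 1.  */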
135 
136 /*
137  * Conversion routines in and out of wide_int.
138  */
139 
140 /* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
141    result for an integer with precision PRECISION.  Return the length
142    of VAL (after any canonization).  */
143 unsigned int
144 wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
145 		unsigned int xlen, unsigned int precision, bool need_canon)
146 {
147   for (unsigned i = 0; i < xlen; i++)
148     val[i] = xval[i];
149   return need_canon ? canonize (val, xlen, precision) : xlen;
150 }
151 
152 /* Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will be
153    read according to byte endianness and word endianness of the target.
154    Only the lower BUFFER_LEN bytes of the result are set; the remaining
155    high bytes are cleared.  */
156 wide_int
157 wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
158 {
159   unsigned int precision = buffer_len * BITS_PER_UNIT;
160   wide_int result = wide_int::create (precision);
161   unsigned int words = buffer_len / UNITS_PER_WORD;
162 
163   /* We have to clear all the bits ourselves, as we merely or in values
164      below.  */
165   unsigned int len = BLOCKS_NEEDED (precision);
166   HOST_WIDE_INT *val = result.write_val ();
167   for (unsigned int i = 0; i < len; ++i)
168     val[i] = 0;
169 
170   for (unsigned int byte = 0; byte < buffer_len; byte++)
171     {
172       unsigned int offset;
173       unsigned int index;
174       unsigned int bitpos = byte * BITS_PER_UNIT;
175       unsigned HOST_WIDE_INT value;
176 
177       if (buffer_len > UNITS_PER_WORD)
178 	{
179 	  unsigned int word = byte / UNITS_PER_WORD;
180 
181 	  if (WORDS_BIG_ENDIAN)
182 	    word = (words - 1) - word;
183 
184 	  offset = word * UNITS_PER_WORD;
185 
186 	  if (BYTES_BIG_ENDIAN)
187 	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
188 	  else
189 	    offset += byte % UNITS_PER_WORD;
190 	}
191       else
192 	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
193 
194       value = (unsigned HOST_WIDE_INT) buffer[offset];
195 
196       index = bitpos / HOST_BITS_PER_WIDE_INT;
197       val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
198     }
199 
200   result.set_len (canonize (val, len, precision));
201 
202   return result;
203 }
204 
205 /* Set RESULT from X; the sign is taken according to SGN.  */
206 void
207 wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
208 {
209   int len = x.get_len ();
210   const HOST_WIDE_INT *v = x.get_val ();
211   int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
212 
213   if (wi::neg_p (x, sgn))
214     {
215       /* We use ones' complement to avoid the -0x80..0 edge case that
216 	 unary negation cannot handle.  */
217       HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
218       for (int i = 0; i < len; i++)
219 	t[i] = ~v[i];
220       if (excess > 0)
221 	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
222       mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
223       mpz_com (result, result);
224     }
225   else if (excess > 0)
226     {
227       HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
228       for (int i = 0; i < len - 1; i++)
229 	t[i] = v[i];
230       t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
231       mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
232     }
233   else
234     mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
235 }
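
/* A small example of the ones' complement trick above: to export the
   value -5, each block is complemented to give 4, that value is imported
   as a non-negative number, and mpz_com turns the result back into -5,
   without ever having to negate the most negative representable block.  */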
236 
237 /* Returns X converted to TYPE.  If WRAP is true, then out-of-range
238    values of X will be wrapped; otherwise, they will be set to the
239    appropriate minimum or maximum TYPE bound.  */
240 wide_int
241 wi::from_mpz (const_tree type, mpz_t x, bool wrap)
242 {
243   size_t count, numb;
244   unsigned int prec = TYPE_PRECISION (type);
245   wide_int res = wide_int::create (prec);
246 
247   if (!wrap)
248     {
249       mpz_t min, max;
250 
251       mpz_init (min);
252       mpz_init (max);
253       get_type_static_bounds (type, min, max);
254 
255       if (mpz_cmp (x, min) < 0)
256 	mpz_set (x, min);
257       else if (mpz_cmp (x, max) > 0)
258 	mpz_set (x, max);
259 
260       mpz_clear (min);
261       mpz_clear (max);
262     }
263 
264   /* Determine the number of unsigned HOST_WIDE_INTs that are required
265      for representing the absolute value.  The code to calculate count is
266      extracted from the GMP manual, section "Integer Import and Export":
267      http://gmplib.org/manual/Integer-Import-and-Export.html  */
268   numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
269   count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
270   HOST_WIDE_INT *val = res.write_val ();
271   /* Read the absolute value.
272 
273      Write directly to the wide_int storage if possible, otherwise leave
274      GMP to allocate the memory for us.  It might be slightly more efficient
275      to use mpz_tdiv_r_2exp for the latter case, but the situation is
276      pathological and it seems safer to operate on the original mpz value
277      in all cases.  */
278   void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
279 			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
280   if (count < 1)
281     {
282       val[0] = 0;
283       count = 1;
284     }
285   count = MIN (count, BLOCKS_NEEDED (prec));
286   if (valres != val)
287     {
288       memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
289       free (valres);
290     }
291   /* Zero-extend the absolute value to PREC bits.  */
292   if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
293     val[count++] = 0;
294   else
295     count = canonize (val, count, prec);
296   res.set_len (count);
297 
298   if (mpz_sgn (x) < 0)
299     res = -res;
300 
301   return res;
302 }
303 
304 /*
305  * Largest and smallest values in a mode.
306  */
307 
308 /* Return the largest SGNed number that is representable in PRECISION bits.
309 
310    TODO: There is still code from the double_int era that tries to
311    make up for the fact that double int's could not represent the
312    min and max values of all types.  This code should be removed
313    because the min and max values can always be represented in
314    wide_ints and int-csts.  */
315 wide_int
316 wi::max_value (unsigned int precision, signop sgn)
317 {
318   gcc_checking_assert (precision != 0);
319   if (sgn == UNSIGNED)
320     /* The unsigned max is just all ones.  */
321     return shwi (-1, precision);
322   else
323     /* The signed max is all ones except the top bit.  This must be
324        explicitly represented.  */
325     return mask (precision - 1, false, precision);
326 }
327 
328 /* Return the smallest SGNed number that is representable in PRECISION bits.  */
329 wide_int
330 wi::min_value (unsigned int precision, signop sgn)
331 {
332   gcc_checking_assert (precision != 0);
333   if (sgn == UNSIGNED)
334     return uhwi (0, precision);
335   else
336     /* The signed min is all zeros except the top bit.  This must be
337        explicitly represented.  */
338     return wi::set_bit_in_zero (precision - 1, precision);
339 }
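
/* For illustration: with PRECISION 8, wi::max_value returns 255 for
   UNSIGNED and 127 for SIGNED, while wi::min_value returns 0 for UNSIGNED
   and -128 for SIGNED.  */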
340 
341 /*
342  * Public utilities.
343  */
344 
345 /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
346    signedness SGN, to an integer that has PRECISION bits.  Store the blocks
347    in VAL and return the number of blocks used.
348 
349    This function can handle both extension (PRECISION > XPRECISION)
350    and truncation (PRECISION < XPRECISION).  */
351 unsigned int
352 wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
353 		   unsigned int xlen, unsigned int xprecision,
354 		   unsigned int precision, signop sgn)
355 {
356   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
357   unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
358   for (unsigned i = 0; i < len; i++)
359     val[i] = xval[i];
360 
361   if (precision > xprecision)
362     {
363       unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
364 
365       /* Expanding.  */
366       if (sgn == UNSIGNED)
367 	{
368 	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
369 	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
370 	  else if (val[len - 1] < 0)
371 	    {
372 	      while (len < BLOCKS_NEEDED (xprecision))
373 		val[len++] = -1;
374 	      if (small_xprecision)
375 		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
376 	      else
377 		val[len++] = 0;
378 	    }
379 	}
380       else
381 	{
382 	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
383 	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
384 	}
385     }
386   len = canonize (val, len, precision);
387 
388   return len;
389 }
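
/* For example, extending the 8-bit value whose bit pattern is 0xff to
   32 bits gives 255 with SGN == UNSIGNED (the top block is zero-extended
   from bit 8) and -1 with SGN == SIGNED (the top block is sign-extended
   from bit 8).  */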
390 
391 /* This function hides the fact that we cannot rely on the bits beyond
392    the precision.  This issue comes up in the relational comparisons
393    where we do allow comparisons of values of different precisions.  */
394 static inline HOST_WIDE_INT
395 selt (const HOST_WIDE_INT *a, unsigned int len,
396       unsigned int blocks_needed, unsigned int small_prec,
397       unsigned int index, signop sgn)
398 {
399   HOST_WIDE_INT val;
400   if (index < len)
401     val = a[index];
402   else if (index < blocks_needed || sgn == SIGNED)
403     /* Signed or within the precision.  */
404     val = SIGN_MASK (a[len - 1]);
405   else
406     /* Unsigned extension beyond the precision. */
407     val = 0;
408 
409   if (small_prec && index == blocks_needed - 1)
410     return (sgn == SIGNED
411 	    ? sext_hwi (val, small_prec)
412 	    : zext_hwi (val, small_prec));
413   else
414     return val;
415 }
416 
417 /* Return the value of the highest bit represented in a wide int.  This
418    will in general have the same value as the sign bit.  */
419 static inline HOST_WIDE_INT
420 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
421 {
422   int excess = len * HOST_BITS_PER_WIDE_INT - prec;
423   unsigned HOST_WIDE_INT val = a[len - 1];
424   if (excess > 0)
425     val <<= excess;
426   return val >> (HOST_BITS_PER_WIDE_INT - 1);
427 }
428 
429 /*
430  * Comparisons.  Note that only equality is an operator.  The other
431  * comparisons cannot be operators since they are inherently signed or
432  * unsigned and C++ has no such operators.
433  */
434 
435 /* Return true if OP0 == OP1.  */
436 bool
437 wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
438 		const HOST_WIDE_INT *op1, unsigned int op1len,
439 		unsigned int prec)
440 {
441   int l0 = op0len - 1;
442   unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
443 
444   if (op0len != op1len)
445     return false;
446 
447   if (op0len == BLOCKS_NEEDED (prec) && small_prec)
448     {
449       /* It does not matter if we zext or sext here, we just have to
450 	 do both the same way.  */
451       if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
452 	return false;
453       l0--;
454     }
455 
456   while (l0 >= 0)
457     if (op0[l0] != op1[l0])
458       return false;
459     else
460       l0--;
461 
462   return true;
463 }
464 
465 /* Return true if OP0 < OP1 using signed comparisons.  */
466 bool
467 wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
468 		 unsigned int precision,
469 		 const HOST_WIDE_INT *op1, unsigned int op1len)
470 {
471   HOST_WIDE_INT s0, s1;
472   unsigned HOST_WIDE_INT u0, u1;
473   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
474   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
475   int l = MAX (op0len - 1, op1len - 1);
476 
477   /* Only the top block is compared as signed.  The rest are unsigned
478      comparisons.  */
479   s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
480   s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
481   if (s0 < s1)
482     return true;
483   if (s0 > s1)
484     return false;
485 
486   l--;
487   while (l >= 0)
488     {
489       u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
490       u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
491 
492       if (u0 < u1)
493 	return true;
494       if (u0 > u1)
495 	return false;
496       l--;
497     }
498 
499   return false;
500 }
501 
502 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
503    signed compares.  */
504 int
505 wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
506 		unsigned int precision,
507 		const HOST_WIDE_INT *op1, unsigned int op1len)
508 {
509   HOST_WIDE_INT s0, s1;
510   unsigned HOST_WIDE_INT u0, u1;
511   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
512   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
513   int l = MAX (op0len - 1, op1len - 1);
514 
515   /* Only the top block is compared as signed.  The rest are unsigned
516      comparisons.  */
517   s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
518   s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
519   if (s0 < s1)
520     return -1;
521   if (s0 > s1)
522     return 1;
523 
524   l--;
525   while (l >= 0)
526     {
527       u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
528       u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
529 
530       if (u0 < u1)
531 	return -1;
532       if (u0 > u1)
533 	return 1;
534       l--;
535     }
536 
537   return 0;
538 }
539 
540 /* Return true if OP0 < OP1 using unsigned comparisons.  */
541 bool
542 wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
543 		 unsigned int precision,
544 		 const HOST_WIDE_INT *op1, unsigned int op1len)
545 {
546   unsigned HOST_WIDE_INT x0;
547   unsigned HOST_WIDE_INT x1;
548   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
549   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
550   int l = MAX (op0len - 1, op1len - 1);
551 
552   while (l >= 0)
553     {
554       x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
555       x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
556       if (x0 < x1)
557 	return true;
558       if (x0 > x1)
559 	return false;
560       l--;
561     }
562 
563   return false;
564 }
565 
566 /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
567    unsigned compares.  */
568 int
569 wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
570 		unsigned int precision,
571 		const HOST_WIDE_INT *op1, unsigned int op1len)
572 {
573   unsigned HOST_WIDE_INT x0;
574   unsigned HOST_WIDE_INT x1;
575   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
576   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
577   int l = MAX (op0len - 1, op1len - 1);
578 
579   while (l >= 0)
580     {
581       x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
582       x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
583       if (x0 < x1)
584 	return -1;
585       if (x0 > x1)
586 	return 1;
587       l--;
588     }
589 
590   return 0;
591 }
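
/* To illustrate the difference between the signed and unsigned orderings:
   with a precision of 8 bits, the block value 0xff reads as -1 in
   cmps_large, which therefore orders it below 1, whereas cmpu_large reads
   it as 255 and orders it above 1.  */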
592 
593 /*
594  * Extension.
595  */
596 
597 /* Sign-extend the number represented by XVAL and XLEN into VAL,
598    starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
599    and VAL have PRECISION bits.  */
600 unsigned int
601 wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
602 		unsigned int xlen, unsigned int precision, unsigned int offset)
603 {
604   unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
605   /* Extending beyond the precision is a no-op.  If we have only stored
606      OFFSET bits or fewer, the rest are already signs.  */
607   if (offset >= precision || len >= xlen)
608     {
609       for (unsigned i = 0; i < xlen; ++i)
610 	val[i] = xval[i];
611       return xlen;
612     }
613   unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
614   for (unsigned int i = 0; i < len; i++)
615     val[i] = xval[i];
616   if (suboffset > 0)
617     {
618       val[len] = sext_hwi (xval[len], suboffset);
619       len += 1;
620     }
621   return canonize (val, len, precision);
622 }
623 
624 /* Zero-extend the number represented by XVAL and XLEN into VAL,
625    starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
626    and VAL have PRECISION bits.  */
627 unsigned int
628 wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
629 		unsigned int xlen, unsigned int precision, unsigned int offset)
630 {
631   unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
632   /* Extending beyond the precision is a no-op.  If we have only stored
633      OFFSET bits or fewer, and the upper stored bit is zero, then there
634      is nothing to do.  */
635   if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
636     {
637       for (unsigned i = 0; i < xlen; ++i)
638 	val[i] = xval[i];
639       return xlen;
640     }
641   unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
642   for (unsigned int i = 0; i < len; i++)
643     val[i] = i < xlen ? xval[i] : -1;
644   if (suboffset > 0)
645     val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
646   else
647     val[len] = 0;
648   return canonize (val, len + 1, precision);
649 }
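
/* For example, with PRECISION 16 and the single block 0xff: zext_large
   with OFFSET 8 leaves the value 0x00ff, while sext_large with OFFSET 8
   smears bit 7 upwards and yields 0xffff, i.e. -1 in 16 bits.  */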
650 
651 /*
652  * Masking, inserting, shifting, rotating.
653  */
654 
655 /* Insert WIDTH bits from Y into X starting at START.  */
656 wide_int
657 wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
658 	    unsigned int width)
659 {
660   wide_int result;
661   wide_int mask;
662   wide_int tmp;
663 
664   unsigned int precision = x.get_precision ();
665   if (start >= precision)
666     return x;
667 
668   gcc_checking_assert (precision >= width);
669 
670   if (start + width >= precision)
671     width = precision - start;
672 
673   mask = wi::shifted_mask (start, width, false, precision);
674   tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
675   result = tmp & mask;
676 
677   tmp = wi::bit_and_not (x, mask);
678   result = result | tmp;
679 
680   return result;
681 }
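
/* For example, inserting WIDTH == 3 bits of Y == 5 (binary 101) into
   X == 0 (both with a precision of 32 bits) at START == 4 produces the
   value 0x50, i.e. binary 1010000.  */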
682 
683 /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
684    Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
685    bits.  */
686 unsigned int
687 wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
688 		   unsigned int xlen, unsigned int precision, unsigned int bit)
689 {
690   unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
691   unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
692 
693   if (block + 1 >= xlen)
694     {
695       /* The operation either affects the last current block or needs
696 	 a new block.  */
697       unsigned int len = block + 1;
698       for (unsigned int i = 0; i < len; i++)
699 	val[i] = safe_uhwi (xval, xlen, i);
700       val[block] |= HOST_WIDE_INT_1U << subbit;
701 
702       /* If the bit we just set is at the msb of the block, make sure
703 	 that any higher bits are zeros.  */
704       if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
705 	val[len++] = 0;
706       return len;
707     }
708   else
709     {
710       for (unsigned int i = 0; i < xlen; i++)
711 	val[i] = xval[i];
712       val[block] |= HOST_WIDE_INT_1U << subbit;
713       return canonize (val, xlen, precision);
714     }
715 }
716 
717 /* bswap THIS.  */
718 wide_int
719 wide_int_storage::bswap () const
720 {
721   wide_int result = wide_int::create (precision);
722   unsigned int i, s;
723   unsigned int len = BLOCKS_NEEDED (precision);
724   unsigned int xlen = get_len ();
725   const HOST_WIDE_INT *xval = get_val ();
726   HOST_WIDE_INT *val = result.write_val ();
727 
728   /* This is not a well defined operation if the precision is not a
729      multiple of 8.  */
730   gcc_assert ((precision & 0x7) == 0);
731 
732   for (i = 0; i < len; i++)
733     val[i] = 0;
734 
735   /* Only swap the bytes that are not the padding.  */
736   for (s = 0; s < precision; s += 8)
737     {
738       unsigned int d = precision - s - 8;
739       unsigned HOST_WIDE_INT byte;
740 
741       unsigned int block = s / HOST_BITS_PER_WIDE_INT;
742       unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
743 
744       byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
745 
746       block = d / HOST_BITS_PER_WIDE_INT;
747       offset = d & (HOST_BITS_PER_WIDE_INT - 1);
748 
749       val[block] |= byte << offset;
750     }
751 
752   result.set_len (canonize (val, len, precision));
753   return result;
754 }
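
/* For example, with a precision of 32 bits the value 0x11223344
   byte-swaps to 0x44332211.  */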
755 
756 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
757    above that up to PREC are zeros.  The result is inverted if NEGATE
758    is true.  Return the number of blocks in VAL.  */
759 unsigned int
760 wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
761 	  unsigned int prec)
762 {
763   if (width >= prec)
764     {
765       val[0] = negate ? 0 : -1;
766       return 1;
767     }
768   else if (width == 0)
769     {
770       val[0] = negate ? -1 : 0;
771       return 1;
772     }
773 
774   unsigned int i = 0;
775   while (i < width / HOST_BITS_PER_WIDE_INT)
776     val[i++] = negate ? 0 : -1;
777 
778   unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
779   if (shift != 0)
780     {
781       HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
782       val[i++] = negate ? ~last : last;
783     }
784   else
785     val[i++] = negate ? -1 : 0;
786 
787   return i;
788 }
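
/* For example, wi::mask (val, 4, false, 16) produces the single block
   0x000f (the low four bits set); with NEGATE true the value within the
   16-bit precision is 0xfff0.  */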
789 
790 /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
791    bits are ones, and the bits above that up to PREC are zeros.  The result
792    is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
793 unsigned int
794 wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
795 		  bool negate, unsigned int prec)
796 {
797   if (start >= prec || width == 0)
798     {
799       val[0] = negate ? -1 : 0;
800       return 1;
801     }
802 
803   if (width > prec - start)
804     width = prec - start;
805   unsigned int end = start + width;
806 
807   unsigned int i = 0;
808   while (i < start / HOST_BITS_PER_WIDE_INT)
809     val[i++] = negate ? -1 : 0;
810 
811   unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
812   if (shift)
813     {
814       HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
815       shift += width;
816       if (shift < HOST_BITS_PER_WIDE_INT)
817 	{
818 	  /* case 000111000 */
819 	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
820 	  val[i++] = negate ? ~block : block;
821 	  return i;
822 	}
823       else
824 	/* ...111000 */
825 	val[i++] = negate ? block : ~block;
826     }
827 
828   while (i < end / HOST_BITS_PER_WIDE_INT)
829     /* 1111111 */
830     val[i++] = negate ? 0 : -1;
831 
832   shift = end & (HOST_BITS_PER_WIDE_INT - 1);
833   if (shift != 0)
834     {
835       /* 000011111 */
836       HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
837       val[i++] = negate ? ~block : block;
838     }
839   else if (end < prec)
840     val[i++] = negate ? -1 : 0;
841 
842   return i;
843 }
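
/* For example, wi::shifted_mask (val, 4, 8, false, 32) sets bits 4 to 11
   and produces the single block 0x00000ff0.  */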
844 
845 /*
846  * Logical operations.
847  */
848 
849 /* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
850 unsigned int
851 wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
852 	       unsigned int op0len, const HOST_WIDE_INT *op1,
853 	       unsigned int op1len, unsigned int prec)
854 {
855   int l0 = op0len - 1;
856   int l1 = op1len - 1;
857   bool need_canon = true;
858 
859   unsigned int len = MAX (op0len, op1len);
860   if (l0 > l1)
861     {
862       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
863       if (op1mask == 0)
864 	{
865 	  l0 = l1;
866 	  len = l1 + 1;
867 	}
868       else
869 	{
870 	  need_canon = false;
871 	  while (l0 > l1)
872 	    {
873 	      val[l0] = op0[l0];
874 	      l0--;
875 	    }
876 	}
877     }
878   else if (l1 > l0)
879     {
880       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
881       if (op0mask == 0)
882 	len = l0 + 1;
883       else
884 	{
885 	  need_canon = false;
886 	  while (l1 > l0)
887 	    {
888 	      val[l1] = op1[l1];
889 	      l1--;
890 	    }
891 	}
892     }
893 
894   while (l0 >= 0)
895     {
896       val[l0] = op0[l0] & op1[l0];
897       l0--;
898     }
899 
900   if (need_canon)
901     len = canonize (val, len, prec);
902 
903   return len;
904 }
905 
906 /* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
907 unsigned int
908 wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
909 		   unsigned int op0len, const HOST_WIDE_INT *op1,
910 		   unsigned int op1len, unsigned int prec)
911 {
912   wide_int result;
913   int l0 = op0len - 1;
914   int l1 = op1len - 1;
915   bool need_canon = true;
916 
917   unsigned int len = MAX (op0len, op1len);
918   if (l0 > l1)
919     {
920       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
921       if (op1mask != 0)
922 	{
923 	  l0 = l1;
924 	  len = l1 + 1;
925 	}
926       else
927 	{
928 	  need_canon = false;
929 	  while (l0 > l1)
930 	    {
931 	      val[l0] = op0[l0];
932 	      l0--;
933 	    }
934 	}
935     }
936   else if (l1 > l0)
937     {
938       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
939       if (op0mask == 0)
940 	len = l0 + 1;
941       else
942 	{
943 	  need_canon = false;
944 	  while (l1 > l0)
945 	    {
946 	      val[l1] = ~op1[l1];
947 	      l1--;
948 	    }
949 	}
950     }
951 
952   while (l0 >= 0)
953     {
954       val[l0] = op0[l0] & ~op1[l0];
955       l0--;
956     }
957 
958   if (need_canon)
959     len = canonize (val, len, prec);
960 
961   return len;
962 }
963 
964 /* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
965 unsigned int
966 wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
967 	      unsigned int op0len, const HOST_WIDE_INT *op1,
968 	      unsigned int op1len, unsigned int prec)
969 {
970   wide_int result;
971   int l0 = op0len - 1;
972   int l1 = op1len - 1;
973   bool need_canon = true;
974 
975   unsigned int len = MAX (op0len, op1len);
976   if (l0 > l1)
977     {
978       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
979       if (op1mask != 0)
980 	{
981 	  l0 = l1;
982 	  len = l1 + 1;
983 	}
984       else
985 	{
986 	  need_canon = false;
987 	  while (l0 > l1)
988 	    {
989 	      val[l0] = op0[l0];
990 	      l0--;
991 	    }
992 	}
993     }
994   else if (l1 > l0)
995     {
996       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
997       if (op0mask != 0)
998 	len = l0 + 1;
999       else
1000 	{
1001 	  need_canon = false;
1002 	  while (l1 > l0)
1003 	    {
1004 	      val[l1] = op1[l1];
1005 	      l1--;
1006 	    }
1007 	}
1008     }
1009 
1010   while (l0 >= 0)
1011     {
1012       val[l0] = op0[l0] | op1[l0];
1013       l0--;
1014     }
1015 
1016   if (need_canon)
1017     len = canonize (val, len, prec);
1018 
1019   return len;
1020 }
1021 
1022 /* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
1023 unsigned int
1024 wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1025 		  unsigned int op0len, const HOST_WIDE_INT *op1,
1026 		  unsigned int op1len, unsigned int prec)
1027 {
1028   wide_int result;
1029   int l0 = op0len - 1;
1030   int l1 = op1len - 1;
1031   bool need_canon = true;
1032 
1033   unsigned int len = MAX (op0len, op1len);
1034   if (l0 > l1)
1035     {
1036       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1037       if (op1mask == 0)
1038 	{
1039 	  l0 = l1;
1040 	  len = l1 + 1;
1041 	}
1042       else
1043 	{
1044 	  need_canon = false;
1045 	  while (l0 > l1)
1046 	    {
1047 	      val[l0] = op0[l0];
1048 	      l0--;
1049 	    }
1050 	}
1051     }
1052   else if (l1 > l0)
1053     {
1054       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1055       if (op0mask != 0)
1056 	len = l0 + 1;
1057       else
1058 	{
1059 	  need_canon = false;
1060 	  while (l1 > l0)
1061 	    {
1062 	      val[l1] = ~op1[l1];
1063 	      l1--;
1064 	    }
1065 	}
1066     }
1067 
1068   while (l0 >= 0)
1069     {
1070       val[l0] = op0[l0] | ~op1[l0];
1071       l0--;
1072     }
1073 
1074   if (need_canon)
1075     len = canonize (val, len, prec);
1076 
1077   return len;
1078 }
1079 
1080 /* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
1081 unsigned int
1082 wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1083 	       unsigned int op0len, const HOST_WIDE_INT *op1,
1084 	       unsigned int op1len, unsigned int prec)
1085 {
1086   wide_int result;
1087   int l0 = op0len - 1;
1088   int l1 = op1len - 1;
1089 
1090   unsigned int len = MAX (op0len, op1len);
1091   if (l0 > l1)
1092     {
1093       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1094       while (l0 > l1)
1095 	{
1096 	  val[l0] = op0[l0] ^ op1mask;
1097 	  l0--;
1098 	}
1099     }
1100 
1101   if (l1 > l0)
1102     {
1103       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1104       while (l1 > l0)
1105 	{
1106 	  val[l1] = op0mask ^ op1[l1];
1107 	  l1--;
1108 	}
1109     }
1110 
1111   while (l0 >= 0)
1112     {
1113       val[l0] = op0[l0] ^ op1[l0];
1114       l0--;
1115     }
1116 
1117   return canonize (val, len, prec);
1118 }
1119 
1120 /*
1121  * Math.
1122  */
1123 
1124 /* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
1125    whether the result overflows when OP0 and OP1 are treated as having
1126    signedness SGN.  Return the number of blocks in VAL.  */
1127 unsigned int
1128 wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1129 	       unsigned int op0len, const HOST_WIDE_INT *op1,
1130 	       unsigned int op1len, unsigned int prec,
1131 	       signop sgn, wi::overflow_type *overflow)
1132 {
1133   unsigned HOST_WIDE_INT o0 = 0;
1134   unsigned HOST_WIDE_INT o1 = 0;
1135   unsigned HOST_WIDE_INT x = 0;
1136   unsigned HOST_WIDE_INT carry = 0;
1137   unsigned HOST_WIDE_INT old_carry = 0;
1138   unsigned HOST_WIDE_INT mask0, mask1;
1139   unsigned int i;
1140 
1141   unsigned int len = MAX (op0len, op1len);
1142   mask0 = -top_bit_of (op0, op0len, prec);
1143   mask1 = -top_bit_of (op1, op1len, prec);
1144   /* Add all of the explicitly defined elements.  */
1145 
1146   for (i = 0; i < len; i++)
1147     {
1148       o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1149       o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1150       x = o0 + o1 + carry;
1151       val[i] = x;
1152       old_carry = carry;
1153       carry = carry == 0 ? x < o0 : x <= o0;
1154     }
1155 
1156   if (len * HOST_BITS_PER_WIDE_INT < prec)
1157     {
1158       val[len] = mask0 + mask1 + carry;
1159       len++;
1160       if (overflow)
1161 	*overflow
1162 	  = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1163     }
1164   else if (overflow)
1165     {
1166       unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1167       if (sgn == SIGNED)
1168 	{
1169 	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1170 	  if ((HOST_WIDE_INT) (x << shift) < 0)
1171 	    {
1172 	      if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
1173 		*overflow = wi::OVF_UNDERFLOW;
1174 	      else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
1175 		*overflow = wi::OVF_OVERFLOW;
1176 	      else
1177 		*overflow = wi::OVF_NONE;
1178 	    }
1179 	  else
1180 	    *overflow = wi::OVF_NONE;
1181 	}
1182       else
1183 	{
1184 	  /* Put the MSBs of X and O0 in the top of the HWI.  */
1185 	  x <<= shift;
1186 	  o0 <<= shift;
1187 	  if (old_carry)
1188 	    *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1189 	  else
1190 	    *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1191 	}
1192     }
1193 
1194   return canonize (val, len, prec);
1195 }
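
/* Two examples of the overflow checks above, using an 8-bit precision:
   adding 0xc0 and 0x50 with SGN == UNSIGNED sets *OVERFLOW to OVF_OVERFLOW
   because the true sum 0x110 does not fit in 8 bits; adding 0x7f and 0x01
   with SGN == SIGNED also overflows, since the sign bit of the result
   block 0x80 differs from that of both operands.  */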
1196 
1197 /* Subroutines of the multiplication and division operations.  Unpack
1198    the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1199    HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
1200    uncompressing the top bit of INPUT[IN_LEN - 1].  */
1201 static void
1202 wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1203 	   unsigned int in_len, unsigned int out_len,
1204 	   unsigned int prec, signop sgn)
1205 {
1206   unsigned int i;
1207   unsigned int j = 0;
1208   unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1209   unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1210   HOST_WIDE_INT mask;
1211 
1212   if (sgn == SIGNED)
1213     {
1214       mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1215       mask &= HALF_INT_MASK;
1216     }
1217   else
1218     mask = 0;
1219 
1220   for (i = 0; i < blocks_needed - 1; i++)
1221     {
1222       HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1223       result[j++] = x;
1224       result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1225     }
1226 
1227   HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1228   if (small_prec)
1229     {
1230       if (sgn == SIGNED)
1231 	x = sext_hwi (x, small_prec);
1232       else
1233 	x = zext_hwi (x, small_prec);
1234     }
1235   result[j++] = x;
1236   result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1237 
1238   /* Smear the sign bit.  */
1239   while (j < out_len)
1240     result[j++] = mask;
1241 }
1242 
1243 /* The inverse of wi_unpack.  IN_LEN is the number of input
1244    blocks and PRECISION is the precision of the result.  Return the
1245    number of blocks in the canonicalized result.  */
1246 static unsigned int
1247 wi_pack (HOST_WIDE_INT *result,
1248 	 const unsigned HOST_HALF_WIDE_INT *input,
1249 	 unsigned int in_len, unsigned int precision)
1250 {
1251   unsigned int i = 0;
1252   unsigned int j = 0;
1253   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1254 
1255   while (i + 1 < in_len)
1256     {
1257       result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1258 		     | ((unsigned HOST_WIDE_INT) input[i + 1]
1259 			<< HOST_BITS_PER_HALF_WIDE_INT));
1260       i += 2;
1261     }
1262 
1263   /* Handle the case where in_len is odd.   For this we zero extend.  */
1264   if (in_len & 1)
1265     result[j++] = (unsigned HOST_WIDE_INT) input[i];
1266   else if (j < blocks_needed)
1267     result[j++] = 0;
1268   return canonize (result, j, precision);
1269 }
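
/* For example (64-bit HOST_WIDE_INT, 32-bit half blocks): wi_unpack splits
   the single block 0x1122334455667788 into the half blocks
   { 0x55667788, 0x11223344 }, low half first, and wi_pack reassembles them
   into the original block.  */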
1270 
1271 /* Multiply OP1 by OP2.  If HIGH is set, only the upper half of the
1272    result is returned.
1273 
1274    If HIGH is not set, throw away the upper half after the check is
1275    made to see if it overflows.  Unfortunately there is no better way
1276    to check for overflow than to do this.  If OVERFLOW is nonnull,
1277    record in *OVERFLOW whether the result overflowed.  SGN controls
1278    the signedness and is used to check overflow or if HIGH is set.
1279 
1280    NOTE: Overflow type for signed overflow is not yet implemented.  */
1281 unsigned int
1282 wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1283 		  unsigned int op1len, const HOST_WIDE_INT *op2val,
1284 		  unsigned int op2len, unsigned int prec, signop sgn,
1285 		  wi::overflow_type *overflow, bool high)
1286 {
1287   unsigned HOST_WIDE_INT o0, o1, k, t;
1288   unsigned int i;
1289   unsigned int j;
1290   unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1291   unsigned int half_blocks_needed = blocks_needed * 2;
1292   /* The sizes here are scaled to support a 2x largest mode by 2x
1293      largest mode yielding a 4x largest mode result.  This is what is
1294      needed by vpn.  */
1295 
1296   unsigned HOST_HALF_WIDE_INT
1297     u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1298   unsigned HOST_HALF_WIDE_INT
1299     v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1300   /* The '2' in 'R' is because we are internally doing a full
1301      multiply.  */
1302   unsigned HOST_HALF_WIDE_INT
1303     r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1304   HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1305 
1306   /* If the top level routine did not really pass in an overflow, then
1307      just make sure that we never attempt to set it.  */
1308   bool needs_overflow = (overflow != 0);
1309   if (needs_overflow)
1310     *overflow = wi::OVF_NONE;
1311 
1312   wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1313   wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1314 
1315   /* This is a surprisingly common case, so do it first.  */
1316   if (op1 == 0 || op2 == 0)
1317     {
1318       val[0] = 0;
1319       return 1;
1320     }
1321 
1322 #ifdef umul_ppmm
1323   if (sgn == UNSIGNED)
1324     {
1325       /* If the inputs are single HWIs and the output has room for at
1326 	 least two HWIs, we can use umul_ppmm directly.  */
1327       if (prec >= HOST_BITS_PER_WIDE_INT * 2
1328 	  && wi::fits_uhwi_p (op1)
1329 	  && wi::fits_uhwi_p (op2))
1330 	{
1331 	  /* This case never overflows.  */
1332 	  if (high)
1333 	    {
1334 	      val[0] = 0;
1335 	      return 1;
1336 	    }
1337 	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1338 	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1339 	    {
1340 	      val[2] = 0;
1341 	      return 3;
1342 	    }
1343 	  return 1 + (val[1] != 0 || val[0] < 0);
1344 	}
1345       /* Likewise if the output is a full single HWI, except that the
1346 	 upper HWI of the result is only used for determining overflow.
1347 	 (We handle this case inline when overflow isn't needed.)  */
1348       else if (prec == HOST_BITS_PER_WIDE_INT)
1349 	{
1350 	  unsigned HOST_WIDE_INT upper;
1351 	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1352 	  if (needs_overflow)
1353 	    /* Unsigned overflow can only be +OVERFLOW.  */
1354 	    *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1355 	  if (high)
1356 	    val[0] = upper;
1357 	  return 1;
1358 	}
1359     }
1360 #endif
1361 
1362   /* Handle multiplications by 1.  */
1363   if (op1 == 1)
1364     {
1365       if (high)
1366 	{
1367 	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1368 	  return 1;
1369 	}
1370       for (i = 0; i < op2len; i++)
1371 	val[i] = op2val[i];
1372       return op2len;
1373     }
1374   if (op2 == 1)
1375     {
1376       if (high)
1377 	{
1378 	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1379 	  return 1;
1380 	}
1381       for (i = 0; i < op1len; i++)
1382 	val[i] = op1val[i];
1383       return op1len;
1384     }
1385 
1386   /* If we need to check for overflow, we can only do half wide
1387      multiplies quickly because we need to look at the top bits to
1388      check for the overflow.  */
1389   if ((high || needs_overflow)
1390       && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1391     {
1392       unsigned HOST_WIDE_INT r;
1393 
1394       if (sgn == SIGNED)
1395 	{
1396 	  o0 = op1.to_shwi ();
1397 	  o1 = op2.to_shwi ();
1398 	}
1399       else
1400 	{
1401 	  o0 = op1.to_uhwi ();
1402 	  o1 = op2.to_uhwi ();
1403 	}
1404 
1405       r = o0 * o1;
1406       if (needs_overflow)
1407 	{
1408 	  if (sgn == SIGNED)
1409 	    {
1410 	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1411 		/* FIXME: Signed overflow type is not implemented yet.  */
1412 		*overflow = OVF_UNKNOWN;
1413 	    }
1414 	  else
1415 	    {
1416 	      if ((r >> prec) != 0)
1417 		/* Unsigned overflow can only be +OVERFLOW.  */
1418 		*overflow = OVF_OVERFLOW;
1419 	    }
1420 	}
1421       val[0] = high ? r >> prec : r;
1422       return 1;
1423     }
1424 
1425   /* We do unsigned mul and then correct it.  */
1426   wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1427   wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1428 
1429   /* The 2 is for a full mult.  */
1430   memset (r, 0, half_blocks_needed * 2
1431 	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1432 
1433   for (j = 0; j < half_blocks_needed; j++)
1434     {
1435       k = 0;
1436       for (i = 0; i < half_blocks_needed; i++)
1437 	{
1438 	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1439 	       + r[i + j] + k);
1440 	  r[i + j] = t & HALF_INT_MASK;
1441 	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1442 	}
1443       r[j + half_blocks_needed] = k;
1444     }
1445 
1446   /* We did unsigned math above.  For signed we must adjust the
1447      product (assuming we need to see that).  */
1448   if (sgn == SIGNED && (high || needs_overflow))
1449     {
1450       unsigned HOST_WIDE_INT b;
1451       if (wi::neg_p (op1))
1452 	{
1453 	  b = 0;
1454 	  for (i = 0; i < half_blocks_needed; i++)
1455 	    {
1456 	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1457 		- (unsigned HOST_WIDE_INT)v[i] - b;
1458 	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
1459 	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1460 	    }
1461 	}
1462       if (wi::neg_p (op2))
1463 	{
1464 	  b = 0;
1465 	  for (i = 0; i < half_blocks_needed; i++)
1466 	    {
1467 	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1468 		- (unsigned HOST_WIDE_INT)u[i] - b;
1469 	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
1470 	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1471 	    }
1472 	}
1473     }
1474 
1475   if (needs_overflow)
1476     {
1477       HOST_WIDE_INT top;
1478 
1479       /* For unsigned, overflow is true if any of the top bits are set.
1480 	 For signed, overflow is true if any of the top bits are not equal
1481 	 to the sign bit.  */
1482       if (sgn == UNSIGNED)
1483 	top = 0;
1484       else
1485 	{
1486 	  top = r[(half_blocks_needed) - 1];
1487 	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1488 	  top &= mask;
1489 	}
1490 
1491       for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1492 	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1493 	  /* FIXME: Signed overflow type is not implemented yet.  */
1494 	  *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
1495     }
1496 
1497   int r_offset = high ? half_blocks_needed : 0;
1498   return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1499 }
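
/* A small example of the overflow handling above: with an 8-bit precision
   and SGN == UNSIGNED, multiplying 0x10 by 0x10 takes the half-wide fast
   path, computes 0x100, and sets *OVERFLOW to OVF_OVERFLOW because the
   product does not fit in 8 bits.  */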
1500 
1501 /* Compute the population count of X.  */
1502 int
1503 wi::popcount (const wide_int_ref &x)
1504 {
1505   unsigned int i;
1506   int count;
1507 
1508   /* The high order block is special if it is the last block and the
1509      precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
1510      have to clear out any ones above the precision before doing
1511      popcount on this block.  */
1512   count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1513   unsigned int stop = x.len;
1514   if (count < 0)
1515     {
1516       count = popcount_hwi (x.uhigh () << -count);
1517       stop -= 1;
1518     }
1519   else
1520     {
1521       if (x.sign_mask () >= 0)
1522 	count = 0;
1523     }
1524 
1525   for (i = 0; i < stop; ++i)
1526     count += popcount_hwi (x.val[i]);
1527 
1528   return count;
1529 }
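
/* For example (assuming a 64-bit HOST_WIDE_INT), the value -1 at a
   precision of 70 bits is stored as a single -1 block; the initial COUNT
   of 70 - 64 = 6 accounts for the six implicit sign-extended bits above
   the stored block, and the final result is 70.  */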
1530 
1531 /* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
1532    whether the result overflows when OP0 and OP1 are treated as having
1533    signedness SGN.  Return the number of blocks in VAL.  */
1534 unsigned int
1535 wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1536 	       unsigned int op0len, const HOST_WIDE_INT *op1,
1537 	       unsigned int op1len, unsigned int prec,
1538 	       signop sgn, wi::overflow_type *overflow)
1539 {
1540   unsigned HOST_WIDE_INT o0 = 0;
1541   unsigned HOST_WIDE_INT o1 = 0;
1542   unsigned HOST_WIDE_INT x = 0;
1543   /* Subtract the elements directly, propagating a borrow from the low
1544      elements to the high ones, just as the addition above propagates a
1545      carry.  */
1546   unsigned HOST_WIDE_INT borrow = 0;
1547   unsigned HOST_WIDE_INT old_borrow = 0;
1548 
1549   unsigned HOST_WIDE_INT mask0, mask1;
1550   unsigned int i;
1551 
1552   unsigned int len = MAX (op0len, op1len);
1553   mask0 = -top_bit_of (op0, op0len, prec);
1554   mask1 = -top_bit_of (op1, op1len, prec);
1555 
1556   /* Subtract all of the explicitly defined elements.  */
1557   for (i = 0; i < len; i++)
1558     {
1559       o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1560       o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1561       x = o0 - o1 - borrow;
1562       val[i] = x;
1563       old_borrow = borrow;
1564       borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1565     }
1566 
1567   if (len * HOST_BITS_PER_WIDE_INT < prec)
1568     {
1569       val[len] = mask0 - mask1 - borrow;
1570       len++;
1571       if (overflow)
1572 	*overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
1573     }
1574   else if (overflow)
1575     {
1576       unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1577       if (sgn == SIGNED)
1578 	{
1579 	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1580 	  if ((HOST_WIDE_INT) (x << shift) < 0)
1581 	    {
1582 	      if (o0 > o1)
1583 		*overflow = OVF_UNDERFLOW;
1584 	      else if (o0 < o1)
1585 		*overflow = OVF_OVERFLOW;
1586 	      else
1587 		*overflow = OVF_NONE;
1588 	    }
1589 	  else
1590 	    *overflow = OVF_NONE;
1591 	}
1592       else
1593 	{
1594 	  /* Put the MSBs of X and O0 in the top of the HWI.  */
1595 	  x <<= shift;
1596 	  o0 <<= shift;
1597 	  if (old_borrow)
1598 	    *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
1599 	  else
1600 	    *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
1601 	}
1602     }
1603 
1604   return canonize (val, len, prec);
1605 }
1606 
1607 
1608 /*
1609  * Division and Mod
1610  */
1611 
1612 /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
1613    algorithm is a small modification of the algorithm in Hacker's
1614    Delight by Warren, which itself is a small modification of Knuth's
1615    algorithm.  M is the number of significant elements of B_DIVIDEND,
1616    although there needs to be at least one extra element of B_DIVIDEND
1617    allocated; N is the number of elements of B_DIVISOR.  */
1618 static void
1619 divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1620 		   unsigned HOST_HALF_WIDE_INT *b_remainder,
1621 		   unsigned HOST_HALF_WIDE_INT *b_dividend,
1622 		   unsigned HOST_HALF_WIDE_INT *b_divisor,
1623 		   int m, int n)
1624 {
1625   /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
1626      HOST_WIDE_INT and stored in the lower bits of each word.  This
1627      algorithm should work properly on both 32 and 64 bit
1628      machines.  */
1629   unsigned HOST_WIDE_INT b
1630     = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1631   unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
1632   unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
1633   unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
1634   HOST_WIDE_INT t, k;
1635   int i, j, s;
1636 
1637   /* Single digit divisor.  */
1638   if (n == 1)
1639     {
1640       k = 0;
1641       for (j = m - 1; j >= 0; j--)
1642 	{
1643 	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1644 	  k = ((k * b + b_dividend[j])
1645 	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
1646 		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
1647 	}
1648       b_remainder[0] = k;
1649       return;
1650     }
1651 
1652   s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1653 
1654   if (s)
1655     {
1656       /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
1657 	 algorithm, we can overwrite b_dividend and b_divisor, so we do
1658 	 that.  */
1659       for (i = n - 1; i > 0; i--)
1660 	b_divisor[i] = (b_divisor[i] << s)
1661 	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1662       b_divisor[0] = b_divisor[0] << s;
1663 
1664       b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1665       for (i = m - 1; i > 0; i--)
1666 	b_dividend[i] = (b_dividend[i] << s)
1667 	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1668       b_dividend[0] = b_dividend[0] << s;
1669     }
1670 
1671   /* Main loop.  */
1672   for (j = m - n; j >= 0; j--)
1673     {
1674       qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1675       rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1676     again:
1677       if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1678 	{
1679 	  qhat -= 1;
1680 	  rhat += b_divisor[n-1];
1681 	  if (rhat < b)
1682 	    goto again;
1683 	}
1684 
1685       /* Multiply and subtract.  */
1686       k = 0;
1687       for (i = 0; i < n; i++)
1688 	{
1689 	  p = qhat * b_divisor[i];
1690 	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1691 	  b_dividend[i + j] = t;
1692 	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1693 	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1694 	}
1695       t = b_dividend[j+n] - k;
1696       b_dividend[j+n] = t;
1697 
1698       b_quotient[j] = qhat;
1699       if (t < 0)
1700 	{
1701 	  b_quotient[j] -= 1;
1702 	  k = 0;
1703 	  for (i = 0; i < n; i++)
1704 	    {
1705 	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1706 	      b_dividend[i+j] = t;
1707 	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1708 	    }
1709 	  b_dividend[j+n] += k;
1710 	}
1711     }
1712   if (s)
1713     for (i = 0; i < n; i++)
1714       b_remainder[i] = (b_dividend[i] >> s)
1715 	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1716   else
1717     for (i = 0; i < n; i++)
1718       b_remainder[i] = b_dividend[i];
1719 }
1720 
1721 
1722 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1723    the result.  If QUOTIENT is nonnull, store the value of the quotient
1724    there and return the number of blocks in it.  The return value is
1725    not defined otherwise.  If REMAINDER is nonnull, store the value
1726    of the remainder there and store the number of blocks in
1727    *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
1728    the division overflowed.  */
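/* Most callers reach this through the wi:: division and modulus wrappers
   in wide-int.h (wi::div_trunc, wi::mod_trunc and friends) rather than
   calling it directly.  */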
1729 unsigned int
1730 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1731 		     HOST_WIDE_INT *remainder,
1732 		     const HOST_WIDE_INT *dividend_val,
1733 		     unsigned int dividend_len, unsigned int dividend_prec,
1734 		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1735 		     unsigned int divisor_prec, signop sgn,
1736 		     wi::overflow_type *oflow)
1737 {
1738   unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1739   unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1740   unsigned HOST_HALF_WIDE_INT
1741     b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1742   unsigned HOST_HALF_WIDE_INT
1743     b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1744   unsigned HOST_HALF_WIDE_INT
1745     b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1746   unsigned HOST_HALF_WIDE_INT
1747     b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1748   unsigned int m, n;
1749   bool dividend_neg = false;
1750   bool divisor_neg = false;
1751   bool overflow = false;
1752   wide_int neg_dividend, neg_divisor;
1753 
1754   wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1755 					   dividend_prec);
1756   wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1757 					  divisor_prec);
1758   if (divisor == 0)
1759     overflow = true;
1760 
1761   /* The smallest signed number / -1 causes overflow.  The dividend_len
1762      check is for speed rather than correctness.  */
1763   if (sgn == SIGNED
1764       && dividend_len == BLOCKS_NEEDED (dividend_prec)
1765       && divisor == -1
1766       && wi::only_sign_bit_p (dividend))
1767     overflow = true;
1768 
1769   /* Handle the overflow cases.  Viewed as an unsigned value, the quotient of
1770      (signed min / -1) has the same representation as the original dividend.
1771      We have traditionally made division by zero act as division by one,
1772      so there too we use the original dividend.  */
1773   if (overflow)
1774     {
1775       if (remainder)
1776 	{
1777 	  *remainder_len = 1;
1778 	  remainder[0] = 0;
1779 	}
1780       if (oflow)
1781 	*oflow = OVF_OVERFLOW;
1782       if (quotient)
1783 	for (unsigned int i = 0; i < dividend_len; ++i)
1784 	  quotient[i] = dividend_val[i];
1785       return dividend_len;
1786     }
1787 
1788   if (oflow)
1789     *oflow = OVF_NONE;
1790 
1791   /* Do it on the host if you can.  */
1792   if (sgn == SIGNED
1793       && wi::fits_shwi_p (dividend)
1794       && wi::fits_shwi_p (divisor))
1795     {
1796       HOST_WIDE_INT o0 = dividend.to_shwi ();
1797       HOST_WIDE_INT o1 = divisor.to_shwi ();
1798 
1799       if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1800 	{
1801 	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1802 	  if (quotient)
1803 	    {
1804 	      quotient[0] = HOST_WIDE_INT_MIN;
1805 	      quotient[1] = 0;
1806 	    }
1807 	  if (remainder)
1808 	    {
1809 	      remainder[0] = 0;
1810 	      *remainder_len = 1;
1811 	    }
1812 	  return 2;
1813 	}
1814       else
1815 	{
1816 	  if (quotient)
1817 	    quotient[0] = o0 / o1;
1818 	  if (remainder)
1819 	    {
1820 	      remainder[0] = o0 % o1;
1821 	      *remainder_len = 1;
1822 	    }
1823 	  return 1;
1824 	}
1825     }
1826 
1827   if (sgn == UNSIGNED
1828       && wi::fits_uhwi_p (dividend)
1829       && wi::fits_uhwi_p (divisor))
1830     {
1831       unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1832       unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1833       unsigned int quotient_len = 1;
1834 
1835       if (quotient)
1836 	{
1837 	  quotient[0] = o0 / o1;
1838 	  quotient_len = canonize_uhwi (quotient, dividend_prec);
1839 	}
1840       if (remainder)
1841 	{
1842 	  remainder[0] = o0 % o1;
1843 	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
1844 	}
1845       return quotient_len;
1846     }
1847 
1848   /* Make the divisor and dividend positive and remember what we
1849      did.  */
1850   if (sgn == SIGNED)
1851     {
1852       if (wi::neg_p (dividend))
1853 	{
1854 	  neg_dividend = -dividend;
1855 	  dividend = neg_dividend;
1856 	  dividend_neg = true;
1857 	}
1858       if (wi::neg_p (divisor))
1859 	{
1860 	  neg_divisor = -divisor;
1861 	  divisor = neg_divisor;
1862 	  divisor_neg = true;
1863 	}
1864     }
1865 
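  /* From here on the division works on unsigned magnitudes; the recorded
     signs are reapplied to the quotient and remainder after the results
     are packed below.  */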
1866   wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1867 	     dividend_blocks_needed, dividend_prec, sgn);
1868   wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1869 	     divisor_blocks_needed, divisor_prec, sgn);
1870 
1871   m = dividend_blocks_needed;
1872   b_dividend[m] = 0;
1873   while (m > 1 && b_dividend[m - 1] == 0)
1874     m--;
1875 
1876   n = divisor_blocks_needed;
1877   while (n > 1 && b_divisor[n - 1] == 0)
1878     n--;
1879 
1880   memset (b_quotient, 0, sizeof (b_quotient));
1881 
1882   divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1883 
1884   unsigned int quotient_len = 0;
1885   if (quotient)
1886     {
1887       quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1888       /* The quotient is neg if exactly one of the divisor or dividend is
1889 	 neg.  */
1890       if (dividend_neg != divisor_neg)
1891 	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1892 				      quotient_len, dividend_prec,
1893 				      UNSIGNED, 0);
1894     }
1895 
1896   if (remainder)
1897     {
1898       *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1899       /* The remainder is always the same sign as the dividend.  */
1900       if (dividend_neg)
1901 	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1902 					*remainder_len, dividend_prec,
1903 					UNSIGNED, 0);
1904     }
1905 
1906   return quotient_len;
1907 }
1908 
1909 /*
1910  * Shifting, rotating and extraction.
1911  */
1912 
1913 /* Left shift XVAL by SHIFT and store the result in VAL.  Return the
1914    number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
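/* For example, with 64-bit blocks a shift of 70 is handled as one
   whole-block shift (SKIP == 1) followed by a 6-bit subblock shift.  */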
1915 unsigned int
1916 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1917 		  unsigned int xlen, unsigned int precision,
1918 		  unsigned int shift)
1919 {
1920   /* Split the shift into a whole-block shift and a subblock shift.  */
1921   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1922   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1923 
1924   /* The whole-block shift fills with zeros.  */
1925   unsigned int len = BLOCKS_NEEDED (precision);
1926   for (unsigned int i = 0; i < skip; ++i)
1927     val[i] = 0;
1928 
1929   /* It's easier to handle the simple block case specially.  */
1930   if (small_shift == 0)
1931     for (unsigned int i = skip; i < len; ++i)
1932       val[i] = safe_uhwi (xval, xlen, i - skip);
1933   else
1934     {
1935       /* The first unfilled output block is a left shift of the first
1936 	 block in XVAL.  The other output blocks contain bits from two
1937 	 consecutive input blocks.  */
1938       unsigned HOST_WIDE_INT carry = 0;
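      /* In the loop below, -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT is simply
	 HOST_BITS_PER_WIDE_INT - SMALL_SHIFT, since SMALL_SHIFT is known to
	 be nonzero on this path.  */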
1939       for (unsigned int i = skip; i < len; ++i)
1940 	{
1941 	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1942 	  val[i] = (x << small_shift) | carry;
1943 	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1944 	}
1945     }
1946   return canonize (val, len, precision);
1947 }
1948 
1949 /* Right shift XVAL by SHIFT and store the result in VAL.  Return the
1950    number of blocks in VAL.  The input has XPRECISION bits and the
1951    output has XPRECISION - SHIFT bits.  */
1952 static unsigned int
1953 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1954 		     unsigned int xlen, unsigned int xprecision,
1955 		     unsigned int shift)
1956 {
1957   /* Split the shift into a whole-block shift and a subblock shift.  */
1958   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1959   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1960 
1961   /* Work out how many blocks are needed to store the significant bits
1962      (excluding the upper zeros or signs).  */
1963   unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1964 
1965   /* It's easier to handle the simple block case specially.  */
1966   if (small_shift == 0)
1967     for (unsigned int i = 0; i < len; ++i)
1968       val[i] = safe_uhwi (xval, xlen, i + skip);
1969   else
1970     {
1971       /* Each output block but the last is a combination of two input blocks.
1972 	 The last block is a right shift of the last block in XVAL.  */
1973       unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1974       for (unsigned int i = 0; i < len; ++i)
1975 	{
1976 	  val[i] = curr >> small_shift;
1977 	  curr = safe_uhwi (xval, xlen, i + skip + 1);
1978 	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1979 	}
1980     }
1981   return len;
1982 }
1983 
1984 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1985    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
1986    VAL has PRECISION bits.  */
1987 unsigned int
1988 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1989 		   unsigned int xlen, unsigned int xprecision,
1990 		   unsigned int precision, unsigned int shift)
1991 {
1992   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1993 
1994   /* The value we just created has precision XPRECISION - SHIFT.
1995      Zero-extend it to wider precisions.  */
1996   if (precision > xprecision - shift)
1997     {
1998       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1999       if (small_prec)
2000 	val[len - 1] = zext_hwi (val[len - 1], small_prec);
2001       else if (val[len - 1] < 0)
2002 	{
2003 	  /* Add a new block with a zero. */
2004 	  val[len++] = 0;
2005 	  return len;
2006 	}
2007     }
2008   return canonize (val, len, precision);
2009 }
2010 
2011 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
2012    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
2013    VAL has PRECISION bits.  */
2014 unsigned int
2015 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
2016 		   unsigned int xlen, unsigned int xprecision,
2017 		   unsigned int precision, unsigned int shift)
2018 {
2019   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
2020 
2021   /* The value we just created has precision XPRECISION - SHIFT.
2022      Sign-extend it to wider precisions.  */
2023   if (precision > xprecision - shift)
2024     {
2025       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
2026       if (small_prec)
2027 	val[len - 1] = sext_hwi (val[len - 1], small_prec);
2028     }
2029   return canonize (val, len, precision);
2030 }
2031 
2032 /* Return the number of leading (upper) zeros in X.  */
2033 int
2034 wi::clz (const wide_int_ref &x)
2035 {
2036   /* Calculate how many bits there are above the highest represented block.  */
2037   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
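  /* For example, a 128-bit value stored in a single block holding 5 gives
     COUNT == 64 here and clz_hwi (5) == 61 below, for 125 leading zeros.  */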
2038 
2039   unsigned HOST_WIDE_INT high = x.uhigh ();
2040   if (count < 0)
2041     /* The upper -COUNT bits of HIGH are not part of the value.
2042        Clear them out.  */
2043     high = (high << -count) >> -count;
2044   else if (x.sign_mask () < 0)
2045     /* The upper bit is set, so there are no leading zeros.  */
2046     return 0;
2047 
2048   /* We don't need to look below HIGH.  Either HIGH is nonzero,
2049      or the top bit of the block below is nonzero; clz_hwi is
2050      HOST_BITS_PER_WIDE_INT in the latter case.  */
2051   return count + clz_hwi (high);
2052 }
2053 
2054 /* Return the number of redundant sign bits in X.  (That is, the number
2055    of bits immediately below the sign bit that have the same value as
2056    the sign bit.)  */
2057 int
2058 wi::clrsb (const wide_int_ref &x)
2059 {
2060   /* Calculate how many bits there are above the highest represented block.  */
2061   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2062 
2063   unsigned HOST_WIDE_INT high = x.uhigh ();
2064   unsigned HOST_WIDE_INT mask = -1;
2065   if (count < 0)
2066     {
2067       /* The upper -COUNT bits of HIGH are not part of the value.
2068 	 Clear them from both MASK and HIGH.  */
2069       mask >>= -count;
2070       high &= mask;
2071     }
2072 
2073   /* If the top bit is 1, count the number of leading 1s.  If the top
2074      bit is zero, count the number of leading zeros.  */
2075   if (high > mask / 2)
2076     high ^= mask;
2077 
2078   /* There are no sign bits below the top block, so we don't need to look
2079      beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2080      HIGH is 0.  */
2081   return count + clz_hwi (high) - 1;
2082 }
2083 
2084 /* Return the number of trailing (lower) zeros in X.  */
2085 int
2086 wi::ctz (const wide_int_ref &x)
2087 {
2088   if (x.len == 1 && x.ulow () == 0)
2089     return x.precision;
2090 
2091   /* Having dealt with the zero case, there must be a block with a
2092      nonzero bit.  We don't care about the bits above the first 1.  */
2093   unsigned int i = 0;
2094   while (x.val[i] == 0)
2095     ++i;
2096   return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2097 }
2098 
2099 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2100    return -1.  */
2101 int
2102 wi::exact_log2 (const wide_int_ref &x)
2103 {
2104   /* Reject cases where there are implicit -1 blocks above HIGH.  */
2105   if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2106     return -1;
2107 
2108   /* Set CRUX to the index of the entry that should be nonzero.
2109      If the top block is zero then the next lowest block (if any)
2110      must have the high bit set.  */
2111   unsigned int crux = x.len - 1;
2112   if (crux > 0 && x.val[crux] == 0)
2113     crux -= 1;
2114 
2115   /* Check that all lower blocks are zero.  */
2116   for (unsigned int i = 0; i < crux; ++i)
2117     if (x.val[i] != 0)
2118       return -1;
2119 
2120   /* Get a zero-extended form of block CRUX.  */
2121   unsigned HOST_WIDE_INT hwi = x.val[crux];
2122   if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2123     hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2124 
2125   /* Now it's down to whether HWI is a power of 2.  */
2126   int res = ::exact_log2 (hwi);
2127   if (res >= 0)
2128     res += crux * HOST_BITS_PER_WIDE_INT;
2129   return res;
2130 }
2131 
2132 /* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
2133 int
2134 wi::floor_log2 (const wide_int_ref &x)
2135 {
2136   return x.precision - 1 - clz (x);
2137 }
2138 
2139 /* Return the index of the first (lowest) set bit in X, counting from 1.
2140    Return 0 if X is 0.  */
2141 int
2142 wi::ffs (const wide_int_ref &x)
2143 {
2144   return eq_p (x, 0) ? 0 : ctz (x) + 1;
2145 }
2146 
2147 /* Return true if sign-extending X to have precision PRECISION would give
2148    the minimum signed value at that precision.  */
2149 bool
2150 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2151 {
2152   return ctz (x) + 1 == int (precision);
2153 }
2154 
2155 /* Return true if X represents the minimum signed value.  */
2156 bool
2157 wi::only_sign_bit_p (const wide_int_ref &x)
2158 {
2159   return only_sign_bit_p (x, x.precision);
2160 }
2161 
2162 /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
2163    down to the previous value that has no bits set outside MASK.
2164    This rounding wraps for signed values if VAL is negative and
2165    the top bit of MASK is clear.
2166 
2167    For example, round_down_for_mask (6, 0xf1) would give 1 and
2168    round_down_for_mask (24, 0xf1) would give 17.  */
2169 
2170 wide_int
2171 wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
2172 {
2173   /* Get the bits in VAL that are outside the mask.  */
2174   wide_int extra_bits = wi::bit_and_not (val, mask);
2175   if (extra_bits == 0)
2176     return val;
2177 
2178   /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2179      below that bit.  */
2180   unsigned int precision = val.get_precision ();
2181   wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
2182 				  false, precision);
2183 
2184   /* Clear the bits that aren't in MASK, but ensure that all bits
2185      in MASK below the top cleared bit are set.  */
2186   return (val & mask) | (mask & lower_mask);
2187 }
2188 
2189 /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
2190    up to the next value that has no bits set outside MASK.  The rounding
2191    wraps if there are no suitable values greater than VAL.
2192 
2193    For example, round_up_for_mask (6, 0xf1) would give 16 and
2194    round_up_for_mask (24, 0xf1) would give 32.  */
2195 
2196 wide_int
2197 wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
2198 {
2199   /* Get the bits in VAL that are outside the mask.  */
2200   wide_int extra_bits = wi::bit_and_not (val, mask);
2201   if (extra_bits == 0)
2202     return val;
2203 
2204   /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
2205   unsigned int precision = val.get_precision ();
2206   wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
2207 				  true, precision);
2208 
2209   /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
2210   upper_mask &= mask;
2211 
2212   /* Conceptually we need to:
2213 
2214      - clear bits of VAL outside UPPER_MASK
2215      - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2216      - propagate the carry through the bits of VAL in UPPER_MASK
2217 
2218      If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2219      reaches that bit and the process leaves all lower bits clear.
2220      If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
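  /* Worked example: for round_up_for_mask (6, 0xf1), EXTRA_BITS is 6,
     UPPER_MASK ends up as 0xf0, TMP below is 0xf0, and (VAL | TMP) & -TMP
     gives 0x10, i.e. 16.  */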
2221   wide_int tmp = wi::bit_and_not (upper_mask, val);
2222 
2223   return (val | tmp) & -tmp;
2224 }
2225 
2226 /*
2227  * Private utilities.
2228  */
2229 
2230 void gt_ggc_mx (widest_int *) { }
2231 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2232 void gt_pch_nx (widest_int *) { }
2233 
2234 template void wide_int::dump () const;
2235 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2236 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2237 template void offset_int::dump () const;
2238 template void widest_int::dump () const;
2239 
2240 /* We could add all the above ::dump variants here, but wide_int and
2241    widest_int should handle the common cases.  Besides, you can always
2242    call the dump method directly.  */
2243 
2244 DEBUG_FUNCTION void
2245 debug (const wide_int &ref)
2246 {
2247   ref.dump ();
2248 }
2249 
2250 DEBUG_FUNCTION void
2251 debug (const wide_int *ptr)
2252 {
2253   if (ptr)
2254     debug (*ptr);
2255   else
2256     fprintf (stderr, "<nil>\n");
2257 }
2258 
2259 DEBUG_FUNCTION void
2260 debug (const widest_int &ref)
2261 {
2262   ref.dump ();
2263 }
2264 
2265 DEBUG_FUNCTION void
2266 debug (const widest_int *ptr)
2267 {
2268   if (ptr)
2269     debug (*ptr);
2270   else
2271     fprintf (stderr, "<nil>\n");
2272 }
2273 
2274 #if CHECKING_P
2275 
2276 namespace selftest {
2277 
2278 /* Selftests for wide ints.  We run these multiple times, once per type.  */
2279 
2280 /* Helper function for building a test value.  */
2281 
2282 template <class VALUE_TYPE>
2283 static VALUE_TYPE
2284 from_int (int i);
2285 
2286 /* Specializations of from_int for each wide-int type.  */
2287 
2288 /* Specialization for VALUE_TYPE == wide_int.  */
2289 
2290 template <>
2291 wide_int
2292 from_int (int i)
2293 {
2294   return wi::shwi (i, 32);
2295 }
2296 
2297 /* Specialization for VALUE_TYPE == offset_int.  */
2298 
2299 template <>
2300 offset_int
2301 from_int (int i)
2302 {
2303   return offset_int (i);
2304 }
2305 
2306 /* Specialization for VALUE_TYPE == widest_int.  */
2307 
2308 template <>
2309 widest_int
2310 from_int (int i)
2311 {
2312   return widest_int (i);
2313 }
2314 
2315 /* Verify that print_dec (WI, ..., SGN) gives the expected string
2316    representation (using base 10).  */
2317 
2318 static void
2319 assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
2320 {
2321   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2322   print_dec (wi, buf, sgn);
2323   ASSERT_STREQ (expected, buf);
2324 }
2325 
2326 /* Likewise for base 16.  */
2327 
2328 static void
2329 assert_hexeq (const char *expected, const wide_int_ref &wi)
2330 {
2331   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2332   print_hex (wi, buf);
2333   ASSERT_STREQ (expected, buf);
2334 }
2335 
2336 /* Test cases.  */
2337 
2338 /* Verify that print_dec and print_hex work for VALUE_TYPE.  */
2339 
2340 template <class VALUE_TYPE>
2341 static void
2342 test_printing ()
2343 {
2344   VALUE_TYPE a = from_int<VALUE_TYPE> (42);
2345   assert_deceq ("42", a, SIGNED);
2346   assert_hexeq ("0x2a", a);
2347   assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2348   assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2349   assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
2350   if (WIDE_INT_MAX_PRECISION > 128)
2351     {
2352       assert_hexeq ("0x20000000000000000fffffffffffffffe",
2353 		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2354       assert_hexeq ("0x200000000000004000123456789abcdef",
2355 		    wi::lshift (1, 129) + wi::lshift (1, 74)
2356 		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
2357     }
2358 }
2359 
2360 /* Verify that various operations work correctly for VALUE_TYPE,
2361    unary and binary, using both function syntax and overloaded
2362    operators.  */
2363 
2364 template <class VALUE_TYPE>
2365 static void
2366 test_ops ()
2367 {
2368   VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2369   VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2370 
2371   /* Using functions.  */
2372   assert_deceq ("-7", wi::neg (a), SIGNED);
2373   assert_deceq ("10", wi::add (a, b), SIGNED);
2374   assert_deceq ("4", wi::sub (a, b), SIGNED);
2375   assert_deceq ("-4", wi::sub (b, a), SIGNED);
2376   assert_deceq ("21", wi::mul (a, b), SIGNED);
2377 
2378   /* Using operators.  */
2379   assert_deceq ("-7", -a, SIGNED);
2380   assert_deceq ("10", a + b, SIGNED);
2381   assert_deceq ("4", a - b, SIGNED);
2382   assert_deceq ("-4", b - a, SIGNED);
2383   assert_deceq ("21", a * b, SIGNED);
2384 }
2385 
2386 /* Verify that various comparisons work correctly for VALUE_TYPE.  */
2387 
2388 template <class VALUE_TYPE>
2389 static void
2390 test_comparisons ()
2391 {
2392   VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2393   VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2394 
2395   /* == */
2396   ASSERT_TRUE (wi::eq_p (a, a));
2397   ASSERT_FALSE (wi::eq_p (a, b));
2398 
2399   /* != */
2400   ASSERT_TRUE (wi::ne_p (a, b));
2401   ASSERT_FALSE (wi::ne_p (a, a));
2402 
2403   /* < */
2404   ASSERT_FALSE (wi::lts_p (a, a));
2405   ASSERT_FALSE (wi::lts_p (a, b));
2406   ASSERT_TRUE (wi::lts_p (b, a));
2407 
2408   /* <= */
2409   ASSERT_TRUE (wi::les_p (a, a));
2410   ASSERT_FALSE (wi::les_p (a, b));
2411   ASSERT_TRUE (wi::les_p (b, a));
2412 
2413   /* > */
2414   ASSERT_FALSE (wi::gts_p (a, a));
2415   ASSERT_TRUE (wi::gts_p (a, b));
2416   ASSERT_FALSE (wi::gts_p (b, a));
2417 
2418   /* >= */
2419   ASSERT_TRUE (wi::ges_p (a, a));
2420   ASSERT_TRUE (wi::ges_p (a, b));
2421   ASSERT_FALSE (wi::ges_p (b, a));
2422 
2423   /* comparison */
2424   ASSERT_EQ (-1, wi::cmps (b, a));
2425   ASSERT_EQ (0, wi::cmps (a, a));
2426   ASSERT_EQ (1, wi::cmps (a, b));
2427 }
2428 
2429 /* Run all of the selftests, using the given VALUE_TYPE.  */
2430 
2431 template <class VALUE_TYPE>
2432 static void run_all_wide_int_tests ()
2433 {
2434   test_printing <VALUE_TYPE> ();
2435   test_ops <VALUE_TYPE> ();
2436   test_comparisons <VALUE_TYPE> ();
2437 }
2438 
2439 /* Test overflow conditions.  */
2440 
2441 static void
2442 test_overflow ()
2443 {
2444   static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
2445   static int offsets[] = { 16, 1, 0 };
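  /* OFFSETS are distances below the unsigned maximum: an offset of 0 makes
     the additions below overflow, while the nonzero offsets make the first
     subtraction wrap below zero.  */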
2446   for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
2447     for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
2448       {
2449 	int prec = precs[i];
2450 	int offset = offsets[j];
2451 	wi::overflow_type overflow;
2452 	wide_int sum, diff;
2453 
2454 	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
2455 		       UNSIGNED, &overflow);
2456 	ASSERT_EQ (sum, -offset);
2457 	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2458 
2459 	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
2460 		       UNSIGNED, &overflow);
2461 	ASSERT_EQ (sum, -offset);
2462 	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
2463 
2464 	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2465 			wi::max_value (prec, UNSIGNED),
2466 			UNSIGNED, &overflow);
2467 	ASSERT_EQ (diff, -offset);
2468 	ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
2469 
2470 	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
2471 			wi::max_value (prec, UNSIGNED) - 1,
2472 			UNSIGNED, &overflow);
2473 	ASSERT_EQ (diff, 1 - offset);
2474 	ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
2475     }
2476 }
2477 
2478 /* Test the round_{down,up}_for_mask functions.  */
2479 
2480 static void
2481 test_round_for_mask ()
2482 {
2483   unsigned int prec = 18;
2484   ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
2485 					  wi::shwi (0xf1, prec)));
2486   ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
2487 					wi::shwi (0xf1, prec)));
2488 
2489   ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
2490 					 wi::shwi (0xf1, prec)));
2491   ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
2492 					wi::shwi (0xf1, prec)));
2493 
2494   ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
2495 					  wi::shwi (0xf1, prec)));
2496   ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
2497 					wi::shwi (0xf1, prec)));
2498 
2499   ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
2500 					     wi::shwi (0x111, prec)));
2501   ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
2502 					   wi::shwi (0x111, prec)));
2503 
2504   ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
2505 					   wi::shwi (0xfc, prec)));
2506   ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
2507 					 wi::shwi (0xfc, prec)));
2508 
2509   ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
2510 					     wi::shwi (0xabc, prec)));
2511   ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
2512 					   wi::shwi (0xabc, prec)));
2513 
2514   ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
2515 					     wi::shwi (0xabc, prec)));
2516   ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
2517 				       wi::shwi (0xabc, prec)));
2518 
2519   ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
2520 					     wi::shwi (0xabc, prec)));
2521   ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
2522 				       wi::shwi (0xabc, prec)));
2523 }
2524 
2525 /* Run all of the selftests within this file, for all value types.  */
2526 
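/* Entry point for the selftest framework; declared in selftest.h.  */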
2527 void
2528 wide_int_cc_tests ()
2529 {
2530   run_all_wide_int_tests <wide_int> ();
2531   run_all_wide_int_tests <offset_int> ();
2532   run_all_wide_int_tests <widest_int> ();
2533   test_overflow ();
2534   test_round_for_mask ();
2535 }
2536 
2537 } // namespace selftest
2538 #endif /* CHECKING_P */
2539