/* Operations with very long integers.
   Copyright (C) 2012-2018 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "selftest.h"


#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when the compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)

/* Return the value VAL[I] if I < LEN; otherwise return 0 or -1
   depending on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
}

/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit of block I does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
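
/* Example (editor's illustrative sketch, not part of the original
   source): how canonize shrinks or keeps blocks, assuming the usual
   64-bit HOST_WIDE_INT.  The example_* helper is hypothetical and the
   block is guarded out of the build.  */
#if 0
static void
example_canonize (void)
{
  /* -1 at 128-bit precision stored redundantly as two all-ones blocks
     canonizes to a single block; the dropped block is recovered by
     implicit sign extension.  */
  HOST_WIDE_INT val[2] = { -1, -1 };
  gcc_assert (canonize (val, 2, 128) == 1 && val[0] == -1);

  /* 2^63 is positive, so its high zero block is significant: dropping
     it would change the value to -2^63.  The length stays 2.  */
  HOST_WIDE_INT val2[2] = { (HOST_WIDE_INT) (HOST_WIDE_INT_1U << 63), 0 };
  gcc_assert (canonize (val2, 2, 128) == 2);
}
#endif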

/* VAL[0] is the unsigned result of an operation.  Canonize it by adding
   another 0 block if needed, and return the number of blocks needed.  */

static inline unsigned int
canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
{
  if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    {
      val[1] = 0;
      return 2;
    }
  return 1;
}

/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}

/* Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will
   be read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
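
/* Example (editor's illustrative sketch, not part of the original
   source): on a target with 8-bit units and little-endian byte order,
   the buffer { 0x01, 0x02 } is read as the 16-bit value 0x0201; a
   big-endian target would read 0x0102.  The example_* helper is
   hypothetical and the block is guarded out of the build.  */
#if 0
static void
example_from_buffer (void)
{
  const unsigned char buf[2] = { 0x01, 0x02 };
  wide_int x = wi::from_buffer (buf, 2);
  gcc_assert (x.get_precision () == 16);
  gcc_assert (x == 0x0201);
}
#endif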

/* Set RESULT from X; the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones' complement to avoid the 0x80..0 edge case, on
	 which unary minus does not work.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
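
/* Example (editor's illustrative sketch, not part of the original
   source): a signed round trip through GMP.  to_mpz hands the
   magnitude to mpz_import and restores the sign with mpz_com;
   from_mpz below reverses the process.  Assumes integer_type_node is
   a 32-bit signed type; the example_* helper is hypothetical and the
   block is guarded out of the build.  */
#if 0
static void
example_mpz_round_trip (void)
{
  mpz_t m;
  mpz_init (m);
  wi::to_mpz (wi::shwi (-42, 32), m, SIGNED);
  gcc_assert (mpz_get_si (m) == -42);
  wide_int back = wi::from_mpz (integer_type_node, m, true);
  gcc_assert (back == -42);
  mpz_clear (m);
}
#endif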

/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}

/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION
   bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
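
/* Example (editor's illustrative sketch, not part of the original
   source): the 8-bit bounds.  The example_* helper is hypothetical
   and the block is guarded out of the build.  */
#if 0
static void
example_bounds (void)
{
  gcc_assert (wi::max_value (8, SIGNED) == 127);
  gcc_assert (wi::min_value (8, SIGNED) == -128);
  gcc_assert (wi::min_value (8, UNSIGNED) == 0);
  /* The unsigned max is the all-ones pattern 0xFF.  */
  gcc_assert (wi::max_value (8, UNSIGNED) == wi::uhwi (255, 8));
}
#endif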

/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
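
/* Example (editor's illustrative sketch, not part of the original
   source): extending the 8-bit pattern 0xFF to 16 bits yields -1 when
   SGN is SIGNED but 255 when it is UNSIGNED.  The example_* helper is
   hypothetical and the block is guarded out of the build.  */
#if 0
static void
example_force_to_size (void)
{
  HOST_WIDE_INT x = -1;		/* 0xFF at 8-bit precision.  */
  HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
  gcc_assert (wi::force_to_size (val, &x, 1, 8, 16, SIGNED) == 1
	      && val[0] == -1);
  gcc_assert (wi::force_to_size (val, &x, 1, 8, 16, UNSIGNED) == 1
	      && val[0] == 0xFF);
}
#endif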

/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}

/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}

/*
 * Comparisons; note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here; we just have to
	 do both the same way.  */
      if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}

/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}
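
/* Example (editor's illustrative sketch, not part of the original
   source): the same 8-bit pattern orders differently under the two
   signs, since 0xFF is -1 signed but 255 unsigned.  The example_*
   helper is hypothetical and the block is guarded out of the
   build.  */
#if 0
static void
example_compare (void)
{
  wide_int a = wi::uhwi (0xFF, 8);
  wide_int b = wi::uhwi (1, 8);
  gcc_assert (wi::cmps (a, b) < 0);	/* -1 < 1 signed.  */
  gcc_assert (wi::cmpu (a, b) > 0);	/* 255 > 1 unsigned.  */
}
#endif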

/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already sign-extended.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
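
/* Example (editor's illustrative sketch, not part of the original
   source): extending the low 4 bits of 0x0C within a 16-bit value.
   Bit 3 is set, so sign extension smears ones upward while zero
   extension clears everything above bit 3.  The example_* helper is
   hypothetical and the block is guarded out of the build.  */
#if 0
static void
example_ext (void)
{
  wide_int x = wi::uhwi (0x0C, 16);	/* 0b1100.  */
  gcc_assert (wi::sext (x, 4) == -4);
  gcc_assert (wi::zext (x, 4) == 0x0C);
}
#endif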

/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
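
/* Example (editor's illustrative sketch, not part of the original
   source): inserting the 4-bit value 0xA at bit 8 of a 16-bit zero
   produces 0x0A00.  The example_* helper is hypothetical and the
   block is guarded out of the build.  */
#if 0
static void
example_insert (void)
{
  wide_int x = wi::uhwi (0, 16);
  wide_int y = wi::uhwi (0xA, 16);
  gcc_assert (wi::insert (x, y, 8, 4) == 0x0A00);
}
#endif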

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well-defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
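
/* Example (editor's illustrative sketch, not part of the original
   source): bswap on a 32-bit value reverses its four bytes.  The
   example_* helper is hypothetical and the block is guarded out of
   the build.  */
#if 0
static void
example_bswap (void)
{
  wide_int x = wi::uhwi (0x12345678, 32);
  gcc_assert (x.bswap () == 0x78563412);
}
#endif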

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}

/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
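
/* Example (editor's illustrative sketch, not part of the original
   source): mask and shifted_mask at 16-bit precision.  The example_*
   helper is hypothetical and the block is guarded out of the
   build.  */
#if 0
static void
example_masks (void)
{
  gcc_assert (wi::mask (4, false, 16) == 0x000F);
  /* Inverted: bits 4..15 set, i.e. 0xFFF0, which is -16 at 16 bits.  */
  gcc_assert (wi::mask (4, true, 16) == -16);
  gcc_assert (wi::shifted_mask (4, 8, false, 16) == 0x0FF0);
}
#endif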

/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
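
/* Example (editor's illustrative sketch, not part of the original
   source): the implicit sign block at work.  At 128-bit precision, -1
   is stored as a single block, yet AND against a two-block value
   behaves as if the missing high block were all ones.  The same
   implicit extension drives the and_not/or/xor variants below.  The
   example_* helper is hypothetical and the block is guarded out of
   the build.  */
#if 0
static void
example_and (void)
{
  wide_int m1 = wi::shwi (-1, 128);			/* len 1.  */
  wide_int big = wi::lshift (wi::shwi (1, 128), 100);	/* len 2.  */
  gcc_assert (wi::bit_and (m1, big) == big);
}
#endif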

/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}

/*
 * Math.
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Add all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && carry);
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
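
/* Example (editor's illustrative sketch, not part of the original
   source): carry propagation across blocks and unsigned overflow
   detection at the precision boundary.  The example_* helper is
   hypothetical and the block is guarded out of the build.  */
#if 0
static void
example_add (void)
{
  bool ovf;
  /* (2^64 - 1) + 1 at 128-bit precision carries into the second
     block without overflowing.  */
  wide_int a = wi::zext (wi::shwi (-1, 128), 64);
  wide_int sum = wi::add (a, 1, UNSIGNED, &ovf);
  gcc_assert (!ovf && sum == wi::lshift (wi::shwi (1, 128), 64));
  /* The same addition at 64-bit precision wraps to zero and reports
     unsigned overflow.  */
  gcc_assert (wi::add (wi::shwi (-1, 64), 1, UNSIGNED, &ovf) == 0 && ovf);
}
#endif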

/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
1231*38fd1498Szrj 
1232*38fd1498Szrj /* The inverse of wi_unpack.  IN_LEN is the number of input
1233*38fd1498Szrj    blocks and PRECISION is the precision of the result.  Return the
1234*38fd1498Szrj    number of blocks in the canonicalized result.  */
1235*38fd1498Szrj static unsigned int
1236*38fd1498Szrj wi_pack (HOST_WIDE_INT *result,
1237*38fd1498Szrj 	 const unsigned HOST_HALF_WIDE_INT *input,
1238*38fd1498Szrj 	 unsigned int in_len, unsigned int precision)
1239*38fd1498Szrj {
1240*38fd1498Szrj   unsigned int i = 0;
1241*38fd1498Szrj   unsigned int j = 0;
1242*38fd1498Szrj   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1243*38fd1498Szrj 
1244*38fd1498Szrj   while (i + 1 < in_len)
1245*38fd1498Szrj     {
1246*38fd1498Szrj       result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1247*38fd1498Szrj 		     | ((unsigned HOST_WIDE_INT) input[i + 1]
1248*38fd1498Szrj 			<< HOST_BITS_PER_HALF_WIDE_INT));
1249*38fd1498Szrj       i += 2;
1250*38fd1498Szrj     }
1251*38fd1498Szrj 
1252*38fd1498Szrj   /* Handle the case where in_len is odd.  For this we zero extend.  */
1253*38fd1498Szrj   if (in_len & 1)
1254*38fd1498Szrj     result[j++] = (unsigned HOST_WIDE_INT) input[i];
1255*38fd1498Szrj   else if (j < blocks_needed)
1256*38fd1498Szrj     result[j++] = 0;
1257*38fd1498Szrj   return canonize (result, j, precision);
1258*38fd1498Szrj }
1259*38fd1498Szrj 
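/* Conversely, a sketch of the packing above under the same assumptions
   (sketch_pack is illustrative, not the wi_pack API): adjacent pairs of
   32-bit digits recombine into 64-bit words, and an odd trailing digit
   is zero-extended.  Canonization is omitted here.  */
static unsigned int
sketch_pack (unsigned long long *out, const unsigned int *in,
	     unsigned int in_len)
{
  unsigned int i = 0, j = 0;
  while (i + 1 < in_len)
    {
      out[j++] = (unsigned long long) in[i]
		 | ((unsigned long long) in[i + 1] << 32);
      i += 2;
    }
  if (in_len & 1)
    out[j++] = (unsigned long long) in[i];
  return j;
}
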
1260*38fd1498Szrj /* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
1261*38fd1498Szrj    result is returned.
1262*38fd1498Szrj 
1263*38fd1498Szrj    If HIGH is not set, throw away the upper half after the check is
1264*38fd1498Szrj    made to see if it overflows.  Unfortunately there is no better way
1265*38fd1498Szrj    to check for overflow than to do this.  If OVERFLOW is nonnull,
1266*38fd1498Szrj    record in *OVERFLOW whether the result overflowed.  SGN controls
1267*38fd1498Szrj    the signedness and is used when checking overflow or when HIGH is set.  */
1268*38fd1498Szrj unsigned int
1269*38fd1498Szrj wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1270*38fd1498Szrj 		  unsigned int op1len, const HOST_WIDE_INT *op2val,
1271*38fd1498Szrj 		  unsigned int op2len, unsigned int prec, signop sgn,
1272*38fd1498Szrj 		  bool *overflow, bool high)
1273*38fd1498Szrj {
1274*38fd1498Szrj   unsigned HOST_WIDE_INT o0, o1, k, t;
1275*38fd1498Szrj   unsigned int i;
1276*38fd1498Szrj   unsigned int j;
1277*38fd1498Szrj   unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1278*38fd1498Szrj   unsigned int half_blocks_needed = blocks_needed * 2;
1279*38fd1498Szrj   /* The sizes here are scaled to support a 2x largest mode by 2x
1280*38fd1498Szrj      largest mode yielding a 4x largest mode result.  This is what is
1281*38fd1498Szrj      needed by vpn.  */
1282*38fd1498Szrj 
1283*38fd1498Szrj   unsigned HOST_HALF_WIDE_INT
1284*38fd1498Szrj     u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1285*38fd1498Szrj   unsigned HOST_HALF_WIDE_INT
1286*38fd1498Szrj     v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1287*38fd1498Szrj   /* The '2' in 'R' is because we are internally doing a full
1288*38fd1498Szrj      multiply.  */
1289*38fd1498Szrj   unsigned HOST_HALF_WIDE_INT
1290*38fd1498Szrj     r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1291*38fd1498Szrj   HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1292*38fd1498Szrj 
1293*38fd1498Szrj   /* If the top level routine did not really pass in an overflow, then
1294*38fd1498Szrj      just make sure that we never attempt to set it.  */
1295*38fd1498Szrj   bool needs_overflow = (overflow != 0);
1296*38fd1498Szrj   if (needs_overflow)
1297*38fd1498Szrj     *overflow = false;
1298*38fd1498Szrj 
1299*38fd1498Szrj   wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1300*38fd1498Szrj   wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1301*38fd1498Szrj 
1302*38fd1498Szrj   /* This is a surprisingly common case, so do it first.  */
1303*38fd1498Szrj   if (op1 == 0 || op2 == 0)
1304*38fd1498Szrj     {
1305*38fd1498Szrj       val[0] = 0;
1306*38fd1498Szrj       return 1;
1307*38fd1498Szrj     }
1308*38fd1498Szrj 
1309*38fd1498Szrj #ifdef umul_ppmm
1310*38fd1498Szrj   if (sgn == UNSIGNED)
1311*38fd1498Szrj     {
1312*38fd1498Szrj       /* If the inputs are single HWIs and the output has room for at
1313*38fd1498Szrj 	 least two HWIs, we can use umul_ppmm directly.  */
1314*38fd1498Szrj       if (prec >= HOST_BITS_PER_WIDE_INT * 2
1315*38fd1498Szrj 	  && wi::fits_uhwi_p (op1)
1316*38fd1498Szrj 	  && wi::fits_uhwi_p (op2))
1317*38fd1498Szrj 	{
1318*38fd1498Szrj 	  /* This case never overflows.  */
1319*38fd1498Szrj 	  if (high)
1320*38fd1498Szrj 	    {
1321*38fd1498Szrj 	      val[0] = 0;
1322*38fd1498Szrj 	      return 1;
1323*38fd1498Szrj 	    }
1324*38fd1498Szrj 	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1325*38fd1498Szrj 	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1326*38fd1498Szrj 	    {
1327*38fd1498Szrj 	      val[2] = 0;
1328*38fd1498Szrj 	      return 3;
1329*38fd1498Szrj 	    }
1330*38fd1498Szrj 	  return 1 + (val[1] != 0 || val[0] < 0);
1331*38fd1498Szrj 	}
1332*38fd1498Szrj       /* Likewise if the output is a full single HWI, except that the
1333*38fd1498Szrj 	 upper HWI of the result is only used for determining overflow.
1334*38fd1498Szrj 	 (We handle this case inline when overflow isn't needed.)  */
1335*38fd1498Szrj       else if (prec == HOST_BITS_PER_WIDE_INT)
1336*38fd1498Szrj 	{
1337*38fd1498Szrj 	  unsigned HOST_WIDE_INT upper;
1338*38fd1498Szrj 	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1339*38fd1498Szrj 	  if (needs_overflow)
1340*38fd1498Szrj 	    *overflow = (upper != 0);
1341*38fd1498Szrj 	  if (high)
1342*38fd1498Szrj 	    val[0] = upper;
1343*38fd1498Szrj 	  return 1;
1344*38fd1498Szrj 	}
1345*38fd1498Szrj     }
1346*38fd1498Szrj #endif
1347*38fd1498Szrj 
1348*38fd1498Szrj   /* Handle multiplications by 1.  */
1349*38fd1498Szrj   if (op1 == 1)
1350*38fd1498Szrj     {
1351*38fd1498Szrj       if (high)
1352*38fd1498Szrj 	{
1353*38fd1498Szrj 	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1354*38fd1498Szrj 	  return 1;
1355*38fd1498Szrj 	}
1356*38fd1498Szrj       for (i = 0; i < op2len; i++)
1357*38fd1498Szrj 	val[i] = op2val[i];
1358*38fd1498Szrj       return op2len;
1359*38fd1498Szrj     }
1360*38fd1498Szrj   if (op2 == 1)
1361*38fd1498Szrj     {
1362*38fd1498Szrj       if (high)
1363*38fd1498Szrj 	{
1364*38fd1498Szrj 	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1365*38fd1498Szrj 	  return 1;
1366*38fd1498Szrj 	}
1367*38fd1498Szrj       for (i = 0; i < op1len; i++)
1368*38fd1498Szrj 	val[i] = op1val[i];
1369*38fd1498Szrj       return op1len;
1370*38fd1498Szrj     }
1371*38fd1498Szrj 
1372*38fd1498Szrj   /* If we need to check for overflow, we can only do half wide
1373*38fd1498Szrj      multiplies quickly because we need to look at the top bits to
1374*38fd1498Szrj      check for the overflow.  */
1375*38fd1498Szrj   if ((high || needs_overflow)
1376*38fd1498Szrj       && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1377*38fd1498Szrj     {
1378*38fd1498Szrj       unsigned HOST_WIDE_INT r;
1379*38fd1498Szrj 
1380*38fd1498Szrj       if (sgn == SIGNED)
1381*38fd1498Szrj 	{
1382*38fd1498Szrj 	  o0 = op1.to_shwi ();
1383*38fd1498Szrj 	  o1 = op2.to_shwi ();
1384*38fd1498Szrj 	}
1385*38fd1498Szrj       else
1386*38fd1498Szrj 	{
1387*38fd1498Szrj 	  o0 = op1.to_uhwi ();
1388*38fd1498Szrj 	  o1 = op2.to_uhwi ();
1389*38fd1498Szrj 	}
1390*38fd1498Szrj 
1391*38fd1498Szrj       r = o0 * o1;
1392*38fd1498Szrj       if (needs_overflow)
1393*38fd1498Szrj 	{
1394*38fd1498Szrj 	  if (sgn == SIGNED)
1395*38fd1498Szrj 	    {
1396*38fd1498Szrj 	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1397*38fd1498Szrj 		*overflow = true;
1398*38fd1498Szrj 	    }
1399*38fd1498Szrj 	  else
1400*38fd1498Szrj 	    {
1401*38fd1498Szrj 	      if ((r >> prec) != 0)
1402*38fd1498Szrj 		*overflow = true;
1403*38fd1498Szrj 	    }
1404*38fd1498Szrj 	}
1405*38fd1498Szrj       val[0] = high ? r >> prec : r;
1406*38fd1498Szrj       return 1;
1407*38fd1498Szrj     }
1408*38fd1498Szrj 
1409*38fd1498Szrj   /* We do unsigned mul and then correct it.  */
1410*38fd1498Szrj   wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1411*38fd1498Szrj   wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1412*38fd1498Szrj 
1413*38fd1498Szrj   /* The 2 is for a full mult.  */
1414*38fd1498Szrj   memset (r, 0, half_blocks_needed * 2
1415*38fd1498Szrj 	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1416*38fd1498Szrj 
1417*38fd1498Szrj   for (j = 0; j < half_blocks_needed; j++)
1418*38fd1498Szrj     {
1419*38fd1498Szrj       k = 0;
1420*38fd1498Szrj       for (i = 0; i < half_blocks_needed; i++)
1421*38fd1498Szrj 	{
1422*38fd1498Szrj 	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1423*38fd1498Szrj 	       + r[i + j] + k);
1424*38fd1498Szrj 	  r[i + j] = t & HALF_INT_MASK;
1425*38fd1498Szrj 	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1426*38fd1498Szrj 	}
1427*38fd1498Szrj       r[j + half_blocks_needed] = k;
1428*38fd1498Szrj     }
1429*38fd1498Szrj 
1430*38fd1498Szrj   /* We did unsigned math above.  For signed we must adjust the
1431*38fd1498Szrj      product (assuming we need to see that).  */
1432*38fd1498Szrj   if (sgn == SIGNED && (high || needs_overflow))
1433*38fd1498Szrj     {
1434*38fd1498Szrj       unsigned HOST_WIDE_INT b;
1435*38fd1498Szrj       if (wi::neg_p (op1))
1436*38fd1498Szrj 	{
1437*38fd1498Szrj 	  b = 0;
1438*38fd1498Szrj 	  for (i = 0; i < half_blocks_needed; i++)
1439*38fd1498Szrj 	    {
1440*38fd1498Szrj 	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1441*38fd1498Szrj 		- (unsigned HOST_WIDE_INT)v[i] - b;
1442*38fd1498Szrj 	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
1443*38fd1498Szrj 	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1444*38fd1498Szrj 	    }
1445*38fd1498Szrj 	}
1446*38fd1498Szrj       if (wi::neg_p (op2))
1447*38fd1498Szrj 	{
1448*38fd1498Szrj 	  b = 0;
1449*38fd1498Szrj 	  for (i = 0; i < half_blocks_needed; i++)
1450*38fd1498Szrj 	    {
1451*38fd1498Szrj 	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1452*38fd1498Szrj 		- (unsigned HOST_WIDE_INT)u[i] - b;
1453*38fd1498Szrj 	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
1454*38fd1498Szrj 	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1455*38fd1498Szrj 	    }
1456*38fd1498Szrj 	}
1457*38fd1498Szrj     }
1458*38fd1498Szrj 
1459*38fd1498Szrj   if (needs_overflow)
1460*38fd1498Szrj     {
1461*38fd1498Szrj       HOST_WIDE_INT top;
1462*38fd1498Szrj 
1463*38fd1498Szrj       /* For unsigned, overflow is true if any of the top bits are set.
1464*38fd1498Szrj 	 For signed, overflow is true if any of the top bits are not equal
1465*38fd1498Szrj 	 to the sign bit.  */
1466*38fd1498Szrj       if (sgn == UNSIGNED)
1467*38fd1498Szrj 	top = 0;
1468*38fd1498Szrj       else
1469*38fd1498Szrj 	{
1470*38fd1498Szrj 	  top = r[(half_blocks_needed) - 1];
1471*38fd1498Szrj 	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1472*38fd1498Szrj 	  top &= mask;
1473*38fd1498Szrj 	}
1474*38fd1498Szrj 
1475*38fd1498Szrj       for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1476*38fd1498Szrj 	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1477*38fd1498Szrj 	  *overflow = true;
1478*38fd1498Szrj     }
1479*38fd1498Szrj 
1480*38fd1498Szrj   int r_offset = high ? half_blocks_needed : 0;
1481*38fd1498Szrj   return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1482*38fd1498Szrj }
1483*38fd1498Szrj 
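/* A standalone sketch of the inner schoolbook loop above: multiply two
   64-bit values into a 128-bit result using base-2^32 digits.  U, V and
   R play the same roles as above; the signed correction and the
   overflow scan of the real routine are omitted.  */
static void
sketch_mul_64x64 (unsigned int r[4], unsigned long long a,
		  unsigned long long b)
{
  unsigned int u[2] = { (unsigned int) a, (unsigned int) (a >> 32) };
  unsigned int v[2] = { (unsigned int) b, (unsigned int) (b >> 32) };
  r[0] = r[1] = r[2] = r[3] = 0;
  for (int j = 0; j < 2; j++)
    {
      unsigned long long k = 0;	/* Carry out of the previous digit.  */
      for (int i = 0; i < 2; i++)
	{
	  /* Digit product plus previous digit plus carry cannot
	     overflow 64 bits: (2^32-1)^2 + 2*(2^32-1) == 2^64 - 1.  */
	  unsigned long long t
	    = (unsigned long long) u[i] * v[j] + r[i + j] + k;
	  r[i + j] = (unsigned int) t;
	  k = t >> 32;
	}
      r[j + 2] = (unsigned int) k;
    }
}
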
1484*38fd1498Szrj /* Compute the population count of X.  */
1485*38fd1498Szrj int
1486*38fd1498Szrj wi::popcount (const wide_int_ref &x)
1487*38fd1498Szrj {
1488*38fd1498Szrj   unsigned int i;
1489*38fd1498Szrj   int count;
1490*38fd1498Szrj 
1491*38fd1498Szrj   /* The high order block is special if it is the last block and the
1492*38fd1498Szrj      precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
1493*38fd1498Szrj      have to clear out any ones above the precision before doing
1494*38fd1498Szrj      popcount on this block.  */
1495*38fd1498Szrj   count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1496*38fd1498Szrj   unsigned int stop = x.len;
1497*38fd1498Szrj   if (count < 0)
1498*38fd1498Szrj     {
1499*38fd1498Szrj       count = popcount_hwi (x.uhigh () << -count);
1500*38fd1498Szrj       stop -= 1;
1501*38fd1498Szrj     }
1502*38fd1498Szrj   else
1503*38fd1498Szrj     {
1504*38fd1498Szrj       if (x.sign_mask () >= 0)
1505*38fd1498Szrj 	count = 0;
1506*38fd1498Szrj     }
1507*38fd1498Szrj 
1508*38fd1498Szrj   for (i = 0; i < stop; ++i)
1509*38fd1498Szrj     count += popcount_hwi (x.val[i]);
1510*38fd1498Szrj 
1511*38fd1498Szrj   return count;
1512*38fd1498Szrj }
1513*38fd1498Szrj 
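/* A sketch of the precision handling above for an uncompressed array of
   64-bit blocks with (NBLOCKS - 1) * 64 < PREC <= NBLOCKS * 64: bits at
   or above PREC in the top block are representation, not value, so they
   are shifted out before counting.  (The real routine also copes with
   the compressed form, where blocks above X.LEN are implicit.)  */
static int
sketch_popcount (const unsigned long long *blocks, unsigned int nblocks,
		 unsigned int prec)
{
  int count = 0;
  unsigned int excess = nblocks * 64 - prec;	/* 0 <= excess < 64.  */
  for (unsigned int i = 0; i + 1 < nblocks; i++)
    count += __builtin_popcountll (blocks[i]);
  count += __builtin_popcountll (blocks[nblocks - 1] << excess);
  return count;
}
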
1514*38fd1498Szrj /* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
1515*38fd1498Szrj    whether the result overflows when OP0 and OP1 are treated as having
1516*38fd1498Szrj    signedness SGN.  Return the number of blocks in VAL.  */
1517*38fd1498Szrj unsigned int
1518*38fd1498Szrj wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1519*38fd1498Szrj 	       unsigned int op0len, const HOST_WIDE_INT *op1,
1520*38fd1498Szrj 	       unsigned int op1len, unsigned int prec,
1521*38fd1498Szrj 	       signop sgn, bool *overflow)
1522*38fd1498Szrj {
1523*38fd1498Szrj   unsigned HOST_WIDE_INT o0 = 0;
1524*38fd1498Szrj   unsigned HOST_WIDE_INT o1 = 0;
1525*38fd1498Szrj   unsigned HOST_WIDE_INT x = 0;
1526*38fd1498Szrj   /* Subtraction is done directly rather than as a negate and add: each
1527*38fd1498Szrj      element is computed as O0 - O1 - BORROW, with the borrow into the
1528*38fd1498Szrj      first element starting at 0.  */
1529*38fd1498Szrj   unsigned HOST_WIDE_INT borrow = 0;
1530*38fd1498Szrj   unsigned HOST_WIDE_INT old_borrow = 0;
1531*38fd1498Szrj 
1532*38fd1498Szrj   unsigned HOST_WIDE_INT mask0, mask1;
1533*38fd1498Szrj   unsigned int i;
1534*38fd1498Szrj 
1535*38fd1498Szrj   unsigned int len = MAX (op0len, op1len);
1536*38fd1498Szrj   mask0 = -top_bit_of (op0, op0len, prec);
1537*38fd1498Szrj   mask1 = -top_bit_of (op1, op1len, prec);
1538*38fd1498Szrj 
1539*38fd1498Szrj   /* Subtract all of the explicitly defined elements.  */
1540*38fd1498Szrj   for (i = 0; i < len; i++)
1541*38fd1498Szrj     {
1542*38fd1498Szrj       o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1543*38fd1498Szrj       o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1544*38fd1498Szrj       x = o0 - o1 - borrow;
1545*38fd1498Szrj       val[i] = x;
1546*38fd1498Szrj       old_borrow = borrow;
1547*38fd1498Szrj       borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1548*38fd1498Szrj     }
1549*38fd1498Szrj 
1550*38fd1498Szrj   if (len * HOST_BITS_PER_WIDE_INT < prec)
1551*38fd1498Szrj     {
1552*38fd1498Szrj       val[len] = mask0 - mask1 - borrow;
1553*38fd1498Szrj       len++;
1554*38fd1498Szrj       if (overflow)
1555*38fd1498Szrj 	*overflow = (sgn == UNSIGNED && borrow);
1556*38fd1498Szrj     }
1557*38fd1498Szrj   else if (overflow)
1558*38fd1498Szrj     {
1559*38fd1498Szrj       unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1560*38fd1498Szrj       if (sgn == SIGNED)
1561*38fd1498Szrj 	{
1562*38fd1498Szrj 	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1563*38fd1498Szrj 	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1564*38fd1498Szrj 	}
1565*38fd1498Szrj       else
1566*38fd1498Szrj 	{
1567*38fd1498Szrj 	  /* Put the MSBs of X and O0 in the top of the HWI.  */
1568*38fd1498Szrj 	  x <<= shift;
1569*38fd1498Szrj 	  o0 <<= shift;
1570*38fd1498Szrj 	  if (old_borrow)
1571*38fd1498Szrj 	    *overflow = (x >= o0);
1572*38fd1498Szrj 	  else
1573*38fd1498Szrj 	    *overflow = (x > o0);
1574*38fd1498Szrj 	}
1575*38fd1498Szrj     }
1576*38fd1498Szrj 
1577*38fd1498Szrj   return canonize (val, len, prec);
1578*38fd1498Szrj }
1579*38fd1498Szrj 
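/* A sketch of the borrow chain above on plain 64-bit blocks: within one
   block X = O0 - O1 - BORROW_IN, and a borrow propagates out when
   O0 < O1, or when O0 == O1 and a borrow came in (hence the <= once a
   borrow is pending).  Mask extension and canonization are omitted.  */
static void
sketch_sub (unsigned long long *val, const unsigned long long *op0,
	    const unsigned long long *op1, unsigned int len)
{
  unsigned long long borrow = 0;
  for (unsigned int i = 0; i < len; i++)
    {
      unsigned long long o0 = op0[i], o1 = op1[i];
      val[i] = o0 - o1 - borrow;
      borrow = borrow ? (o0 <= o1) : (o0 < o1);
    }
}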
1580*38fd1498Szrj 
1581*38fd1498Szrj /*
1582*38fd1498Szrj  * Division and Mod
1583*38fd1498Szrj  */
1584*38fd1498Szrj 
1585*38fd1498Szrj /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
1586*38fd1498Szrj    algorithm is a small modification of the algorithm in Hacker's
1587*38fd1498Szrj    Delight by Warren, which itself is a small modification of Knuth's
1588*38fd1498Szrj    algorithm.  M is the number of significant elements of B_DIVIDEND
1589*38fd1498Szrj    (Knuth's U); at least one extra element of B_DIVIDEND must be
1590*38fd1498Szrj    allocated.  N is the number of elements of B_DIVISOR.  */
1591*38fd1498Szrj static void
1592*38fd1498Szrj divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1593*38fd1498Szrj 		   unsigned HOST_HALF_WIDE_INT *b_remainder,
1594*38fd1498Szrj 		   unsigned HOST_HALF_WIDE_INT *b_dividend,
1595*38fd1498Szrj 		   unsigned HOST_HALF_WIDE_INT *b_divisor,
1596*38fd1498Szrj 		   int m, int n)
1597*38fd1498Szrj {
1598*38fd1498Szrj   /* The "digits" are a HOST_HALF_WIDE_INT, which is half the size of
1599*38fd1498Szrj      a HOST_WIDE_INT, stored in the lower bits of each word.  This
1600*38fd1498Szrj      algorithm should work properly on both 32-bit and 64-bit
1601*38fd1498Szrj      machines.  */
1602*38fd1498Szrj   unsigned HOST_WIDE_INT b
1603*38fd1498Szrj     = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1604*38fd1498Szrj   unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
1605*38fd1498Szrj   unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
1606*38fd1498Szrj   unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
1607*38fd1498Szrj   HOST_WIDE_INT t, k;
1608*38fd1498Szrj   int i, j, s;
1609*38fd1498Szrj 
1610*38fd1498Szrj   /* Single digit divisor.  */
1611*38fd1498Szrj   if (n == 1)
1612*38fd1498Szrj     {
1613*38fd1498Szrj       k = 0;
1614*38fd1498Szrj       for (j = m - 1; j >= 0; j--)
1615*38fd1498Szrj 	{
1616*38fd1498Szrj 	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1617*38fd1498Szrj 	  k = ((k * b + b_dividend[j])
1618*38fd1498Szrj 	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
1619*38fd1498Szrj 		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
1620*38fd1498Szrj 	}
1621*38fd1498Szrj       b_remainder[0] = k;
1622*38fd1498Szrj       return;
1623*38fd1498Szrj     }
1624*38fd1498Szrj 
1625*38fd1498Szrj   s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1626*38fd1498Szrj 
1627*38fd1498Szrj   if (s)
1628*38fd1498Szrj     {
1629*38fd1498Szrj       /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
1630*38fd1498Szrj 	 algorithm, we can overwrite b_dividend and b_divisor, so we do
1631*38fd1498Szrj 	 that.  */
1632*38fd1498Szrj       for (i = n - 1; i > 0; i--)
1633*38fd1498Szrj 	b_divisor[i] = (b_divisor[i] << s)
1634*38fd1498Szrj 	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1635*38fd1498Szrj       b_divisor[0] = b_divisor[0] << s;
1636*38fd1498Szrj 
1637*38fd1498Szrj       b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1638*38fd1498Szrj       for (i = m - 1; i > 0; i--)
1639*38fd1498Szrj 	b_dividend[i] = (b_dividend[i] << s)
1640*38fd1498Szrj 	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1641*38fd1498Szrj       b_dividend[0] = b_dividend[0] << s;
1642*38fd1498Szrj     }
1643*38fd1498Szrj 
1644*38fd1498Szrj   /* Main loop.  */
1645*38fd1498Szrj   for (j = m - n; j >= 0; j--)
1646*38fd1498Szrj     {
1647*38fd1498Szrj       qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1648*38fd1498Szrj       rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1649*38fd1498Szrj     again:
1650*38fd1498Szrj       if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1651*38fd1498Szrj 	{
1652*38fd1498Szrj 	  qhat -= 1;
1653*38fd1498Szrj 	  rhat += b_divisor[n-1];
1654*38fd1498Szrj 	  if (rhat < b)
1655*38fd1498Szrj 	    goto again;
1656*38fd1498Szrj 	}
1657*38fd1498Szrj 
1658*38fd1498Szrj       /* Multiply and subtract.  */
1659*38fd1498Szrj       k = 0;
1660*38fd1498Szrj       for (i = 0; i < n; i++)
1661*38fd1498Szrj 	{
1662*38fd1498Szrj 	  p = qhat * b_divisor[i];
1663*38fd1498Szrj 	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1664*38fd1498Szrj 	  b_dividend[i + j] = t;
1665*38fd1498Szrj 	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1666*38fd1498Szrj 	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1667*38fd1498Szrj 	}
1668*38fd1498Szrj       t = b_dividend[j+n] - k;
1669*38fd1498Szrj       b_dividend[j+n] = t;
1670*38fd1498Szrj 
1671*38fd1498Szrj       b_quotient[j] = qhat;
1672*38fd1498Szrj       if (t < 0)
1673*38fd1498Szrj 	{
1674*38fd1498Szrj 	  b_quotient[j] -= 1;
1675*38fd1498Szrj 	  k = 0;
1676*38fd1498Szrj 	  for (i = 0; i < n; i++)
1677*38fd1498Szrj 	    {
1678*38fd1498Szrj 	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1679*38fd1498Szrj 	      b_dividend[i+j] = t;
1680*38fd1498Szrj 	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1681*38fd1498Szrj 	    }
1682*38fd1498Szrj 	  b_dividend[j+n] += k;
1683*38fd1498Szrj 	}
1684*38fd1498Szrj     }
1685*38fd1498Szrj   if (s)
1686*38fd1498Szrj     for (i = 0; i < n; i++)
1687*38fd1498Szrj       b_remainder[i] = (b_dividend[i] >> s)
1688*38fd1498Szrj 	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1689*38fd1498Szrj   else
1690*38fd1498Szrj     for (i = 0; i < n; i++)
1691*38fd1498Szrj       b_remainder[i] = b_dividend[i];
1692*38fd1498Szrj }
1693*38fd1498Szrj 
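/* A standalone sketch of the single-digit path above: classic short
   division in base 2^32.  The running remainder K stays below the
   divisor, so (K << 32) | DIGIT always fits in 64 bits and the quotient
   digit is always below 2^32.  */
static unsigned int
sketch_short_div (unsigned int *quot, const unsigned int *dividend,
		  unsigned int m, unsigned int divisor)
{
  unsigned long long k = 0;
  for (int j = (int) m - 1; j >= 0; j--)
    {
      unsigned long long cur = (k << 32) | dividend[j];
      quot[j] = (unsigned int) (cur / divisor);
      k = cur % divisor;
    }
  return (unsigned int) k;	/* The remainder.  */
}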
1694*38fd1498Szrj 
1695*38fd1498Szrj /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1696*38fd1498Szrj    the result.  If QUOTIENT is nonnull, store the value of the quotient
1697*38fd1498Szrj    there and return the number of blocks in it.  The return value is
1698*38fd1498Szrj    not defined otherwise.  If REMAINDER is nonnull, store the value
1699*38fd1498Szrj    of the remainder there and store the number of blocks in
1700*38fd1498Szrj    *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
1701*38fd1498Szrj    the division overflowed.  */
1702*38fd1498Szrj unsigned int
1703*38fd1498Szrj wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1704*38fd1498Szrj 		     HOST_WIDE_INT *remainder,
1705*38fd1498Szrj 		     const HOST_WIDE_INT *dividend_val,
1706*38fd1498Szrj 		     unsigned int dividend_len, unsigned int dividend_prec,
1707*38fd1498Szrj 		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1708*38fd1498Szrj 		     unsigned int divisor_prec, signop sgn,
1709*38fd1498Szrj 		     bool *oflow)
1710*38fd1498Szrj {
1711*38fd1498Szrj   unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1712*38fd1498Szrj   unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1713*38fd1498Szrj   unsigned HOST_HALF_WIDE_INT
1714*38fd1498Szrj     b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1715*38fd1498Szrj   unsigned HOST_HALF_WIDE_INT
1716*38fd1498Szrj     b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1717*38fd1498Szrj   unsigned HOST_HALF_WIDE_INT
1718*38fd1498Szrj     b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1719*38fd1498Szrj   unsigned HOST_HALF_WIDE_INT
1720*38fd1498Szrj     b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1721*38fd1498Szrj   unsigned int m, n;
1722*38fd1498Szrj   bool dividend_neg = false;
1723*38fd1498Szrj   bool divisor_neg = false;
1724*38fd1498Szrj   bool overflow = false;
1725*38fd1498Szrj   wide_int neg_dividend, neg_divisor;
1726*38fd1498Szrj 
1727*38fd1498Szrj   wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1728*38fd1498Szrj 					   dividend_prec);
1729*38fd1498Szrj   wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1730*38fd1498Szrj 					  divisor_prec);
1731*38fd1498Szrj   if (divisor == 0)
1732*38fd1498Szrj     overflow = true;
1733*38fd1498Szrj 
1734*38fd1498Szrj   /* The smallest signed number / -1 causes overflow.  The dividend_len
1735*38fd1498Szrj      check is for speed rather than correctness.  */
1736*38fd1498Szrj   if (sgn == SIGNED
1737*38fd1498Szrj       && dividend_len == BLOCKS_NEEDED (dividend_prec)
1738*38fd1498Szrj       && divisor == -1
1739*38fd1498Szrj       && wi::only_sign_bit_p (dividend))
1740*38fd1498Szrj     overflow = true;
1741*38fd1498Szrj 
1742*38fd1498Szrj   /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
1743*38fd1498Szrj      (signed min / -1) has the same representation as the original dividend.
1744*38fd1498Szrj      We have traditionally made division by zero act as division by one,
1745*38fd1498Szrj      so there too we use the original dividend.  */
1746*38fd1498Szrj   if (overflow)
1747*38fd1498Szrj     {
1748*38fd1498Szrj       if (remainder)
1749*38fd1498Szrj 	{
1750*38fd1498Szrj 	  *remainder_len = 1;
1751*38fd1498Szrj 	  remainder[0] = 0;
1752*38fd1498Szrj 	}
1753*38fd1498Szrj       if (oflow != 0)
1754*38fd1498Szrj 	*oflow = true;
1755*38fd1498Szrj       if (quotient)
1756*38fd1498Szrj 	for (unsigned int i = 0; i < dividend_len; ++i)
1757*38fd1498Szrj 	  quotient[i] = dividend_val[i];
1758*38fd1498Szrj       return dividend_len;
1759*38fd1498Szrj     }
1760*38fd1498Szrj 
1761*38fd1498Szrj   if (oflow)
1762*38fd1498Szrj     *oflow = false;
1763*38fd1498Szrj 
1764*38fd1498Szrj   /* Do it on the host if you can.  */
1765*38fd1498Szrj   if (sgn == SIGNED
1766*38fd1498Szrj       && wi::fits_shwi_p (dividend)
1767*38fd1498Szrj       && wi::fits_shwi_p (divisor))
1768*38fd1498Szrj     {
1769*38fd1498Szrj       HOST_WIDE_INT o0 = dividend.to_shwi ();
1770*38fd1498Szrj       HOST_WIDE_INT o1 = divisor.to_shwi ();
1771*38fd1498Szrj 
1772*38fd1498Szrj       if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1773*38fd1498Szrj 	{
1774*38fd1498Szrj 	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1775*38fd1498Szrj 	  if (quotient)
1776*38fd1498Szrj 	    {
1777*38fd1498Szrj 	      quotient[0] = HOST_WIDE_INT_MIN;
1778*38fd1498Szrj 	      quotient[1] = 0;
1779*38fd1498Szrj 	    }
1780*38fd1498Szrj 	  if (remainder)
1781*38fd1498Szrj 	    {
1782*38fd1498Szrj 	      remainder[0] = 0;
1783*38fd1498Szrj 	      *remainder_len = 1;
1784*38fd1498Szrj 	    }
1785*38fd1498Szrj 	  return 2;
1786*38fd1498Szrj 	}
1787*38fd1498Szrj       else
1788*38fd1498Szrj 	{
1789*38fd1498Szrj 	  if (quotient)
1790*38fd1498Szrj 	    quotient[0] = o0 / o1;
1791*38fd1498Szrj 	  if (remainder)
1792*38fd1498Szrj 	    {
1793*38fd1498Szrj 	      remainder[0] = o0 % o1;
1794*38fd1498Szrj 	      *remainder_len = 1;
1795*38fd1498Szrj 	    }
1796*38fd1498Szrj 	  return 1;
1797*38fd1498Szrj 	}
1798*38fd1498Szrj     }
1799*38fd1498Szrj 
1800*38fd1498Szrj   if (sgn == UNSIGNED
1801*38fd1498Szrj       && wi::fits_uhwi_p (dividend)
1802*38fd1498Szrj       && wi::fits_uhwi_p (divisor))
1803*38fd1498Szrj     {
1804*38fd1498Szrj       unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1805*38fd1498Szrj       unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1806*38fd1498Szrj       unsigned int quotient_len = 1;
1807*38fd1498Szrj 
1808*38fd1498Szrj       if (quotient)
1809*38fd1498Szrj 	{
1810*38fd1498Szrj 	  quotient[0] = o0 / o1;
1811*38fd1498Szrj 	  quotient_len = canonize_uhwi (quotient, dividend_prec);
1812*38fd1498Szrj 	}
1813*38fd1498Szrj       if (remainder)
1814*38fd1498Szrj 	{
1815*38fd1498Szrj 	  remainder[0] = o0 % o1;
1816*38fd1498Szrj 	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
1817*38fd1498Szrj 	}
1818*38fd1498Szrj       return quotient_len;
1819*38fd1498Szrj     }
1820*38fd1498Szrj 
1821*38fd1498Szrj   /* Make the divisor and dividend positive and remember what we
1822*38fd1498Szrj      did.  */
1823*38fd1498Szrj   if (sgn == SIGNED)
1824*38fd1498Szrj     {
1825*38fd1498Szrj       if (wi::neg_p (dividend))
1826*38fd1498Szrj 	{
1827*38fd1498Szrj 	  neg_dividend = -dividend;
1828*38fd1498Szrj 	  dividend = neg_dividend;
1829*38fd1498Szrj 	  dividend_neg = true;
1830*38fd1498Szrj 	}
1831*38fd1498Szrj       if (wi::neg_p (divisor))
1832*38fd1498Szrj 	{
1833*38fd1498Szrj 	  neg_divisor = -divisor;
1834*38fd1498Szrj 	  divisor = neg_divisor;
1835*38fd1498Szrj 	  divisor_neg = true;
1836*38fd1498Szrj 	}
1837*38fd1498Szrj     }
1838*38fd1498Szrj 
1839*38fd1498Szrj   wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1840*38fd1498Szrj 	     dividend_blocks_needed, dividend_prec, sgn);
1841*38fd1498Szrj   wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1842*38fd1498Szrj 	     divisor_blocks_needed, divisor_prec, sgn);
1843*38fd1498Szrj 
1844*38fd1498Szrj   m = dividend_blocks_needed;
1845*38fd1498Szrj   b_dividend[m] = 0;
1846*38fd1498Szrj   while (m > 1 && b_dividend[m - 1] == 0)
1847*38fd1498Szrj     m--;
1848*38fd1498Szrj 
1849*38fd1498Szrj   n = divisor_blocks_needed;
1850*38fd1498Szrj   while (n > 1 && b_divisor[n - 1] == 0)
1851*38fd1498Szrj     n--;
1852*38fd1498Szrj 
1853*38fd1498Szrj   memset (b_quotient, 0, sizeof (b_quotient));
1854*38fd1498Szrj 
1855*38fd1498Szrj   divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1856*38fd1498Szrj 
1857*38fd1498Szrj   unsigned int quotient_len = 0;
1858*38fd1498Szrj   if (quotient)
1859*38fd1498Szrj     {
1860*38fd1498Szrj       quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1861*38fd1498Szrj       /* The quotient is neg if exactly one of the divisor or dividend is
1862*38fd1498Szrj 	 neg.  */
1863*38fd1498Szrj       if (dividend_neg != divisor_neg)
1864*38fd1498Szrj 	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1865*38fd1498Szrj 				      quotient_len, dividend_prec,
1866*38fd1498Szrj 				      UNSIGNED, 0);
1867*38fd1498Szrj     }
1868*38fd1498Szrj 
1869*38fd1498Szrj   if (remainder)
1870*38fd1498Szrj     {
1871*38fd1498Szrj       *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1872*38fd1498Szrj       /* The remainder is always the same sign as the dividend.  */
1873*38fd1498Szrj       if (dividend_neg)
1874*38fd1498Szrj 	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1875*38fd1498Szrj 					*remainder_len, dividend_prec,
1876*38fd1498Szrj 					UNSIGNED, 0);
1877*38fd1498Szrj     }
1878*38fd1498Szrj 
1879*38fd1498Szrj   return quotient_len;
1880*38fd1498Szrj }
1881*38fd1498Szrj 
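/* A sketch of the sign handling above on single 64-bit values: reduce
   signed division to unsigned division of magnitudes, negate the
   quotient when exactly one operand was negative, and give the
   remainder the sign of the dividend (truncating division).  The
   divisor is assumed nonzero and the smallest-value / -1 case is
   assumed to have been filtered out, as the overflow checks above do.  */
static void
sketch_signed_divmod (long long a, long long b, long long *quot,
		      long long *rem)
{
  unsigned long long ua = a < 0 ? -(unsigned long long) a
				: (unsigned long long) a;
  unsigned long long ub = b < 0 ? -(unsigned long long) b
				: (unsigned long long) b;
  unsigned long long q = ua / ub, r = ua % ub;
  *quot = ((a < 0) != (b < 0)) ? -(long long) q : (long long) q;
  *rem = a < 0 ? -(long long) r : (long long) r;
}
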
1882*38fd1498Szrj /*
1883*38fd1498Szrj  * Shifting, rotating and extraction.
1884*38fd1498Szrj  */
1885*38fd1498Szrj 
1886*38fd1498Szrj /* Left shift XVAL by SHIFT and store the result in VAL.  Return the
1887*38fd1498Szrj    number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
1888*38fd1498Szrj unsigned int
1889*38fd1498Szrj wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1890*38fd1498Szrj 		  unsigned int xlen, unsigned int precision,
1891*38fd1498Szrj 		  unsigned int shift)
1892*38fd1498Szrj {
1893*38fd1498Szrj   /* Split the shift into a whole-block shift and a subblock shift.  */
1894*38fd1498Szrj   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1895*38fd1498Szrj   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1896*38fd1498Szrj 
1897*38fd1498Szrj   /* The whole-block shift fills with zeros.  */
1898*38fd1498Szrj   unsigned int len = BLOCKS_NEEDED (precision);
1899*38fd1498Szrj   for (unsigned int i = 0; i < skip; ++i)
1900*38fd1498Szrj     val[i] = 0;
1901*38fd1498Szrj 
1902*38fd1498Szrj   /* It's easier to handle the simple block case specially.  */
1903*38fd1498Szrj   if (small_shift == 0)
1904*38fd1498Szrj     for (unsigned int i = skip; i < len; ++i)
1905*38fd1498Szrj       val[i] = safe_uhwi (xval, xlen, i - skip);
1906*38fd1498Szrj   else
1907*38fd1498Szrj     {
1908*38fd1498Szrj       /* The first unfilled output block is a left shift of the first
1909*38fd1498Szrj 	 block in XVAL.  The other output blocks contain bits from two
1910*38fd1498Szrj 	 consecutive input blocks.  */
1911*38fd1498Szrj       unsigned HOST_WIDE_INT carry = 0;
1912*38fd1498Szrj       for (unsigned int i = skip; i < len; ++i)
1913*38fd1498Szrj 	{
1914*38fd1498Szrj 	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1915*38fd1498Szrj 	  val[i] = (x << small_shift) | carry;
1916*38fd1498Szrj 	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1917*38fd1498Szrj 	}
1918*38fd1498Szrj     }
1919*38fd1498Szrj   return canonize (val, len, precision);
1920*38fd1498Szrj }
1921*38fd1498Szrj 
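/* A sketch of the shift decomposition above on 64-bit blocks, assuming
   X provides LEN - SKIP words (the real routine reads past XLEN via
   safe_uhwi).  The original writes the complementary shift as
   -SMALL_SHIFT % 64, which equals 64 - SMALL_SHIFT for nonzero
   SMALL_SHIFT; the sketch simply branches instead.  */
static void
sketch_lshift (unsigned long long *val, const unsigned long long *x,
	       unsigned int len, unsigned int shift)
{
  unsigned int skip = shift / 64, small = shift % 64;
  for (unsigned int i = 0; i < skip; i++)
    val[i] = 0;
  unsigned long long carry = 0;
  for (unsigned int i = skip; i < len; i++)
    {
      unsigned long long w = x[i - skip];
      val[i] = small ? (w << small) | carry : w;
      carry = small ? w >> (64 - small) : 0;
    }
}
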
1922*38fd1498Szrj /* Right shift XVAL by SHIFT and store the result in VAL.  Return the
1923*38fd1498Szrj    number of blocks in VAL.  The input has XPRECISION bits and the
1924*38fd1498Szrj    output has XPRECISION - SHIFT bits.  */
1925*38fd1498Szrj static unsigned int
1926*38fd1498Szrj rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1927*38fd1498Szrj 		     unsigned int xlen, unsigned int xprecision,
1928*38fd1498Szrj 		     unsigned int shift)
1929*38fd1498Szrj {
1930*38fd1498Szrj   /* Split the shift into a whole-block shift and a subblock shift.  */
1931*38fd1498Szrj   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1932*38fd1498Szrj   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1933*38fd1498Szrj 
1934*38fd1498Szrj   /* Work out how many blocks are needed to store the significant bits
1935*38fd1498Szrj      (excluding the upper zeros or signs).  */
1936*38fd1498Szrj   unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1937*38fd1498Szrj 
1938*38fd1498Szrj   /* It's easier to handle the simple block case specially.  */
1939*38fd1498Szrj   if (small_shift == 0)
1940*38fd1498Szrj     for (unsigned int i = 0; i < len; ++i)
1941*38fd1498Szrj       val[i] = safe_uhwi (xval, xlen, i + skip);
1942*38fd1498Szrj   else
1943*38fd1498Szrj     {
1944*38fd1498Szrj       /* Each output block but the last is a combination of two input blocks.
1945*38fd1498Szrj 	 The last block is a right shift of the last block in XVAL.  */
1946*38fd1498Szrj       unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1947*38fd1498Szrj       for (unsigned int i = 0; i < len; ++i)
1948*38fd1498Szrj 	{
1949*38fd1498Szrj 	  val[i] = curr >> small_shift;
1950*38fd1498Szrj 	  curr = safe_uhwi (xval, xlen, i + skip + 1);
1951*38fd1498Szrj 	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1952*38fd1498Szrj 	}
1953*38fd1498Szrj     }
1954*38fd1498Szrj   return len;
1955*38fd1498Szrj }
1956*38fd1498Szrj 
1957*38fd1498Szrj /* Logically right shift XVAL by SHIFT and store the result in VAL.
1958*38fd1498Szrj    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
1959*38fd1498Szrj    VAL has PRECISION bits.  */
1960*38fd1498Szrj unsigned int
1961*38fd1498Szrj wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1962*38fd1498Szrj 		   unsigned int xlen, unsigned int xprecision,
1963*38fd1498Szrj 		   unsigned int precision, unsigned int shift)
1964*38fd1498Szrj {
1965*38fd1498Szrj   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1966*38fd1498Szrj 
1967*38fd1498Szrj   /* The value we just created has precision XPRECISION - SHIFT.
1968*38fd1498Szrj      Zero-extend it to wider precisions.  */
1969*38fd1498Szrj   if (precision > xprecision - shift)
1970*38fd1498Szrj     {
1971*38fd1498Szrj       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1972*38fd1498Szrj       if (small_prec)
1973*38fd1498Szrj 	val[len - 1] = zext_hwi (val[len - 1], small_prec);
1974*38fd1498Szrj       else if (val[len - 1] < 0)
1975*38fd1498Szrj 	{
1976*38fd1498Szrj 	  /* Add a new block with a zero. */
1977*38fd1498Szrj 	  val[len++] = 0;
1978*38fd1498Szrj 	  return len;
1979*38fd1498Szrj 	}
1980*38fd1498Szrj     }
1981*38fd1498Szrj   return canonize (val, len, precision);
1982*38fd1498Szrj }
1983*38fd1498Szrj 
1984*38fd1498Szrj /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1985*38fd1498Szrj    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
1986*38fd1498Szrj    VAL has PRECISION bits.  */
1987*38fd1498Szrj unsigned int
1988*38fd1498Szrj wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1989*38fd1498Szrj 		   unsigned int xlen, unsigned int xprecision,
1990*38fd1498Szrj 		   unsigned int precision, unsigned int shift)
1991*38fd1498Szrj {
1992*38fd1498Szrj   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1993*38fd1498Szrj 
1994*38fd1498Szrj   /* The value we just created has precision XPRECISION - SHIFT.
1995*38fd1498Szrj      Sign-extend it to wider types.  */
1996*38fd1498Szrj   if (precision > xprecision - shift)
1997*38fd1498Szrj     {
1998*38fd1498Szrj       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1999*38fd1498Szrj       if (small_prec)
2000*38fd1498Szrj 	val[len - 1] = sext_hwi (val[len - 1], small_prec);
2001*38fd1498Szrj     }
2002*38fd1498Szrj   return canonize (val, len, precision);
2003*38fd1498Szrj }
2004*38fd1498Szrj 
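/* The extension step shared by the two routines above, sketched on one
   64-bit block: after the shift the value has XPRECISION - SHIFT
   significant bits, and a partial top block is zero-extended for the
   logical shift or sign-extended for the arithmetic one.  The signed
   sketch relies on arithmetic right shift of negative values, just as
   GCC's own sext_hwi does.  */
static unsigned long long
sketch_zext (unsigned long long x, unsigned int prec)	/* 0 < prec <= 64.  */
{
  return prec == 64 ? x : x & ((1ull << prec) - 1);
}

static long long
sketch_sext (long long x, unsigned int prec)		/* 0 < prec <= 64.  */
{
  int s = 64 - prec;
  return (long long) ((unsigned long long) x << s) >> s;
}
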
2005*38fd1498Szrj /* Return the number of leading (upper) zeros in X.  */
2006*38fd1498Szrj int
2007*38fd1498Szrj wi::clz (const wide_int_ref &x)
2008*38fd1498Szrj {
2009*38fd1498Szrj   /* Calculate how many bits there are above the highest represented block.  */
2010*38fd1498Szrj   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2011*38fd1498Szrj 
2012*38fd1498Szrj   unsigned HOST_WIDE_INT high = x.uhigh ();
2013*38fd1498Szrj   if (count < 0)
2014*38fd1498Szrj     /* The upper -COUNT bits of HIGH are not part of the value.
2015*38fd1498Szrj        Clear them out.  */
2016*38fd1498Szrj     high = (high << -count) >> -count;
2017*38fd1498Szrj   else if (x.sign_mask () < 0)
2018*38fd1498Szrj     /* The upper bit is set, so there are no leading zeros.  */
2019*38fd1498Szrj     return 0;
2020*38fd1498Szrj 
2021*38fd1498Szrj   /* We don't need to look below HIGH.  Either HIGH is nonzero,
2022*38fd1498Szrj      or the top bit of the block below is nonzero; clz_hwi is
2023*38fd1498Szrj      HOST_BITS_PER_WIDE_INT in the latter case.  */
2024*38fd1498Szrj   return count + clz_hwi (high);
2025*38fd1498Szrj }
2026*38fd1498Szrj 
2027*38fd1498Szrj /* Return the number of redundant sign bits in X.  (That is, the number
2028*38fd1498Szrj    of bits immediately below the sign bit that have the same value as
2029*38fd1498Szrj    the sign bit.)  */
2030*38fd1498Szrj int
2031*38fd1498Szrj wi::clrsb (const wide_int_ref &x)
2032*38fd1498Szrj {
2033*38fd1498Szrj   /* Calculate how many bits there are above the highest represented block.  */
2034*38fd1498Szrj   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2035*38fd1498Szrj 
2036*38fd1498Szrj   unsigned HOST_WIDE_INT high = x.uhigh ();
2037*38fd1498Szrj   unsigned HOST_WIDE_INT mask = -1;
2038*38fd1498Szrj   if (count < 0)
2039*38fd1498Szrj     {
2040*38fd1498Szrj       /* The upper -COUNT bits of HIGH are not part of the value.
2041*38fd1498Szrj 	 Clear them from both MASK and HIGH.  */
2042*38fd1498Szrj       mask >>= -count;
2043*38fd1498Szrj       high &= mask;
2044*38fd1498Szrj     }
2045*38fd1498Szrj 
2046*38fd1498Szrj   /* If the top bit is 1, count the number of leading 1s.  If the top
2047*38fd1498Szrj      bit is zero, count the number of leading zeros.  */
2048*38fd1498Szrj   if (high > mask / 2)
2049*38fd1498Szrj     high ^= mask;
2050*38fd1498Szrj 
2051*38fd1498Szrj   /* There are no sign bits below the top block, so we don't need to look
2052*38fd1498Szrj      beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2053*38fd1498Szrj      HIGH is 0.  */
2054*38fd1498Szrj   return count + clz_hwi (high) - 1;
2055*38fd1498Szrj }
2056*38fd1498Szrj 
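/* A sketch of the complement trick above on a single 64-bit word: when
   the sign bit is set, complementing the word turns leading ones into
   leading zeros, so one clz counts the redundant sign-bit copies; the
   final - 1 excludes the sign bit itself.  */
static int
sketch_clrsb (long long x)
{
  unsigned long long u = (unsigned long long) x;
  if (x < 0)
    u = ~u;
  /* __builtin_clzll is undefined at 0, so 0 and -1 (where U is 0) are
     treated explicitly, mirroring clz_hwi's behavior.  */
  return (u == 0 ? 64 : __builtin_clzll (u)) - 1;
}
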
2057*38fd1498Szrj /* Return the number of trailing (lower) zeros in X.  */
2058*38fd1498Szrj int
2059*38fd1498Szrj wi::ctz (const wide_int_ref &x)
2060*38fd1498Szrj {
2061*38fd1498Szrj   if (x.len == 1 && x.ulow () == 0)
2062*38fd1498Szrj     return x.precision;
2063*38fd1498Szrj 
2064*38fd1498Szrj   /* Having dealt with the zero case, there must be a block with a
2065*38fd1498Szrj      nonzero bit.  We don't care about the bits above the first 1.  */
2066*38fd1498Szrj   unsigned int i = 0;
2067*38fd1498Szrj   while (x.val[i] == 0)
2068*38fd1498Szrj     ++i;
2069*38fd1498Szrj   return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2070*38fd1498Szrj }
2071*38fd1498Szrj 
2072*38fd1498Szrj /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2073*38fd1498Szrj    return -1.  */
2074*38fd1498Szrj int
2075*38fd1498Szrj wi::exact_log2 (const wide_int_ref &x)
2076*38fd1498Szrj {
2077*38fd1498Szrj   /* Reject cases where there are implicit -1 blocks above HIGH.  */
2078*38fd1498Szrj   if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2079*38fd1498Szrj     return -1;
2080*38fd1498Szrj 
2081*38fd1498Szrj   /* Set CRUX to the index of the entry that should be nonzero.
2082*38fd1498Szrj      If the top block is zero then the next lowest block (if any)
2083*38fd1498Szrj      must have the high bit set.  */
2084*38fd1498Szrj   unsigned int crux = x.len - 1;
2085*38fd1498Szrj   if (crux > 0 && x.val[crux] == 0)
2086*38fd1498Szrj     crux -= 1;
2087*38fd1498Szrj 
2088*38fd1498Szrj   /* Check that all lower blocks are zero.  */
2089*38fd1498Szrj   for (unsigned int i = 0; i < crux; ++i)
2090*38fd1498Szrj     if (x.val[i] != 0)
2091*38fd1498Szrj       return -1;
2092*38fd1498Szrj 
2093*38fd1498Szrj   /* Get a zero-extended form of block CRUX.  */
2094*38fd1498Szrj   unsigned HOST_WIDE_INT hwi = x.val[crux];
2095*38fd1498Szrj   if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2096*38fd1498Szrj     hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2097*38fd1498Szrj 
2098*38fd1498Szrj   /* Now it's down to whether HWI is a power of 2.  */
2099*38fd1498Szrj   int res = ::exact_log2 (hwi);
2100*38fd1498Szrj   if (res >= 0)
2101*38fd1498Szrj     res += crux * HOST_BITS_PER_WIDE_INT;
2102*38fd1498Szrj   return res;
2103*38fd1498Szrj }
2104*38fd1498Szrj 
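/* A sketch of the block-wise test above for an uncompressed array of
   64-bit blocks: the value is a power of 2 iff exactly one block is
   nonzero and that block is itself a power of 2; the answer then
   offsets the single-word log by 64 bits per lower block.  */
static int
sketch_exact_log2 (const unsigned long long *blocks, unsigned int len)
{
  unsigned int crux = len;
  for (unsigned int i = 0; i < len; i++)
    if (blocks[i] != 0)
      {
	if (crux != len)
	  return -1;		/* More than one nonzero block.  */
	crux = i;
      }
  if (crux == len)
    return -1;			/* Zero.  */
  unsigned long long w = blocks[crux];
  if ((w & (w - 1)) != 0)
    return -1;			/* Not a power of 2.  */
  return (int) (crux * 64) + __builtin_ctzll (w);
}
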
2105*38fd1498Szrj /* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
2106*38fd1498Szrj int
2107*38fd1498Szrj wi::floor_log2 (const wide_int_ref &x)
2108*38fd1498Szrj {
2109*38fd1498Szrj   return x.precision - 1 - clz (x);
2110*38fd1498Szrj }
2111*38fd1498Szrj 
2112*38fd1498Szrj /* Return the index of the first (lowest) set bit in X, counting from 1.
2113*38fd1498Szrj    Return 0 if X is 0.  */
2114*38fd1498Szrj int
2115*38fd1498Szrj wi::ffs (const wide_int_ref &x)
2116*38fd1498Szrj {
2117*38fd1498Szrj   return eq_p (x, 0) ? 0 : ctz (x) + 1;
2118*38fd1498Szrj }
2119*38fd1498Szrj 
2120*38fd1498Szrj /* Return true if sign-extending X to have precision PRECISION would give
2121*38fd1498Szrj    the minimum signed value at that precision.  */
2122*38fd1498Szrj bool
2123*38fd1498Szrj wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2124*38fd1498Szrj {
2125*38fd1498Szrj   return ctz (x) + 1 == int (precision);
2126*38fd1498Szrj }
2127*38fd1498Szrj 
2128*38fd1498Szrj /* Return true if X represents the minimum signed value.  */
2129*38fd1498Szrj bool
2130*38fd1498Szrj wi::only_sign_bit_p (const wide_int_ref &x)
2131*38fd1498Szrj {
2132*38fd1498Szrj   return only_sign_bit_p (x, x.precision);
2133*38fd1498Szrj }
2134*38fd1498Szrj 
2135*38fd1498Szrj /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
2136*38fd1498Szrj    down to the previous value that has no bits set outside MASK.
2137*38fd1498Szrj    This rounding wraps for signed values if VAL is negative and
2138*38fd1498Szrj    the top bit of MASK is clear.
2139*38fd1498Szrj 
2140*38fd1498Szrj    For example, round_down_for_mask (6, 0xf1) would give 1 and
2141*38fd1498Szrj    round_down_for_mask (24, 0xf1) would give 17.  */
2142*38fd1498Szrj 
2143*38fd1498Szrj wide_int
2144*38fd1498Szrj wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
2145*38fd1498Szrj {
2146*38fd1498Szrj   /* Get the bits in VAL that are outside the mask.  */
2147*38fd1498Szrj   wide_int extra_bits = wi::bit_and_not (val, mask);
2148*38fd1498Szrj   if (extra_bits == 0)
2149*38fd1498Szrj     return val;
2150*38fd1498Szrj 
2151*38fd1498Szrj   /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
2152*38fd1498Szrj      below that bit.  */
2153*38fd1498Szrj   unsigned int precision = val.get_precision ();
2154*38fd1498Szrj   wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
2155*38fd1498Szrj 				  false, precision);
2156*38fd1498Szrj 
2157*38fd1498Szrj   /* Clear the bits that aren't in MASK, but ensure that all bits
2158*38fd1498Szrj      in MASK below the top cleared bit are set.  */
2159*38fd1498Szrj   return (val & mask) | (mask & lower_mask);
2160*38fd1498Szrj }
2161*38fd1498Szrj 
2162*38fd1498Szrj /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
2163*38fd1498Szrj    up to the next value that has no bits set outside MASK.  The rounding
2164*38fd1498Szrj    wraps if there are no suitable values greater than VAL.
2165*38fd1498Szrj 
2166*38fd1498Szrj    For example, round_up_for_mask (6, 0xf1) would give 16 and
2167*38fd1498Szrj    round_up_for_mask (24, 0xf1) would give 32.  */
2168*38fd1498Szrj 
2169*38fd1498Szrj wide_int
2170*38fd1498Szrj wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
2171*38fd1498Szrj {
2172*38fd1498Szrj   /* Get the bits in VAL that are outside the mask.  */
2173*38fd1498Szrj   wide_int extra_bits = wi::bit_and_not (val, mask);
2174*38fd1498Szrj   if (extra_bits == 0)
2175*38fd1498Szrj     return val;
2176*38fd1498Szrj 
2177*38fd1498Szrj   /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
2178*38fd1498Szrj   unsigned int precision = val.get_precision ();
2179*38fd1498Szrj   wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
2180*38fd1498Szrj 				  true, precision);
2181*38fd1498Szrj 
2182*38fd1498Szrj   /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
2183*38fd1498Szrj   upper_mask &= mask;
2184*38fd1498Szrj 
2185*38fd1498Szrj   /* Conceptually we need to:
2186*38fd1498Szrj 
2187*38fd1498Szrj      - clear bits of VAL outside UPPER_MASK
2188*38fd1498Szrj      - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
2189*38fd1498Szrj      - propagate the carry through the bits of VAL in UPPER_MASK
2190*38fd1498Szrj 
2191*38fd1498Szrj      If (~VAL & UPPER_MASK) is nonzero, the carry eventually
2192*38fd1498Szrj      reaches that bit and the process leaves all lower bits clear.
2193*38fd1498Szrj      If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
2194*38fd1498Szrj   wide_int tmp = wi::bit_and_not (upper_mask, val);
2195*38fd1498Szrj 
2196*38fd1498Szrj   return (val | tmp) & -tmp;
2197*38fd1498Szrj }
2198*38fd1498Szrj 
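/* A standalone sketch of the carry trick above on 32-bit values, which
   reproduces the examples in the comments: rounding 6 up within mask
   0xf1 yields 16, and rounding 24 up yields 32.  */
static unsigned int
sketch_round_up_for_mask (unsigned int val, unsigned int mask)
{
  unsigned int extra = val & ~mask;	/* Bits of VAL outside MASK.  */
  if (extra == 0)
    return val;
  /* Mask bits strictly above the highest stray bit of VAL.  */
  unsigned int top = 31 - __builtin_clz (extra);
  unsigned int upper = (top == 31 ? 0 : ~0u << (top + 1)) & mask;
  /* Add the lowest such bit and propagate the carry; everything below
     the bit it lands on is flushed to zero, as described above.  */
  unsigned int tmp = upper & ~val;
  return (val | tmp) & -tmp;
}
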
2199*38fd1498Szrj /*
2200*38fd1498Szrj  * Private utilities.
2201*38fd1498Szrj  */
2202*38fd1498Szrj 
2203*38fd1498Szrj void gt_ggc_mx (widest_int *) { }
2204*38fd1498Szrj void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2205*38fd1498Szrj void gt_pch_nx (widest_int *) { }
2206*38fd1498Szrj 
2207*38fd1498Szrj template void wide_int::dump () const;
2208*38fd1498Szrj template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2209*38fd1498Szrj template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2210*38fd1498Szrj template void offset_int::dump () const;
2211*38fd1498Szrj template void widest_int::dump () const;
2212*38fd1498Szrj 
2213*38fd1498Szrj /* We could add all the above ::dump variants here, but wide_int and
2214*38fd1498Szrj    widest_int should handle the common cases.  Besides, you can always
2215*38fd1498Szrj    call the dump method directly.  */
2216*38fd1498Szrj 
2217*38fd1498Szrj DEBUG_FUNCTION void
2218*38fd1498Szrj debug (const wide_int &ref)
2219*38fd1498Szrj {
2220*38fd1498Szrj   ref.dump ();
2221*38fd1498Szrj }
2222*38fd1498Szrj 
2223*38fd1498Szrj DEBUG_FUNCTION void
2224*38fd1498Szrj debug (const wide_int *ptr)
2225*38fd1498Szrj {
2226*38fd1498Szrj   if (ptr)
2227*38fd1498Szrj     debug (*ptr);
2228*38fd1498Szrj   else
2229*38fd1498Szrj     fprintf (stderr, "<nil>\n");
2230*38fd1498Szrj }
2231*38fd1498Szrj 
2232*38fd1498Szrj DEBUG_FUNCTION void
2233*38fd1498Szrj debug (const widest_int &ref)
2234*38fd1498Szrj {
2235*38fd1498Szrj   ref.dump ();
2236*38fd1498Szrj }
2237*38fd1498Szrj 
2238*38fd1498Szrj DEBUG_FUNCTION void
2239*38fd1498Szrj debug (const widest_int *ptr)
2240*38fd1498Szrj {
2241*38fd1498Szrj   if (ptr)
2242*38fd1498Szrj     debug (*ptr);
2243*38fd1498Szrj   else
2244*38fd1498Szrj     fprintf (stderr, "<nil>\n");
2245*38fd1498Szrj }
2246*38fd1498Szrj 
2247*38fd1498Szrj #if CHECKING_P
2248*38fd1498Szrj 
2249*38fd1498Szrj namespace selftest {
2250*38fd1498Szrj 
2251*38fd1498Szrj /* Selftests for wide ints.  We run these multiple times, once per type.  */
2252*38fd1498Szrj 
2253*38fd1498Szrj /* Helper function for building a test value.  */
2254*38fd1498Szrj 
2255*38fd1498Szrj template <class VALUE_TYPE>
2256*38fd1498Szrj static VALUE_TYPE
2257*38fd1498Szrj from_int (int i);
2258*38fd1498Szrj 
2259*38fd1498Szrj /* Specializations of the fixture for each wide-int type.  */
2260*38fd1498Szrj 
2261*38fd1498Szrj /* Specialization for VALUE_TYPE == wide_int.  */
2262*38fd1498Szrj 
2263*38fd1498Szrj template <>
2264*38fd1498Szrj wide_int
2265*38fd1498Szrj from_int (int i)
2266*38fd1498Szrj {
2267*38fd1498Szrj   return wi::shwi (i, 32);
2268*38fd1498Szrj }
2269*38fd1498Szrj 
2270*38fd1498Szrj /* Specialization for VALUE_TYPE == offset_int.  */
2271*38fd1498Szrj 
2272*38fd1498Szrj template <>
2273*38fd1498Szrj offset_int
2274*38fd1498Szrj from_int (int i)
2275*38fd1498Szrj {
2276*38fd1498Szrj   return offset_int (i);
2277*38fd1498Szrj }
2278*38fd1498Szrj 
2279*38fd1498Szrj /* Specialization for VALUE_TYPE == widest_int.  */
2280*38fd1498Szrj 
2281*38fd1498Szrj template <>
2282*38fd1498Szrj widest_int
2283*38fd1498Szrj from_int (int i)
2284*38fd1498Szrj {
2285*38fd1498Szrj   return widest_int (i);
2286*38fd1498Szrj }
2287*38fd1498Szrj 
2288*38fd1498Szrj /* Verify that print_dec (WI, ..., SGN) gives the expected string
2289*38fd1498Szrj    representation (using base 10).  */
2290*38fd1498Szrj 
2291*38fd1498Szrj static void
2292*38fd1498Szrj assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
2293*38fd1498Szrj {
2294*38fd1498Szrj   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2295*38fd1498Szrj   print_dec (wi, buf, sgn);
2296*38fd1498Szrj   ASSERT_STREQ (expected, buf);
2297*38fd1498Szrj }
2298*38fd1498Szrj 
2299*38fd1498Szrj /* Likewise for base 16.  */
2300*38fd1498Szrj 
2301*38fd1498Szrj static void
2302*38fd1498Szrj assert_hexeq (const char *expected, const wide_int_ref &wi)
2303*38fd1498Szrj {
2304*38fd1498Szrj   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2305*38fd1498Szrj   print_hex (wi, buf);
2306*38fd1498Szrj   ASSERT_STREQ (expected, buf);
2307*38fd1498Szrj }
2308*38fd1498Szrj 
2309*38fd1498Szrj /* Test cases.  */
2310*38fd1498Szrj 
2311*38fd1498Szrj /* Verify that print_dec and print_hex work for VALUE_TYPE.  */
2312*38fd1498Szrj 
2313*38fd1498Szrj template <class VALUE_TYPE>
2314*38fd1498Szrj static void
2315*38fd1498Szrj test_printing ()
2316*38fd1498Szrj {
2317*38fd1498Szrj   VALUE_TYPE a = from_int<VALUE_TYPE> (42);
2318*38fd1498Szrj   assert_deceq ("42", a, SIGNED);
2319*38fd1498Szrj   assert_hexeq ("0x2a", a);
2320*38fd1498Szrj   assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
2321*38fd1498Szrj   assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
2322*38fd1498Szrj   assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
2323*38fd1498Szrj   if (WIDE_INT_MAX_PRECISION > 128)
2324*38fd1498Szrj     {
2325*38fd1498Szrj       assert_hexeq ("0x20000000000000000fffffffffffffffe",
2326*38fd1498Szrj 		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
2327*38fd1498Szrj       assert_hexeq ("0x200000000000004000123456789abcdef",
2328*38fd1498Szrj 		    wi::lshift (1, 129) + wi::lshift (1, 74)
2329*38fd1498Szrj 		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
2330*38fd1498Szrj     }
2331*38fd1498Szrj }
2332*38fd1498Szrj 
2333*38fd1498Szrj /* Verify that various operations work correctly for VALUE_TYPE,
2334*38fd1498Szrj    unary and binary, using both function syntax, and
2335*38fd1498Szrj    overloaded-operators.  */
2336*38fd1498Szrj 
2337*38fd1498Szrj template <class VALUE_TYPE>
2338*38fd1498Szrj static void
2339*38fd1498Szrj test_ops ()
2340*38fd1498Szrj {
2341*38fd1498Szrj   VALUE_TYPE a = from_int<VALUE_TYPE> (7);
2342*38fd1498Szrj   VALUE_TYPE b = from_int<VALUE_TYPE> (3);
2343*38fd1498Szrj 
2344*38fd1498Szrj   /* Using functions.  */
2345*38fd1498Szrj   assert_deceq ("-7", wi::neg (a), SIGNED);
2346*38fd1498Szrj   assert_deceq ("10", wi::add (a, b), SIGNED);
2347*38fd1498Szrj   assert_deceq ("4", wi::sub (a, b), SIGNED);
2348*38fd1498Szrj   assert_deceq ("-4", wi::sub (b, a), SIGNED);
2349*38fd1498Szrj   assert_deceq ("21", wi::mul (a, b), SIGNED);
2350*38fd1498Szrj 
2351*38fd1498Szrj   /* Using operators.  */
2352*38fd1498Szrj   assert_deceq ("-7", -a, SIGNED);
2353*38fd1498Szrj   assert_deceq ("10", a + b, SIGNED);
2354*38fd1498Szrj   assert_deceq ("4", a - b, SIGNED);
2355*38fd1498Szrj   assert_deceq ("-4", b - a, SIGNED);
2356*38fd1498Szrj   assert_deceq ("21", a * b, SIGNED);
2357*38fd1498Szrj }
2358*38fd1498Szrj 
2359*38fd1498Szrj /* Verify that various comparisons work correctly for VALUE_TYPE.  */
2360*38fd1498Szrj 
2361*38fd1498Szrj template <class VALUE_TYPE>
2362*38fd1498Szrj static void
2363*38fd1498Szrj test_comparisons ()
2364*38fd1498Szrj {
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* comparison */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}
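
/* The checks above use the signed entry points (the "s" suffix).
   Unsigned counterparts (ltu_p, leu_p, gtu_p, geu_p, cmpu) exist as
   well; a brief illustrative sketch (the helper name is invented):  */

static void ATTRIBUTE_UNUSED
comparison_usage_sketch ()
{
  wide_int m1 = wi::shwi (-1, 32);	/* All-ones in 32 bits.  */
  wide_int one = wi::shwi (1, 32);
  gcc_assert (wi::lts_p (m1, one));	/* Signed: -1 < 1.  */
  gcc_assert (wi::gtu_p (m1, one));	/* Unsigned: 0xffffffff > 1.  */
}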

/* Run all of the selftests, using the given VALUE_TYPE.  */

template <class VALUE_TYPE>
static void
run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}

/* Test overflow conditions.  */

static void
test_overflow ()
{
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
	int prec = precs[i];
	int offset = offsets[j];
	bool overflow;
	wide_int sum, diff;

	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow, offset == 0);

	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow, offset == 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED),
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, -offset);
	ASSERT_EQ (overflow, offset != 0);

	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED) - 1,
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, 1 - offset);
	ASSERT_EQ (overflow, offset > 1);
      }
}
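
/* A minimal sketch of the overflow protocol tested above
   (illustrative only; not part of the test suite): arithmetic wraps
   within the precision and the out-parameter reports whether
   wrapping occurred.  */

static void ATTRIBUTE_UNUSED
overflow_usage_sketch ()
{
  bool overflow;
  /* 32-bit unsigned maximum plus 1 wraps to 0 and sets the flag.  */
  wide_int sum = wi::add (wi::max_value (32, UNSIGNED), 1,
			  UNSIGNED, &overflow);
  gcc_assert (sum == 0 && overflow);
}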

/* Test the round_{down,up}_for_mask functions.
   round_down_for_mask (VAL, MASK) yields the largest value <= VAL
   whose set bits all lie within MASK; round_up_for_mask yields the
   smallest such value >= VAL, wrapping to 0 if no suitable value
   exists within the precision.  */

static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
					 wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
					     wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
					   wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
					   wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
					 wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
					   wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
				       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
				       wi::shwi (0xabc, prec)));
}
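
/* A worked example of the semantics checked above (illustrative only;
   the helper name is invented): 6 = 0b0110 has bits outside the mask
   0xf1 = 0b11110001, and the nearest values composed solely of mask
   bits are 1 below and 16 above.  */

static void ATTRIBUTE_UNUSED
round_for_mask_usage_sketch ()
{
  unsigned int prec = 18;
  gcc_assert (wi::round_down_for_mask (wi::shwi (6, prec),
				       wi::shwi (0xf1, prec)) == 1);
  gcc_assert (wi::round_up_for_mask (wi::shwi (6, prec),
				     wi::shwi (0xf1, prec)) == 16);
}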

/* Run all of the selftests within this file, for all value types.  */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
}
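
/* Note: wide_int_cc_tests is expected to be invoked from GCC's
   central selftest runner rather than from within this file; the
   exact call site lives outside this translation unit.  */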

} // namespace selftest
#endif /* CHECKING_P */