/* Operations with very long integers.
   Copyright (C) 2012-2016 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"


#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based. See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif

static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};

/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) \
  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)

/* Return the value of VAL[I] if I < LEN; otherwise return 0 or -1
   based on the top existing bit of VAL.  */

static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
  return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}

/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit of block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
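
/* A worked example of canonization (an illustrative sketch, not part
   of the original sources): with 64-bit HWI blocks and PRECISION == 128,
   the array {1, 0} canonizes to length 1, since the upper zero block is
   a redundant sign extension of the positive low block.  By contrast
   {-1, 0} must keep both blocks: dropping the zero block would leave a
   value that reads as -1 rather than 2^64 - 1.  */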

/* VAL[0] is the unsigned result of an operation.  Canonize it by adding
   another 0 block if needed, and return number of blocks needed.  */

static inline unsigned int
canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
{
  if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    {
      val[1] = 0;
      return 2;
    }
  return 1;
}

/*
 * Conversion routines in and out of wide_int.
 */

/* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
   result for an integer with precision PRECISION.  Return the length
   of VAL (after any canonization).  */
unsigned int
wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, bool need_canon)
{
  for (unsigned i = 0; i < xlen; i++)
    val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
}

/* Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will
   be read according to the byte endianness and word endianness of the
   target.  Only the lower BUFFER_LEN bytes of the result are set; the
   remaining high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
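
/* An illustrative sketch (not from the original sources): on a
   little-endian target with 64-bit words, the 4-byte buffer
   {0x78, 0x56, 0x34, 0x12} produces a 32-bit wide_int whose low block
   is 0x12345678, while a big-endian target reads the same buffer as
   0x78563412.  */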

/* Set RESULT from X; the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones' complement to avoid the 0x80..0 edge case, whose
	 negation unary minus cannot represent.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}

/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}

/*
 * Largest and smallest values in a mode.
 */

/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double_ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
wide_int
wi::max_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
}

/* Return the smallest SGNed number that is representable in PRECISION
   bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
}
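
/* For instance (illustrative only): with PRECISION == 8, max_value
   returns 255 for UNSIGNED and 127 (0x7f) for SIGNED, while min_value
   returns 0 and -128 (0x80) respectively.  */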

/*
 * Public utilities.
 */

/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}

/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
static inline HOST_WIDE_INT
selt (const HOST_WIDE_INT *a, unsigned int len,
      unsigned int blocks_needed, unsigned int small_prec,
      unsigned int index, signop sgn)
{
  HOST_WIDE_INT val;
  if (index < len)
    val = a[index];
  else if (index < blocks_needed || sgn == SIGNED)
    /* Signed or within the precision.  */
    val = SIGN_MASK (a[len - 1]);
  else
    /* Unsigned extension beyond the precision.  */
    val = 0;

  if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
  else
    return val;
}

/* Find the highest bit represented in a wide int.  This will in
   general have the same value as the sign bit.  */
static inline HOST_WIDE_INT
top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
{
  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
  unsigned HOST_WIDE_INT val = a[len - 1];
  if (excess > 0)
    val <<= excess;
  return val >> (HOST_BITS_PER_WIDE_INT - 1);
}
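
/* A sketch of the shift trick above (illustrative only): for PREC == 4
   and a single 64-bit block holding 0xc, EXCESS is 60; the left shift
   moves bit 3 of the original value into the MSB of the block, and the
   logical right shift then returns 1, the top bit within the
   precision.  */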

/*
 * Comparisons; note that only equality is an operator.  The other
 * comparisons cannot be operators since they are inherently signed or
 * unsigned and C++ has no such operators.
 */

/* Return true if OP0 == OP1.  */
bool
wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int prec)
{
  int l0 = op0len - 1;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  if (op0len != op1len)
    return false;

  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
	return false;
      l0--;
    }

  while (l0 >= 0)
    if (op0[l0] != op1[l0])
      return false;
    else
      l0--;

  return true;
}

/* Return true if OP0 < OP1 using signed comparisons.  */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int precision,
		 const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.  */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int precision,
		const HOST_WIDE_INT *op1, unsigned int op1len)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}

/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}
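
/* Sketch (illustrative only): sign-extending the single 64-bit block
   0x80 from OFFSET == 8 within a 64-bit precision gives
   0xffffffffffffff80; sext_hwi smears bit 7 through the rest of the
   block before canonization.  */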

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}

/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  if (start + width >= precision)
    width = precision - start;

  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
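
/* For example (a sketch, not from the original sources): with 16-bit
   values x == 0xff00, y == 0xab, START == 4 and WIDTH == 8, the mask
   is 0x0ff0, Y shifted into place is 0x0ab0, and the result is
   (0x0ab0 & 0x0ff0) | (0xff00 & ~0x0ff0) == 0xfab0.  */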

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}
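
/* Sketch (illustrative only): wi::mask with WIDTH == 4, NEGATE false
   and PREC == 8 produces the single block 0xf; with NEGATE true it
   produces ~0xf, which within the 8-bit precision reads as 0xf0.  */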

/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  return i;
}
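
/* Sketch (illustrative only): wi::shifted_mask with START == 4,
   WIDTH == 8, NEGATE false and PREC == 16 takes the "case 000111000"
   branch above and yields the single block 0x0ff0; with NEGATE true
   it yields ~0x0ff0, i.e. 0xf00f within the 16-bit precision.  */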

/*
 * Logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
unsigned int
wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		   unsigned int op0len, const HOST_WIDE_INT *op1,
		   unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] & ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
unsigned int
wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	      unsigned int op0len, const HOST_WIDE_INT *op1,
	      unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask != 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
unsigned int
wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
		  unsigned int op0len, const HOST_WIDE_INT *op1,
		  unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask != 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = ~op1[l1];
	      l1--;
	    }
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] | ~op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}

/* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
unsigned int
wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  wide_int result;
  int l0 = op0len - 1;
  int l1 = op1len - 1;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      while (l0 > l1)
	{
	  val[l0] = op0[l0] ^ op1mask;
	  l0--;
	}
    }

  if (l1 > l0)
    {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      while (l1 > l0)
	{
	  val[l1] = op0mask ^ op1[l1];
	  l1--;
	}
    }

  while (l0 >= 0)
    {
      val[l0] = op0[l0] ^ op1[l0];
      l0--;
    }

  return canonize (val, len, prec);
}

/*
 * Math.
 */

/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
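
/* A worked sketch of the signed overflow test above (illustrative
   only): in 8-bit arithmetic 100 + 100 stores 200 (0xc8) in VAL.
   Both operands have a clear sign bit while the result has it set, so
   (val ^ o0) & (val ^ o1) has bit 7 set; shifting that bit to the top
   of the HWI makes the expression negative, signalling overflow.  */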

/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
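
/* Sketch (illustrative only, 64-bit HWIs): unpacking the single block
   0x0000000100000002 with PREC == 64 yields the half-blocks
   {0x00000002, 0x00000001}, followed by zero half-blocks up to OUT_LEN
   because the sign mask of a positive input is 0.  */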

/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks and PRECISION is the precision of the result.  Return the
   number of blocks in the canonicalized result.  */
static unsigned int
wi_pack (HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len, unsigned int precision)
{
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);

  while (i + 1 < in_len)
    {
      result[j++] = ((unsigned HOST_WIDE_INT) input[i]
		     | ((unsigned HOST_WIDE_INT) input[i + 1]
			<< HOST_BITS_PER_HALF_WIDE_INT));
      i += 2;
    }

  /* Handle the case where in_len is odd.  For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT) input[i];
  else if (j < blocks_needed)
    result[j++] = 0;
  return canonize (result, j, precision);
}

/* Multiply OP1 by OP2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by VRP.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  *overflow = true;
    }

  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}
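
/* The core loop above is schoolbook multiplication on half-width
   digits.  In decimal terms (an illustrative sketch only): multiplying
   the digit vectors u = {3, 1} (i.e. 13) and v = {2, 0} (i.e. 2)
   accumulates r = {6, 2, 0, 0} (i.e. 26), with K carrying any overflow
   of a digit product into the next column.  */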

/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
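
/* Sketch (illustrative only): for a 16-bit value represented by the
   single 64-bit block -1, COUNT starts at 16 - 64 = -48, so the high
   block is shifted left by 48 to discard bits above the precision and
   popcount_hwi counts the remaining 16 set bits.  */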

/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}


/*
 * Division and mod.
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U; however,
   there needs to be at least one extra element of B_DIVIDEND
   allocated.  N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT, which is half the size of
     a HOST_WIDE_INT, stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
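
/* The loop above is Knuth's Algorithm D as adapted in Hacker's
   Delight: after normalization each quotient digit QHAT is estimated
   from the top two dividend digits divided by the top divisor digit;
   the estimate can be at most two too large, which the "again" test
   and the add-back step after the multiply-and-subtract correct.  */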
1692 
1693 
1694 /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1695    the result.  If QUOTIENT is nonnull, store the value of the quotient
1696    there and return the number of blocks in it.  The return value is
1697    not defined otherwise.  If REMAINDER is nonnull, store the value
1698    of the remainder there and store the number of blocks in
1699    *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
1700    the division overflowed.  */
1701 unsigned int
1702 wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1703 		     HOST_WIDE_INT *remainder,
1704 		     const HOST_WIDE_INT *dividend_val,
1705 		     unsigned int dividend_len, unsigned int dividend_prec,
1706 		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1707 		     unsigned int divisor_prec, signop sgn,
1708 		     bool *oflow)
1709 {
1710   unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1711   unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1712   unsigned HOST_HALF_WIDE_INT
1713     b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1714   unsigned HOST_HALF_WIDE_INT
1715     b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1716   unsigned HOST_HALF_WIDE_INT
1717     b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1718   unsigned HOST_HALF_WIDE_INT
1719     b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1720   unsigned int m, n;
1721   bool dividend_neg = false;
1722   bool divisor_neg = false;
1723   bool overflow = false;
1724   wide_int neg_dividend, neg_divisor;
1725 
1726   wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1727 					   dividend_prec);
1728   wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1729 					  divisor_prec);
1730   if (divisor == 0)
1731     overflow = true;
1732 
1733   /* The smallest signed number / -1 causes overflow.  The dividend_len
1734      check is for speed rather than correctness.  */
1735   if (sgn == SIGNED
1736       && dividend_len == BLOCKS_NEEDED (dividend_prec)
1737       && divisor == -1
1738       && wi::only_sign_bit_p (dividend))
1739     overflow = true;
1740 
1741   /* Handle the overflow cases.  Viewed as an unsigned value, the quotient of
1742      (signed min / -1) has the same representation as the original dividend.
1743      We have traditionally made division by zero act as division by one,
1744      so there too we use the original dividend.  */
1745   if (overflow)
1746     {
1747       if (remainder)
1748 	{
1749 	  *remainder_len = 1;
1750 	  remainder[0] = 0;
1751 	}
1752       if (oflow != 0)
1753 	*oflow = true;
1754       if (quotient)
1755 	for (unsigned int i = 0; i < dividend_len; ++i)
1756 	  quotient[i] = dividend_val[i];
1757       return dividend_len;
1758     }
1759 
1760   if (oflow)
1761     *oflow = false;
1762 
1763   /* If both operands fit in a host integer, do the division on the host.  */
1764   if (sgn == SIGNED
1765       && wi::fits_shwi_p (dividend)
1766       && wi::fits_shwi_p (divisor))
1767     {
1768       HOST_WIDE_INT o0 = dividend.to_shwi ();
1769       HOST_WIDE_INT o1 = divisor.to_shwi ();
1770 
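      /* o0 / o1 itself would overflow (undefined behavior on the host)
	 for HOST_WIDE_INT_MIN / -1, so that pair is handled explicitly
	 below; the assertion guarantees that the true quotient
	 -HOST_WIDE_INT_MIN is representable in the two blocks written.  */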
1771       if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1772 	{
1773 	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1774 	  if (quotient)
1775 	    {
1776 	      quotient[0] = HOST_WIDE_INT_MIN;
1777 	      quotient[1] = 0;
1778 	    }
1779 	  if (remainder)
1780 	    {
1781 	      remainder[0] = 0;
1782 	      *remainder_len = 1;
1783 	    }
1784 	  return 2;
1785 	}
1786       else
1787 	{
1788 	  if (quotient)
1789 	    quotient[0] = o0 / o1;
1790 	  if (remainder)
1791 	    {
1792 	      remainder[0] = o0 % o1;
1793 	      *remainder_len = 1;
1794 	    }
1795 	  return 1;
1796 	}
1797     }
1798 
1799   if (sgn == UNSIGNED
1800       && wi::fits_uhwi_p (dividend)
1801       && wi::fits_uhwi_p (divisor))
1802     {
1803       unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1804       unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1805       unsigned int quotient_len = 1;
1806 
1807       if (quotient)
1808 	{
1809 	  quotient[0] = o0 / o1;
1810 	  quotient_len = canonize_uhwi (quotient, dividend_prec);
1811 	}
1812       if (remainder)
1813 	{
1814 	  remainder[0] = o0 % o1;
1815 	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
1816 	}
1817       return quotient_len;
1818     }
1819 
1820   /* Make the divisor and dividend positive and remember what we
1821      did.  */
1822   if (sgn == SIGNED)
1823     {
1824       if (wi::neg_p (dividend))
1825 	{
1826 	  neg_dividend = -dividend;
1827 	  dividend = neg_dividend;
1828 	  dividend_neg = true;
1829 	}
1830       if (wi::neg_p (divisor))
1831 	{
1832 	  neg_divisor = -divisor;
1833 	  divisor = neg_divisor;
1834 	  divisor_neg = true;
1835 	}
1836     }
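  /* From this point the division is performed on the magnitudes only;
     the signs recorded above are reattached to the packed quotient and
     remainder below, which yields the truncating (round-toward-zero)
     semantics expected of signed division.  */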
1837 
1838   wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1839 	     dividend_blocks_needed, dividend_prec, sgn);
1840   wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1841 	     divisor_blocks_needed, divisor_prec, sgn);
1842 
1843   m = dividend_blocks_needed;
1844   b_dividend[m] = 0;
1845   while (m > 1 && b_dividend[m - 1] == 0)
1846     m--;
1847 
1848   n = divisor_blocks_needed;
1849   while (n > 1 && b_divisor[n - 1] == 0)
1850     n--;
1851 
1852   memset (b_quotient, 0, sizeof (b_quotient));
1853 
1854   divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1855 
1856   unsigned int quotient_len = 0;
1857   if (quotient)
1858     {
1859       quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1860       /* The quotient is negative if exactly one of the divisor or
1861 	 dividend is negative.  */
1862       if (dividend_neg != divisor_neg)
1863 	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1864 				      quotient_len, dividend_prec,
1865 				      UNSIGNED, 0);
1866     }
1867 
1868   if (remainder)
1869     {
1870       *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1871       /* The remainder always has the same sign as the dividend.  */
1872       if (dividend_neg)
1873 	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1874 					*remainder_len, dividend_prec,
1875 					UNSIGNED, 0);
1876     }
1877 
1878   return quotient_len;
1879 }
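/* Callers normally reach divmod_internal through the wrappers in
   wide-int.h rather than directly; a hedged sketch of typical use,
   assuming X and Y are wide_int values (see wide-int.h for the exact
   overloads):

     bool overflow;
     wide_int q = wi::div_trunc (x, y, SIGNED, &overflow);
     wide_int r = wi::mod_trunc (x, y, SIGNED);
*/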
1880 
1881 /*
1882  * Shifting, rotating and extraction.
1883  */
1884 
1885 /* Left shift XVAL by SHIFT and store the result in VAL.  Return the
1886    number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
1887 unsigned int
1888 wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1889 		  unsigned int xlen, unsigned int precision,
1890 		  unsigned int shift)
1891 {
1892   /* Split the shift into a whole-block shift and a subblock shift.  */
1893   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1894   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1895 
1896   /* The whole-block shift fills with zeros.  */
1897   unsigned int len = BLOCKS_NEEDED (precision);
1898   for (unsigned int i = 0; i < skip; ++i)
1899     val[i] = 0;
1900 
1901   /* It's easier to handle the simple block case specially.  */
1902   if (small_shift == 0)
1903     for (unsigned int i = skip; i < len; ++i)
1904       val[i] = safe_uhwi (xval, xlen, i - skip);
1905   else
1906     {
1907       /* The first unfilled output block is a left shift of the first
1908 	 block in XVAL.  The other output blocks contain bits from two
1909 	 consecutive input blocks.  */
1910       unsigned HOST_WIDE_INT carry = 0;
1911       for (unsigned int i = skip; i < len; ++i)
1912 	{
1913 	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1914 	  val[i] = (x << small_shift) | carry;
1915 	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1916 	}
1917     }
1918   return canonize (val, len, precision);
1919 }
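/* A stand-alone sketch of the same whole-block/subblock split on a fixed
   two-word value, kept out of the build with #if 0; u128, its fields and
   u128_shl are hypothetical names, and only <stdint.h> is assumed.  Note
   that the loop above computes the carry as
   x >> (-small_shift % HOST_BITS_PER_WIDE_INT), which for nonzero
   SMALL_SHIFT equals x >> (HOST_BITS_PER_WIDE_INT - small_shift), the
   form written explicitly here.  */
#if 0
#include <stdint.h>

struct u128 { uint64_t lo, hi; };

static struct u128
u128_shl (struct u128 x, unsigned int shift)	/* 0 <= shift < 128.  */
{
  struct u128 r = { 0, 0 };
  if (shift >= 64)
    /* Whole-block shift: the low word moves up and is shifted by the
       remainder; the vacated low word is zero.  */
    r.hi = x.lo << (shift - 64);
  else if (shift == 0)
    r = x;
  else
    {
      /* Subblock shift: the high output word combines bits from both
	 input words.  */
      r.lo = x.lo << shift;
      r.hi = (x.hi << shift) | (x.lo >> (64 - shift));
    }
  return r;
}
#endif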
1920 
1921 /* Right shift XVAL by SHIFT and store the result in VAL.  Return the
1922    number of blocks in VAL.  The input has XPRECISION bits and the
1923    output has XPRECISION - SHIFT bits.  */
1924 static unsigned int
1925 rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1926 		     unsigned int xlen, unsigned int xprecision,
1927 		     unsigned int shift)
1928 {
1929   /* Split the shift into a whole-block shift and a subblock shift.  */
1930   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1931   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1932 
1933   /* Work out how many blocks are needed to store the significant bits
1934      (excluding the upper zeros or signs).  */
1935   unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1936 
1937   /* It's easier to handle the simple block case specially.  */
1938   if (small_shift == 0)
1939     for (unsigned int i = 0; i < len; ++i)
1940       val[i] = safe_uhwi (xval, xlen, i + skip);
1941   else
1942     {
1943       /* Each output block but the last is a combination of two input blocks.
1944 	 The last block is a right shift of the last block in XVAL.  */
1945       unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1946       for (unsigned int i = 0; i < len; ++i)
1947 	{
1948 	  val[i] = curr >> small_shift;
1949 	  curr = safe_uhwi (xval, xlen, i + skip + 1);
1950 	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1951 	}
1952     }
1953   return len;
1954 }
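/* Worked example for the routine above (illustrative only): with
   XPRECISION = 128, XVAL = {0, 1} (the value 2**64) and SHIFT = 63,
   SKIP is 0, SMALL_SHIFT is 63 and LEN is BLOCKS_NEEDED (65) = 2;
   the loop produces VAL = {2, 0}, i.e. 2**64 >> 63 == 2.  */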
1955 
1956 /* Logically right shift XVAL by SHIFT and store the result in VAL.
1957    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
1958    VAL has PRECISION bits.  */
1959 unsigned int
1960 wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1961 		   unsigned int xlen, unsigned int xprecision,
1962 		   unsigned int precision, unsigned int shift)
1963 {
1964   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1965 
1966   /* The value we just created has precision XPRECISION - SHIFT.
1967      Zero-extend it to wider precisions.  */
1968   if (precision > xprecision - shift)
1969     {
1970       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1971       if (small_prec)
1972 	val[len - 1] = zext_hwi (val[len - 1], small_prec);
1973       else if (val[len - 1] < 0)
1974 	{
1975 	  /* Add a new block with a zero. */
1976 	  val[len++] = 0;
1977 	  return len;
1978 	}
1979     }
1980   return canonize (val, len, precision);
1981 }
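/* For example (illustrative only), the logical right shift of a 128-bit
   -1 by 1 first yields two all-one blocks holding 127 significant bits;
   since 127 % HOST_BITS_PER_WIDE_INT = 63 is nonzero, zext_hwi clears
   the top bit of the upper block, giving 2**127 - 1 as required.  */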
1982 
1983 /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1984    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
1985    VAL has PRECISION bits.  */
1986 unsigned int
1987 wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1988 		   unsigned int xlen, unsigned int xprecision,
1989 		   unsigned int precision, unsigned int shift)
1990 {
1991   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1992 
1993   /* The value we just created has precision XPRECISION - SHIFT.
1994      Sign-extend it to wider types.  */
1995   if (precision > xprecision - shift)
1996     {
1997       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1998       if (small_prec)
1999 	val[len - 1] = sext_hwi (val[len - 1], small_prec);
2000     }
2001   return canonize (val, len, precision);
2002 }
2003 
2004 /* Return the number of leading (upper) zeros in X.  */
2005 int
2006 wi::clz (const wide_int_ref &x)
2007 {
2008   /* Calculate how many bits there are above the highest represented block.  */
2009   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2010 
2011   unsigned HOST_WIDE_INT high = x.uhigh ();
2012   if (count < 0)
2013     /* The upper -COUNT bits of HIGH are not part of the value.
2014        Clear them out.  */
2015     high = (high << -count) >> -count;
2016   else if (x.sign_mask () < 0)
2017     /* The upper bit is set, so there are no leading zeros.  */
2018     return 0;
2019 
2020   /* We don't need to look below HIGH.  Either HIGH is nonzero,
2021      or the top bit of the block below is nonzero; clz_hwi is
2022      HOST_BITS_PER_WIDE_INT in the latter case.  */
2023   return count + clz_hwi (high);
2024 }
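/* Worked example (illustrative only): for X = 1 with a 128-bit
   precision and LEN = 1, COUNT is 128 - 64 = 64, the sign mask is 0,
   and clz_hwi (1) is 63, so the result is 64 + 63 = 127 leading
   zeros.  */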
2025 
2026 /* Return the number of redundant sign bits in X.  (That is, the number
2027    of bits immediately below the sign bit that have the same value as
2028    the sign bit.)  */
2029 int
2030 wi::clrsb (const wide_int_ref &x)
2031 {
2032   /* Calculate how many bits there are above the highest represented block.  */
2033   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2034 
2035   unsigned HOST_WIDE_INT high = x.uhigh ();
2036   unsigned HOST_WIDE_INT mask = -1;
2037   if (count < 0)
2038     {
2039       /* The upper -COUNT bits of HIGH are not part of the value.
2040 	 Clear them from both MASK and HIGH.  */
2041       mask >>= -count;
2042       high &= mask;
2043     }
2044 
2045   /* If the top bit is 1, count the number of leading 1s.  If the top
2046      bit is 0, count the number of leading 0s.  */
2047   if (high > mask / 2)
2048     high ^= mask;
2049 
2050   /* There are no sign bits below the top block, so we don't need to look
2051      beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2052      HIGH is 0.  */
2053   return count + clz_hwi (high) - 1;
2054 }
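/* Worked example (illustrative only): for X = -1 with a 128-bit
   precision and LEN = 1, COUNT is 64, HIGH is all ones and is flipped
   to 0 by the XOR, and clz_hwi (0) is HOST_BITS_PER_WIDE_INT, so the
   result is 64 + 64 - 1 = 127 redundant sign bits.  */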
2055 
2056 /* Return the number of trailing (lower) zeros in X.  */
2057 int
2058 wi::ctz (const wide_int_ref &x)
2059 {
2060   if (x.len == 1 && x.ulow () == 0)
2061     return x.precision;
2062 
2063   /* Having dealt with the zero case, there must be a block with a
2064      nonzero bit.  We don't care about the bits above the first 1.  */
2065   unsigned int i = 0;
2066   while (x.val[i] == 0)
2067     ++i;
2068   return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2069 }
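/* A stand-alone sketch of the same scan, kept out of the build with
   #if 0; array_ctz is a hypothetical name, and only <stdint.h> plus
   GCC's __builtin_ctzll are assumed.  */
#if 0
#include <stdint.h>

static int
array_ctz (const uint64_t *val, unsigned int len)
{
  /* Find the lowest nonzero word; bits above its first 1 are
     irrelevant.  */
  for (unsigned int i = 0; i < len; ++i)
    if (val[i] != 0)
      return i * 64 + __builtin_ctzll (val[i]);
  return len * 64;	/* All-zero input.  */
}
#endif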
2070 
2071 /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2072    return -1.  */
2073 int
2074 wi::exact_log2 (const wide_int_ref &x)
2075 {
2076   /* Reject cases where there are implicit -1 blocks above the top block.  */
2077   if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2078     return -1;
2079 
2080   /* Set CRUX to the index of the entry that should be nonzero.
2081      If the top block is zero then the next lowest block (if any)
2082      must have the high bit set.  */
2083   unsigned int crux = x.len - 1;
2084   if (crux > 0 && x.val[crux] == 0)
2085     crux -= 1;
2086 
2087   /* Check that all lower blocks are zero.  */
2088   for (unsigned int i = 0; i < crux; ++i)
2089     if (x.val[i] != 0)
2090       return -1;
2091 
2092   /* Get a zero-extended form of block CRUX.  */
2093   unsigned HOST_WIDE_INT hwi = x.val[crux];
2094   if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2095     hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2096 
2097   /* Now it's down to whether HWI is a power of 2.  */
2098   int res = ::exact_log2 (hwi);
2099   if (res >= 0)
2100     res += crux * HOST_BITS_PER_WIDE_INT;
2101   return res;
2102 }
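/* Worked example (illustrative only): for a 128-bit X = 2**64,
   VAL = {0, 1} and LEN = 2, so CRUX is 1; the lower block is zero,
   HWI is 1, and ::exact_log2 (1) is 0, giving 0 + 64 = 64.  */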
2103 
2104 /* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
2105 int
2106 wi::floor_log2 (const wide_int_ref &x)
2107 {
2108   return x.precision - 1 - clz (x);
2109 }
2110 
2111 /* Return the index of the first (lowest) set bit in X, counting from 1.
2112    Return 0 if X is 0.  */
2113 int
2114 wi::ffs (const wide_int_ref &x)
2115 {
2116   return eq_p (x, 0) ? 0 : ctz (x) + 1;
2117 }
2118 
2119 /* Return true if sign-extending X to have precision PRECISION would give
2120    the minimum signed value at that precision.  */
2121 bool
2122 wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2123 {
2124   return ctz (x) + 1 == int (precision);
2125 }
2126 
2127 /* Return true if X represents the minimum signed value.  */
2128 bool
2129 wi::only_sign_bit_p (const wide_int_ref &x)
2130 {
2131   return only_sign_bit_p (x, x.precision);
2132 }
2133 
2134 /*
2135  * Private utilities.
2136  */
2137 
2138 void gt_ggc_mx (widest_int *) { }
2139 void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2140 void gt_pch_nx (widest_int *) { }
2141 
2142 template void wide_int::dump () const;
2143 template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2144 template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2145 template void offset_int::dump () const;
2146 template void widest_int::dump () const;
2147