/* Operations with very long integers.  -*- C++ -*-
   Copyright (C) 2012-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef WIDE_INT_H
#define WIDE_INT_H

/* wide-int.[cc|h] implements a class that efficiently performs
   mathematical operations on finite precision integers.  wide_ints
   are designed to be transient - they are not for long term storage
   of values.  There is tight integration between wide_ints and the
   other longer storage GCC representations (rtl and tree).

   The actual precision of a wide_int depends on the flavor.  There
   are three predefined flavors:

     1) wide_int (the default).  This flavor does the math in the
     precision of its input arguments.  It is assumed (and checked)
     that the precisions of the operands and results are consistent.
     This is the most efficient flavor.  It is not possible to examine
     bits above the precision that has been specified.  Because of
     this, the default flavor has semantics that are simple to
     understand and in general model the underlying hardware that the
     compiler is targeted for.

     This flavor must be used at the RTL level of gcc because there
     is, in general, not enough information in the RTL representation
     to extend a value beyond the precision specified in the mode.

     This flavor should also be used at the TREE and GIMPLE levels of
     the compiler except for the circumstances described in the
     descriptions of the other two flavors.

     The default wide_int representation does not contain any
     inherent information about the signedness of the represented value,
     so it can be used to represent both signed and unsigned numbers.
     For operations where the results depend on signedness (full width
     multiply, division, shifts, comparisons, and operations that need
     overflow detected), the signedness must be specified separately.
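
     For example (an illustrative sketch, where X and Y are wide_ints
     of the same precision), the signedness is passed as a separate
     argument or encoded in the function name:

	     wi::lt_p (X, Y, SIGNED)         // signed X < Y
	     wi::ltu_p (X, Y)                // unsigned X < Y
	     wi::div_trunc (X, Y, UNSIGNED)  // unsigned truncating X / Y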

     2) offset_int.  This is a fixed-precision integer that can hold
     any address offset, measured in either bits or bytes, with at
     least one extra sign bit.  At the moment the maximum address
     size GCC supports is 64 bits.  With 8-bit bytes and an extra
     sign bit, offset_int therefore needs to have at least 68 bits
     of precision.  We round this up to 128 bits for efficiency.
     Values of type T are converted to this precision by sign- or
     zero-extending them based on the signedness of T.

     The extra sign bit means that offset_int is effectively a signed
     128-bit integer, i.e. it behaves like int128_t.

     Since the values are logically signed, there is no need to
     distinguish between signed and unsigned operations.  Sign-sensitive
     comparison operators <, <=, > and >= are therefore supported.
     Shift operators << and >> are also supported, with >> being
     an _arithmetic_ right shift.

     [ Note that, even though offset_int is effectively int128_t,
       it can still be useful to use unsigned comparisons like
       wi::leu_p (a, b) as a more efficient short-hand for
       "a >= 0 && a <= b". ]

     3) widest_int.  This representation is an approximation of
     infinite precision math.  However, it is not really infinite
     precision math as in the GMP library.  It is really finite
     precision math where the precision is 4 times the size of the
     largest integer that the target port can represent.

     Like offset_int, widest_int is wider than all the values that
     it needs to represent, so the integers are logically signed.
     Sign-sensitive comparison operators <, <=, > and >= are supported,
     as are << and >>.
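
     For example (an illustrative sketch):

	     widest_int w = 1;
	     w <<= 100;             // shift done in widest_int precision
	     if (w > 0)             // signed comparison against 0
	       ...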

     There are several places in GCC where this should/must be used:

     * Code that does induction variable optimizations.  This code
       works with induction variables of many different types at the
       same time.  Because of this, it ends up doing many different
       calculations where the operands are not compatible types.
       widest_int makes this easy, because it provides a representation
       in which nothing is lost when converting from any variable.

     * There are a small number of passes that currently use the
       widest_int that should use the default.  These should be
       changed.

   There are surprising features of offset_int and widest_int
   that users should be careful about:

     1) Shifts and rotations are just weird.  You have to specify a
     precision in which the shift or rotate is to happen.  The bits
     above this precision are zeroed.  While this is what you
     want, it is clearly non-obvious.

     2) Larger precision math sometimes does not produce the same
     answer as would be expected for doing the math at the proper
     precision.  In particular, a multiply followed by a divide will
     produce a different answer if the first product is larger than
     what can be represented in the input precision.
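
     For example, suppose the proper precision is 8 bits and the
     (unsigned) inputs are 100 and 3.  In 8-bit arithmetic the product
     100 * 3 wraps to 44, so a truncating division by 3 yields 14.
     Doing the same calculation in widest_int gives 300 / 3 = 100,
     which is not what 8-bit arithmetic would have produced.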

   The offset_int and the widest_int flavors are more expensive
   than the default wide_int, so in addition to the caveats with these
   two, the default is the preferred representation.

   All three flavors of wide_int are represented as a vector of
   HOST_WIDE_INTs.  The default and widest_int vectors contain enough elements
   to hold a value of MAX_BITSIZE_MODE_ANY_INT bits.  offset_int contains only
   enough elements to hold ADDR_MAX_PRECISION bits.  The values are stored
   in the vector with the least significant HOST_BITS_PER_WIDE_INT bits
   in element 0.

   The default wide_int contains three fields: the vector (VAL),
   the precision and a length (LEN).  The length is the number of HWIs
   needed to represent the value.  widest_int and offset_int have a
   constant precision that cannot be changed, so they only store the
   VAL and LEN fields.

   Since most integers used in a compiler are small values, it is
   generally profitable to use a representation of the value that is
   as small as possible.  LEN is used to indicate the number of
   elements of the vector that are in use.  The numbers are stored as
   sign extended numbers as a means of compression.  Leading
   HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
   as long as they can be reconstructed from the top bit that is being
   represented.
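
   For example, on a host with 64-bit HOST_WIDE_INTs, a 128-bit
   wide_int with value 1 is stored as LEN = 1, VAL = { 1 }, and the
   value -1 as LEN = 1, VAL = { -1 }, because the dropped upper block
   is just a sign extension of block 0.  The value 2^64, by contrast,
   needs LEN = 2, VAL = { 0, 1 }.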

   The precision and length of a wide_int are always greater than 0.
   Any bits in a wide_int above the precision are sign-extended from the
   most significant bit.  For example, a 4-bit value 0x8 is represented as
   VAL = { 0xf...fff8 }.  However, as an optimization, we allow other integer
   constants to be represented with undefined bits above the precision.
   This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN,
   so that the INTEGER_CST representation can be used both in TYPE_PRECISION
   and in wider precisions.

   There are constructors to create the various forms of wide_int from
   trees, rtl and constants.  For trees the options are:

	     tree t = ...;
	     wi::to_wide (t)     // Treat T as a wide_int
	     wi::to_offset (t)   // Treat T as an offset_int
	     wi::to_widest (t)   // Treat T as a widest_int

   All three are light-weight accessors that should have no overhead
   in release builds.  If it is useful for readability reasons to
   store the result in a temporary variable, the preferred method is:

	     wi::tree_to_wide_ref twide = wi::to_wide (t);
	     wi::tree_to_offset_ref toffset = wi::to_offset (t);
	     wi::tree_to_widest_ref twidest = wi::to_widest (t);

   To make an rtx into a wide_int, you have to pair it with a mode.
   The canonical way to do this is with rtx_mode_t as in:

	     rtx r = ...
	     wide_int x = rtx_mode_t (r, mode);

   Similarly, a wide_int can only be constructed from a host value if
   the target precision is given explicitly, such as in:

	     wide_int x = wi::shwi (c, prec); // sign-extend C if necessary
	     wide_int y = wi::uhwi (c, prec); // zero-extend C if necessary

   However, offset_int and widest_int have an inherent precision and so
   can be initialized directly from a host value:

	     offset_int x = (int) c;          // sign-extend C
	     widest_int x = (unsigned int) c; // zero-extend C

   It is also possible to do arithmetic directly on rtx_mode_ts and
   constants.  For example:

	     wi::add (r1, r2);    // add equal-sized rtx_mode_ts r1 and r2
	     wi::add (r1, 1);     // add 1 to rtx_mode_t r1
	     wi::lshift (1, 100); // 1 << 100 as a widest_int

   Many binary operations place restrictions on the combinations of inputs,
   using the following rules:

   - {rtx, wide_int} op {rtx, wide_int} -> wide_int
       The inputs must be the same precision.  The result is a wide_int
       of the same precision.

   - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
     (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int
       The HOST_WIDE_INT is extended or truncated to the precision of
       the other input.  The result is a wide_int of the same precision
       as that input.

   - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int
       The inputs are extended to widest_int precision and produce a
       widest_int result.

   - offset_int op offset_int -> offset_int
     offset_int op (un)signed HOST_WIDE_INT -> offset_int
     (un)signed HOST_WIDE_INT op offset_int -> offset_int

   - widest_int op widest_int -> widest_int
     widest_int op (un)signed HOST_WIDE_INT -> widest_int
     (un)signed HOST_WIDE_INT op widest_int -> widest_int

   Other combinations like:

   - widest_int op offset_int and
   - wide_int op offset_int

   are not allowed.  The inputs should instead be extended or truncated
   so that they match.
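
   For example, one way (an illustrative sketch) to combine a wide_int
   X with an offset_int Y is to convert Y explicitly to X's precision
   first:

	     wi::add (x, wide_int::from (y, x.get_precision (), SIGNED));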

   The inputs to comparison functions like wi::eq_p and wi::lts_p
   follow the same compatibility rules, although their return types
   are different.  Unary functions on X produce the same result as
   a binary operation X + X.  Shift functions X op Y also produce
   the same result as X + X; the precision of the shift amount Y
   can be arbitrarily different from X.  */

/* MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
   early examination of the target's mode file.  WIDE_INT_MAX_ELTS can
   accommodate at least 1 more bit, so that unsigned numbers of that
   mode can be represented as a signed value.  Note that it is still
   possible to create fixed_wide_ints that have precisions greater than
   MAX_BITSIZE_MODE_ANY_INT.  This can be useful when representing a
   double-width multiplication result, for example.  */
#define WIDE_INT_MAX_ELTS \
  ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)

#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
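
/* For example (illustrative figures only): with 64-bit HOST_WIDE_INTs
   and a MAX_BITSIZE_MODE_ANY_INT of 128, WIDE_INT_MAX_ELTS is
   (128 + 64) / 64 = 3 and WIDE_INT_MAX_PRECISION is 3 * 64 = 192.  */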

/* This is the max size of any pointer on any machine.  It does not
   seem to be as easy to sniff this out of the machine description as
   it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
   multiple address sizes and may have different address sizes for
   different address spaces.  However, currently the largest pointer
   on any platform is 64 bits.  When that changes, it is likely
   that a target hook should be defined so that targets can make this
   value larger for those targets.  */
#define ADDR_MAX_BITSIZE 64

/* This is the internal precision used when doing any address
   arithmetic.  The '4' is really 3 + 1.  Three of the bits are for
   the number of extra bits needed to do bit addresses and the other bit
   is to allow everything to be signed without losing any precision.
   Then everything is rounded up to the next HWI for efficiency.  */
#define ADDR_MAX_PRECISION \
  ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) \
   & ~(HOST_BITS_PER_WIDE_INT - 1))
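
/* With an ADDR_MAX_BITSIZE of 64 and 64-bit HOST_WIDE_INTs, this works
   out to (64 + 4 + 63) & ~63 = 128 bits, i.e. two HWIs.  */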

/* The number of HWIs needed to store an offset_int.  */
#define OFFSET_INT_ELTS (ADDR_MAX_PRECISION / HOST_BITS_PER_WIDE_INT)

/* The type of result produced by a binary operation on types T1 and T2.
   Defined purely for brevity.  */
#define WI_BINARY_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::result_type

/* Likewise for binary operators, which excludes the case in which neither
   T1 nor T2 is a wide-int-based type.  */
#define WI_BINARY_OPERATOR_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::operator_result

/* The type of result produced by T1 << T2.  Leads to substitution failure
   if the operation isn't supported.  Defined purely for brevity.  */
#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_shift_result_type

/* The type of result produced by a sign-agnostic binary predicate on
   types T1 and T2.  This is bool if wide-int operations make sense for
   T1 and T2 and leads to substitution failure otherwise.  */
#define WI_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::predicate_result

/* The type of result produced by a signed binary predicate on types T1 and T2.
   This is bool if signed comparisons make sense for T1 and T2 and leads to
   substitution failure otherwise.  */
#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_predicate_result

/* The type of result produced by a unary operation on type T.  */
#define WI_UNARY_RESULT(T) \
  typename wi::binary_traits <T, T>::result_type

/* Define a variable RESULT to hold the result of a binary operation on
   X and Y, which have types T1 and T2 respectively.  Define VAL to
   point to the blocks of RESULT.  Once the user of the macro has
   filled in VAL, it should call RESULT.set_len to set the number
   of initialized blocks.  */
#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
  WI_BINARY_RESULT (T1, T2) RESULT = \
    wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
  HOST_WIDE_INT *VAL = RESULT.write_val ()

/* Similar for the result of a unary operation on X, which has type T.  */
#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
  WI_UNARY_RESULT (T) RESULT = \
    wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
  HOST_WIDE_INT *VAL = RESULT.write_val ()

template <typename T> class generic_wide_int;
template <int N> class fixed_wide_int_storage;
class wide_int_storage;

/* An N-bit integer.  Until we can use typedef templates, use this instead.  */
#define FIXED_WIDE_INT(N) \
  generic_wide_int < fixed_wide_int_storage <N> >

typedef generic_wide_int <wide_int_storage> wide_int;
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;

/* wi::storage_ref can be a reference to a primitive type,
   so this is the conservatively-correct setting.  */
template <bool SE, bool HDP = true>
struct wide_int_ref_storage;

typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;

/* This can be used instead of wide_int_ref if the referenced value is
   known to have type T.  It carries across properties of T's representation,
   such as whether excess upper bits in a HWI are defined, and can therefore
   help avoid redundant work.

   The macro could be replaced with a template typedef, once we're able
   to use those.  */
#define WIDE_INT_REF_FOR(T) \
  generic_wide_int \
    <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
			   wi::int_traits <T>::host_dependent_precision> >

namespace wi
{
  /* Classifies an integer based on its precision.  */
  enum precision_type {
    /* The integer has both a precision and defined signedness.  This allows
       the integer to be converted to any width, since we know whether to fill
       any extra bits with zeros or signs.  */
    FLEXIBLE_PRECISION,

    /* The integer has a variable precision but no defined signedness.  */
    VAR_PRECISION,

    /* The integer has a constant precision (known at GCC compile time)
       and is signed.  */
    CONST_PRECISION
  };

  /* This class, which has no default implementation, is expected to
     provide the following members:

     static const enum precision_type precision_type;
       Classifies the type of T.

     static const unsigned int precision;
       Only defined if precision_type == CONST_PRECISION.  Specifies the
       precision of all integers of type T.

     static const bool host_dependent_precision;
       True if the precision of T depends (or can depend) on the host.

     static unsigned int get_precision (const T &x)
       Return the number of bits in X.

     static wi::storage_ref *decompose (HOST_WIDE_INT *scratch,
					unsigned int precision, const T &x)
       Decompose X as a PRECISION-bit integer, returning the associated
       wi::storage_ref.  SCRATCH is available as scratch space if needed.
       The routine should assert that PRECISION is acceptable.  */
  template <typename T> struct int_traits;

  /* This class provides a single type, result_type, which specifies the
     type of integer produced by a binary operation whose inputs have
     types T1 and T2.  The definition should be symmetric.  */
  template <typename T1, typename T2,
	    enum precision_type P1 = int_traits <T1>::precision_type,
	    enum precision_type P2 = int_traits <T2>::precision_type>
  struct binary_traits;

  /* Specify the result type for each supported combination of binary
     inputs.  Note that CONST_PRECISION and VAR_PRECISION cannot be
     mixed, in order to give stronger type checking.  When both inputs
     are CONST_PRECISION, they must have the same precision.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef widest_int result_type;
    /* Don't define operators for this combination.  */
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T2>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
  {
    STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };
}

/* Public functions for querying and operating on integers.  */
namespace wi
{
  template <typename T>
  unsigned int get_precision (const T &);

  template <typename T1, typename T2>
  unsigned int get_binary_precision (const T1 &, const T2 &);

  template <typename T1, typename T2>
  void copy (T1 &, const T2 &);

#define UNARY_PREDICATE \
  template <typename T> bool
#define UNARY_FUNCTION \
  template <typename T> WI_UNARY_RESULT (T)
#define BINARY_PREDICATE \
  template <typename T1, typename T2> bool
#define BINARY_FUNCTION \
  template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
#define SHIFT_FUNCTION \
  template <typename T1, typename T2> WI_UNARY_RESULT (T1)

  UNARY_PREDICATE fits_shwi_p (const T &);
  UNARY_PREDICATE fits_uhwi_p (const T &);
  UNARY_PREDICATE neg_p (const T &, signop = SIGNED);

  template <typename T>
  HOST_WIDE_INT sign_mask (const T &);

  BINARY_PREDICATE eq_p (const T1 &, const T2 &);
  BINARY_PREDICATE ne_p (const T1 &, const T2 &);
  BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE lts_p (const T1 &, const T2 &);
  BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
  BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE les_p (const T1 &, const T2 &);
  BINARY_PREDICATE leu_p (const T1 &, const T2 &);
  BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE gts_p (const T1 &, const T2 &);
  BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
  BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE ges_p (const T1 &, const T2 &);
  BINARY_PREDICATE geu_p (const T1 &, const T2 &);

  template <typename T1, typename T2>
  int cmp (const T1 &, const T2 &, signop);

  template <typename T1, typename T2>
  int cmps (const T1 &, const T2 &);

  template <typename T1, typename T2>
  int cmpu (const T1 &, const T2 &);

  UNARY_FUNCTION bit_not (const T &);
  UNARY_FUNCTION neg (const T &);
  UNARY_FUNCTION neg (const T &, bool *);
  UNARY_FUNCTION abs (const T &);
  UNARY_FUNCTION ext (const T &, unsigned int, signop);
  UNARY_FUNCTION sext (const T &, unsigned int);
  UNARY_FUNCTION zext (const T &, unsigned int);
  UNARY_FUNCTION set_bit (const T &, unsigned int);

  BINARY_FUNCTION min (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smin (const T1 &, const T2 &);
  BINARY_FUNCTION umin (const T1 &, const T2 &);
  BINARY_FUNCTION max (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smax (const T1 &, const T2 &);
  BINARY_FUNCTION umax (const T1 &, const T2 &);

  BINARY_FUNCTION bit_and (const T1 &, const T2 &);
  BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *);
  BINARY_FUNCTION sub (const T1 &, const T2 &);
  BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *);
  BINARY_FUNCTION mul (const T1 &, const T2 &);
  BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *);
  BINARY_FUNCTION smul (const T1 &, const T2 &, bool *);
  BINARY_FUNCTION umul (const T1 &, const T2 &, bool *);
  BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
  BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
  BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
				WI_BINARY_RESULT (T1, T2) *);
  BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
  BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
  BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0);

  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop);

  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop,
		      WI_BINARY_RESULT (T1, T2) *);

  SHIFT_FUNCTION lshift (const T1 &, const T2 &);
  SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
  SHIFT_FUNCTION arshift (const T1 &, const T2 &);
  SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
  SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
  SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);

#undef SHIFT_FUNCTION
#undef BINARY_PREDICATE
#undef BINARY_FUNCTION
#undef UNARY_PREDICATE
#undef UNARY_FUNCTION

  bool only_sign_bit_p (const wide_int_ref &, unsigned int);
  bool only_sign_bit_p (const wide_int_ref &);
  int clz (const wide_int_ref &);
  int clrsb (const wide_int_ref &);
  int ctz (const wide_int_ref &);
  int exact_log2 (const wide_int_ref &);
  int floor_log2 (const wide_int_ref &);
  int ffs (const wide_int_ref &);
  int popcount (const wide_int_ref &);
  int parity (const wide_int_ref &);

  template <typename T>
  unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);

  template <typename T>
  unsigned int min_precision (const T &, signop);
}

namespace wi
{
  /* Contains the components of a decomposed integer for easy, direct
     access.  */
  struct storage_ref
  {
    storage_ref () {}
    storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);

    const HOST_WIDE_INT *val;
    unsigned int len;
    unsigned int precision;

    /* Provide enough trappings for this class to act as storage for
       generic_wide_int.  */
    unsigned int get_len () const;
    unsigned int get_precision () const;
    const HOST_WIDE_INT *get_val () const;
  };
}

inline::wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in,
				      unsigned int len_in,
				      unsigned int precision_in)
  : val (val_in), len (len_in), precision (precision_in)
{
}

inline unsigned int
wi::storage_ref::get_len () const
{
  return len;
}

inline unsigned int
wi::storage_ref::get_precision () const
{
  return precision;
}

inline const HOST_WIDE_INT *
wi::storage_ref::get_val () const
{
  return val;
}

/* This class defines an integer type using the storage provided by the
   template argument.  The storage class must provide the following
   functions:

   unsigned int get_precision () const
     Return the number of bits in the integer.

   HOST_WIDE_INT *get_val () const
     Return a pointer to the array of blocks that encodes the integer.

   unsigned int get_len () const
     Return the number of blocks in get_val ().  If this is smaller
     than the number of blocks implied by get_precision (), the
     remaining blocks are sign extensions of block get_len () - 1.

   Although not required by generic_wide_int itself, writable storage
   classes can also provide the following functions:

   HOST_WIDE_INT *write_val ()
     Get a modifiable version of get_val ()

   unsigned int set_len (unsigned int len)
     Set the value returned by get_len () to LEN.  */
template <typename storage>
class GTY(()) generic_wide_int : public storage
{
public:
  generic_wide_int ();

  template <typename T>
  generic_wide_int (const T &);

  template <typename T>
  generic_wide_int (const T &, unsigned int);

  /* Conversions.  */
  HOST_WIDE_INT to_shwi (unsigned int) const;
  HOST_WIDE_INT to_shwi () const;
  unsigned HOST_WIDE_INT to_uhwi (unsigned int) const;
  unsigned HOST_WIDE_INT to_uhwi () const;
  HOST_WIDE_INT to_short_addr () const;

  /* Public accessors for the interior of a wide int.  */
  HOST_WIDE_INT sign_mask () const;
  HOST_WIDE_INT elt (unsigned int) const;
  unsigned HOST_WIDE_INT ulow () const;
  unsigned HOST_WIDE_INT uhigh () const;
  HOST_WIDE_INT slow () const;
  HOST_WIDE_INT shigh () const;

  template <typename T>
  generic_wide_int &operator = (const T &);

#define ASSIGNMENT_OPERATOR(OP, F) \
  template <typename T> \
    generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }

/* Restrict these to cases where the shift operator is defined.  */
#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
  template <typename T> \
    generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }

#define INCDEC_OPERATOR(OP, DELTA) \
  generic_wide_int &OP () { *this += DELTA; return *this; }

  ASSIGNMENT_OPERATOR (operator &=, bit_and)
  ASSIGNMENT_OPERATOR (operator |=, bit_or)
  ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
  ASSIGNMENT_OPERATOR (operator +=, add)
  ASSIGNMENT_OPERATOR (operator -=, sub)
  ASSIGNMENT_OPERATOR (operator *=, mul)
  ASSIGNMENT_OPERATOR (operator <<=, lshift)
  SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
  INCDEC_OPERATOR (operator ++, 1)
  INCDEC_OPERATOR (operator --, -1)

#undef SHIFT_ASSIGNMENT_OPERATOR
#undef ASSIGNMENT_OPERATOR
#undef INCDEC_OPERATOR

  /* Debugging functions.  */
  void dump () const;

  static const bool is_sign_extended
    = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
};

template <typename storage>
inline generic_wide_int <storage>::generic_wide_int () {}

template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x)
  : storage (x)
{
}

template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x,
						     unsigned int precision)
  : storage (x, precision)
{
}

/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION.
   If THIS does not fit in PRECISION, the information is lost.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_shwi (unsigned int precision) const
{
  if (precision < HOST_BITS_PER_WIDE_INT)
    return sext_hwi (this->get_val ()[0], precision);
  else
    return this->get_val ()[0];
}

/* Return THIS as a signed HOST_WIDE_INT, in its natural precision.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_shwi () const
{
  if (is_sign_extended)
    return this->get_val ()[0];
  else
    return to_shwi (this->get_precision ());
}

/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from
   PRECISION.  If THIS does not fit in PRECISION, the information
   is lost.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::to_uhwi (unsigned int precision) const
{
  if (precision < HOST_BITS_PER_WIDE_INT)
    return zext_hwi (this->get_val ()[0], precision);
  else
    return this->get_val ()[0];
}

/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::to_uhwi () const
{
  return to_uhwi (this->get_precision ());
}

/* TODO: The compiler is half converted from using HOST_WIDE_INT to
   represent addresses to using offset_int to represent addresses.
   We use to_short_addr at the interface from new code to old,
   unconverted code.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_short_addr () const
{
  return this->get_val ()[0];
}

/* Return the implicit value of blocks above get_len ().  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::sign_mask () const
{
  unsigned int len = this->get_len ();
  unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
  if (!is_sign_extended)
    {
      unsigned int precision = this->get_precision ();
      int excess = len * HOST_BITS_PER_WIDE_INT - precision;
      if (excess > 0)
	high <<= excess;
    }
  return (HOST_WIDE_INT) (high) < 0 ? -1 : 0;
}

/* Return the signed value of the least-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::slow () const
{
  return this->get_val ()[0];
}

/* Return the signed value of the most-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::shigh () const
{
  return this->get_val ()[this->get_len () - 1];
}

/* Return the unsigned value of the least-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::ulow () const
{
  return this->get_val ()[0];
}

/* Return the unsigned value of the most-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::uhigh () const
{
  return this->get_val ()[this->get_len () - 1];
}

/* Return block I, which might be implicitly or explicitly encoded.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::elt (unsigned int i) const
{
  if (i >= this->get_len ())
    return sign_mask ();
  else
    return this->get_val ()[i];
}

template <typename storage>
template <typename T>
inline generic_wide_int <storage> &
generic_wide_int <storage>::operator = (const T &x)
{
  storage::operator = (x);
  return *this;
}

/* Dump the contents of the integer to stderr, for debugging.  */
template <typename storage>
void
generic_wide_int <storage>::dump () const
{
  unsigned int len = this->get_len ();
  const HOST_WIDE_INT *val = this->get_val ();
  unsigned int precision = this->get_precision ();
  fprintf (stderr, "[");
  if (len * HOST_BITS_PER_WIDE_INT < precision)
    fprintf (stderr, "...,");
  for (unsigned int i = 0; i < len - 1; ++i)
    fprintf (stderr, HOST_WIDE_INT_PRINT_HEX ",", val[len - 1 - i]);
  fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "], precision = %d\n",
	   val[0], precision);
}

namespace wi
{
  template <typename storage>
  struct int_traits < generic_wide_int <storage> >
    : public wi::int_traits <storage>
  {
    static unsigned int get_precision (const generic_wide_int <storage> &);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
				      const generic_wide_int <storage> &);
  };
}

template <typename storage>
inline unsigned int
wi::int_traits < generic_wide_int <storage> >::
get_precision (const generic_wide_int <storage> &x)
{
  return x.get_precision ();
}

template <typename storage>
inline wi::storage_ref
wi::int_traits < generic_wide_int <storage> >::
decompose (HOST_WIDE_INT *, unsigned int precision,
	   const generic_wide_int <storage> &x)
{
  gcc_checking_assert (precision == x.get_precision ());
  return wi::storage_ref (x.get_val (), x.get_len (), precision);
}

/* Provide the storage for a wide_int_ref.  This acts like a read-only
   wide_int, with the optimization that VAL is normally a pointer to
   another integer's storage, so that no array copy is needed.  */
template <bool SE, bool HDP>
struct wide_int_ref_storage : public wi::storage_ref
{
private:
  /* Scratch space that can be used when decomposing the original integer.
     It must live as long as this object.  */
  HOST_WIDE_INT scratch[2];

public:
  wide_int_ref_storage () {}

  wide_int_ref_storage (const wi::storage_ref &);

  template <typename T>
  wide_int_ref_storage (const T &);

  template <typename T>
  wide_int_ref_storage (const T &, unsigned int);
};

/* Create a reference from an existing reference.  */
template <bool SE, bool HDP>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const wi::storage_ref &x)
  : storage_ref (x)
{}

/* Create a reference to integer X in its natural precision.  Note
   that the natural precision is host-dependent for primitive
   types.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
  : storage_ref (wi::int_traits <T>::decompose (scratch,
						wi::get_precision (x), x))
{
}

/* Create a reference to integer X in precision PRECISION.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const T &x, unsigned int precision)
  : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
{
}

namespace wi
{
  template <bool SE, bool HDP>
  struct int_traits <wide_int_ref_storage <SE, HDP> >
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    static const bool host_dependent_precision = HDP;
    static const bool is_sign_extended = SE;
  };
}

namespace wi
{
  unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int,
			      signop sgn);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, bool = true);
}

1007*38fd1498Szrj /* The storage used by wide_int.  */
1008*38fd1498Szrj class GTY(()) wide_int_storage
1009*38fd1498Szrj {
1010*38fd1498Szrj private:
1011*38fd1498Szrj   HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
1012*38fd1498Szrj   unsigned int len;
1013*38fd1498Szrj   unsigned int precision;
1014*38fd1498Szrj 
1015*38fd1498Szrj public:
1016*38fd1498Szrj   wide_int_storage ();
1017*38fd1498Szrj   template <typename T>
1018*38fd1498Szrj   wide_int_storage (const T &);
1019*38fd1498Szrj 
1020*38fd1498Szrj   /* The standard generic_wide_int storage methods.  */
1021*38fd1498Szrj   unsigned int get_precision () const;
1022*38fd1498Szrj   const HOST_WIDE_INT *get_val () const;
1023*38fd1498Szrj   unsigned int get_len () const;
1024*38fd1498Szrj   HOST_WIDE_INT *write_val ();
1025*38fd1498Szrj   void set_len (unsigned int, bool = false);
1026*38fd1498Szrj 
1027*38fd1498Szrj   template <typename T>
1028*38fd1498Szrj   wide_int_storage &operator = (const T &);
1029*38fd1498Szrj 
1030*38fd1498Szrj   static wide_int from (const wide_int_ref &, unsigned int, signop);
1031*38fd1498Szrj   static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
1032*38fd1498Szrj 			      unsigned int, bool = true);
1033*38fd1498Szrj   static wide_int create (unsigned int);
1034*38fd1498Szrj 
1035*38fd1498Szrj   /* FIXME: target-dependent, so should disappear.  */
1036*38fd1498Szrj   wide_int bswap () const;
1037*38fd1498Szrj };
1038*38fd1498Szrj 
1039*38fd1498Szrj namespace wi
1040*38fd1498Szrj {
1041*38fd1498Szrj   template <>
1042*38fd1498Szrj   struct int_traits <wide_int_storage>
1043*38fd1498Szrj   {
1044*38fd1498Szrj     static const enum precision_type precision_type = VAR_PRECISION;
1045*38fd1498Szrj     /* Guaranteed by a static assert in the wide_int_storage constructor.  */
1046*38fd1498Szrj     static const bool host_dependent_precision = false;
1047*38fd1498Szrj     static const bool is_sign_extended = true;
1048*38fd1498Szrj     template <typename T1, typename T2>
1049*38fd1498Szrj     static wide_int get_binary_result (const T1 &, const T2 &);
1050*38fd1498Szrj   };
1051*38fd1498Szrj }
1052*38fd1498Szrj 
1053*38fd1498Szrj inline wide_int_storage::wide_int_storage () {}
1054*38fd1498Szrj 
1055*38fd1498Szrj /* Initialize the storage from integer X, in its natural precision.
1056*38fd1498Szrj    Note that we do not allow integers with host-dependent precision
1057*38fd1498Szrj    to become wide_ints; wide_ints must always be logically independent
1058*38fd1498Szrj    of the host.  */
1059*38fd1498Szrj template <typename T>
1060*38fd1498Szrj inline wide_int_storage::wide_int_storage (const T &x)
1061*38fd1498Szrj {
1062*38fd1498Szrj   { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
1063*38fd1498Szrj   { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
1064*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x);
1065*38fd1498Szrj   precision = xi.precision;
1066*38fd1498Szrj   wi::copy (*this, xi);
1067*38fd1498Szrj }
1068*38fd1498Szrj 
1069*38fd1498Szrj template <typename T>
1070*38fd1498Szrj inline wide_int_storage&
1071*38fd1498Szrj wide_int_storage::operator = (const T &x)
1072*38fd1498Szrj {
1073*38fd1498Szrj   { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
1074*38fd1498Szrj   { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
1075*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x);
1076*38fd1498Szrj   precision = xi.precision;
1077*38fd1498Szrj   wi::copy (*this, xi);
1078*38fd1498Szrj   return *this;
1079*38fd1498Szrj }
1080*38fd1498Szrj 
1081*38fd1498Szrj inline unsigned int
1082*38fd1498Szrj wide_int_storage::get_precision () const
1083*38fd1498Szrj {
1084*38fd1498Szrj   return precision;
1085*38fd1498Szrj }
1086*38fd1498Szrj 
1087*38fd1498Szrj inline const HOST_WIDE_INT *
1088*38fd1498Szrj wide_int_storage::get_val () const
1089*38fd1498Szrj {
1090*38fd1498Szrj   return val;
1091*38fd1498Szrj }
1092*38fd1498Szrj 
1093*38fd1498Szrj inline unsigned int
1094*38fd1498Szrj wide_int_storage::get_len () const
1095*38fd1498Szrj {
1096*38fd1498Szrj   return len;
1097*38fd1498Szrj }
1098*38fd1498Szrj 
1099*38fd1498Szrj inline HOST_WIDE_INT *
1100*38fd1498Szrj wide_int_storage::write_val ()
1101*38fd1498Szrj {
1102*38fd1498Szrj   return val;
1103*38fd1498Szrj }
1104*38fd1498Szrj 
1105*38fd1498Szrj inline void
1106*38fd1498Szrj wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
1107*38fd1498Szrj {
1108*38fd1498Szrj   len = l;
1109*38fd1498Szrj   if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
1110*38fd1498Szrj     val[len - 1] = sext_hwi (val[len - 1],
1111*38fd1498Szrj 			     precision % HOST_BITS_PER_WIDE_INT);
1112*38fd1498Szrj }
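
/* As an illustration of the canonical form that set_len maintains
   (assuming a 64-bit HOST_WIDE_INT): a 32-bit wide_int holding the
   unsigned value 0x80000000 is stored as a single block whose excess
   upper bits are sign-extended from bit 31, i.e.

     len = 1, val[0] = (HOST_WIDE_INT) 0xffffffff80000000

   rather than as the zero-extended 0x0000000080000000.  */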
1113*38fd1498Szrj 
1114*38fd1498Szrj /* Treat X as having signedness SGN and convert it to a PRECISION-bit
1115*38fd1498Szrj    number.  */
1116*38fd1498Szrj inline wide_int
1117*38fd1498Szrj wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
1118*38fd1498Szrj 			signop sgn)
1119*38fd1498Szrj {
1120*38fd1498Szrj   wide_int result = wide_int::create (precision);
1121*38fd1498Szrj   result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
1122*38fd1498Szrj 				     x.precision, precision, sgn));
1123*38fd1498Szrj   return result;
1124*38fd1498Szrj }
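
/* For example (a sketch using made-up values), converting an 8-bit
   quantity with all bits set to a 32-bit wide_int depends on the
   signedness chosen for the input:

     wide_int x8 = wi::uhwi (0xff, 8);
     wide_int s = wide_int::from (x8, 32, SIGNED);    yields 0xffffffff (-1)
     wide_int u = wide_int::from (x8, 32, UNSIGNED);  yields 0x000000ff (255)  */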
1125*38fd1498Szrj 
1126*38fd1498Szrj /* Create a wide_int from the explicit block encoding given by VAL and
1127*38fd1498Szrj    LEN.  PRECISION is the precision of the integer.  NEED_CANON_P is
1128*38fd1498Szrj    true if the encoding may have redundant trailing blocks.  */
1129*38fd1498Szrj inline wide_int
1130*38fd1498Szrj wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
1131*38fd1498Szrj 			      unsigned int precision, bool need_canon_p)
1132*38fd1498Szrj {
1133*38fd1498Szrj   wide_int result = wide_int::create (precision);
1134*38fd1498Szrj   result.set_len (wi::from_array (result.write_val (), val, len, precision,
1135*38fd1498Szrj 				  need_canon_p));
1136*38fd1498Szrj   return result;
1137*38fd1498Szrj }
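
/* For example (a sketch, assuming a 64-bit HOST_WIDE_INT), the 128-bit
   value 2^64 can be built from its blocks, least significant first:

     HOST_WIDE_INT blocks[2] = { 0, 1 };
     wide_int x = wide_int::from_array (blocks, 2, 128);

   NEED_CANON_P could be false here because this encoding already has no
   redundant trailing blocks.  */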
1138*38fd1498Szrj 
1139*38fd1498Szrj /* Return an uninitialized wide_int with precision PRECISION.  */
1140*38fd1498Szrj inline wide_int
1141*38fd1498Szrj wide_int_storage::create (unsigned int precision)
1142*38fd1498Szrj {
1143*38fd1498Szrj   wide_int x;
1144*38fd1498Szrj   x.precision = precision;
1145*38fd1498Szrj   return x;
1146*38fd1498Szrj }
1147*38fd1498Szrj 
1148*38fd1498Szrj template <typename T1, typename T2>
1149*38fd1498Szrj inline wide_int
1150*38fd1498Szrj wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
1151*38fd1498Szrj {
1152*38fd1498Szrj   /* This shouldn't be used for two flexible-precision inputs.  */
1153*38fd1498Szrj   STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
1154*38fd1498Szrj 		 || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
1155*38fd1498Szrj   if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
1156*38fd1498Szrj     return wide_int::create (wi::get_precision (y));
1157*38fd1498Szrj   else
1158*38fd1498Szrj     return wide_int::create (wi::get_precision (x));
1159*38fd1498Szrj }
1160*38fd1498Szrj 
1161*38fd1498Szrj /* The storage used by FIXED_WIDE_INT (N).  */
1162*38fd1498Szrj template <int N>
1163*38fd1498Szrj class GTY(()) fixed_wide_int_storage
1164*38fd1498Szrj {
1165*38fd1498Szrj private:
1166*38fd1498Szrj   HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT];
1167*38fd1498Szrj   unsigned int len;
1168*38fd1498Szrj 
1169*38fd1498Szrj public:
1170*38fd1498Szrj   fixed_wide_int_storage ();
1171*38fd1498Szrj   template <typename T>
1172*38fd1498Szrj   fixed_wide_int_storage (const T &);
1173*38fd1498Szrj 
1174*38fd1498Szrj   /* The standard generic_wide_int storage methods.  */
1175*38fd1498Szrj   unsigned int get_precision () const;
1176*38fd1498Szrj   const HOST_WIDE_INT *get_val () const;
1177*38fd1498Szrj   unsigned int get_len () const;
1178*38fd1498Szrj   HOST_WIDE_INT *write_val ();
1179*38fd1498Szrj   void set_len (unsigned int, bool = false);
1180*38fd1498Szrj 
1181*38fd1498Szrj   static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
1182*38fd1498Szrj   static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
1183*38fd1498Szrj 					bool = true);
1184*38fd1498Szrj };
1185*38fd1498Szrj 
1186*38fd1498Szrj namespace wi
1187*38fd1498Szrj {
1188*38fd1498Szrj   template <int N>
1189*38fd1498Szrj   struct int_traits < fixed_wide_int_storage <N> >
1190*38fd1498Szrj   {
1191*38fd1498Szrj     static const enum precision_type precision_type = CONST_PRECISION;
1192*38fd1498Szrj     static const bool host_dependent_precision = false;
1193*38fd1498Szrj     static const bool is_sign_extended = true;
1194*38fd1498Szrj     static const unsigned int precision = N;
1195*38fd1498Szrj     template <typename T1, typename T2>
1196*38fd1498Szrj     static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
1197*38fd1498Szrj   };
1198*38fd1498Szrj }
1199*38fd1498Szrj 
1200*38fd1498Szrj template <int N>
1201*38fd1498Szrj inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
1202*38fd1498Szrj 
1203*38fd1498Szrj /* Initialize the storage from integer X, in precision N.  */
1204*38fd1498Szrj template <int N>
1205*38fd1498Szrj template <typename T>
1206*38fd1498Szrj inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
1207*38fd1498Szrj {
1208*38fd1498Szrj   /* Check for type compatibility.  We don't want to initialize a
1209*38fd1498Szrj      fixed-width integer from something like a wide_int.  */
1210*38fd1498Szrj   WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
1211*38fd1498Szrj   wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
1212*38fd1498Szrj }
1213*38fd1498Szrj 
1214*38fd1498Szrj template <int N>
1215*38fd1498Szrj inline unsigned int
1216*38fd1498Szrj fixed_wide_int_storage <N>::get_precision () const
1217*38fd1498Szrj {
1218*38fd1498Szrj   return N;
1219*38fd1498Szrj }
1220*38fd1498Szrj 
1221*38fd1498Szrj template <int N>
1222*38fd1498Szrj inline const HOST_WIDE_INT *
1223*38fd1498Szrj fixed_wide_int_storage <N>::get_val () const
1224*38fd1498Szrj {
1225*38fd1498Szrj   return val;
1226*38fd1498Szrj }
1227*38fd1498Szrj 
1228*38fd1498Szrj template <int N>
1229*38fd1498Szrj inline unsigned int
1230*38fd1498Szrj fixed_wide_int_storage <N>::get_len () const
1231*38fd1498Szrj {
1232*38fd1498Szrj   return len;
1233*38fd1498Szrj }
1234*38fd1498Szrj 
1235*38fd1498Szrj template <int N>
1236*38fd1498Szrj inline HOST_WIDE_INT *
1237*38fd1498Szrj fixed_wide_int_storage <N>::write_val ()
1238*38fd1498Szrj {
1239*38fd1498Szrj   return val;
1240*38fd1498Szrj }
1241*38fd1498Szrj 
1242*38fd1498Szrj template <int N>
1243*38fd1498Szrj inline void
1244*38fd1498Szrj fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
1245*38fd1498Szrj {
1246*38fd1498Szrj   len = l;
1247*38fd1498Szrj   /* There are no excess bits in val[len - 1].  */
1248*38fd1498Szrj   STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
1249*38fd1498Szrj }
1250*38fd1498Szrj 
1251*38fd1498Szrj /* Treat X as having signedness SGN and convert it to an N-bit number.  */
1252*38fd1498Szrj template <int N>
1253*38fd1498Szrj inline FIXED_WIDE_INT (N)
1254*38fd1498Szrj fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
1255*38fd1498Szrj {
1256*38fd1498Szrj   FIXED_WIDE_INT (N) result;
1257*38fd1498Szrj   result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
1258*38fd1498Szrj 				     x.precision, N, sgn));
1259*38fd1498Szrj   return result;
1260*38fd1498Szrj }
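
/* For example (a sketch), the fixed-width flavors such as offset_int and
   widest_int are converted from variable-precision values the same way:

     offset_int off = offset_int::from (x, SIGNED);

   where x is any wide_int_ref-compatible value; the conversion extends
   or truncates x to the fixed precision N according to SGN.  */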
1261*38fd1498Szrj 
1262*38fd1498Szrj /* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
1263*38fd1498Szrj    VAL and LEN.  NEED_CANON_P is true if the encoding may have redundant
1264*38fd1498Szrj    trailing blocks.  */
1265*38fd1498Szrj template <int N>
1266*38fd1498Szrj inline FIXED_WIDE_INT (N)
1267*38fd1498Szrj fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
1268*38fd1498Szrj 					unsigned int len,
1269*38fd1498Szrj 					bool need_canon_p)
1270*38fd1498Szrj {
1271*38fd1498Szrj   FIXED_WIDE_INT (N) result;
1272*38fd1498Szrj   result.set_len (wi::from_array (result.write_val (), val, len,
1273*38fd1498Szrj 				  N, need_canon_p));
1274*38fd1498Szrj   return result;
1275*38fd1498Szrj }
1276*38fd1498Szrj 
1277*38fd1498Szrj template <int N>
1278*38fd1498Szrj template <typename T1, typename T2>
1279*38fd1498Szrj inline FIXED_WIDE_INT (N)
1280*38fd1498Szrj wi::int_traits < fixed_wide_int_storage <N> >::
1281*38fd1498Szrj get_binary_result (const T1 &, const T2 &)
1282*38fd1498Szrj {
1283*38fd1498Szrj   return FIXED_WIDE_INT (N) ();
1284*38fd1498Szrj }
1285*38fd1498Szrj 
1286*38fd1498Szrj /* A reference to one element of a trailing_wide_ints structure.  */
1287*38fd1498Szrj class trailing_wide_int_storage
1288*38fd1498Szrj {
1289*38fd1498Szrj private:
1290*38fd1498Szrj   /* The precision of the integer, which is a fixed property of the
1291*38fd1498Szrj      parent trailing_wide_ints.  */
1292*38fd1498Szrj   unsigned int m_precision;
1293*38fd1498Szrj 
1294*38fd1498Szrj   /* A pointer to the length field.  */
1295*38fd1498Szrj   unsigned char *m_len;
1296*38fd1498Szrj 
1297*38fd1498Szrj   /* A pointer to the HWI array.  There are enough elements to hold all
1298*38fd1498Szrj      values of precision M_PRECISION.  */
1299*38fd1498Szrj   HOST_WIDE_INT *m_val;
1300*38fd1498Szrj 
1301*38fd1498Szrj public:
1302*38fd1498Szrj   trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);
1303*38fd1498Szrj 
1304*38fd1498Szrj   /* The standard generic_wide_int storage methods.  */
1305*38fd1498Szrj   unsigned int get_len () const;
1306*38fd1498Szrj   unsigned int get_precision () const;
1307*38fd1498Szrj   const HOST_WIDE_INT *get_val () const;
1308*38fd1498Szrj   HOST_WIDE_INT *write_val ();
1309*38fd1498Szrj   void set_len (unsigned int, bool = false);
1310*38fd1498Szrj 
1311*38fd1498Szrj   template <typename T>
1312*38fd1498Szrj   trailing_wide_int_storage &operator = (const T &);
1313*38fd1498Szrj };
1314*38fd1498Szrj 
1315*38fd1498Szrj typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;
1316*38fd1498Szrj 
1317*38fd1498Szrj /* trailing_wide_int behaves like a wide_int.  */
1318*38fd1498Szrj namespace wi
1319*38fd1498Szrj {
1320*38fd1498Szrj   template <>
1321*38fd1498Szrj   struct int_traits <trailing_wide_int_storage>
1322*38fd1498Szrj     : public int_traits <wide_int_storage> {};
1323*38fd1498Szrj }
1324*38fd1498Szrj 
1325*38fd1498Szrj /* An array of N wide_int-like objects that can be put at the end of
1326*38fd1498Szrj    a variable-sized structure.  Use extra_size to calculate how many
1327*38fd1498Szrj    bytes beyond sizeof (the structure) need to be allocated.  Use
1328*38fd1498Szrj    set_precision to initialize the structure.  */
1329*38fd1498Szrj template <int N>
1330*38fd1498Szrj class GTY((user)) trailing_wide_ints
1331*38fd1498Szrj {
1332*38fd1498Szrj private:
1333*38fd1498Szrj   /* The shared precision of each number.  */
1334*38fd1498Szrj   unsigned short m_precision;
1335*38fd1498Szrj 
1336*38fd1498Szrj   /* The shared maximum length of each number.  */
1337*38fd1498Szrj   unsigned char m_max_len;
1338*38fd1498Szrj 
1339*38fd1498Szrj   /* The current length of each number.  */
1340*38fd1498Szrj   unsigned char m_len[N];
1341*38fd1498Szrj 
1342*38fd1498Szrj   /* The variable-length part of the structure, which always contains
1343*38fd1498Szrj      at least one HWI.  Element I starts at index I * M_MAX_LEN.  */
1344*38fd1498Szrj   HOST_WIDE_INT m_val[1];
1345*38fd1498Szrj 
1346*38fd1498Szrj public:
1347*38fd1498Szrj   typedef WIDE_INT_REF_FOR (trailing_wide_int_storage) const_reference;
1348*38fd1498Szrj 
1349*38fd1498Szrj   void set_precision (unsigned int);
1350*38fd1498Szrj   unsigned int get_precision () const { return m_precision; }
1351*38fd1498Szrj   trailing_wide_int operator [] (unsigned int);
1352*38fd1498Szrj   const_reference operator [] (unsigned int) const;
1353*38fd1498Szrj   static size_t extra_size (unsigned int);
1354*38fd1498Szrj   size_t extra_size () const { return extra_size (m_precision); }
1355*38fd1498Szrj };
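
/* For example (a sketch using a hypothetical node type), a structure
   that ends with two trailing numbers would typically be allocated and
   initialized along these lines:

     struct example_node
     {
       unsigned int other_data;
       trailing_wide_ints <2> ints;
     };

     size_t size = sizeof (example_node)
		   + trailing_wide_ints <2>::extra_size (prec);
     example_node *p = XNEWVAR (example_node, size);
     p->ints.set_precision (prec);
     p->ints[0] = wi::zero (prec);
     p->ints[1] = wi::one (prec);  */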
1356*38fd1498Szrj 
1357*38fd1498Szrj inline trailing_wide_int_storage::
1358*38fd1498Szrj trailing_wide_int_storage (unsigned int precision, unsigned char *len,
1359*38fd1498Szrj 			   HOST_WIDE_INT *val)
1360*38fd1498Szrj   : m_precision (precision), m_len (len), m_val (val)
1361*38fd1498Szrj {
1362*38fd1498Szrj }
1363*38fd1498Szrj 
1364*38fd1498Szrj inline unsigned int
1365*38fd1498Szrj trailing_wide_int_storage::get_len () const
1366*38fd1498Szrj {
1367*38fd1498Szrj   return *m_len;
1368*38fd1498Szrj }
1369*38fd1498Szrj 
1370*38fd1498Szrj inline unsigned int
1371*38fd1498Szrj trailing_wide_int_storage::get_precision () const
1372*38fd1498Szrj {
1373*38fd1498Szrj   return m_precision;
1374*38fd1498Szrj }
1375*38fd1498Szrj 
1376*38fd1498Szrj inline const HOST_WIDE_INT *
1377*38fd1498Szrj trailing_wide_int_storage::get_val () const
1378*38fd1498Szrj {
1379*38fd1498Szrj   return m_val;
1380*38fd1498Szrj }
1381*38fd1498Szrj 
1382*38fd1498Szrj inline HOST_WIDE_INT *
1383*38fd1498Szrj trailing_wide_int_storage::write_val ()
1384*38fd1498Szrj {
1385*38fd1498Szrj   return m_val;
1386*38fd1498Szrj }
1387*38fd1498Szrj 
1388*38fd1498Szrj inline void
1389*38fd1498Szrj trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
1390*38fd1498Szrj {
1391*38fd1498Szrj   *m_len = len;
1392*38fd1498Szrj   if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision)
1393*38fd1498Szrj     m_val[len - 1] = sext_hwi (m_val[len - 1],
1394*38fd1498Szrj 			       m_precision % HOST_BITS_PER_WIDE_INT);
1395*38fd1498Szrj }
1396*38fd1498Szrj 
1397*38fd1498Szrj template <typename T>
1398*38fd1498Szrj inline trailing_wide_int_storage &
1399*38fd1498Szrj trailing_wide_int_storage::operator = (const T &x)
1400*38fd1498Szrj {
1401*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x, m_precision);
1402*38fd1498Szrj   wi::copy (*this, xi);
1403*38fd1498Szrj   return *this;
1404*38fd1498Szrj }
1405*38fd1498Szrj 
1406*38fd1498Szrj /* Initialize the structure and record that all elements have precision
1407*38fd1498Szrj    PRECISION.  */
1408*38fd1498Szrj template <int N>
1409*38fd1498Szrj inline void
1410*38fd1498Szrj trailing_wide_ints <N>::set_precision (unsigned int precision)
1411*38fd1498Szrj {
1412*38fd1498Szrj   m_precision = precision;
1413*38fd1498Szrj   m_max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
1414*38fd1498Szrj 	       / HOST_BITS_PER_WIDE_INT);
1415*38fd1498Szrj }
1416*38fd1498Szrj 
1417*38fd1498Szrj /* Return a reference to element INDEX.  */
1418*38fd1498Szrj template <int N>
1419*38fd1498Szrj inline trailing_wide_int
1420*38fd1498Szrj trailing_wide_ints <N>::operator [] (unsigned int index)
1421*38fd1498Szrj {
1422*38fd1498Szrj   return trailing_wide_int_storage (m_precision, &m_len[index],
1423*38fd1498Szrj 				    &m_val[index * m_max_len]);
1424*38fd1498Szrj }
1425*38fd1498Szrj 
1426*38fd1498Szrj template <int N>
1427*38fd1498Szrj inline typename trailing_wide_ints <N>::const_reference
1428*38fd1498Szrj trailing_wide_ints <N>::operator [] (unsigned int index) const
1429*38fd1498Szrj {
1430*38fd1498Szrj   return wi::storage_ref (&m_val[index * m_max_len],
1431*38fd1498Szrj 			  m_len[index], m_precision);
1432*38fd1498Szrj }
1433*38fd1498Szrj 
1434*38fd1498Szrj /* Return how many extra bytes need to be added to the end of the structure
1435*38fd1498Szrj    in order to handle N wide_ints of precision PRECISION.  */
1436*38fd1498Szrj template <int N>
1437*38fd1498Szrj inline size_t
1438*38fd1498Szrj trailing_wide_ints <N>::extra_size (unsigned int precision)
1439*38fd1498Szrj {
1440*38fd1498Szrj   unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
1441*38fd1498Szrj 			  / HOST_BITS_PER_WIDE_INT);
1442*38fd1498Szrj   return (N * max_len - 1) * sizeof (HOST_WIDE_INT);
1443*38fd1498Szrj }
1444*38fd1498Szrj 
1445*38fd1498Szrj /* This macro is used in structures that end with a trailing_wide_ints field
1446*38fd1498Szrj    called FIELD.  It declares get_NAME() and set_NAME() methods to access
1447*38fd1498Szrj    element I of FIELD.  */
1448*38fd1498Szrj #define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I) \
1449*38fd1498Szrj   trailing_wide_int get_##NAME () { return FIELD[I]; } \
1450*38fd1498Szrj   template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
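
/* For example (a sketch with a hypothetical node type):

     struct example_range_node
     {
       trailing_wide_ints <2> bounds;
       TRAILING_WIDE_INT_ACCESSOR (min, bounds, 0)
       TRAILING_WIDE_INT_ACCESSOR (max, bounds, 1)
     };

   gives each node get_min ()/set_min () and get_max ()/set_max ()
   methods that read and write bounds[0] and bounds[1].  */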
1451*38fd1498Szrj 
1452*38fd1498Szrj namespace wi
1453*38fd1498Szrj {
1454*38fd1498Szrj   /* Implementation of int_traits for primitive integer types like "int".  */
1455*38fd1498Szrj   template <typename T, bool signed_p>
1456*38fd1498Szrj   struct primitive_int_traits
1457*38fd1498Szrj   {
1458*38fd1498Szrj     static const enum precision_type precision_type = FLEXIBLE_PRECISION;
1459*38fd1498Szrj     static const bool host_dependent_precision = true;
1460*38fd1498Szrj     static const bool is_sign_extended = true;
1461*38fd1498Szrj     static unsigned int get_precision (T);
1462*38fd1498Szrj     static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
1463*38fd1498Szrj   };
1464*38fd1498Szrj }
1465*38fd1498Szrj 
1466*38fd1498Szrj template <typename T, bool signed_p>
1467*38fd1498Szrj inline unsigned int
1468*38fd1498Szrj wi::primitive_int_traits <T, signed_p>::get_precision (T)
1469*38fd1498Szrj {
1470*38fd1498Szrj   return sizeof (T) * CHAR_BIT;
1471*38fd1498Szrj }
1472*38fd1498Szrj 
1473*38fd1498Szrj template <typename T, bool signed_p>
1474*38fd1498Szrj inline wi::storage_ref
1475*38fd1498Szrj wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch,
1476*38fd1498Szrj 						   unsigned int precision, T x)
1477*38fd1498Szrj {
1478*38fd1498Szrj   scratch[0] = x;
1479*38fd1498Szrj   if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
1480*38fd1498Szrj     return wi::storage_ref (scratch, 1, precision);
1481*38fd1498Szrj   scratch[1] = 0;
1482*38fd1498Szrj   return wi::storage_ref (scratch, 2, precision);
1483*38fd1498Szrj }
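
/* For example (assuming a 64-bit unsigned long and HOST_WIDE_INT),
   decomposing the unsigned value ULONG_MAX at 128-bit precision needs
   two blocks: scratch[0] is the HWI -1, so without the extra zero block
   the value would read back as -1 rather than 2^64 - 1:

     scratch[0] = -1 (all bits set), scratch[1] = 0, len = 2.  */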
1484*38fd1498Szrj 
1485*38fd1498Szrj /* Allow primitive C types to be used in wi:: routines.  */
1486*38fd1498Szrj namespace wi
1487*38fd1498Szrj {
1488*38fd1498Szrj   template <>
1489*38fd1498Szrj   struct int_traits <unsigned char>
1490*38fd1498Szrj     : public primitive_int_traits <unsigned char, false> {};
1491*38fd1498Szrj 
1492*38fd1498Szrj   template <>
1493*38fd1498Szrj   struct int_traits <unsigned short>
1494*38fd1498Szrj     : public primitive_int_traits <unsigned short, false> {};
1495*38fd1498Szrj 
1496*38fd1498Szrj   template <>
1497*38fd1498Szrj   struct int_traits <int>
1498*38fd1498Szrj     : public primitive_int_traits <int, true> {};
1499*38fd1498Szrj 
1500*38fd1498Szrj   template <>
1501*38fd1498Szrj   struct int_traits <unsigned int>
1502*38fd1498Szrj     : public primitive_int_traits <unsigned int, false> {};
1503*38fd1498Szrj 
1504*38fd1498Szrj   template <>
1505*38fd1498Szrj   struct int_traits <long>
1506*38fd1498Szrj     : public primitive_int_traits <long, true> {};
1507*38fd1498Szrj 
1508*38fd1498Szrj   template <>
1509*38fd1498Szrj   struct int_traits <unsigned long>
1510*38fd1498Szrj     : public primitive_int_traits <unsigned long, false> {};
1511*38fd1498Szrj 
1512*38fd1498Szrj #if defined HAVE_LONG_LONG
1513*38fd1498Szrj   template <>
1514*38fd1498Szrj   struct int_traits <long long>
1515*38fd1498Szrj     : public primitive_int_traits <long long, true> {};
1516*38fd1498Szrj 
1517*38fd1498Szrj   template <>
1518*38fd1498Szrj   struct int_traits <unsigned long long>
1519*38fd1498Szrj     : public primitive_int_traits <unsigned long long, false> {};
1520*38fd1498Szrj #endif
1521*38fd1498Szrj }
1522*38fd1498Szrj 
1523*38fd1498Szrj namespace wi
1524*38fd1498Szrj {
1525*38fd1498Szrj   /* Stores HWI-sized integer VAL, treating it as having signedness SGN
1526*38fd1498Szrj      and precision PRECISION.  */
1527*38fd1498Szrj   struct hwi_with_prec
1528*38fd1498Szrj   {
1529*38fd1498Szrj     hwi_with_prec () {}
1530*38fd1498Szrj     hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
1531*38fd1498Szrj     HOST_WIDE_INT val;
1532*38fd1498Szrj     unsigned int precision;
1533*38fd1498Szrj     signop sgn;
1534*38fd1498Szrj   };
1535*38fd1498Szrj 
1536*38fd1498Szrj   hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
1537*38fd1498Szrj   hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);
1538*38fd1498Szrj 
1539*38fd1498Szrj   hwi_with_prec minus_one (unsigned int);
1540*38fd1498Szrj   hwi_with_prec zero (unsigned int);
1541*38fd1498Szrj   hwi_with_prec one (unsigned int);
1542*38fd1498Szrj   hwi_with_prec two (unsigned int);
1543*38fd1498Szrj }
1544*38fd1498Szrj 
1545*38fd1498Szrj inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p,
1546*38fd1498Szrj 					 signop s)
1547*38fd1498Szrj   : precision (p), sgn (s)
1548*38fd1498Szrj {
1549*38fd1498Szrj   if (precision < HOST_BITS_PER_WIDE_INT)
1550*38fd1498Szrj     val = sext_hwi (v, precision);
1551*38fd1498Szrj   else
1552*38fd1498Szrj     val = v;
1553*38fd1498Szrj }
1554*38fd1498Szrj 
1555*38fd1498Szrj /* Return a signed integer that has value VAL and precision PRECISION.  */
1556*38fd1498Szrj inline wi::hwi_with_prec
1557*38fd1498Szrj wi::shwi (HOST_WIDE_INT val, unsigned int precision)
1558*38fd1498Szrj {
1559*38fd1498Szrj   return hwi_with_prec (val, precision, SIGNED);
1560*38fd1498Szrj }
1561*38fd1498Szrj 
1562*38fd1498Szrj /* Return an unsigned integer that has value VAL and precision PRECISION.  */
1563*38fd1498Szrj inline wi::hwi_with_prec
1564*38fd1498Szrj wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
1565*38fd1498Szrj {
1566*38fd1498Szrj   return hwi_with_prec (val, precision, UNSIGNED);
1567*38fd1498Szrj }
1568*38fd1498Szrj 
1569*38fd1498Szrj /* Return a wide int of -1 with precision PRECISION.  */
1570*38fd1498Szrj inline wi::hwi_with_prec
1571*38fd1498Szrj wi::minus_one (unsigned int precision)
1572*38fd1498Szrj {
1573*38fd1498Szrj   return wi::shwi (-1, precision);
1574*38fd1498Szrj }
1575*38fd1498Szrj 
1576*38fd1498Szrj /* Return a wide int of 0 with precision PRECISION.  */
1577*38fd1498Szrj inline wi::hwi_with_prec
1578*38fd1498Szrj wi::zero (unsigned int precision)
1579*38fd1498Szrj {
1580*38fd1498Szrj   return wi::shwi (0, precision);
1581*38fd1498Szrj }
1582*38fd1498Szrj 
1583*38fd1498Szrj /* Return a wide int of 1 with precision PRECISION.  */
1584*38fd1498Szrj inline wi::hwi_with_prec
1585*38fd1498Szrj wi::one (unsigned int precision)
1586*38fd1498Szrj {
1587*38fd1498Szrj   return wi::shwi (1, precision);
1588*38fd1498Szrj }
1589*38fd1498Szrj 
1590*38fd1498Szrj /* Return a wide int of 2 with precision PRECISION.  */
1591*38fd1498Szrj inline wi::hwi_with_prec
1592*38fd1498Szrj wi::two (unsigned int precision)
1593*38fd1498Szrj {
1594*38fd1498Szrj   return wi::shwi (2, precision);
1595*38fd1498Szrj }
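
/* For example (a sketch):

     wide_int a = wi::shwi (-5, 32);     32-bit -5
     wide_int b = wi::uhwi (200, 8);     8-bit 200; hwi_with_prec
					 sign-extends from bit 7, so the
					 stored HWI is -56
     wide_int m = wi::minus_one (16);    16-bit value with all bits set  */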
1596*38fd1498Szrj 
1597*38fd1498Szrj namespace wi
1598*38fd1498Szrj {
1599*38fd1498Szrj   /* ints_for<T>::zero (X) returns a zero that, when assigned to a T,
1600*38fd1498Szrj      gives that T the same precision as X.  */
1601*38fd1498Szrj   template<typename T, precision_type = int_traits<T>::precision_type>
1602*38fd1498Szrj   struct ints_for
1603*38fd1498Szrj   {
1604*38fd1498Szrj     static int zero (const T &) { return 0; }
1605*38fd1498Szrj   };
1606*38fd1498Szrj 
1607*38fd1498Szrj   template<typename T>
1608*38fd1498Szrj   struct ints_for<T, VAR_PRECISION>
1609*38fd1498Szrj   {
1610*38fd1498Szrj     static hwi_with_prec zero (const T &);
1611*38fd1498Szrj   };
1612*38fd1498Szrj }
1613*38fd1498Szrj 
1614*38fd1498Szrj template<typename T>
1615*38fd1498Szrj inline wi::hwi_with_prec
1616*38fd1498Szrj wi::ints_for<T, wi::VAR_PRECISION>::zero (const T &x)
1617*38fd1498Szrj {
1618*38fd1498Szrj   return wi::zero (wi::get_precision (x));
1619*38fd1498Szrj }
1620*38fd1498Szrj 
1621*38fd1498Szrj namespace wi
1622*38fd1498Szrj {
1623*38fd1498Szrj   template <>
1624*38fd1498Szrj   struct int_traits <wi::hwi_with_prec>
1625*38fd1498Szrj   {
1626*38fd1498Szrj     static const enum precision_type precision_type = VAR_PRECISION;
1627*38fd1498Szrj     /* hwi_with_prec has an explicitly-given precision, rather than the
1628*38fd1498Szrj        precision of HOST_WIDE_INT.  */
1629*38fd1498Szrj     static const bool host_dependent_precision = false;
1630*38fd1498Szrj     static const bool is_sign_extended = true;
1631*38fd1498Szrj     static unsigned int get_precision (const wi::hwi_with_prec &);
1632*38fd1498Szrj     static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
1633*38fd1498Szrj 				      const wi::hwi_with_prec &);
1634*38fd1498Szrj   };
1635*38fd1498Szrj }
1636*38fd1498Szrj 
1637*38fd1498Szrj inline unsigned int
1638*38fd1498Szrj wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
1639*38fd1498Szrj {
1640*38fd1498Szrj   return x.precision;
1641*38fd1498Szrj }
1642*38fd1498Szrj 
1643*38fd1498Szrj inline wi::storage_ref
1644*38fd1498Szrj wi::int_traits <wi::hwi_with_prec>::
1645*38fd1498Szrj decompose (HOST_WIDE_INT *scratch, unsigned int precision,
1646*38fd1498Szrj 	   const wi::hwi_with_prec &x)
1647*38fd1498Szrj {
1648*38fd1498Szrj   gcc_checking_assert (precision == x.precision);
1649*38fd1498Szrj   scratch[0] = x.val;
1650*38fd1498Szrj   if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
1651*38fd1498Szrj     return wi::storage_ref (scratch, 1, precision);
1652*38fd1498Szrj   scratch[1] = 0;
1653*38fd1498Szrj   return wi::storage_ref (scratch, 2, precision);
1654*38fd1498Szrj }
1655*38fd1498Szrj 
1656*38fd1498Szrj /* Private functions for handling large cases out of line.  They take
1657*38fd1498Szrj    individual length and array parameters because that is cheaper for
1658*38fd1498Szrj    the inline caller than constructing an object on the stack and
1659*38fd1498Szrj    passing a reference to it.  (Although many callers use wide_int_refs,
1660*38fd1498Szrj    we generally want those to be removed by SRA.)  */
1661*38fd1498Szrj namespace wi
1662*38fd1498Szrj {
1663*38fd1498Szrj   bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
1664*38fd1498Szrj 		   const HOST_WIDE_INT *, unsigned int, unsigned int);
1665*38fd1498Szrj   bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
1666*38fd1498Szrj 		    const HOST_WIDE_INT *, unsigned int);
1667*38fd1498Szrj   bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
1668*38fd1498Szrj 		    const HOST_WIDE_INT *, unsigned int);
1669*38fd1498Szrj   int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
1670*38fd1498Szrj 		  const HOST_WIDE_INT *, unsigned int);
1671*38fd1498Szrj   int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
1672*38fd1498Szrj 		  const HOST_WIDE_INT *, unsigned int);
1673*38fd1498Szrj   unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1674*38fd1498Szrj 			   unsigned int,
1675*38fd1498Szrj 			   unsigned int, unsigned int);
1676*38fd1498Szrj   unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1677*38fd1498Szrj 			   unsigned int,
1678*38fd1498Szrj 			   unsigned int, unsigned int);
1679*38fd1498Szrj   unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1680*38fd1498Szrj 			      unsigned int, unsigned int, unsigned int);
1681*38fd1498Szrj   unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1682*38fd1498Szrj 			     unsigned int, unsigned int, unsigned int);
1683*38fd1498Szrj   unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1684*38fd1498Szrj 			      unsigned int, unsigned int, unsigned int,
1685*38fd1498Szrj 			      unsigned int);
1686*38fd1498Szrj   unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1687*38fd1498Szrj 			      unsigned int, unsigned int, unsigned int,
1688*38fd1498Szrj 			      unsigned int);
1689*38fd1498Szrj   unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
1690*38fd1498Szrj 			  const HOST_WIDE_INT *, unsigned int, unsigned int);
1691*38fd1498Szrj   unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1692*38fd1498Szrj 			      unsigned int, const HOST_WIDE_INT *,
1693*38fd1498Szrj 			      unsigned int, unsigned int);
1694*38fd1498Szrj   unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
1695*38fd1498Szrj 			 const HOST_WIDE_INT *, unsigned int, unsigned int);
1696*38fd1498Szrj   unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1697*38fd1498Szrj 			     unsigned int, const HOST_WIDE_INT *,
1698*38fd1498Szrj 			     unsigned int, unsigned int);
1699*38fd1498Szrj   unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
1700*38fd1498Szrj 			  const HOST_WIDE_INT *, unsigned int, unsigned int);
1701*38fd1498Szrj   unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
1702*38fd1498Szrj 			  const HOST_WIDE_INT *, unsigned int, unsigned int,
1703*38fd1498Szrj 			  signop, bool *);
1704*38fd1498Szrj   unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
1705*38fd1498Szrj 			  const HOST_WIDE_INT *, unsigned int, unsigned int,
1706*38fd1498Szrj 			  signop, bool *);
1707*38fd1498Szrj   unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
1708*38fd1498Szrj 			     unsigned int, const HOST_WIDE_INT *,
1709*38fd1498Szrj 			     unsigned int, unsigned int, signop, bool *,
1710*38fd1498Szrj 			     bool);
1711*38fd1498Szrj   unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
1712*38fd1498Szrj 				HOST_WIDE_INT *, const HOST_WIDE_INT *,
1713*38fd1498Szrj 				unsigned int, unsigned int,
1714*38fd1498Szrj 				const HOST_WIDE_INT *,
1715*38fd1498Szrj 				unsigned int, unsigned int,
1716*38fd1498Szrj 				signop, bool *);
1717*38fd1498Szrj }
1718*38fd1498Szrj 
1719*38fd1498Szrj /* Return the number of bits that integer X can hold.  */
1720*38fd1498Szrj template <typename T>
1721*38fd1498Szrj inline unsigned int
1722*38fd1498Szrj wi::get_precision (const T &x)
1723*38fd1498Szrj {
1724*38fd1498Szrj   return wi::int_traits <T>::get_precision (x);
1725*38fd1498Szrj }
1726*38fd1498Szrj 
1727*38fd1498Szrj /* Return the number of bits that the result of a binary operation can
1728*38fd1498Szrj    hold when the input operands are X and Y.  */
1729*38fd1498Szrj template <typename T1, typename T2>
1730*38fd1498Szrj inline unsigned int
1731*38fd1498Szrj wi::get_binary_precision (const T1 &x, const T2 &y)
1732*38fd1498Szrj {
1733*38fd1498Szrj   return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
1734*38fd1498Szrj 			get_binary_result (x, y));
1735*38fd1498Szrj }
1736*38fd1498Szrj 
1737*38fd1498Szrj /* Copy the contents of Y to X, but keeping X's current precision.  */
1738*38fd1498Szrj template <typename T1, typename T2>
1739*38fd1498Szrj inline void
1740*38fd1498Szrj wi::copy (T1 &x, const T2 &y)
1741*38fd1498Szrj {
1742*38fd1498Szrj   HOST_WIDE_INT *xval = x.write_val ();
1743*38fd1498Szrj   const HOST_WIDE_INT *yval = y.get_val ();
1744*38fd1498Szrj   unsigned int len = y.get_len ();
1745*38fd1498Szrj   unsigned int i = 0;
1746*38fd1498Szrj   do
1747*38fd1498Szrj     xval[i] = yval[i];
1748*38fd1498Szrj   while (++i < len);
1749*38fd1498Szrj   x.set_len (len, y.is_sign_extended);
1750*38fd1498Szrj }
1751*38fd1498Szrj 
1752*38fd1498Szrj /* Return true if X fits in a signed HOST_WIDE_INT with no loss of precision.  */
1753*38fd1498Szrj template <typename T>
1754*38fd1498Szrj inline bool
1755*38fd1498Szrj wi::fits_shwi_p (const T &x)
1756*38fd1498Szrj {
1757*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x);
1758*38fd1498Szrj   return xi.len == 1;
1759*38fd1498Szrj }
1760*38fd1498Szrj 
1761*38fd1498Szrj /* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
1762*38fd1498Szrj    precision.  */
1763*38fd1498Szrj template <typename T>
1764*38fd1498Szrj inline bool
1765*38fd1498Szrj wi::fits_uhwi_p (const T &x)
1766*38fd1498Szrj {
1767*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x);
1768*38fd1498Szrj   if (xi.precision <= HOST_BITS_PER_WIDE_INT)
1769*38fd1498Szrj     return true;
1770*38fd1498Szrj   if (xi.len == 1)
1771*38fd1498Szrj     return xi.slow () >= 0;
1772*38fd1498Szrj   return xi.len == 2 && xi.uhigh () == 0;
1773*38fd1498Szrj }
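
/* For example, for a 128-bit X (assuming a 64-bit HOST_WIDE_INT):

     X == -1         fits_shwi_p: true    fits_uhwi_p: false
     X == 2^64 - 1   fits_shwi_p: false   fits_uhwi_p: true
     X == 2^64       fits_shwi_p: false   fits_uhwi_p: false  */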
1774*38fd1498Szrj 
1775*38fd1498Szrj /* Return true if X is negative based on the interpretation of SGN.
1776*38fd1498Szrj    For UNSIGNED, this is always false.  */
1777*38fd1498Szrj template <typename T>
1778*38fd1498Szrj inline bool
1779*38fd1498Szrj wi::neg_p (const T &x, signop sgn)
1780*38fd1498Szrj {
1781*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x);
1782*38fd1498Szrj   if (sgn == UNSIGNED)
1783*38fd1498Szrj     return false;
1784*38fd1498Szrj   return xi.sign_mask () < 0;
1785*38fd1498Szrj }
1786*38fd1498Szrj 
1787*38fd1498Szrj /* Return -1 if the top bit of X is set and 0 if the top bit is clear.  */
1788*38fd1498Szrj template <typename T>
1789*38fd1498Szrj inline HOST_WIDE_INT
1790*38fd1498Szrj wi::sign_mask (const T &x)
1791*38fd1498Szrj {
1792*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x);
1793*38fd1498Szrj   return xi.sign_mask ();
1794*38fd1498Szrj }
1795*38fd1498Szrj 
1796*38fd1498Szrj /* Return true if X == Y.  X and Y must be binary-compatible.  */
1797*38fd1498Szrj template <typename T1, typename T2>
1798*38fd1498Szrj inline bool
1799*38fd1498Szrj wi::eq_p (const T1 &x, const T2 &y)
1800*38fd1498Szrj {
1801*38fd1498Szrj   unsigned int precision = get_binary_precision (x, y);
1802*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
1803*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
1804*38fd1498Szrj   if (xi.is_sign_extended && yi.is_sign_extended)
1805*38fd1498Szrj     {
1806*38fd1498Szrj       /* This case reduces to array equality.  */
1807*38fd1498Szrj       if (xi.len != yi.len)
1808*38fd1498Szrj 	return false;
1809*38fd1498Szrj       unsigned int i = 0;
1810*38fd1498Szrj       do
1811*38fd1498Szrj 	if (xi.val[i] != yi.val[i])
1812*38fd1498Szrj 	  return false;
1813*38fd1498Szrj       while (++i != xi.len);
1814*38fd1498Szrj       return true;
1815*38fd1498Szrj     }
1816*38fd1498Szrj   if (__builtin_expect (yi.len == 1, true))
1817*38fd1498Szrj     {
1818*38fd1498Szrj       /* XI is only equal to YI if it too is represented by a single HWI.  */
1819*38fd1498Szrj       if (xi.len != 1)
1820*38fd1498Szrj 	return false;
1821*38fd1498Szrj       /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
1822*38fd1498Szrj 	 with 0 are simple.  */
1823*38fd1498Szrj       if (STATIC_CONSTANT_P (yi.val[0] == 0))
1824*38fd1498Szrj 	return xi.val[0] == 0;
1825*38fd1498Szrj       /* Otherwise flush out any excess bits first.  */
1826*38fd1498Szrj       unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
1827*38fd1498Szrj       int excess = HOST_BITS_PER_WIDE_INT - precision;
1828*38fd1498Szrj       if (excess > 0)
1829*38fd1498Szrj 	diff <<= excess;
1830*38fd1498Szrj       return diff == 0;
1831*38fd1498Szrj     }
1832*38fd1498Szrj   return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
1833*38fd1498Szrj }
1834*38fd1498Szrj 
1835*38fd1498Szrj /* Return true if X != Y.  X and Y must be binary-compatible.  */
1836*38fd1498Szrj template <typename T1, typename T2>
1837*38fd1498Szrj inline bool
1838*38fd1498Szrj wi::ne_p (const T1 &x, const T2 &y)
1839*38fd1498Szrj {
1840*38fd1498Szrj   return !eq_p (x, y);
1841*38fd1498Szrj }
1842*38fd1498Szrj 
1843*38fd1498Szrj /* Return true if X < Y when both are treated as signed values.  */
1844*38fd1498Szrj template <typename T1, typename T2>
1845*38fd1498Szrj inline bool
1846*38fd1498Szrj wi::lts_p (const T1 &x, const T2 &y)
1847*38fd1498Szrj {
1848*38fd1498Szrj   unsigned int precision = get_binary_precision (x, y);
1849*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
1850*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
1851*38fd1498Szrj   /* We optimize x < y, where y fits in a signed HWI.  */
1852*38fd1498Szrj   if (wi::fits_shwi_p (yi))
1853*38fd1498Szrj     {
1854*38fd1498Szrj       /* Make lts_p (x, 0) as efficient as wi::neg_p (x).  */
1855*38fd1498Szrj       if (STATIC_CONSTANT_P (yi.val[0] == 0))
1856*38fd1498Szrj 	return neg_p (xi);
1857*38fd1498Szrj       /* If x fits directly into a shwi, we can compare directly.  */
1858*38fd1498Szrj       if (wi::fits_shwi_p (xi))
1859*38fd1498Szrj 	return xi.to_shwi () < yi.to_shwi ();
1860*38fd1498Szrj       /* If x doesn't fit and is negative, then it must be more
1861*38fd1498Szrj	 negative than any signed HWI, and hence smaller than y.  */
1862*38fd1498Szrj       if (neg_p (xi))
1863*38fd1498Szrj 	return true;
1864*38fd1498Szrj       /* If x is positive, then it must be larger than any signed HWI,
1865*38fd1498Szrj	 and hence greater than y.  */
1866*38fd1498Szrj       return false;
1867*38fd1498Szrj     }
1868*38fd1498Szrj   /* Optimize the opposite case, if it can be detected at compile time.  */
1869*38fd1498Szrj   if (STATIC_CONSTANT_P (xi.len == 1))
1870*38fd1498Szrj     /* If YI is negative it is lower than the least HWI.
1871*38fd1498Szrj        If YI is positive it is greater than the greatest HWI.  */
1872*38fd1498Szrj     return !neg_p (yi);
1873*38fd1498Szrj   return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
1874*38fd1498Szrj }
1875*38fd1498Szrj 
1876*38fd1498Szrj /* Return true if X < Y when both are treated as unsigned values.  */
1877*38fd1498Szrj template <typename T1, typename T2>
1878*38fd1498Szrj inline bool
1879*38fd1498Szrj wi::ltu_p (const T1 &x, const T2 &y)
1880*38fd1498Szrj {
1881*38fd1498Szrj   unsigned int precision = get_binary_precision (x, y);
1882*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
1883*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
1884*38fd1498Szrj   /* Optimize comparisons with constants.  */
1885*38fd1498Szrj   if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
1886*38fd1498Szrj     return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INT) yi.val[0];
1887*38fd1498Szrj   if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
1888*38fd1498Szrj     return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INT) xi.val[0];
1889*38fd1498Szrj   /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
1890*38fd1498Szrj      for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending both
1891*38fd1498Szrj      values does not change the result.  */
1892*38fd1498Szrj   if (__builtin_expect (xi.len + yi.len == 2, true))
1893*38fd1498Szrj     {
1894*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
1895*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
1896*38fd1498Szrj       return xl < yl;
1897*38fd1498Szrj     }
1898*38fd1498Szrj   return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
1899*38fd1498Szrj }
1900*38fd1498Szrj 
1901*38fd1498Szrj /* Return true if X < Y.  Signedness of X and Y is indicated by SGN.  */
1902*38fd1498Szrj template <typename T1, typename T2>
1903*38fd1498Szrj inline bool
1904*38fd1498Szrj wi::lt_p (const T1 &x, const T2 &y, signop sgn)
1905*38fd1498Szrj {
1906*38fd1498Szrj   if (sgn == SIGNED)
1907*38fd1498Szrj     return lts_p (x, y);
1908*38fd1498Szrj   else
1909*38fd1498Szrj     return ltu_p (x, y);
1910*38fd1498Szrj }
1911*38fd1498Szrj 
1912*38fd1498Szrj /* Return true if X <= Y when both are treated as signed values.  */
1913*38fd1498Szrj template <typename T1, typename T2>
1914*38fd1498Szrj inline bool
1915*38fd1498Szrj wi::les_p (const T1 &x, const T2 &y)
1916*38fd1498Szrj {
1917*38fd1498Szrj   return !lts_p (y, x);
1918*38fd1498Szrj }
1919*38fd1498Szrj 
1920*38fd1498Szrj /* Return true if X <= Y when both are treated as unsigned values.  */
1921*38fd1498Szrj template <typename T1, typename T2>
1922*38fd1498Szrj inline bool
1923*38fd1498Szrj wi::leu_p (const T1 &x, const T2 &y)
1924*38fd1498Szrj {
1925*38fd1498Szrj   return !ltu_p (y, x);
1926*38fd1498Szrj }
1927*38fd1498Szrj 
1928*38fd1498Szrj /* Return true if X <= Y.  Signedness of X and Y is indicated by SGN.  */
1929*38fd1498Szrj template <typename T1, typename T2>
1930*38fd1498Szrj inline bool
1931*38fd1498Szrj wi::le_p (const T1 &x, const T2 &y, signop sgn)
1932*38fd1498Szrj {
1933*38fd1498Szrj   if (sgn == SIGNED)
1934*38fd1498Szrj     return les_p (x, y);
1935*38fd1498Szrj   else
1936*38fd1498Szrj     return leu_p (x, y);
1937*38fd1498Szrj }
1938*38fd1498Szrj 
1939*38fd1498Szrj /* Return true if X > Y when both are treated as signed values.  */
1940*38fd1498Szrj template <typename T1, typename T2>
1941*38fd1498Szrj inline bool
1942*38fd1498Szrj wi::gts_p (const T1 &x, const T2 &y)
1943*38fd1498Szrj {
1944*38fd1498Szrj   return lts_p (y, x);
1945*38fd1498Szrj }
1946*38fd1498Szrj 
1947*38fd1498Szrj /* Return true if X > Y when both are treated as unsigned values.  */
1948*38fd1498Szrj template <typename T1, typename T2>
1949*38fd1498Szrj inline bool
1950*38fd1498Szrj wi::gtu_p (const T1 &x, const T2 &y)
1951*38fd1498Szrj {
1952*38fd1498Szrj   return ltu_p (y, x);
1953*38fd1498Szrj }
1954*38fd1498Szrj 
1955*38fd1498Szrj /* Return true if X > Y.  Signedness of X and Y is indicated by SGN.  */
1956*38fd1498Szrj template <typename T1, typename T2>
1957*38fd1498Szrj inline bool
1958*38fd1498Szrj wi::gt_p (const T1 &x, const T2 &y, signop sgn)
1959*38fd1498Szrj {
1960*38fd1498Szrj   if (sgn == SIGNED)
1961*38fd1498Szrj     return gts_p (x, y);
1962*38fd1498Szrj   else
1963*38fd1498Szrj     return gtu_p (x, y);
1964*38fd1498Szrj }
1965*38fd1498Szrj 
1966*38fd1498Szrj /* Return true if X >= Y when both are treated as signed values.  */
1967*38fd1498Szrj template <typename T1, typename T2>
1968*38fd1498Szrj inline bool
1969*38fd1498Szrj wi::ges_p (const T1 &x, const T2 &y)
1970*38fd1498Szrj {
1971*38fd1498Szrj   return !lts_p (x, y);
1972*38fd1498Szrj }
1973*38fd1498Szrj 
1974*38fd1498Szrj /* Return true if X >= Y when both are treated as unsigned values.  */
1975*38fd1498Szrj template <typename T1, typename T2>
1976*38fd1498Szrj inline bool
1977*38fd1498Szrj wi::geu_p (const T1 &x, const T2 &y)
1978*38fd1498Szrj {
1979*38fd1498Szrj   return !ltu_p (x, y);
1980*38fd1498Szrj }
1981*38fd1498Szrj 
1982*38fd1498Szrj /* Return true if X >= Y.  Signedness of X and Y is indicated by SGN.  */
1983*38fd1498Szrj template <typename T1, typename T2>
1984*38fd1498Szrj inline bool
1985*38fd1498Szrj wi::ge_p (const T1 &x, const T2 &y, signop sgn)
1986*38fd1498Szrj {
1987*38fd1498Szrj   if (sgn == SIGNED)
1988*38fd1498Szrj     return ges_p (x, y);
1989*38fd1498Szrj   else
1990*38fd1498Szrj     return geu_p (x, y);
1991*38fd1498Szrj }
1992*38fd1498Szrj 
1993*38fd1498Szrj /* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
1994*38fd1498Szrj    as signed values.  */
1995*38fd1498Szrj template <typename T1, typename T2>
1996*38fd1498Szrj inline int
1997*38fd1498Szrj wi::cmps (const T1 &x, const T2 &y)
1998*38fd1498Szrj {
1999*38fd1498Szrj   unsigned int precision = get_binary_precision (x, y);
2000*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2001*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2002*38fd1498Szrj   if (wi::fits_shwi_p (yi))
2003*38fd1498Szrj     {
2004*38fd1498Szrj       /* Special case for comparisons with 0.  */
2005*38fd1498Szrj       if (STATIC_CONSTANT_P (yi.val[0] == 0))
2006*38fd1498Szrj 	return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
2007*38fd1498Szrj       /* If x fits into a signed HWI, we can compare directly.  */
2008*38fd1498Szrj       if (wi::fits_shwi_p (xi))
2009*38fd1498Szrj 	{
2010*38fd1498Szrj 	  HOST_WIDE_INT xl = xi.to_shwi ();
2011*38fd1498Szrj 	  HOST_WIDE_INT yl = yi.to_shwi ();
2012*38fd1498Szrj 	  return xl < yl ? -1 : xl > yl;
2013*38fd1498Szrj 	}
2014*38fd1498Szrj       /* If x doesn't fit and is negative, then it must be more
2015*38fd1498Szrj 	 negative than any signed HWI, and hence smaller than y.  */
2016*38fd1498Szrj       if (neg_p (xi))
2017*38fd1498Szrj 	return -1;
2018*38fd1498Szrj       /* If x is positive, then it must be larger than any signed HWI,
2019*38fd1498Szrj 	 and hence greater than y.  */
2020*38fd1498Szrj       return 1;
2021*38fd1498Szrj     }
2022*38fd1498Szrj   /* Optimize the opposite case, if it can be detected at compile time.  */
2023*38fd1498Szrj   if (STATIC_CONSTANT_P (xi.len == 1))
2024*38fd1498Szrj     /* If YI is negative it is lower than the least HWI.
2025*38fd1498Szrj        If YI is positive it is greater than the greatest HWI.  */
2026*38fd1498Szrj     return neg_p (yi) ? 1 : -1;
2027*38fd1498Szrj   return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
2028*38fd1498Szrj }
2029*38fd1498Szrj 
2030*38fd1498Szrj /* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
2031*38fd1498Szrj    as unsigned values.  */
2032*38fd1498Szrj template <typename T1, typename T2>
2033*38fd1498Szrj inline int
2034*38fd1498Szrj wi::cmpu (const T1 &x, const T2 &y)
2035*38fd1498Szrj {
2036*38fd1498Szrj   unsigned int precision = get_binary_precision (x, y);
2037*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2038*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2039*38fd1498Szrj   /* Optimize comparisons with constants.  */
2040*38fd1498Szrj   if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
2041*38fd1498Szrj     {
2042*38fd1498Szrj       /* If XI doesn't fit in a HWI then it must be larger than YI.  */
2043*38fd1498Szrj       if (xi.len != 1)
2044*38fd1498Szrj 	return 1;
2045*38fd1498Szrj       /* Otherwise compare directly.  */
2046*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
2047*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.val[0];
2048*38fd1498Szrj       return xl < yl ? -1 : xl > yl;
2049*38fd1498Szrj     }
2050*38fd1498Szrj   if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
2051*38fd1498Szrj     {
2052*38fd1498Szrj       /* If YI doesn't fit in a HWI then it must be larger than XI.  */
2053*38fd1498Szrj       if (yi.len != 1)
2054*38fd1498Szrj 	return -1;
2055*38fd1498Szrj       /* Otherwise compare directly.  */
2056*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.val[0];
2057*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
2058*38fd1498Szrj       return xl < yl ? -1 : xl > yl;
2059*38fd1498Szrj     }
2060*38fd1498Szrj   /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
2061*38fd1498Szrj      for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending both
2062*38fd1498Szrj      values does not change the result.  */
2063*38fd1498Szrj   if (__builtin_expect (xi.len + yi.len == 2, true))
2064*38fd1498Szrj     {
2065*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
2066*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
2067*38fd1498Szrj       return xl < yl ? -1 : xl > yl;
2068*38fd1498Szrj     }
2069*38fd1498Szrj   return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
2070*38fd1498Szrj }
2071*38fd1498Szrj 
2072*38fd1498Szrj /* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Signedness of
2073*38fd1498Szrj    X and Y indicated by SGN.  */
2074*38fd1498Szrj template <typename T1, typename T2>
2075*38fd1498Szrj inline int
2076*38fd1498Szrj wi::cmp (const T1 &x, const T2 &y, signop sgn)
2077*38fd1498Szrj {
2078*38fd1498Szrj   if (sgn == SIGNED)
2079*38fd1498Szrj     return cmps (x, y);
2080*38fd1498Szrj   else
2081*38fd1498Szrj     return cmpu (x, y);
2082*38fd1498Szrj }
2083*38fd1498Szrj 
2084*38fd1498Szrj /* Return ~x.  */
2085*38fd1498Szrj template <typename T>
2086*38fd1498Szrj inline WI_UNARY_RESULT (T)
2087*38fd1498Szrj wi::bit_not (const T &x)
2088*38fd1498Szrj {
2089*38fd1498Szrj   WI_UNARY_RESULT_VAR (result, val, T, x);
2090*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
2091*38fd1498Szrj   for (unsigned int i = 0; i < xi.len; ++i)
2092*38fd1498Szrj     val[i] = ~xi.val[i];
2093*38fd1498Szrj   result.set_len (xi.len);
2094*38fd1498Szrj   return result;
2095*38fd1498Szrj }
2096*38fd1498Szrj 
2097*38fd1498Szrj /* Return -x.  */
2098*38fd1498Szrj template <typename T>
2099*38fd1498Szrj inline WI_UNARY_RESULT (T)
2100*38fd1498Szrj wi::neg (const T &x)
2101*38fd1498Szrj {
2102*38fd1498Szrj   return sub (0, x);
2103*38fd1498Szrj }
2104*38fd1498Szrj 
2105*38fd1498Szrj /* Return -x.  Indicate in *OVERFLOW if X is the minimum signed value.  */
2106*38fd1498Szrj template <typename T>
2107*38fd1498Szrj inline WI_UNARY_RESULT (T)
2108*38fd1498Szrj wi::neg (const T &x, bool *overflow)
2109*38fd1498Szrj {
2110*38fd1498Szrj   *overflow = only_sign_bit_p (x);
2111*38fd1498Szrj   return sub (0, x);
2112*38fd1498Szrj }
2113*38fd1498Szrj 
2114*38fd1498Szrj /* Return the absolute value of x.  */
2115*38fd1498Szrj template <typename T>
2116*38fd1498Szrj inline WI_UNARY_RESULT (T)
2117*38fd1498Szrj wi::abs (const T &x)
2118*38fd1498Szrj {
2119*38fd1498Szrj   return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T) (x);
2120*38fd1498Szrj }
2121*38fd1498Szrj 
2122*38fd1498Szrj /* Return the result of sign-extending the low OFFSET bits of X.  */
2123*38fd1498Szrj template <typename T>
2124*38fd1498Szrj inline WI_UNARY_RESULT (T)
2125*38fd1498Szrj wi::sext (const T &x, unsigned int offset)
2126*38fd1498Szrj {
2127*38fd1498Szrj   WI_UNARY_RESULT_VAR (result, val, T, x);
2128*38fd1498Szrj   unsigned int precision = get_precision (result);
2129*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x, precision);
2130*38fd1498Szrj 
2131*38fd1498Szrj   if (offset <= HOST_BITS_PER_WIDE_INT)
2132*38fd1498Szrj     {
2133*38fd1498Szrj       val[0] = sext_hwi (xi.ulow (), offset);
2134*38fd1498Szrj       result.set_len (1, true);
2135*38fd1498Szrj     }
2136*38fd1498Szrj   else
2137*38fd1498Szrj     result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
2138*38fd1498Szrj   return result;
2139*38fd1498Szrj }
2140*38fd1498Szrj 
2141*38fd1498Szrj /* Return the result of zero-extending the low OFFSET bits of X.  */
2142*38fd1498Szrj template <typename T>
2143*38fd1498Szrj inline WI_UNARY_RESULT (T)
2144*38fd1498Szrj wi::zext (const T &x, unsigned int offset)
2145*38fd1498Szrj {
2146*38fd1498Szrj   WI_UNARY_RESULT_VAR (result, val, T, x);
2147*38fd1498Szrj   unsigned int precision = get_precision (result);
2148*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x, precision);
2149*38fd1498Szrj 
2150*38fd1498Szrj   /* This is not just an optimization, it is actually required to
2151*38fd1498Szrj      maintain canonicalization.  */
2152*38fd1498Szrj   if (offset >= precision)
2153*38fd1498Szrj     {
2154*38fd1498Szrj       wi::copy (result, xi);
2155*38fd1498Szrj       return result;
2156*38fd1498Szrj     }
2157*38fd1498Szrj 
2158*38fd1498Szrj   /* In these cases we know that at least the top bit will be clear,
2159*38fd1498Szrj      so no sign extension is necessary.  */
2160*38fd1498Szrj   if (offset < HOST_BITS_PER_WIDE_INT)
2161*38fd1498Szrj     {
2162*38fd1498Szrj       val[0] = zext_hwi (xi.ulow (), offset);
2163*38fd1498Szrj       result.set_len (1, true);
2164*38fd1498Szrj     }
2165*38fd1498Szrj   else
2166*38fd1498Szrj     result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
2167*38fd1498Szrj   return result;
2168*38fd1498Szrj }
2169*38fd1498Szrj 
2170*38fd1498Szrj /* Return the result of extending the low OFFSET bits of X according to
2171*38fd1498Szrj    signedness SGN.  */
2172*38fd1498Szrj template <typename T>
2173*38fd1498Szrj inline WI_UNARY_RESULT (T)
2174*38fd1498Szrj wi::ext (const T &x, unsigned int offset, signop sgn)
2175*38fd1498Szrj {
2176*38fd1498Szrj   return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
2177*38fd1498Szrj }
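
/* For example (a sketch), for a 32-bit X whose low byte is 0x80:

     wi::sext (X, 8) == 0xffffff80 (-128 as a signed 32-bit value)
     wi::zext (X, 8) == 0x00000080 (128)

   Bits of X at position OFFSET and above do not affect the result.  */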
2178*38fd1498Szrj 
2179*38fd1498Szrj /* Return an integer that represents X | (1 << bit).  */
2180*38fd1498Szrj template <typename T>
2181*38fd1498Szrj inline WI_UNARY_RESULT (T)
2182*38fd1498Szrj wi::set_bit (const T &x, unsigned int bit)
2183*38fd1498Szrj {
2184*38fd1498Szrj   WI_UNARY_RESULT_VAR (result, val, T, x);
2185*38fd1498Szrj   unsigned int precision = get_precision (result);
2186*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x, precision);
2187*38fd1498Szrj   if (precision <= HOST_BITS_PER_WIDE_INT)
2188*38fd1498Szrj     {
2189*38fd1498Szrj       val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit);
2190*38fd1498Szrj       result.set_len (1);
2191*38fd1498Szrj     }
2192*38fd1498Szrj   else
2193*38fd1498Szrj     result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
2194*38fd1498Szrj   return result;
2195*38fd1498Szrj }
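
/* For example, wi::set_bit (wi::zero (32), 31) gives a 32-bit value with
   only bit 31 set (0x80000000); as usual, the excess upper bits of the
   single stored HWI are kept sign-extended from bit 31.  */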
2196*38fd1498Szrj 
2197*38fd1498Szrj /* Return the minimum of X and Y, treating them both as having
2198*38fd1498Szrj    signedness SGN.  */
2199*38fd1498Szrj template <typename T1, typename T2>
2200*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2201*38fd1498Szrj wi::min (const T1 &x, const T2 &y, signop sgn)
2202*38fd1498Szrj {
2203*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
2204*38fd1498Szrj   unsigned int precision = get_precision (result);
2205*38fd1498Szrj   if (wi::le_p (x, y, sgn))
2206*38fd1498Szrj     wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
2207*38fd1498Szrj   else
2208*38fd1498Szrj     wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
2209*38fd1498Szrj   return result;
2210*38fd1498Szrj }
2211*38fd1498Szrj 
2212*38fd1498Szrj /* Return the minimum of X and Y, treating both as signed values.  */
2213*38fd1498Szrj template <typename T1, typename T2>
2214*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2215*38fd1498Szrj wi::smin (const T1 &x, const T2 &y)
2216*38fd1498Szrj {
2217*38fd1498Szrj   return wi::min (x, y, SIGNED);
2218*38fd1498Szrj }
2219*38fd1498Szrj 
2220*38fd1498Szrj /* Return the minimum of X and Y, treating both as unsigned values.  */
2221*38fd1498Szrj template <typename T1, typename T2>
2222*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2223*38fd1498Szrj wi::umin (const T1 &x, const T2 &y)
2224*38fd1498Szrj {
2225*38fd1498Szrj   return wi::min (x, y, UNSIGNED);
2226*38fd1498Szrj }
2227*38fd1498Szrj 
2228*38fd1498Szrj /* Return the maximum of X and Y, treating them both as having
2229*38fd1498Szrj    signedness SGN.  */
2230*38fd1498Szrj template <typename T1, typename T2>
2231*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2232*38fd1498Szrj wi::max (const T1 &x, const T2 &y, signop sgn)
2233*38fd1498Szrj {
2234*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
2235*38fd1498Szrj   unsigned int precision = get_precision (result);
2236*38fd1498Szrj   if (wi::ge_p (x, y, sgn))
2237*38fd1498Szrj     wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
2238*38fd1498Szrj   else
2239*38fd1498Szrj     wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
2240*38fd1498Szrj   return result;
2241*38fd1498Szrj }
2242*38fd1498Szrj 
2243*38fd1498Szrj /* Return the maximum of X and Y, treating both as signed values.  */
2244*38fd1498Szrj template <typename T1, typename T2>
2245*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2246*38fd1498Szrj wi::smax (const T1 &x, const T2 &y)
2247*38fd1498Szrj {
2248*38fd1498Szrj   return wi::max (x, y, SIGNED);
2249*38fd1498Szrj }
2250*38fd1498Szrj 
2251*38fd1498Szrj /* Return the maximum of X and Y, treating both as unsigned values.  */
2252*38fd1498Szrj template <typename T1, typename T2>
2253*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2254*38fd1498Szrj wi::umax (const T1 &x, const T2 &y)
2255*38fd1498Szrj {
2256*38fd1498Szrj   return wi::max (x, y, UNSIGNED);
2257*38fd1498Szrj }
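
/* Illustrative sketch (not part of the wide-int API; the helper name and
   its use of offset_int are assumptions for the example): clamping a
   value to a signed range with the min/max routines above.  */
inline offset_int
example_clamp (const offset_int &x, const offset_int &lo, const offset_int &hi)
{
  /* smin and smax treat their operands as signed values.  */
  return wi::smax (wi::smin (x, hi), lo);
}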
2258*38fd1498Szrj 
2259*38fd1498Szrj /* Return X & Y.  */
2260*38fd1498Szrj template <typename T1, typename T2>
2261*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2262*38fd1498Szrj wi::bit_and (const T1 &x, const T2 &y)
2263*38fd1498Szrj {
2264*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2265*38fd1498Szrj   unsigned int precision = get_precision (result);
2266*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2267*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2268*38fd1498Szrj   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2269*38fd1498Szrj   if (__builtin_expect (xi.len + yi.len == 2, true))
2270*38fd1498Szrj     {
2271*38fd1498Szrj       val[0] = xi.ulow () & yi.ulow ();
2272*38fd1498Szrj       result.set_len (1, is_sign_extended);
2273*38fd1498Szrj     }
2274*38fd1498Szrj   else
2275*38fd1498Szrj     result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
2276*38fd1498Szrj 			       precision), is_sign_extended);
2277*38fd1498Szrj   return result;
2278*38fd1498Szrj }
2279*38fd1498Szrj 
2280*38fd1498Szrj /* Return X & ~Y.  */
2281*38fd1498Szrj template <typename T1, typename T2>
2282*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2283*38fd1498Szrj wi::bit_and_not (const T1 &x, const T2 &y)
2284*38fd1498Szrj {
2285*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2286*38fd1498Szrj   unsigned int precision = get_precision (result);
2287*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2288*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2289*38fd1498Szrj   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2290*38fd1498Szrj   if (__builtin_expect (xi.len + yi.len == 2, true))
2291*38fd1498Szrj     {
2292*38fd1498Szrj       val[0] = xi.ulow () & ~yi.ulow ();
2293*38fd1498Szrj       result.set_len (1, is_sign_extended);
2294*38fd1498Szrj     }
2295*38fd1498Szrj   else
2296*38fd1498Szrj     result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
2297*38fd1498Szrj 				   precision), is_sign_extended);
2298*38fd1498Szrj   return result;
2299*38fd1498Szrj }
2300*38fd1498Szrj 
2301*38fd1498Szrj /* Return X | Y.  */
2302*38fd1498Szrj template <typename T1, typename T2>
2303*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2304*38fd1498Szrj wi::bit_or (const T1 &x, const T2 &y)
2305*38fd1498Szrj {
2306*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2307*38fd1498Szrj   unsigned int precision = get_precision (result);
2308*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2309*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2310*38fd1498Szrj   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2311*38fd1498Szrj   if (__builtin_expect (xi.len + yi.len == 2, true))
2312*38fd1498Szrj     {
2313*38fd1498Szrj       val[0] = xi.ulow () | yi.ulow ();
2314*38fd1498Szrj       result.set_len (1, is_sign_extended);
2315*38fd1498Szrj     }
2316*38fd1498Szrj   else
2317*38fd1498Szrj     result.set_len (or_large (val, xi.val, xi.len,
2318*38fd1498Szrj 			      yi.val, yi.len, precision), is_sign_extended);
2319*38fd1498Szrj   return result;
2320*38fd1498Szrj }
2321*38fd1498Szrj 
2322*38fd1498Szrj /* Return X | ~Y.  */
2323*38fd1498Szrj template <typename T1, typename T2>
2324*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2325*38fd1498Szrj wi::bit_or_not (const T1 &x, const T2 &y)
2326*38fd1498Szrj {
2327*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2328*38fd1498Szrj   unsigned int precision = get_precision (result);
2329*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2330*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2331*38fd1498Szrj   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2332*38fd1498Szrj   if (__builtin_expect (xi.len + yi.len == 2, true))
2333*38fd1498Szrj     {
2334*38fd1498Szrj       val[0] = xi.ulow () | ~yi.ulow ();
2335*38fd1498Szrj       result.set_len (1, is_sign_extended);
2336*38fd1498Szrj     }
2337*38fd1498Szrj   else
2338*38fd1498Szrj     result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
2339*38fd1498Szrj 				  precision), is_sign_extended);
2340*38fd1498Szrj   return result;
2341*38fd1498Szrj }
2342*38fd1498Szrj 
2343*38fd1498Szrj /* Return X ^ Y.  */
2344*38fd1498Szrj template <typename T1, typename T2>
2345*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2346*38fd1498Szrj wi::bit_xor (const T1 &x, const T2 &y)
2347*38fd1498Szrj {
2348*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2349*38fd1498Szrj   unsigned int precision = get_precision (result);
2350*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2351*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2352*38fd1498Szrj   bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
2353*38fd1498Szrj   if (__builtin_expect (xi.len + yi.len == 2, true))
2354*38fd1498Szrj     {
2355*38fd1498Szrj       val[0] = xi.ulow () ^ yi.ulow ();
2356*38fd1498Szrj       result.set_len (1, is_sign_extended);
2357*38fd1498Szrj     }
2358*38fd1498Szrj   else
2359*38fd1498Szrj     result.set_len (xor_large (val, xi.val, xi.len,
2360*38fd1498Szrj 			       yi.val, yi.len, precision), is_sign_extended);
2361*38fd1498Szrj   return result;
2362*38fd1498Szrj }
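
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical and ALIGN is assumed to be a power of two): using bit_and
   to test whether X is a multiple of ALIGN, by checking that the low
   bits selected by ALIGN - 1 are all clear.  */
inline bool
example_aligned_p (const offset_int &x, const offset_int &align)
{
  return wi::eq_p (wi::bit_and (x, wi::sub (align, 1)), 0);
}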
2363*38fd1498Szrj 
2364*38fd1498Szrj /* Return X + Y.  */
2365*38fd1498Szrj template <typename T1, typename T2>
2366*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2367*38fd1498Szrj wi::add (const T1 &x, const T2 &y)
2368*38fd1498Szrj {
2369*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2370*38fd1498Szrj   unsigned int precision = get_precision (result);
2371*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2372*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2373*38fd1498Szrj   if (precision <= HOST_BITS_PER_WIDE_INT)
2374*38fd1498Szrj     {
2375*38fd1498Szrj       val[0] = xi.ulow () + yi.ulow ();
2376*38fd1498Szrj       result.set_len (1);
2377*38fd1498Szrj     }
2378*38fd1498Szrj   /* If the precision is known at compile time to be greater than
2379*38fd1498Szrj      HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
2380*38fd1498Szrj      knowing that (a) all bits in those HWIs are significant and
2381*38fd1498Szrj      (b) the result has room for at least two HWIs.  This provides
2382*38fd1498Szrj      a fast path for things like offset_int and widest_int.
2383*38fd1498Szrj 
2384*38fd1498Szrj      The STATIC_CONSTANT_P test prevents this path from being
2385*38fd1498Szrj      used for wide_ints.  wide_ints with precisions greater than
2386*38fd1498Szrj      HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
2387*38fd1498Szrj      point handling them inline.  */
2388*38fd1498Szrj   else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
2389*38fd1498Szrj 	   && __builtin_expect (xi.len + yi.len == 2, true))
2390*38fd1498Szrj     {
2391*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.ulow ();
2392*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.ulow ();
2393*38fd1498Szrj       unsigned HOST_WIDE_INT resultl = xl + yl;
2394*38fd1498Szrj       val[0] = resultl;
2395*38fd1498Szrj       val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
2396*38fd1498Szrj       result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
2397*38fd1498Szrj 			   >> (HOST_BITS_PER_WIDE_INT - 1)));
2398*38fd1498Szrj     }
2399*38fd1498Szrj   else
2400*38fd1498Szrj     result.set_len (add_large (val, xi.val, xi.len,
2401*38fd1498Szrj 			       yi.val, yi.len, precision,
2402*38fd1498Szrj 			       UNSIGNED, 0));
2403*38fd1498Szrj   return result;
2404*38fd1498Szrj }
2405*38fd1498Szrj 
2406*38fd1498Szrj /* Return X + Y.  Treat X and Y as having the signedness given by SGN
2407*38fd1498Szrj    and indicate in *OVERFLOW whether the operation overflowed.  */
2408*38fd1498Szrj template <typename T1, typename T2>
2409*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2410*38fd1498Szrj wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2411*38fd1498Szrj {
2412*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2413*38fd1498Szrj   unsigned int precision = get_precision (result);
2414*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2415*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2416*38fd1498Szrj   if (precision <= HOST_BITS_PER_WIDE_INT)
2417*38fd1498Szrj     {
2418*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.ulow ();
2419*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.ulow ();
2420*38fd1498Szrj       unsigned HOST_WIDE_INT resultl = xl + yl;
2421*38fd1498Szrj       if (sgn == SIGNED)
2422*38fd1498Szrj 	*overflow = (((resultl ^ xl) & (resultl ^ yl))
2423*38fd1498Szrj 		     >> (precision - 1)) & 1;
2424*38fd1498Szrj       else
2425*38fd1498Szrj 	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
2426*38fd1498Szrj 		     < (xl << (HOST_BITS_PER_WIDE_INT - precision)));
2427*38fd1498Szrj       val[0] = resultl;
2428*38fd1498Szrj       result.set_len (1);
2429*38fd1498Szrj     }
2430*38fd1498Szrj   else
2431*38fd1498Szrj     result.set_len (add_large (val, xi.val, xi.len,
2432*38fd1498Szrj 			       yi.val, yi.len, precision,
2433*38fd1498Szrj 			       sgn, overflow));
2434*38fd1498Szrj   return result;
2435*38fd1498Szrj }
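
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical): a caller that only cares whether A + B wraps in the
   operands' common precision can use the overflow-reporting form of
   wi::add above and discard the sum.  A and B must have the same
   precision.  */
inline bool
example_add_overflows_p (const wide_int &a, const wide_int &b, signop sgn)
{
  bool overflow;
  wi::add (a, b, sgn, &overflow);
  return overflow;
}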
2436*38fd1498Szrj 
2437*38fd1498Szrj /* Return X - Y.  */
2438*38fd1498Szrj template <typename T1, typename T2>
2439*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2440*38fd1498Szrj wi::sub (const T1 &x, const T2 &y)
2441*38fd1498Szrj {
2442*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2443*38fd1498Szrj   unsigned int precision = get_precision (result);
2444*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2445*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2446*38fd1498Szrj   if (precision <= HOST_BITS_PER_WIDE_INT)
2447*38fd1498Szrj     {
2448*38fd1498Szrj       val[0] = xi.ulow () - yi.ulow ();
2449*38fd1498Szrj       result.set_len (1);
2450*38fd1498Szrj     }
2451*38fd1498Szrj   /* If the precision is known at compile time to be greater than
2452*38fd1498Szrj      HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
2453*38fd1498Szrj      knowing that (a) all bits in those HWIs are significant and
2454*38fd1498Szrj      (b) the result has room for at least two HWIs.  This provides
2455*38fd1498Szrj      a fast path for things like offset_int and widest_int.
2456*38fd1498Szrj 
2457*38fd1498Szrj      The STATIC_CONSTANT_P test prevents this path from being
2458*38fd1498Szrj      used for wide_ints.  wide_ints with precisions greater than
2459*38fd1498Szrj      HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
2460*38fd1498Szrj      point handling them inline.  */
2461*38fd1498Szrj   else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
2462*38fd1498Szrj 	   && __builtin_expect (xi.len + yi.len == 2, true))
2463*38fd1498Szrj     {
2464*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.ulow ();
2465*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.ulow ();
2466*38fd1498Szrj       unsigned HOST_WIDE_INT resultl = xl - yl;
2467*38fd1498Szrj       val[0] = resultl;
2468*38fd1498Szrj       val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
2469*38fd1498Szrj       result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
2470*38fd1498Szrj 			   >> (HOST_BITS_PER_WIDE_INT - 1)));
2471*38fd1498Szrj     }
2472*38fd1498Szrj   else
2473*38fd1498Szrj     result.set_len (sub_large (val, xi.val, xi.len,
2474*38fd1498Szrj 			       yi.val, yi.len, precision,
2475*38fd1498Szrj 			       UNSIGNED, 0));
2476*38fd1498Szrj   return result;
2477*38fd1498Szrj }
2478*38fd1498Szrj 
2479*38fd1498Szrj /* Return X - Y.  Treat X and Y as having the signedness given by SGN
2480*38fd1498Szrj    and indicate in *OVERFLOW whether the operation overflowed.  */
2481*38fd1498Szrj template <typename T1, typename T2>
2482*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2483*38fd1498Szrj wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2484*38fd1498Szrj {
2485*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2486*38fd1498Szrj   unsigned int precision = get_precision (result);
2487*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2488*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2489*38fd1498Szrj   if (precision <= HOST_BITS_PER_WIDE_INT)
2490*38fd1498Szrj     {
2491*38fd1498Szrj       unsigned HOST_WIDE_INT xl = xi.ulow ();
2492*38fd1498Szrj       unsigned HOST_WIDE_INT yl = yi.ulow ();
2493*38fd1498Szrj       unsigned HOST_WIDE_INT resultl = xl - yl;
2494*38fd1498Szrj       if (sgn == SIGNED)
2495*38fd1498Szrj 	*overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
2496*38fd1498Szrj       else
2497*38fd1498Szrj 	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
2498*38fd1498Szrj 		     > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
2499*38fd1498Szrj       val[0] = resultl;
2500*38fd1498Szrj       result.set_len (1);
2501*38fd1498Szrj     }
2502*38fd1498Szrj   else
2503*38fd1498Szrj     result.set_len (sub_large (val, xi.val, xi.len,
2504*38fd1498Szrj 			       yi.val, yi.len, precision,
2505*38fd1498Szrj 			       sgn, overflow));
2506*38fd1498Szrj   return result;
2507*38fd1498Szrj }
2508*38fd1498Szrj 
2509*38fd1498Szrj /* Return X * Y.  */
2510*38fd1498Szrj template <typename T1, typename T2>
2511*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2512*38fd1498Szrj wi::mul (const T1 &x, const T2 &y)
2513*38fd1498Szrj {
2514*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2515*38fd1498Szrj   unsigned int precision = get_precision (result);
2516*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2517*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2518*38fd1498Szrj   if (precision <= HOST_BITS_PER_WIDE_INT)
2519*38fd1498Szrj     {
2520*38fd1498Szrj       val[0] = xi.ulow () * yi.ulow ();
2521*38fd1498Szrj       result.set_len (1);
2522*38fd1498Szrj     }
2523*38fd1498Szrj   else
2524*38fd1498Szrj     result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
2525*38fd1498Szrj 				  precision, UNSIGNED, 0, false));
2526*38fd1498Szrj   return result;
2527*38fd1498Szrj }
2528*38fd1498Szrj 
2529*38fd1498Szrj /* Return X * Y.  Treat X and Y as having the signedness given by SGN
2530*38fd1498Szrj    and indicate in *OVERFLOW whether the operation overflowed.  */
2531*38fd1498Szrj template <typename T1, typename T2>
2532*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2533*38fd1498Szrj wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2534*38fd1498Szrj {
2535*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2536*38fd1498Szrj   unsigned int precision = get_precision (result);
2537*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2538*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2539*38fd1498Szrj   result.set_len (mul_internal (val, xi.val, xi.len,
2540*38fd1498Szrj 				yi.val, yi.len, precision,
2541*38fd1498Szrj 				sgn, overflow, false));
2542*38fd1498Szrj   return result;
2543*38fd1498Szrj }
2544*38fd1498Szrj 
2545*38fd1498Szrj /* Return X * Y, treating both X and Y as signed values.  Indicate in
2546*38fd1498Szrj    *OVERFLOW whether the operation overflowed.  */
2547*38fd1498Szrj template <typename T1, typename T2>
2548*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2549*38fd1498Szrj wi::smul (const T1 &x, const T2 &y, bool *overflow)
2550*38fd1498Szrj {
2551*38fd1498Szrj   return mul (x, y, SIGNED, overflow);
2552*38fd1498Szrj }
2553*38fd1498Szrj 
2554*38fd1498Szrj /* Return X * Y, treating both X and Y as unsigned values.  Indicate in
2555*38fd1498Szrj    *OVERFLOW whether the operation overflowed.  */
2556*38fd1498Szrj template <typename T1, typename T2>
2557*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2558*38fd1498Szrj wi::umul (const T1 &x, const T2 &y, bool *overflow)
2559*38fd1498Szrj {
2560*38fd1498Szrj   return mul (x, y, UNSIGNED, overflow);
2561*38fd1498Szrj }
2562*38fd1498Szrj 
2563*38fd1498Szrj /* Perform a widening multiplication of X and Y, extending the values
2564*38fd1498Szrj    according to SGN, and return the high part of the result.  */
2565*38fd1498Szrj template <typename T1, typename T2>
2566*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2567*38fd1498Szrj wi::mul_high (const T1 &x, const T2 &y, signop sgn)
2568*38fd1498Szrj {
2569*38fd1498Szrj   WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
2570*38fd1498Szrj   unsigned int precision = get_precision (result);
2571*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2572*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y, precision);
2573*38fd1498Szrj   result.set_len (mul_internal (val, xi.val, xi.len,
2574*38fd1498Szrj 				yi.val, yi.len, precision,
2575*38fd1498Szrj 				sgn, 0, true));
2576*38fd1498Szrj   return result;
2577*38fd1498Szrj }
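
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical): an unsigned product fits in the operands' precision
   exactly when the high half of the widening multiplication is zero.  */
inline bool
example_umul_fits_p (const wide_int &a, const wide_int &b)
{
  return wi::eq_p (wi::mul_high (a, b, UNSIGNED), 0);
}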
2578*38fd1498Szrj 
2579*38fd1498Szrj /* Return X / Y, rounding towards 0.  Treat X and Y as having the
2580*38fd1498Szrj    signedness given by SGN.  Indicate in *OVERFLOW if the result
2581*38fd1498Szrj    overflows.  */
2582*38fd1498Szrj template <typename T1, typename T2>
2583*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2584*38fd1498Szrj wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2585*38fd1498Szrj {
2586*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2587*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2588*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2589*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2590*38fd1498Szrj 
2591*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
2592*38fd1498Szrj 				     precision,
2593*38fd1498Szrj 				     yi.val, yi.len, yi.precision,
2594*38fd1498Szrj 				     sgn, overflow));
2595*38fd1498Szrj   return quotient;
2596*38fd1498Szrj }
2597*38fd1498Szrj 
2598*38fd1498Szrj /* Return X / Y, rounding towards 0.  Treat X and Y as signed values.  */
2599*38fd1498Szrj template <typename T1, typename T2>
2600*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2601*38fd1498Szrj wi::sdiv_trunc (const T1 &x, const T2 &y)
2602*38fd1498Szrj {
2603*38fd1498Szrj   return div_trunc (x, y, SIGNED);
2604*38fd1498Szrj }
2605*38fd1498Szrj 
2606*38fd1498Szrj /* Return X / Y, rounding towards 0.  Treat X and Y as unsigned values.  */
2607*38fd1498Szrj template <typename T1, typename T2>
2608*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2609*38fd1498Szrj wi::udiv_trunc (const T1 &x, const T2 &y)
2610*38fd1498Szrj {
2611*38fd1498Szrj   return div_trunc (x, y, UNSIGNED);
2612*38fd1498Szrj }
2613*38fd1498Szrj 
2614*38fd1498Szrj /* Return X / Y, rounding towards -inf.  Treat X and Y as having the
2615*38fd1498Szrj    signedness given by SGN.  Indicate in *OVERFLOW if the result
2616*38fd1498Szrj    overflows.  */
2617*38fd1498Szrj template <typename T1, typename T2>
2618*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2619*38fd1498Szrj wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2620*38fd1498Szrj {
2621*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2622*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2623*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2624*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2625*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2626*38fd1498Szrj 
2627*38fd1498Szrj   unsigned int remainder_len;
2628*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val,
2629*38fd1498Szrj 				     &remainder_len, remainder_val,
2630*38fd1498Szrj 				     xi.val, xi.len, precision,
2631*38fd1498Szrj 				     yi.val, yi.len, yi.precision, sgn,
2632*38fd1498Szrj 				     overflow));
2633*38fd1498Szrj   remainder.set_len (remainder_len);
2634*38fd1498Szrj   if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
2635*38fd1498Szrj     return quotient - 1;
2636*38fd1498Szrj   return quotient;
2637*38fd1498Szrj }
2638*38fd1498Szrj 
2639*38fd1498Szrj /* Return X / Y, rounding towards -inf.  Treat X and Y as signed values.  */
2640*38fd1498Szrj template <typename T1, typename T2>
2641*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2642*38fd1498Szrj wi::sdiv_floor (const T1 &x, const T2 &y)
2643*38fd1498Szrj {
2644*38fd1498Szrj   return div_floor (x, y, SIGNED);
2645*38fd1498Szrj }
2646*38fd1498Szrj 
2647*38fd1498Szrj /* Return X / Y, rounding towards -inf.  Treat X and Y as unsigned values.  */
2648*38fd1498Szrj /* ??? Why do we have both this and udiv_trunc?  Aren't they the same?  */
2649*38fd1498Szrj template <typename T1, typename T2>
2650*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2651*38fd1498Szrj wi::udiv_floor (const T1 &x, const T2 &y)
2652*38fd1498Szrj {
2653*38fd1498Szrj   return div_floor (x, y, UNSIGNED);
2654*38fd1498Szrj }
2655*38fd1498Szrj 
2656*38fd1498Szrj /* Return X / Y, rounding towards +inf.  Treat X and Y as having the
2657*38fd1498Szrj    signedness given by SGN.  Indicate in *OVERFLOW if the result
2658*38fd1498Szrj    overflows.  */
2659*38fd1498Szrj template <typename T1, typename T2>
2660*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2661*38fd1498Szrj wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2662*38fd1498Szrj {
2663*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2664*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2665*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2666*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2667*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2668*38fd1498Szrj 
2669*38fd1498Szrj   unsigned int remainder_len;
2670*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val,
2671*38fd1498Szrj 				     &remainder_len, remainder_val,
2672*38fd1498Szrj 				     xi.val, xi.len, precision,
2673*38fd1498Szrj 				     yi.val, yi.len, yi.precision, sgn,
2674*38fd1498Szrj 				     overflow));
2675*38fd1498Szrj   remainder.set_len (remainder_len);
2676*38fd1498Szrj   if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
2677*38fd1498Szrj     return quotient + 1;
2678*38fd1498Szrj   return quotient;
2679*38fd1498Szrj }
2680*38fd1498Szrj 
2681*38fd1498Szrj /* Return X / Y, rounding towards +inf.  Treat X and Y as unsigned values.  */
2682*38fd1498Szrj template <typename T1, typename T2>
2683*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2684*38fd1498Szrj wi::udiv_ceil (const T1 &x, const T2 &y)
2685*38fd1498Szrj {
2686*38fd1498Szrj   return div_ceil (x, y, UNSIGNED);
2687*38fd1498Szrj }
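
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical and ALIGN is assumed to be nonzero): rounding SIZE up to
   the next multiple of ALIGN with the unsigned ceiling division above.  */
inline offset_int
example_round_up (const offset_int &size, const offset_int &align)
{
  return wi::mul (wi::udiv_ceil (size, align), align);
}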
2688*38fd1498Szrj 
2689*38fd1498Szrj /* Return X / Y, rounding towards nearest with ties away from zero.
2690*38fd1498Szrj    Treat X and Y as having the signedness given by SGN.  Indicate
2691*38fd1498Szrj    in *OVERFLOW if the result overflows.  */
2692*38fd1498Szrj template <typename T1, typename T2>
2693*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2694*38fd1498Szrj wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2695*38fd1498Szrj {
2696*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2697*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2698*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2699*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2700*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2701*38fd1498Szrj 
2702*38fd1498Szrj   unsigned int remainder_len;
2703*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val,
2704*38fd1498Szrj 				     &remainder_len, remainder_val,
2705*38fd1498Szrj 				     xi.val, xi.len, precision,
2706*38fd1498Szrj 				     yi.val, yi.len, yi.precision, sgn,
2707*38fd1498Szrj 				     overflow));
2708*38fd1498Szrj   remainder.set_len (remainder_len);
2709*38fd1498Szrj 
2710*38fd1498Szrj   if (remainder != 0)
2711*38fd1498Szrj     {
2712*38fd1498Szrj       if (sgn == SIGNED)
2713*38fd1498Szrj 	{
2714*38fd1498Szrj 	  WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
2715*38fd1498Szrj 	  if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
2716*38fd1498Szrj 	    {
2717*38fd1498Szrj 	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
2718*38fd1498Szrj 		return quotient - 1;
2719*38fd1498Szrj 	      else
2720*38fd1498Szrj 		return quotient + 1;
2721*38fd1498Szrj 	    }
2722*38fd1498Szrj 	}
2723*38fd1498Szrj       else
2724*38fd1498Szrj 	{
2725*38fd1498Szrj 	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
2726*38fd1498Szrj 	    return quotient + 1;
2727*38fd1498Szrj 	}
2728*38fd1498Szrj     }
2729*38fd1498Szrj   return quotient;
2730*38fd1498Szrj }
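
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical and DEN is assumed to be nonzero): scaling X by the
   rational factor NUM / DEN and rounding the signed quotient to the
   nearest integer, with ties away from zero.  */
inline offset_int
example_scale_round (const offset_int &x, const offset_int &num,
		     const offset_int &den)
{
  /* Overflow reporting is not used in this sketch.  */
  bool overflow;
  return wi::div_round (wi::mul (x, num), den, SIGNED, &overflow);
}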
2731*38fd1498Szrj 
2732*38fd1498Szrj /* Return X / Y, rounding towards 0.  Treat X and Y as having the
2733*38fd1498Szrj    signedness given by SGN.  Store the remainder in *REMAINDER_PTR.  */
2734*38fd1498Szrj template <typename T1, typename T2>
2735*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2736*38fd1498Szrj wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
2737*38fd1498Szrj 		  WI_BINARY_RESULT (T1, T2) *remainder_ptr)
2738*38fd1498Szrj {
2739*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2740*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2741*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2742*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2743*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2744*38fd1498Szrj 
2745*38fd1498Szrj   unsigned int remainder_len;
2746*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val,
2747*38fd1498Szrj 				     &remainder_len, remainder_val,
2748*38fd1498Szrj 				     xi.val, xi.len, precision,
2749*38fd1498Szrj 				     yi.val, yi.len, yi.precision, sgn, 0));
2750*38fd1498Szrj   remainder.set_len (remainder_len);
2751*38fd1498Szrj 
2752*38fd1498Szrj   *remainder_ptr = remainder;
2753*38fd1498Szrj   return quotient;
2754*38fd1498Szrj }
2755*38fd1498Szrj 
2756*38fd1498Szrj /* Compute the greatest common divisor of two numbers A and B using
2757*38fd1498Szrj    Euclid's algorithm.  */
2758*38fd1498Szrj template <typename T1, typename T2>
2759*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2760*38fd1498Szrj wi::gcd (const T1 &a, const T2 &b, signop sgn)
2761*38fd1498Szrj {
2762*38fd1498Szrj   T1 x, y, z;
2763*38fd1498Szrj 
2764*38fd1498Szrj   x = wi::abs (a);
2765*38fd1498Szrj   y = wi::abs (b);
2766*38fd1498Szrj 
2767*38fd1498Szrj   while (gt_p (x, 0, sgn))
2768*38fd1498Szrj     {
2769*38fd1498Szrj       z = mod_trunc (y, x, sgn);
2770*38fd1498Szrj       y = x;
2771*38fd1498Szrj       x = z;
2772*38fd1498Szrj     }
2773*38fd1498Szrj 
2774*38fd1498Szrj   return y;
2775*38fd1498Szrj }
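
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical): reducing the fraction *NUM / *DEN to lowest terms with
   wi::gcd, treating both values as signed.  */
inline void
example_reduce_fraction (offset_int *num, offset_int *den)
{
  offset_int g = wi::gcd (*num, *den, SIGNED);
  if (wi::ne_p (g, 0))
    {
      *num = wi::sdiv_trunc (*num, g);
      *den = wi::sdiv_trunc (*den, g);
    }
}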
2776*38fd1498Szrj 
2777*38fd1498Szrj /* Compute X / Y, rounding towards 0, and return the remainder.
2778*38fd1498Szrj    Treat X and Y as having the signedness given by SGN.  Indicate
2779*38fd1498Szrj    in *OVERFLOW if the division overflows.  */
2780*38fd1498Szrj template <typename T1, typename T2>
2781*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2782*38fd1498Szrj wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2783*38fd1498Szrj {
2784*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2785*38fd1498Szrj   unsigned int precision = get_precision (remainder);
2786*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2787*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2788*38fd1498Szrj 
2789*38fd1498Szrj   unsigned int remainder_len;
2790*38fd1498Szrj   divmod_internal (0, &remainder_len, remainder_val,
2791*38fd1498Szrj 		   xi.val, xi.len, precision,
2792*38fd1498Szrj 		   yi.val, yi.len, yi.precision, sgn, overflow);
2793*38fd1498Szrj   remainder.set_len (remainder_len);
2794*38fd1498Szrj 
2795*38fd1498Szrj   return remainder;
2796*38fd1498Szrj }
2797*38fd1498Szrj 
2798*38fd1498Szrj /* Compute X / Y, rounding towards 0, and return the remainder.
2799*38fd1498Szrj    Treat X and Y as signed values.  */
2800*38fd1498Szrj template <typename T1, typename T2>
2801*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2802*38fd1498Szrj wi::smod_trunc (const T1 &x, const T2 &y)
2803*38fd1498Szrj {
2804*38fd1498Szrj   return mod_trunc (x, y, SIGNED);
2805*38fd1498Szrj }
2806*38fd1498Szrj 
2807*38fd1498Szrj /* Compute X / Y, rounding towards 0, and return the remainder.
2808*38fd1498Szrj    Treat X and Y as unsigned values.  */
2809*38fd1498Szrj template <typename T1, typename T2>
2810*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2811*38fd1498Szrj wi::umod_trunc (const T1 &x, const T2 &y)
2812*38fd1498Szrj {
2813*38fd1498Szrj   return mod_trunc (x, y, UNSIGNED);
2814*38fd1498Szrj }
2815*38fd1498Szrj 
2816*38fd1498Szrj /* Compute X / Y, rounding towards -inf, and return the remainder.
2817*38fd1498Szrj    Treat X and Y as having the signedness given by SGN.  Indicate
2818*38fd1498Szrj    in *OVERFLOW if the division overflows.  */
2819*38fd1498Szrj template <typename T1, typename T2>
2820*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2821*38fd1498Szrj wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2822*38fd1498Szrj {
2823*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2824*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2825*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2826*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2827*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2828*38fd1498Szrj 
2829*38fd1498Szrj   unsigned int remainder_len;
2830*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val,
2831*38fd1498Szrj 				     &remainder_len, remainder_val,
2832*38fd1498Szrj 				     xi.val, xi.len, precision,
2833*38fd1498Szrj 				     yi.val, yi.len, yi.precision, sgn,
2834*38fd1498Szrj 				     overflow));
2835*38fd1498Szrj   remainder.set_len (remainder_len);
2836*38fd1498Szrj 
2837*38fd1498Szrj   if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
2838*38fd1498Szrj     return remainder + y;
2839*38fd1498Szrj   return remainder;
2840*38fd1498Szrj }
2841*38fd1498Szrj 
2842*38fd1498Szrj /* Compute X / Y, rounding towards -inf, and return the remainder.
2843*38fd1498Szrj    Treat X and Y as unsigned values.  */
2844*38fd1498Szrj /* ??? Why do we have both this and umod_trunc?  Aren't they the same?  */
2845*38fd1498Szrj template <typename T1, typename T2>
2846*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2847*38fd1498Szrj wi::umod_floor (const T1 &x, const T2 &y)
2848*38fd1498Szrj {
2849*38fd1498Szrj   return mod_floor (x, y, UNSIGNED);
2850*38fd1498Szrj }
2851*38fd1498Szrj 
2852*38fd1498Szrj /* Compute X / Y, rounding towards +inf, and return the remainder.
2853*38fd1498Szrj    Treat X and Y as having the signedness given by SGN.  Indicate
2854*38fd1498Szrj    in *OVERFLOW if the division overflows.  */
2855*38fd1498Szrj template <typename T1, typename T2>
2856*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2857*38fd1498Szrj wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2858*38fd1498Szrj {
2859*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2860*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2861*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2862*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2863*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2864*38fd1498Szrj 
2865*38fd1498Szrj   unsigned int remainder_len;
2866*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val,
2867*38fd1498Szrj 				     &remainder_len, remainder_val,
2868*38fd1498Szrj 				     xi.val, xi.len, precision,
2869*38fd1498Szrj 				     yi.val, yi.len, yi.precision, sgn,
2870*38fd1498Szrj 				     overflow));
2871*38fd1498Szrj   remainder.set_len (remainder_len);
2872*38fd1498Szrj 
2873*38fd1498Szrj   if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
2874*38fd1498Szrj     return remainder - y;
2875*38fd1498Szrj   return remainder;
2876*38fd1498Szrj }
2877*38fd1498Szrj 
2878*38fd1498Szrj /* Compute X / Y, rounding towards nearest with ties away from zero,
2879*38fd1498Szrj    and return the remainder.  Treat X and Y as having the signedness
2880*38fd1498Szrj    given by SGN.  Indicate in *OVERFLOW if the division overflows.  */
2881*38fd1498Szrj template <typename T1, typename T2>
2882*38fd1498Szrj inline WI_BINARY_RESULT (T1, T2)
2883*38fd1498Szrj wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
2884*38fd1498Szrj {
2885*38fd1498Szrj   WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
2886*38fd1498Szrj   WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
2887*38fd1498Szrj   unsigned int precision = get_precision (quotient);
2888*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2889*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2890*38fd1498Szrj 
2891*38fd1498Szrj   unsigned int remainder_len;
2892*38fd1498Szrj   quotient.set_len (divmod_internal (quotient_val,
2893*38fd1498Szrj 				     &remainder_len, remainder_val,
2894*38fd1498Szrj 				     xi.val, xi.len, precision,
2895*38fd1498Szrj 				     yi.val, yi.len, yi.precision, sgn,
2896*38fd1498Szrj 				     overflow));
2897*38fd1498Szrj   remainder.set_len (remainder_len);
2898*38fd1498Szrj 
2899*38fd1498Szrj   if (remainder != 0)
2900*38fd1498Szrj     {
2901*38fd1498Szrj       if (sgn == SIGNED)
2902*38fd1498Szrj 	{
2903*38fd1498Szrj 	  WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
2904*38fd1498Szrj 	  if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
2905*38fd1498Szrj 	    {
2906*38fd1498Szrj 	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
2907*38fd1498Szrj 		return remainder + y;
2908*38fd1498Szrj 	      else
2909*38fd1498Szrj 		return remainder - y;
2910*38fd1498Szrj 	    }
2911*38fd1498Szrj 	}
2912*38fd1498Szrj       else
2913*38fd1498Szrj 	{
2914*38fd1498Szrj 	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
2915*38fd1498Szrj 	    return remainder - y;
2916*38fd1498Szrj 	}
2917*38fd1498Szrj     }
2918*38fd1498Szrj   return remainder;
2919*38fd1498Szrj }
2920*38fd1498Szrj 
2921*38fd1498Szrj /* Return true if X is a multiple of Y.  Treat X and Y as having the
2922*38fd1498Szrj    signedness given by SGN.  */
2923*38fd1498Szrj template <typename T1, typename T2>
2924*38fd1498Szrj inline bool
2925*38fd1498Szrj wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn)
2926*38fd1498Szrj {
2927*38fd1498Szrj   return wi::mod_trunc (x, y, sgn) == 0;
2928*38fd1498Szrj }
2929*38fd1498Szrj 
2930*38fd1498Szrj /* Return true if X is a multiple of Y, storing X / Y in *RES if so.
2931*38fd1498Szrj    Treat X and Y as having the signedness given by SGN.  */
2932*38fd1498Szrj template <typename T1, typename T2>
2933*38fd1498Szrj inline bool
2934*38fd1498Szrj wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
2935*38fd1498Szrj 		   WI_BINARY_RESULT (T1, T2) *res)
2936*38fd1498Szrj {
2937*38fd1498Szrj   WI_BINARY_RESULT (T1, T2) remainder;
2938*38fd1498Szrj   WI_BINARY_RESULT (T1, T2) quotient
2939*38fd1498Szrj     = divmod_trunc (x, y, sgn, &remainder);
2940*38fd1498Szrj   if (remainder == 0)
2941*38fd1498Szrj     {
2942*38fd1498Szrj       *res = quotient;
2943*38fd1498Szrj       return true;
2944*38fd1498Szrj     }
2945*38fd1498Szrj   return false;
2946*38fd1498Szrj }
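
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical): converting a bit count to a count of UNIT_SIZE-bit
   units, succeeding only if the division is exact.  */
inline bool
example_exact_units (const offset_int &bits, const offset_int &unit_size,
		     offset_int *units)
{
  return wi::multiple_of_p (bits, unit_size, UNSIGNED, units);
}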
2947*38fd1498Szrj 
2948*38fd1498Szrj /* Return X << Y.  Return 0 if Y is greater than or equal to
2949*38fd1498Szrj    the precision of X.  */
2950*38fd1498Szrj template <typename T1, typename T2>
2951*38fd1498Szrj inline WI_UNARY_RESULT (T1)
2952*38fd1498Szrj wi::lshift (const T1 &x, const T2 &y)
2953*38fd1498Szrj {
2954*38fd1498Szrj   WI_UNARY_RESULT_VAR (result, val, T1, x);
2955*38fd1498Szrj   unsigned int precision = get_precision (result);
2956*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x, precision);
2957*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
2958*38fd1498Szrj   /* Handle the simple cases quickly.   */
2959*38fd1498Szrj   if (geu_p (yi, precision))
2960*38fd1498Szrj     {
2961*38fd1498Szrj       val[0] = 0;
2962*38fd1498Szrj       result.set_len (1);
2963*38fd1498Szrj     }
2964*38fd1498Szrj   else
2965*38fd1498Szrj     {
2966*38fd1498Szrj       unsigned int shift = yi.to_uhwi ();
2967*38fd1498Szrj       /* For fixed-precision integers like offset_int and widest_int,
2968*38fd1498Szrj 	 handle the case where the shift value is constant and the
2969*38fd1498Szrj 	 result is a single nonnegative HWI (meaning that we don't
2970*38fd1498Szrj 	 need to worry about val[1]).  This is particularly common
2971*38fd1498Szrj 	 for converting a byte count to a bit count.
2972*38fd1498Szrj 
2973*38fd1498Szrj 	 For variable-precision integers like wide_int, handle HWI
2974*38fd1498Szrj 	 and sub-HWI integers inline.  */
2975*38fd1498Szrj       if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
2976*38fd1498Szrj 	  ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)
2977*38fd1498Szrj 	     && xi.len == 1
2978*38fd1498Szrj 	     && xi.val[0] <= (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT)
2979*38fd1498Szrj 					      HOST_WIDE_INT_MAX >> shift))
2980*38fd1498Szrj 	  : precision <= HOST_BITS_PER_WIDE_INT)
2981*38fd1498Szrj 	{
2982*38fd1498Szrj 	  val[0] = xi.ulow () << shift;
2983*38fd1498Szrj 	  result.set_len (1);
2984*38fd1498Szrj 	}
2985*38fd1498Szrj       else
2986*38fd1498Szrj 	result.set_len (lshift_large (val, xi.val, xi.len,
2987*38fd1498Szrj 				      precision, shift));
2988*38fd1498Szrj     }
2989*38fd1498Szrj   return result;
2990*38fd1498Szrj }
2991*38fd1498Szrj 
2992*38fd1498Szrj /* Return X >> Y, using a logical shift.  Return 0 if Y is greater than
2993*38fd1498Szrj    or equal to the precision of X.  */
2994*38fd1498Szrj template <typename T1, typename T2>
2995*38fd1498Szrj inline WI_UNARY_RESULT (T1)
2996*38fd1498Szrj wi::lrshift (const T1 &x, const T2 &y)
2997*38fd1498Szrj {
2998*38fd1498Szrj   WI_UNARY_RESULT_VAR (result, val, T1, x);
2999*38fd1498Szrj   /* Do things in the precision of the input rather than the output,
3000*38fd1498Szrj      since the result can be no larger than that.  */
3001*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x);
3002*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
3003*38fd1498Szrj   /* Handle the simple cases quickly.   */
3004*38fd1498Szrj   if (geu_p (yi, xi.precision))
3005*38fd1498Szrj     {
3006*38fd1498Szrj       val[0] = 0;
3007*38fd1498Szrj       result.set_len (1);
3008*38fd1498Szrj     }
3009*38fd1498Szrj   else
3010*38fd1498Szrj     {
3011*38fd1498Szrj       unsigned int shift = yi.to_uhwi ();
3012*38fd1498Szrj       /* For fixed-precision integers like offset_int and widest_int,
3013*38fd1498Szrj 	 handle the case where the shift value is constant and the
3014*38fd1498Szrj 	 shifted value is a single nonnegative HWI (meaning that all
3015*38fd1498Szrj 	 bits above the HWI are zero).  This is particularly common
3016*38fd1498Szrj 	 for converting a bit count to a byte count.
3017*38fd1498Szrj 
3018*38fd1498Szrj 	 For variable-precision integers like wide_int, handle HWI
3019*38fd1498Szrj 	 and sub-HWI integers inline.  */
3020*38fd1498Szrj       if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
3021*38fd1498Szrj 	  ? (shift < HOST_BITS_PER_WIDE_INT
3022*38fd1498Szrj 	     && xi.len == 1
3023*38fd1498Szrj 	     && xi.val[0] >= 0)
3024*38fd1498Szrj 	  : xi.precision <= HOST_BITS_PER_WIDE_INT)
3025*38fd1498Szrj 	{
3026*38fd1498Szrj 	  val[0] = xi.to_uhwi () >> shift;
3027*38fd1498Szrj 	  result.set_len (1);
3028*38fd1498Szrj 	}
3029*38fd1498Szrj       else
3030*38fd1498Szrj 	result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
3031*38fd1498Szrj 				       get_precision (result), shift));
3032*38fd1498Szrj     }
3033*38fd1498Szrj   return result;
3034*38fd1498Szrj }
3035*38fd1498Szrj 
3036*38fd1498Szrj /* Return X >> Y, using an arithmetic shift.  Return a sign mask if
3037*38fd1498Szrj    Y is greater than or equal to the precision of X.  */
3038*38fd1498Szrj template <typename T1, typename T2>
3039*38fd1498Szrj inline WI_UNARY_RESULT (T1)
3040*38fd1498Szrj wi::arshift (const T1 &x, const T2 &y)
3041*38fd1498Szrj {
3042*38fd1498Szrj   WI_UNARY_RESULT_VAR (result, val, T1, x);
3043*38fd1498Szrj   /* Do things in the precision of the input rather than the output,
3044*38fd1498Szrj      since the result can be no larger than that.  */
3045*38fd1498Szrj   WIDE_INT_REF_FOR (T1) xi (x);
3046*38fd1498Szrj   WIDE_INT_REF_FOR (T2) yi (y);
3047*38fd1498Szrj   /* Handle the simple cases quickly.   */
3048*38fd1498Szrj   if (geu_p (yi, xi.precision))
3049*38fd1498Szrj     {
3050*38fd1498Szrj       val[0] = sign_mask (x);
3051*38fd1498Szrj       result.set_len (1);
3052*38fd1498Szrj     }
3053*38fd1498Szrj   else
3054*38fd1498Szrj     {
3055*38fd1498Szrj       unsigned int shift = yi.to_uhwi ();
3056*38fd1498Szrj       if (xi.precision <= HOST_BITS_PER_WIDE_INT)
3057*38fd1498Szrj 	{
3058*38fd1498Szrj 	  val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
3059*38fd1498Szrj 	  result.set_len (1, true);
3060*38fd1498Szrj 	}
3061*38fd1498Szrj       else
3062*38fd1498Szrj 	result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
3063*38fd1498Szrj 				       get_precision (result), shift));
3064*38fd1498Szrj     }
3065*38fd1498Szrj   return result;
3066*38fd1498Szrj }
3067*38fd1498Szrj 
3068*38fd1498Szrj /* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
3069*38fd1498Szrj    logical shift otherwise.  */
3070*38fd1498Szrj template <typename T1, typename T2>
3071*38fd1498Szrj inline WI_UNARY_RESULT (T1)
3072*38fd1498Szrj wi::rshift (const T1 &x, const T2 &y, signop sgn)
3073*38fd1498Szrj {
3074*38fd1498Szrj   if (sgn == UNSIGNED)
3075*38fd1498Szrj     return lrshift (x, y);
3076*38fd1498Szrj   else
3077*38fd1498Szrj     return arshift (x, y);
3078*38fd1498Szrj }
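
/* Illustrative sketch (not part of the wide-int API; the helper names
   are hypothetical and 8-bit units are assumed for the example): the
   byte-count/bit-count conversions mentioned above, expressed with
   lshift and lrshift.  */
inline offset_int
example_bytes_to_bits (const offset_int &bytes)
{
  return wi::lshift (bytes, 3);
}

inline offset_int
example_bits_to_bytes (const offset_int &bits)
{
  return wi::lrshift (bits, 3);
}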
3079*38fd1498Szrj 
3080*38fd1498Szrj /* Return the result of rotating the low WIDTH bits of X left by Y
3081*38fd1498Szrj    bits and zero-extending the result.  Use a full-width rotate if
3082*38fd1498Szrj    WIDTH is zero.  */
3083*38fd1498Szrj template <typename T1, typename T2>
3084*38fd1498Szrj WI_UNARY_RESULT (T1)
3085*38fd1498Szrj wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
3086*38fd1498Szrj {
3087*38fd1498Szrj   unsigned int precision = get_binary_precision (x, x);
3088*38fd1498Szrj   if (width == 0)
3089*38fd1498Szrj     width = precision;
3090*38fd1498Szrj   WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
3091*38fd1498Szrj   WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
3092*38fd1498Szrj   WI_UNARY_RESULT (T1) right = wi::lrshift (x, wi::sub (width, ymod));
3093*38fd1498Szrj   if (width != precision)
3094*38fd1498Szrj     return wi::zext (left, width) | wi::zext (right, width);
3095*38fd1498Szrj   return left | right;
3096*38fd1498Szrj }
3097*38fd1498Szrj 
3098*38fd1498Szrj /* Return the result of rotating the low WIDTH bits of X right by Y
3099*38fd1498Szrj    bits and zero-extending the result.  Use a full-width rotate if
3100*38fd1498Szrj    WIDTH is zero.  */
3101*38fd1498Szrj template <typename T1, typename T2>
3102*38fd1498Szrj WI_UNARY_RESULT (T1)
3103*38fd1498Szrj wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
3104*38fd1498Szrj {
3105*38fd1498Szrj   unsigned int precision = get_binary_precision (x, x);
3106*38fd1498Szrj   if (width == 0)
3107*38fd1498Szrj     width = precision;
3108*38fd1498Szrj   WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
3109*38fd1498Szrj   WI_UNARY_RESULT (T1) right = wi::lrshift (x, ymod);
3110*38fd1498Szrj   WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
3111*38fd1498Szrj   if (width != precision)
3112*38fd1498Szrj     return wi::zext (left, width) | wi::zext (right, width);
3113*38fd1498Szrj   return left | right;
3114*38fd1498Szrj }
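
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical): rotating the low 8 bits of X left by N places and
   returning the zero-extended result, as a caller folding an 8-bit
   rotate might do.  */
inline wide_int
example_rotate_byte_left (const wide_int &x, unsigned int n)
{
  return wi::lrotate (x, n, 8);
}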
3115*38fd1498Szrj 
3116*38fd1498Szrj /* Return 0 if the number of 1s in X is even and 1 if the number of 1s
3117*38fd1498Szrj    is odd.  */
3118*38fd1498Szrj inline int
3119*38fd1498Szrj wi::parity (const wide_int_ref &x)
3120*38fd1498Szrj {
3121*38fd1498Szrj   return popcount (x) & 1;
3122*38fd1498Szrj }
3123*38fd1498Szrj 
3124*38fd1498Szrj /* Extract WIDTH bits from X, starting at BITPOS.  */
3125*38fd1498Szrj template <typename T>
3126*38fd1498Szrj inline unsigned HOST_WIDE_INT
3127*38fd1498Szrj wi::extract_uhwi (const T &x, unsigned int bitpos, unsigned int width)
3128*38fd1498Szrj {
3129*38fd1498Szrj   unsigned precision = get_precision (x);
3130*38fd1498Szrj   if (precision < bitpos + width)
3131*38fd1498Szrj     precision = bitpos + width;
3132*38fd1498Szrj   WIDE_INT_REF_FOR (T) xi (x, precision);
3133*38fd1498Szrj 
3134*38fd1498Szrj   /* Handle this rare case after the above, so that we assert about
3135*38fd1498Szrj      bogus BITPOS values.  */
3136*38fd1498Szrj   if (width == 0)
3137*38fd1498Szrj     return 0;
3138*38fd1498Szrj 
3139*38fd1498Szrj   unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT;
3140*38fd1498Szrj   unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT;
3141*38fd1498Szrj   unsigned HOST_WIDE_INT res = xi.elt (start);
3142*38fd1498Szrj   res >>= shift;
3143*38fd1498Szrj   if (shift + width > HOST_BITS_PER_WIDE_INT)
3144*38fd1498Szrj     {
3145*38fd1498Szrj       unsigned HOST_WIDE_INT upper = xi.elt (start + 1);
3146*38fd1498Szrj       res |= upper << (-shift % HOST_BITS_PER_WIDE_INT);
3147*38fd1498Szrj     }
3148*38fd1498Szrj   return zext_hwi (res, width);
3149*38fd1498Szrj }
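
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical and 8-bit bytes are assumed): reading the byte at
   zero-based index I out of X with extract_uhwi.  */
inline unsigned HOST_WIDE_INT
example_extract_byte (const wide_int &x, unsigned int i)
{
  return wi::extract_uhwi (x, i * 8, 8);
}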
3150*38fd1498Szrj 
3151*38fd1498Szrj /* Return the minimum precision needed to store X with sign SGN.  */
3152*38fd1498Szrj template <typename T>
3153*38fd1498Szrj inline unsigned int
3154*38fd1498Szrj wi::min_precision (const T &x, signop sgn)
3155*38fd1498Szrj {
3156*38fd1498Szrj   if (sgn == SIGNED)
3157*38fd1498Szrj     return get_precision (x) - clrsb (x);
3158*38fd1498Szrj   else
3159*38fd1498Szrj     return get_precision (x) - clz (x);
3160*38fd1498Szrj }
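
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical): X can be represented in PREC bits with signedness SGN
   exactly when its minimum precision does not exceed PREC.  */
inline bool
example_fits_p (const wide_int &x, unsigned int prec, signop sgn)
{
  return wi::min_precision (x, sgn) <= prec;
}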
3161*38fd1498Szrj 
3162*38fd1498Szrj #define SIGNED_BINARY_PREDICATE(OP, F)			\
3163*38fd1498Szrj   template <typename T1, typename T2>			\
3164*38fd1498Szrj     inline WI_SIGNED_BINARY_PREDICATE_RESULT (T1, T2)	\
3165*38fd1498Szrj     OP (const T1 &x, const T2 &y)			\
3166*38fd1498Szrj     {							\
3167*38fd1498Szrj       return wi::F (x, y);				\
3168*38fd1498Szrj     }
3169*38fd1498Szrj 
3170*38fd1498Szrj SIGNED_BINARY_PREDICATE (operator <, lts_p)
3171*38fd1498Szrj SIGNED_BINARY_PREDICATE (operator <=, les_p)
3172*38fd1498Szrj SIGNED_BINARY_PREDICATE (operator >, gts_p)
3173*38fd1498Szrj SIGNED_BINARY_PREDICATE (operator >=, ges_p)
3174*38fd1498Szrj 
3175*38fd1498Szrj #undef SIGNED_BINARY_PREDICATE
3176*38fd1498Szrj 
3177*38fd1498Szrj #define UNARY_OPERATOR(OP, F) \
3178*38fd1498Szrj   template<typename T> \
3179*38fd1498Szrj   WI_UNARY_RESULT (generic_wide_int<T>) \
3180*38fd1498Szrj   OP (const generic_wide_int<T> &x) \
3181*38fd1498Szrj   { \
3182*38fd1498Szrj     return wi::F (x); \
3183*38fd1498Szrj   }
3184*38fd1498Szrj 
3185*38fd1498Szrj #define BINARY_PREDICATE(OP, F) \
3186*38fd1498Szrj   template<typename T1, typename T2> \
3187*38fd1498Szrj   WI_BINARY_PREDICATE_RESULT (T1, T2) \
3188*38fd1498Szrj   OP (const T1 &x, const T2 &y) \
3189*38fd1498Szrj   { \
3190*38fd1498Szrj     return wi::F (x, y); \
3191*38fd1498Szrj   }
3192*38fd1498Szrj 
3193*38fd1498Szrj #define BINARY_OPERATOR(OP, F) \
3194*38fd1498Szrj   template<typename T1, typename T2> \
3195*38fd1498Szrj   WI_BINARY_OPERATOR_RESULT (T1, T2) \
3196*38fd1498Szrj   OP (const T1 &x, const T2 &y) \
3197*38fd1498Szrj   { \
3198*38fd1498Szrj     return wi::F (x, y); \
3199*38fd1498Szrj   }
3200*38fd1498Szrj 
3201*38fd1498Szrj #define SHIFT_OPERATOR(OP, F) \
3202*38fd1498Szrj   template<typename T1, typename T2> \
3203*38fd1498Szrj   WI_BINARY_OPERATOR_RESULT (T1, T1) \
3204*38fd1498Szrj   OP (const T1 &x, const T2 &y) \
3205*38fd1498Szrj   { \
3206*38fd1498Szrj     return wi::F (x, y); \
3207*38fd1498Szrj   }
3208*38fd1498Szrj 
3209*38fd1498Szrj UNARY_OPERATOR (operator ~, bit_not)
3210*38fd1498Szrj UNARY_OPERATOR (operator -, neg)
3211*38fd1498Szrj BINARY_PREDICATE (operator ==, eq_p)
3212*38fd1498Szrj BINARY_PREDICATE (operator !=, ne_p)
3213*38fd1498Szrj BINARY_OPERATOR (operator &, bit_and)
3214*38fd1498Szrj BINARY_OPERATOR (operator |, bit_or)
3215*38fd1498Szrj BINARY_OPERATOR (operator ^, bit_xor)
3216*38fd1498Szrj BINARY_OPERATOR (operator +, add)
3217*38fd1498Szrj BINARY_OPERATOR (operator -, sub)
3218*38fd1498Szrj BINARY_OPERATOR (operator *, mul)
3219*38fd1498Szrj SHIFT_OPERATOR (operator <<, lshift)
3220*38fd1498Szrj 
3221*38fd1498Szrj #undef UNARY_OPERATOR
3222*38fd1498Szrj #undef BINARY_PREDICATE
3223*38fd1498Szrj #undef BINARY_OPERATOR
3224*38fd1498Szrj #undef SHIFT_OPERATOR
3225*38fd1498Szrj 
3226*38fd1498Szrj template <typename T1, typename T2>
3227*38fd1498Szrj inline WI_SIGNED_SHIFT_RESULT (T1, T2)
3228*38fd1498Szrj operator >> (const T1 &x, const T2 &y)
3229*38fd1498Szrj {
3230*38fd1498Szrj   return wi::arshift (x, y);
3231*38fd1498Szrj }
3232*38fd1498Szrj 
3233*38fd1498Szrj template <typename T1, typename T2>
3234*38fd1498Szrj inline WI_SIGNED_SHIFT_RESULT (T1, T2)
3235*38fd1498Szrj operator / (const T1 &x, const T2 &y)
3236*38fd1498Szrj {
3237*38fd1498Szrj   return wi::sdiv_trunc (x, y);
3238*38fd1498Szrj }
3239*38fd1498Szrj 
3240*38fd1498Szrj template <typename T1, typename T2>
3241*38fd1498Szrj inline WI_SIGNED_SHIFT_RESULT (T1, T2)
3242*38fd1498Szrj operator % (const T1 &x, const T2 &y)
3243*38fd1498Szrj {
3244*38fd1498Szrj   return wi::smod_trunc (x, y);
3245*38fd1498Szrj }
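
/* Illustrative sketch (not part of the wide-int API; the helper name is
   hypothetical): the overloaded operators above let fixed-precision
   types be used in ordinary expressions.  Note that operator / and
   operator % are signed and truncate towards zero.  */
inline widest_int
example_average (const widest_int &a, const widest_int &b)
{
  return (a + b) / 2;
}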
3246*38fd1498Szrj 
3247*38fd1498Szrj template<typename T>
3248*38fd1498Szrj void
3249*38fd1498Szrj gt_ggc_mx (generic_wide_int <T> *)
3250*38fd1498Szrj {
3251*38fd1498Szrj }
3252*38fd1498Szrj 
3253*38fd1498Szrj template<typename T>
3254*38fd1498Szrj void
3255*38fd1498Szrj gt_pch_nx (generic_wide_int <T> *)
3256*38fd1498Szrj {
3257*38fd1498Szrj }
3258*38fd1498Szrj 
3259*38fd1498Szrj template<typename T>
3260*38fd1498Szrj void
3261*38fd1498Szrj gt_pch_nx (generic_wide_int <T> *, void (*) (void *, void *), void *)
3262*38fd1498Szrj {
3263*38fd1498Szrj }
3264*38fd1498Szrj 
3265*38fd1498Szrj template<int N>
3266*38fd1498Szrj void
3267*38fd1498Szrj gt_ggc_mx (trailing_wide_ints <N> *)
3268*38fd1498Szrj {
3269*38fd1498Szrj }
3270*38fd1498Szrj 
3271*38fd1498Szrj template<int N>
3272*38fd1498Szrj void
3273*38fd1498Szrj gt_pch_nx (trailing_wide_ints <N> *)
3274*38fd1498Szrj {
3275*38fd1498Szrj }
3276*38fd1498Szrj 
3277*38fd1498Szrj template<int N>
3278*38fd1498Szrj void
3279*38fd1498Szrj gt_pch_nx (trailing_wide_ints <N> *, void (*) (void *, void *), void *)
3280*38fd1498Szrj {
3281*38fd1498Szrj }
3282*38fd1498Szrj 
3283*38fd1498Szrj namespace wi
3284*38fd1498Szrj {
3285*38fd1498Szrj   /* Used for overloaded functions in which the only other acceptable
3286*38fd1498Szrj      scalar type is a pointer.  It stops a plain 0 from being treated
3287*38fd1498Szrj      as a null pointer.  */
3288*38fd1498Szrj   struct never_used1 {};
3289*38fd1498Szrj   struct never_used2 {};
3290*38fd1498Szrj 
3291*38fd1498Szrj   wide_int min_value (unsigned int, signop);
3292*38fd1498Szrj   wide_int min_value (never_used1 *);
3293*38fd1498Szrj   wide_int min_value (never_used2 *);
3294*38fd1498Szrj   wide_int max_value (unsigned int, signop);
3295*38fd1498Szrj   wide_int max_value (never_used1 *);
3296*38fd1498Szrj   wide_int max_value (never_used2 *);
3297*38fd1498Szrj 
3298*38fd1498Szrj   /* FIXME: this is target dependent, so should be elsewhere.
3299*38fd1498Szrj      It also seems to assume that CHAR_BIT == BITS_PER_UNIT.  */
3300*38fd1498Szrj   wide_int from_buffer (const unsigned char *, unsigned int);
3301*38fd1498Szrj 
3302*38fd1498Szrj #ifndef GENERATOR_FILE
3303*38fd1498Szrj   void to_mpz (const wide_int_ref &, mpz_t, signop);
3304*38fd1498Szrj #endif
3305*38fd1498Szrj 
3306*38fd1498Szrj   wide_int mask (unsigned int, bool, unsigned int);
3307*38fd1498Szrj   wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
3308*38fd1498Szrj   wide_int set_bit_in_zero (unsigned int, unsigned int);
3309*38fd1498Szrj   wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
3310*38fd1498Szrj 		   unsigned int);
3311*38fd1498Szrj   wide_int round_down_for_mask (const wide_int &, const wide_int &);
3312*38fd1498Szrj   wide_int round_up_for_mask (const wide_int &, const wide_int &);
3313*38fd1498Szrj 
3314*38fd1498Szrj   template <typename T>
3315*38fd1498Szrj   T mask (unsigned int, bool);
3316*38fd1498Szrj 
3317*38fd1498Szrj   template <typename T>
3318*38fd1498Szrj   T shifted_mask (unsigned int, unsigned int, bool);
3319*38fd1498Szrj 
3320*38fd1498Szrj   template <typename T>
3321*38fd1498Szrj   T set_bit_in_zero (unsigned int);
3322*38fd1498Szrj 
3323*38fd1498Szrj   unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
3324*38fd1498Szrj   unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
3325*38fd1498Szrj 			     bool, unsigned int);
3326*38fd1498Szrj   unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
3327*38fd1498Szrj 			   unsigned int, unsigned int, bool);
3328*38fd1498Szrj }
3329*38fd1498Szrj 
3330*38fd1498Szrj /* Return a PRECISION-bit integer in which the low WIDTH bits are set
3331*38fd1498Szrj    and the other bits are clear, or the inverse if NEGATE_P.  */
3332*38fd1498Szrj inline wide_int
3333*38fd1498Szrj wi::mask (unsigned int width, bool negate_p, unsigned int precision)
3334*38fd1498Szrj {
3335*38fd1498Szrj   wide_int result = wide_int::create (precision);
3336*38fd1498Szrj   result.set_len (mask (result.write_val (), width, negate_p, precision));
3337*38fd1498Szrj   return result;
3338*38fd1498Szrj }
3339*38fd1498Szrj 
3340*38fd1498Szrj /* Return a PRECISION-bit integer in which the low START bits are clear,
3341*38fd1498Szrj    the next WIDTH bits are set, and the other bits are clear,
3342*38fd1498Szrj    or the inverse if NEGATE_P.  */
3343*38fd1498Szrj inline wide_int
3344*38fd1498Szrj wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
3345*38fd1498Szrj 		  unsigned int precision)
3346*38fd1498Szrj {
3347*38fd1498Szrj   wide_int result = wide_int::create (precision);
3348*38fd1498Szrj   result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
3349*38fd1498Szrj 				precision));
3350*38fd1498Szrj   return result;
3351*38fd1498Szrj }
3352*38fd1498Szrj 
3353*38fd1498Szrj /* Return a PRECISION-bit integer in which bit BIT is set and all the
3354*38fd1498Szrj    others are clear.  */
3355*38fd1498Szrj inline wide_int
3356*38fd1498Szrj wi::set_bit_in_zero (unsigned int bit, unsigned int precision)
3357*38fd1498Szrj {
3358*38fd1498Szrj   return shifted_mask (bit, 1, false, precision);
3359*38fd1498Szrj }
3360*38fd1498Szrj 
3361*38fd1498Szrj /* Return an integer of type T in which the low WIDTH bits are set
3362*38fd1498Szrj    and the other bits are clear, or the inverse if NEGATE_P.  */
3363*38fd1498Szrj template <typename T>
3364*38fd1498Szrj inline T
3365*38fd1498Szrj wi::mask (unsigned int width, bool negate_p)
3366*38fd1498Szrj {
3367*38fd1498Szrj   STATIC_ASSERT (wi::int_traits<T>::precision);
3368*38fd1498Szrj   T result;
3369*38fd1498Szrj   result.set_len (mask (result.write_val (), width, negate_p,
3370*38fd1498Szrj 			wi::int_traits <T>::precision));
3371*38fd1498Szrj   return result;
3372*38fd1498Szrj }
3373*38fd1498Szrj 
3374*38fd1498Szrj /* Return an integer of type T in which the low START bits are clear,
3375*38fd1498Szrj    the next WIDTH bits are set, and the other bits are clear, or the
3376*38fd1498Szrj    inverse if NEGATE_P.  */
3377*38fd1498Szrj template <typename T>
3378*38fd1498Szrj inline T
3379*38fd1498Szrj wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
3380*38fd1498Szrj {
3381*38fd1498Szrj   STATIC_ASSERT (wi::int_traits<T>::precision);
3382*38fd1498Szrj   T result;
3383*38fd1498Szrj   result.set_len (shifted_mask (result.write_val (), start, width,
3384*38fd1498Szrj 				negate_p,
3385*38fd1498Szrj 				wi::int_traits <T>::precision));
3386*38fd1498Szrj   return result;
3387*38fd1498Szrj }
3388*38fd1498Szrj 
3389*38fd1498Szrj /* Return an integer of type T in which bit BIT is set and all the
3390*38fd1498Szrj    others are clear.  */
3391*38fd1498Szrj template <typename T>
3392*38fd1498Szrj inline T
3393*38fd1498Szrj wi::set_bit_in_zero (unsigned int bit)
3394*38fd1498Szrj {
3395*38fd1498Szrj   return shifted_mask <T> (bit, 1, false);
3396*38fd1498Szrj }
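
/* Illustrative sketch (not part of the wide-int API; the helper name
   and output parameters are hypothetical): building an offset_int with
   the low three bits set and another with only bit 7 set, using the
   fixed-precision mask helpers above.  */
inline void
example_fixed_masks (offset_int *low3, offset_int *bit7)
{
  *low3 = wi::mask <offset_int> (3, false);
  *bit7 = wi::set_bit_in_zero <offset_int> (7);
}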
3397*38fd1498Szrj 
3398*38fd1498Szrj #endif /* WIDE_INT_H */