1/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2   This file is consumed by genmatch which produces gimple-match.c
3   and generic-match.c from it.
4
5   Copyright (C) 2014-2020 Free Software Foundation, Inc.
6   Contributed by Richard Biener <rguenther@suse.de>
7   and Prathamesh Kulkarni  <bilbotheelffriend@gmail.com>
8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free
13Software Foundation; either version 3, or (at your option) any later
14version.
15
16GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17WARRANTY; without even the implied warranty of MERCHANTABILITY or
18FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19for more details.
20
21You should have received a copy of the GNU General Public License
22along with GCC; see the file COPYING3.  If not see
23<http://www.gnu.org/licenses/>.  */
24
25
26/* Generic tree predicates we inherit.  */
27(define_predicates
28   integer_onep integer_zerop integer_all_onesp integer_minus_onep
29   integer_each_onep integer_truep integer_nonzerop
30   real_zerop real_onep real_minus_onep
31   zerop
32   initializer_each_zero_or_onep
33   CONSTANT_CLASS_P
34   tree_expr_nonnegative_p
35   tree_expr_nonzero_p
36   integer_valued_real_p
37   integer_pow2p
38   uniform_integer_cst_p
39   HONOR_NANS
40   uniform_vector_p)
41
42/* Operator lists.  */
43(define_operator_list tcc_comparison
44  lt   le   eq ne ge   gt   unordered ordered   unlt unle ungt unge uneq ltgt)
45(define_operator_list inverted_tcc_comparison
46  ge   gt   ne eq lt   le   ordered   unordered ge   gt   le   lt   ltgt uneq)
47(define_operator_list inverted_tcc_comparison_with_nans
48  unge ungt ne eq unlt unle ordered   unordered ge   gt   le   lt   ltgt uneq)
49(define_operator_list swapped_tcc_comparison
50  gt   ge   eq ne le   lt   unordered ordered   ungt unge unlt unle uneq ltgt)
51(define_operator_list simple_comparison         lt   le   eq ne ge   gt)
52(define_operator_list swapped_simple_comparison gt   ge   eq ne le   lt)
53
54#include "cfn-operators.pd"
55
56/* Define operand lists for math rounding functions {,i,l,ll}FN,
57   where the versions prefixed with "i" return an int, those prefixed with
58   "l" return a long and those prefixed with "ll" return a long long.
59
60   Also define operand lists:
61
62     X<FN>F for all float functions, in the order i, l, ll
63     X<FN> for all double functions, in the same order
64     X<FN>L for all long double functions, in the same order.  */
65#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
66  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
67				 BUILT_IN_L##FN##F \
68				 BUILT_IN_LL##FN##F) \
69  (define_operator_list X##FN BUILT_IN_I##FN \
70			      BUILT_IN_L##FN \
71			      BUILT_IN_LL##FN) \
72  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
73				 BUILT_IN_L##FN##L \
74				 BUILT_IN_LL##FN##L)
75
76DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
77DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
78DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
79DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
80
81/* Binary operations and their associated IFN_COND_* function.  */
82(define_operator_list UNCOND_BINARY
83  plus minus
84  mult trunc_div trunc_mod rdiv
85  min max
86  bit_and bit_ior bit_xor
87  lshift rshift)
88(define_operator_list COND_BINARY
89  IFN_COND_ADD IFN_COND_SUB
90  IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
91  IFN_COND_MIN IFN_COND_MAX
92  IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
93  IFN_COND_SHL IFN_COND_SHR)
94
95/* Same for ternary operations.  */
96(define_operator_list UNCOND_TERNARY
97  IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
98(define_operator_list COND_TERNARY
99  IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
100
101/* With nop_convert? combine convert? and view_convert? in one pattern
102   plus conditionalize on tree_nop_conversion_p conversions.  */
103(match (nop_convert @0)
104 (convert @0)
105 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
106(match (nop_convert @0)
107 (view_convert @0)
108 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
109      && known_eq (TYPE_VECTOR_SUBPARTS (type),
110		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
111      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
112
113/* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
114   ABSU_EXPR returns unsigned absolute value of the operand and the operand
115   of the ABSU_EXPR will have the corresponding signed type.  */
116(simplify (abs (convert @0))
117 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
118      && !TYPE_UNSIGNED (TREE_TYPE (@0))
119      && element_precision (type) > element_precision (TREE_TYPE (@0)))
120  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
121   (convert (absu:utype @0)))))
122
123
124/* Simplifications of operations with one constant operand and
125   simplifications to constants or single values.  */
126
127(for op (plus pointer_plus minus bit_ior bit_xor)
128  (simplify
129    (op @0 integer_zerop)
130    (non_lvalue @0)))
131
132/* 0 +p index -> (type)index */
133(simplify
134 (pointer_plus integer_zerop @1)
135 (non_lvalue (convert @1)))
136
137/* ptr - 0 -> (type)ptr */
138(simplify
139 (pointer_diff @0 integer_zerop)
140 (convert @0))
141
142/* See if ARG1 is zero and X + ARG1 reduces to X.
143   Likewise if the operands are reversed.  */
144(simplify
145 (plus:c @0 real_zerop@1)
146 (if (fold_real_zero_addition_p (type, @1, 0))
147  (non_lvalue @0)))
148
149/* See if ARG1 is zero and X - ARG1 reduces to X.  */
150(simplify
151 (minus @0 real_zerop@1)
152 (if (fold_real_zero_addition_p (type, @1, 1))
153  (non_lvalue @0)))
154
155/* Even if the fold_real_zero_addition_p can't simplify X + 0.0
156   into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
157   or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
158   if not -frounding-math.  For sNaNs the first operation would raise
159   exceptions but turn the result into qNan, so the second operation
160   would not raise it.   */
161(for inner_op (plus minus)
162 (for outer_op (plus minus)
163  (simplify
164   (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
165    (if (real_zerop (@1)
166	 && real_zerop (@2)
167	 && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
168     (with { bool inner_plus = ((inner_op == PLUS_EXPR)
169				^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
170	     bool outer_plus
171	       = ((outer_op == PLUS_EXPR)
172		  ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
173      (if (outer_plus && !inner_plus)
174       (outer_op @0 @2)
175       @3))))))
176
177/* Simplify x - x.
178   This is unsafe for certain floats even in non-IEEE formats.
179   In IEEE, it is unsafe because it does wrong for NaNs.
180   Also note that operand_equal_p is always false if an operand
181   is volatile.  */
182(simplify
183 (minus @0 @0)
184 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
185  { build_zero_cst (type); }))
186(simplify
187 (pointer_diff @@0 @0)
188 { build_zero_cst (type); })
189
190(simplify
191 (mult @0 integer_zerop@1)
192 @1)
193
194/* Maybe fold x * 0 to 0.  The expressions aren't the same
195   when x is NaN, since x * 0 is also NaN.  Nor are they the
196   same in modes with signed zeros, since multiplying a
197   negative value by 0 gives -0, not +0.  */
198(simplify
199 (mult @0 real_zerop@1)
200 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
201  @1))
202
203/* In IEEE floating point, x*1 is not equivalent to x for snans.
204   Likewise for complex arithmetic with signed zeros.  */
205(simplify
206 (mult @0 real_onep)
207 (if (!HONOR_SNANS (type)
208      && (!HONOR_SIGNED_ZEROS (type)
209          || !COMPLEX_FLOAT_TYPE_P (type)))
210  (non_lvalue @0)))
211
212/* Transform x * -1.0 into -x.  */
213(simplify
214 (mult @0 real_minus_onep)
215  (if (!HONOR_SNANS (type)
216       && (!HONOR_SIGNED_ZEROS (type)
217           || !COMPLEX_FLOAT_TYPE_P (type)))
218   (negate @0)))
219
220/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 } */
221(simplify
222 (mult SSA_NAME@1 SSA_NAME@2)
223  (if (INTEGRAL_TYPE_P (type)
224       && get_nonzero_bits (@1) == 1
225       && get_nonzero_bits (@2) == 1)
226   (bit_and @1 @2)))
227
228/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
229   unless the target has native support for the former but not the latter.  */
230(simplify
231 (mult @0 VECTOR_CST@1)
232 (if (initializer_each_zero_or_onep (@1)
233      && !HONOR_SNANS (type)
234      && !HONOR_SIGNED_ZEROS (type))
235  (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
236   (if (itype
237	&& (!VECTOR_MODE_P (TYPE_MODE (type))
238	    || (VECTOR_MODE_P (TYPE_MODE (itype))
239		&& optab_handler (and_optab,
240				  TYPE_MODE (itype)) != CODE_FOR_nothing)))
241    (view_convert (bit_and:itype (view_convert @0)
242				 (ne @1 { build_zero_cst (type); })))))))
243
244(for cmp (gt ge lt le)
245     outp (convert convert negate negate)
246     outn (negate negate convert convert)
247 /* Transform X * (X > 0.0 ? 1.0 : -1.0) into abs(X). */
248 /* Transform X * (X >= 0.0 ? 1.0 : -1.0) into abs(X). */
249 /* Transform X * (X < 0.0 ? 1.0 : -1.0) into -abs(X). */
250 /* Transform X * (X <= 0.0 ? 1.0 : -1.0) into -abs(X). */
251 (simplify
252  (mult:c @0 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep))
253  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
254   (outp (abs @0))))
255 /* Transform X * (X > 0.0 ? -1.0 : 1.0) into -abs(X). */
256 /* Transform X * (X >= 0.0 ? -1.0 : 1.0) into -abs(X). */
257 /* Transform X * (X < 0.0 ? -1.0 : 1.0) into abs(X). */
258 /* Transform X * (X <= 0.0 ? -1.0 : 1.0) into abs(X). */
259 (simplify
260  (mult:c @0 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1))
261  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
262   (outn (abs @0)))))
263
264/* Transform X * copysign (1.0, X) into abs(X). */
265(simplify
266 (mult:c @0 (COPYSIGN_ALL real_onep @0))
267 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
268  (abs @0)))
269
270/* Transform X * copysign (1.0, -X) into -abs(X). */
271(simplify
272 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
273 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
274  (negate (abs @0))))
275
276/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
277(simplify
278 (COPYSIGN_ALL REAL_CST@0 @1)
279 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
280  (COPYSIGN_ALL (negate @0) @1)))
281
282/* X * 1, X / 1 -> X.  */
283(for op (mult trunc_div ceil_div floor_div round_div exact_div)
284  (simplify
285    (op @0 integer_onep)
286    (non_lvalue @0)))
287
288/* (A / (1 << B)) -> (A >> B).
289   Only for unsigned A.  For signed A, this would not preserve rounding
290   toward zero.
291   For example: (-1 / ( 1 << B)) !=  -1 >> B.
292   Also also widening conversions, like:
293   (A / (unsigned long long) (1U << B)) -> (A >> B)
294   or
295   (A / (unsigned long long) (1 << B)) -> (A >> B).
296   If the left shift is signed, it can be done only if the upper bits
297   of A starting from shift's type sign bit are zero, as
298   (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
299   so it is valid only if A >> 31 is zero.  */
300(simplify
301 (trunc_div @0 (convert? (lshift integer_onep@1 @2)))
302 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
303      && (!VECTOR_TYPE_P (type)
304	  || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
305	  || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
306      && (useless_type_conversion_p (type, TREE_TYPE (@1))
307	  || (element_precision (type) >= element_precision (TREE_TYPE (@1))
308	      && (TYPE_UNSIGNED (TREE_TYPE (@1))
309		  || (element_precision (type)
310		      == element_precision (TREE_TYPE (@1)))
311		  || (INTEGRAL_TYPE_P (type)
312		      && (tree_nonzero_bits (@0)
313			  & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
314				      true,
315				      element_precision (type))) == 0)))))
316  (rshift @0 @2)))
317
318/* Preserve explicit divisions by 0: the C++ front-end wants to detect
319   undefined behavior in constexpr evaluation, and assuming that the division
320   traps enables better optimizations than these anyway.  */
321(for div (trunc_div ceil_div floor_div round_div exact_div)
322 /* 0 / X is always zero.  */
323 (simplify
324  (div integer_zerop@0 @1)
325  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
326  (if (!integer_zerop (@1))
327   @0))
328  /* X / -1 is -X.  */
329 (simplify
330   (div @0 integer_minus_onep@1)
331   (if (!TYPE_UNSIGNED (type))
332    (negate @0)))
333 /* X / X is one.  */
334 (simplify
335  (div @0 @0)
336  /* But not for 0 / 0 so that we can get the proper warnings and errors.
337     And not for _Fract types where we can't build 1.  */
338  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
339   { build_one_cst (type); }))
340 /* X / abs (X) is X < 0 ? -1 : 1.  */
341 (simplify
342   (div:C @0 (abs @0))
343   (if (INTEGRAL_TYPE_P (type)
344	&& TYPE_OVERFLOW_UNDEFINED (type))
345    (cond (lt @0 { build_zero_cst (type); })
346          { build_minus_one_cst (type); } { build_one_cst (type); })))
347 /* X / -X is -1.  */
348 (simplify
349   (div:C @0 (negate @0))
350   (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
351	&& TYPE_OVERFLOW_UNDEFINED (type))
352    { build_minus_one_cst (type); })))
353
354/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
355   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
356(simplify
357 (floor_div @0 @1)
358 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
359      && TYPE_UNSIGNED (type))
360  (trunc_div @0 @1)))
361
362/* Combine two successive divisions.  Note that combining ceil_div
363   and floor_div is trickier and combining round_div even more so.  */
364(for div (trunc_div exact_div)
365 (simplify
366  (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
367  (with {
368    wi::overflow_type overflow;
369    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
370			    TYPE_SIGN (type), &overflow);
371   }
372   (if (div == EXACT_DIV_EXPR
373	|| optimize_successive_divisions_p (@2, @3))
374    (if (!overflow)
375     (div @0 { wide_int_to_tree (type, mul); })
376     (if (TYPE_UNSIGNED (type)
377	  || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
378      { build_zero_cst (type); }))))))
379
380/* Combine successive multiplications.  Similar to above, but handling
381   overflow is different.  */
382(simplify
383 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
384 (with {
385   wi::overflow_type overflow;
386   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
387			   TYPE_SIGN (type), &overflow);
388  }
389  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
390     otherwise undefined overflow implies that @0 must be zero.  */
391  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
392   (mult @0 { wide_int_to_tree (type, mul); }))))
393
394/* Optimize A / A to 1.0 if we don't care about
395   NaNs or Infinities.  */
396(simplify
397 (rdiv @0 @0)
398 (if (FLOAT_TYPE_P (type)
399      && ! HONOR_NANS (type)
400      && ! HONOR_INFINITIES (type))
401  { build_one_cst (type); }))
402
403/* Optimize -A / A to -1.0 if we don't care about
404   NaNs or Infinities.  */
405(simplify
406 (rdiv:C @0 (negate @0))
407 (if (FLOAT_TYPE_P (type)
408      && ! HONOR_NANS (type)
409      && ! HONOR_INFINITIES (type))
410  { build_minus_one_cst (type); }))
411
412/* PR71078: x / abs(x) -> copysign (1.0, x) */
413(simplify
414 (rdiv:C (convert? @0) (convert? (abs @0)))
415  (if (SCALAR_FLOAT_TYPE_P (type)
416       && ! HONOR_NANS (type)
417       && ! HONOR_INFINITIES (type))
418   (switch
419    (if (types_match (type, float_type_node))
420     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
421    (if (types_match (type, double_type_node))
422     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
423    (if (types_match (type, long_double_type_node))
424     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
425
426/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
427(simplify
428 (rdiv @0 real_onep)
429 (if (!HONOR_SNANS (type))
430  (non_lvalue @0)))
431
432/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
433(simplify
434 (rdiv @0 real_minus_onep)
435 (if (!HONOR_SNANS (type))
436  (negate @0)))
437
438(if (flag_reciprocal_math)
439 /* Convert (A/B)/C to A/(B*C). */
440 (simplify
441  (rdiv (rdiv:s @0 @1) @2)
442  (rdiv @0 (mult @1 @2)))
443
444 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
445 (simplify
446  (rdiv @0 (mult:s @1 REAL_CST@2))
447  (with
448   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
449   (if (tem)
450    (rdiv (mult @0 { tem; } ) @1))))
451
452 /* Convert A/(B/C) to (A/B)*C  */
453 (simplify
454  (rdiv @0 (rdiv:s @1 @2))
455   (mult (rdiv @0 @1) @2)))
456
457/* Simplify x / (- y) to -x / y.  */
458(simplify
459 (rdiv @0 (negate @1))
460 (rdiv (negate @0) @1))
461
462(if (flag_unsafe_math_optimizations)
463 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
464    Since C / x may underflow to zero, do this only for unsafe math.  */
465 (for op (lt le gt ge)
466      neg_op (gt ge lt le)
467  (simplify
468   (op (rdiv REAL_CST@0 @1) real_zerop@2)
469   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
470    (switch
471     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
472      (op @1 @2))
473     /* For C < 0, use the inverted operator.  */
474     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
475      (neg_op @1 @2)))))))
476
477/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
478(for div (trunc_div ceil_div floor_div round_div exact_div)
479 (simplify
480  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
481  (if (integer_pow2p (@2)
482       && tree_int_cst_sgn (@2) > 0
483       && tree_nop_conversion_p (type, TREE_TYPE (@0))
484       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
485   (rshift (convert @0)
486	   { build_int_cst (integer_type_node,
487			    wi::exact_log2 (wi::to_wide (@2))); }))))
488
489/* If ARG1 is a constant, we can convert this to a multiply by the
490   reciprocal.  This does not have the same rounding properties,
491   so only do this if -freciprocal-math.  We can actually
492   always safely do it if ARG1 is a power of two, but it's hard to
493   tell if it is or not in a portable manner.  */
494(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
495 (simplify
496  (rdiv @0 cst@1)
497  (if (optimize)
498   (if (flag_reciprocal_math
499	&& !real_zerop (@1))
500    (with
501     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
502     (if (tem)
503      (mult @0 { tem; } )))
504    (if (cst != COMPLEX_CST)
505     (with { tree inverse = exact_inverse (type, @1); }
506      (if (inverse)
507       (mult @0 { inverse; } ))))))))
508
509(for mod (ceil_mod floor_mod round_mod trunc_mod)
510 /* 0 % X is always zero.  */
511 (simplify
512  (mod integer_zerop@0 @1)
513  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
514  (if (!integer_zerop (@1))
515   @0))
516 /* X % 1 is always zero.  */
517 (simplify
518  (mod @0 integer_onep)
519  { build_zero_cst (type); })
520 /* X % -1 is zero.  */
521 (simplify
522  (mod @0 integer_minus_onep@1)
523  (if (!TYPE_UNSIGNED (type))
524   { build_zero_cst (type); }))
525 /* X % X is zero.  */
526 (simplify
527  (mod @0 @0)
528  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
529  (if (!integer_zerop (@0))
530   { build_zero_cst (type); }))
531 /* (X % Y) % Y is just X % Y.  */
532 (simplify
533  (mod (mod@2 @0 @1) @1)
534  @2)
535 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
536 (simplify
537  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
538  (if (ANY_INTEGRAL_TYPE_P (type)
539       && TYPE_OVERFLOW_UNDEFINED (type)
540       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
541			     TYPE_SIGN (type)))
542   { build_zero_cst (type); }))
543 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
544    modulo and comparison, since it is simpler and equivalent.  */
545 (for cmp (eq ne)
546  (simplify
547   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
548   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
549    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
550     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
551
552/* X % -C is the same as X % C.  */
553(simplify
554 (trunc_mod @0 INTEGER_CST@1)
555  (if (TYPE_SIGN (type) == SIGNED
556       && !TREE_OVERFLOW (@1)
557       && wi::neg_p (wi::to_wide (@1))
558       && !TYPE_OVERFLOW_TRAPS (type)
559       /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
560       && !sign_bit_p (@1, @1))
561   (trunc_mod @0 (negate @1))))
562
563/* X % -Y is the same as X % Y.  */
564(simplify
565 (trunc_mod @0 (convert? (negate @1)))
566 (if (INTEGRAL_TYPE_P (type)
567      && !TYPE_UNSIGNED (type)
568      && !TYPE_OVERFLOW_TRAPS (type)
569      && tree_nop_conversion_p (type, TREE_TYPE (@1))
570      /* Avoid this transformation if X might be INT_MIN or
571	 Y might be -1, because we would then change valid
572	 INT_MIN % -(-1) into invalid INT_MIN % -1.  */
573      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
574	  || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
575							(TREE_TYPE (@1))))))
576  (trunc_mod @0 (convert @1))))
577
578/* X - (X / Y) * Y is the same as X % Y.  */
579(simplify
580 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
581 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
582  (convert (trunc_mod @0 @1))))
583
584/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
585   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
586   Also optimize A % (C << N)  where C is a power of 2,
587   to A & ((C << N) - 1).  */
588(match (power_of_two_cand @1)
589 INTEGER_CST@1)
590(match (power_of_two_cand @1)
591 (lshift INTEGER_CST@1 @2))
592(for mod (trunc_mod floor_mod)
593 (simplify
594  (mod @0 (convert? (power_of_two_cand@1 @2)))
595  (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
596       /* Allow any integral conversions of the divisor, except
597	  conversion from narrower signed to wider unsigned type
598	  where if @1 would be negative power of two, the divisor
599	  would not be a power of two.  */
600       && INTEGRAL_TYPE_P (type)
601       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
602       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
603	   || TYPE_UNSIGNED (TREE_TYPE (@1))
604	   || !TYPE_UNSIGNED (type))
605       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
606   (with { tree utype = TREE_TYPE (@1);
607	   if (!TYPE_OVERFLOW_WRAPS (utype))
608	     utype = unsigned_type_for (utype); }
609    (bit_and @0 (convert (minus (convert:utype @1)
610				{ build_one_cst (utype); })))))))
611
612/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
613(simplify
614 (trunc_div (mult @0 integer_pow2p@1) @1)
615 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
616  (bit_and @0 { wide_int_to_tree
617		(type, wi::mask (TYPE_PRECISION (type)
618				 - wi::exact_log2 (wi::to_wide (@1)),
619				 false, TYPE_PRECISION (type))); })))
620
621/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
622(simplify
623 (mult (trunc_div @0 integer_pow2p@1) @1)
624 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
625  (bit_and @0 (negate @1))))
626
627/* Simplify (t * 2) / 2) -> t.  */
628(for div (trunc_div ceil_div floor_div round_div exact_div)
629 (simplify
630  (div (mult:c @0 @1) @1)
631  (if (ANY_INTEGRAL_TYPE_P (type)
632       && TYPE_OVERFLOW_UNDEFINED (type))
633   @0)))
634
635(for op (negate abs)
636 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
637 (for coss (COS COSH)
638  (simplify
639   (coss (op @0))
640    (coss @0)))
641 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
642 (for pows (POW)
643  (simplify
644   (pows (op @0) REAL_CST@1)
645   (with { HOST_WIDE_INT n; }
646    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
647     (pows @0 @1)))))
648 /* Likewise for powi.  */
649 (for pows (POWI)
650  (simplify
651   (pows (op @0) INTEGER_CST@1)
652   (if ((wi::to_wide (@1) & 1) == 0)
653    (pows @0 @1))))
654 /* Strip negate and abs from both operands of hypot.  */
655 (for hypots (HYPOT)
656  (simplify
657   (hypots (op @0) @1)
658   (hypots @0 @1))
659  (simplify
660   (hypots @0 (op @1))
661   (hypots @0 @1)))
662 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
663 (for copysigns (COPYSIGN_ALL)
664  (simplify
665   (copysigns (op @0) @1)
666   (copysigns @0 @1))))
667
668/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
669(simplify
670 (mult (abs@1 @0) @1)
671 (mult @0 @0))
672
673/* Convert absu(x)*absu(x) -> x*x.  */
674(simplify
675 (mult (absu@1 @0) @1)
676 (mult (convert@2 @0) @2))
677
678/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
679(for coss (COS COSH)
680     copysigns (COPYSIGN)
681 (simplify
682  (coss (copysigns @0 @1))
683   (coss @0)))
684
685/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
686(for pows (POW)
687     copysigns (COPYSIGN)
688 (simplify
689  (pows (copysigns @0 @2) REAL_CST@1)
690  (with { HOST_WIDE_INT n; }
691   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
692    (pows @0 @1)))))
693/* Likewise for powi.  */
694(for pows (POWI)
695     copysigns (COPYSIGN)
696 (simplify
697  (pows (copysigns @0 @2) INTEGER_CST@1)
698  (if ((wi::to_wide (@1) & 1) == 0)
699   (pows @0 @1))))
700
701(for hypots (HYPOT)
702     copysigns (COPYSIGN)
703 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
704 (simplify
705  (hypots (copysigns @0 @1) @2)
706  (hypots @0 @2))
707 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
708 (simplify
709  (hypots @0 (copysigns @1 @2))
710  (hypots @0 @1)))
711
712/* copysign(x, CST) -> [-]abs (x).  */
713(for copysigns (COPYSIGN_ALL)
714 (simplify
715  (copysigns @0 REAL_CST@1)
716  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
717   (negate (abs @0))
718   (abs @0))))
719
720/* copysign(copysign(x, y), z) -> copysign(x, z).  */
721(for copysigns (COPYSIGN_ALL)
722 (simplify
723  (copysigns (copysigns @0 @1) @2)
724  (copysigns @0 @2)))
725
726/* copysign(x,y)*copysign(x,y) -> x*x.  */
727(for copysigns (COPYSIGN_ALL)
728 (simplify
729  (mult (copysigns@2 @0 @1) @2)
730  (mult @0 @0)))
731
732/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
733(for ccoss (CCOS CCOSH)
734 (simplify
735  (ccoss (negate @0))
736   (ccoss @0)))
737
738/* cabs(-x) and cos(conj(x)) -> cabs(x).  */
739(for ops (conj negate)
740 (for cabss (CABS)
741  (simplify
742   (cabss (ops @0))
743   (cabss @0))))
744
745/* Fold (a * (1 << b)) into (a << b)  */
746(simplify
747 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
748  (if (! FLOAT_TYPE_P (type)
749       && tree_nop_conversion_p (type, TREE_TYPE (@1)))
750   (lshift @0 @2)))
751
752/* Fold (1 << (C - x)) where C = precision(type) - 1
753   into ((1 << C) >> x). */
754(simplify
755 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
756  (if (INTEGRAL_TYPE_P (type)
757       && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
758       && single_use (@1))
759   (if (TYPE_UNSIGNED (type))
760     (rshift (lshift @0 @2) @3)
761   (with
762    { tree utype = unsigned_type_for (type); }
763    (convert (rshift (lshift (convert:utype @0) @2) @3))))))
764
765/* Fold (C1/X)*C2 into (C1*C2)/X.  */
766(simplify
767 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
768  (if (flag_associative_math
769       && single_use (@3))
770   (with
771    { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
772    (if (tem)
773     (rdiv { tem; } @1)))))
774
775/* Simplify ~X & X as zero.  */
776(simplify
777 (bit_and:c (convert? @0) (convert? (bit_not @0)))
778  { build_zero_cst (type); })
779
780/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
781(simplify
782  (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
783  (if (TYPE_UNSIGNED (type))
784    (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
785
786(for bitop (bit_and bit_ior)
787     cmp (eq ne)
788 /* PR35691: Transform
789    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
790    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
791 (simplify
792  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
793   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
794	&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
795	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
796    (cmp (bit_ior @0 (convert @1)) @2)))
797 /* Transform:
798    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
799    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
800 (simplify
801  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
802   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
803	&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
804	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
805    (cmp (bit_and @0 (convert @1)) @2))))
806
807/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
808(simplify
809 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
810  (minus (bit_xor @0 @1) @1))
811(simplify
812 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
813 (if (~wi::to_wide (@2) == wi::to_wide (@1))
814  (minus (bit_xor @0 @1) @1)))
815
816/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
817(simplify
818 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
819  (minus @1 (bit_xor @0 @1)))
820
821/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
822(for op (bit_ior bit_xor plus)
823 (simplify
824  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
825   (bit_xor @0 @1))
826 (simplify
827  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
828  (if (~wi::to_wide (@2) == wi::to_wide (@1))
829   (bit_xor @0 @1))))
830
831/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
832(simplify
833  (bit_ior:c (bit_xor:c @0 @1) @0)
834  (bit_ior @0 @1))
835
836/* (a & ~b) | (a ^ b)  -->  a ^ b  */
837(simplify
838 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
839 @2)
840
841/* (a & ~b) ^ ~a  -->  ~(a & b)  */
842(simplify
843 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
844 (bit_not (bit_and @0 @1)))
845
846/* (~a & b) ^ a  -->   (a | b)   */
847(simplify
848 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
849 (bit_ior @0 @1))
850
851/* (a | b) & ~(a ^ b)  -->  a & b  */
852(simplify
853 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
854 (bit_and @0 @1))
855
856/* a | ~(a ^ b)  -->  a | ~b  */
857(simplify
858 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
859 (bit_ior @0 (bit_not @1)))
860
861/* (a | b) | (a &^ b)  -->  a | b  */
862(for op (bit_and bit_xor)
863 (simplify
864  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
865  @2))
866
867/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
868(simplify
869 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
870 @2)
871
872/* ~(~a & b)  -->  a | ~b  */
873(simplify
874 (bit_not (bit_and:cs (bit_not @0) @1))
875 (bit_ior @0 (bit_not @1)))
876
877/* ~(~a | b) --> a & ~b */
878(simplify
879 (bit_not (bit_ior:cs (bit_not @0) @1))
880 (bit_and @0 (bit_not @1)))
881
882/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
883#if GIMPLE
884(simplify
885 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
886 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
887      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
888  (bit_xor @0 @1)))
889#endif
890
891/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
892   ((A & N) + B) & M -> (A + B) & M
893   Similarly if (N & M) == 0,
894   ((A | N) + B) & M -> (A + B) & M
895   and for - instead of + (or unary - instead of +)
896   and/or ^ instead of |.
897   If B is constant and (B & M) == 0, fold into A & M.  */
898(for op (plus minus)
899 (for bitop (bit_and bit_ior bit_xor)
900  (simplify
901   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
902    (with
903     { tree pmop[2];
904       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
905				       @3, @4, @1, ERROR_MARK, NULL_TREE,
906				       NULL_TREE, pmop); }
907     (if (utype)
908      (convert (bit_and (op (convert:utype { pmop[0]; })
909			    (convert:utype { pmop[1]; }))
910			(convert:utype @2))))))
911  (simplify
912   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
913    (with
914     { tree pmop[2];
915       tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
916				       NULL_TREE, NULL_TREE, @1, bitop, @3,
917				       @4, pmop); }
918     (if (utype)
919      (convert (bit_and (op (convert:utype { pmop[0]; })
920			    (convert:utype { pmop[1]; }))
921			(convert:utype @2)))))))
922 (simplify
923  (bit_and (op:s @0 @1) INTEGER_CST@2)
924   (with
925    { tree pmop[2];
926      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
927				      NULL_TREE, NULL_TREE, @1, ERROR_MARK,
928				      NULL_TREE, NULL_TREE, pmop); }
929    (if (utype)
930     (convert (bit_and (op (convert:utype { pmop[0]; })
931			   (convert:utype { pmop[1]; }))
932		       (convert:utype @2)))))))
933(for bitop (bit_and bit_ior bit_xor)
934 (simplify
935  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
936   (with
937    { tree pmop[2];
938      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
939				      bitop, @2, @3, NULL_TREE, ERROR_MARK,
940				      NULL_TREE, NULL_TREE, pmop); }
941    (if (utype)
942     (convert (bit_and (negate (convert:utype { pmop[0]; }))
943		       (convert:utype @1)))))))
944
945/* X % Y is smaller than Y.  */
946(for cmp (lt ge)
947 (simplify
948  (cmp (trunc_mod @0 @1) @1)
949  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
950   { constant_boolean_node (cmp == LT_EXPR, type); })))
951(for cmp (gt le)
952 (simplify
953  (cmp @1 (trunc_mod @0 @1))
954  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
955   { constant_boolean_node (cmp == GT_EXPR, type); })))
956
957/* x | ~0 -> ~0  */
958(simplify
959 (bit_ior @0 integer_all_onesp@1)
960 @1)
961
962/* x | 0 -> x  */
963(simplify
964 (bit_ior @0 integer_zerop)
965 @0)
966
967/* x & 0 -> 0  */
968(simplify
969 (bit_and @0 integer_zerop@1)
970 @1)
971
972/* ~x | x -> -1 */
973/* ~x ^ x -> -1 */
974/* ~x + x -> -1 */
975(for op (bit_ior bit_xor plus)
976 (simplify
977  (op:c (convert? @0) (convert? (bit_not @0)))
978  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
979
980/* x ^ x -> 0 */
981(simplify
982  (bit_xor @0 @0)
983  { build_zero_cst (type); })
984
985/* Canonicalize X ^ ~0 to ~X.  */
986(simplify
987  (bit_xor @0 integer_all_onesp@1)
988  (bit_not @0))
989
990/* x & ~0 -> x  */
991(simplify
992 (bit_and @0 integer_all_onesp)
993  (non_lvalue @0))
994
995/* x & x -> x,  x | x -> x  */
996(for bitop (bit_and bit_ior)
997 (simplify
998  (bitop @0 @0)
999  (non_lvalue @0)))
1000
1001/* x & C -> x if we know that x & ~C == 0.  */
1002#if GIMPLE
1003(simplify
1004 (bit_and SSA_NAME@0 INTEGER_CST@1)
1005 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1006      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
1007  @0))
1008#endif
1009
1010/* x + (x & 1) -> (x + 1) & ~1 */
1011(simplify
1012 (plus:c @0 (bit_and:s @0 integer_onep@1))
1013 (bit_and (plus @0 @1) (bit_not @1)))
1014
1015/* x & ~(x & y) -> x & ~y */
1016/* x | ~(x | y) -> x | ~y  */
1017(for bitop (bit_and bit_ior)
1018 (simplify
1019  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
1020  (bitop @0 (bit_not @1))))
1021
1022/* (~x & y) | ~(x | y) -> ~x */
1023(simplify
1024 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
1025 @2)
1026
1027/* (x | y) ^ (x | ~y) -> ~x */
1028(simplify
1029 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
1030 (bit_not @0))
1031
1032/* (x & y) | ~(x | y) -> ~(x ^ y) */
1033(simplify
1034 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1035 (bit_not (bit_xor @0 @1)))
1036
1037/* (~x | y) ^ (x ^ y) -> x | ~y */
1038(simplify
1039 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
1040 (bit_ior @0 (bit_not @1)))
1041
1042/* (x ^ y) | ~(x | y) -> ~(x & y) */
1043(simplify
1044 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1045 (bit_not (bit_and @0 @1)))
1046
1047/* (x | y) & ~x -> y & ~x */
1048/* (x & y) | ~x -> y | ~x */
1049(for bitop (bit_and bit_ior)
1050     rbitop (bit_ior bit_and)
1051 (simplify
1052  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
1053  (bitop @1 @2)))
1054
1055/* (x & y) ^ (x | y) -> x ^ y */
1056(simplify
1057 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
1058 (bit_xor @0 @1))
1059
1060/* (x ^ y) ^ (x | y) -> x & y */
1061(simplify
1062 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
1063 (bit_and @0 @1))
1064
1065/* (x & y) + (x ^ y) -> x | y */
1066/* (x & y) | (x ^ y) -> x | y */
1067/* (x & y) ^ (x ^ y) -> x | y */
1068(for op (plus bit_ior bit_xor)
1069 (simplify
1070  (op:c (bit_and @0 @1) (bit_xor @0 @1))
1071  (bit_ior @0 @1)))
1072
1073/* (x & y) + (x | y) -> x + y */
1074(simplify
1075 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
1076 (plus @0 @1))
1077
1078/* (x + y) - (x | y) -> x & y */
1079(simplify
1080 (minus (plus @0 @1) (bit_ior @0 @1))
1081 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1082      && !TYPE_SATURATING (type))
1083  (bit_and @0 @1)))
1084
1085/* (x + y) - (x & y) -> x | y */
1086(simplify
1087 (minus (plus @0 @1) (bit_and @0 @1))
1088 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1089      && !TYPE_SATURATING (type))
1090  (bit_ior @0 @1)))
1091
1092/* (x | y) - (x ^ y) -> x & y */
1093(simplify
1094 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1095 (bit_and @0 @1))
1096
1097/* (x | y) - (x & y) -> x ^ y */
1098(simplify
1099 (minus (bit_ior @0 @1) (bit_and @0 @1))
1100 (bit_xor @0 @1))
1101
1102/* (x | y) & ~(x & y) -> x ^ y */
1103(simplify
1104 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1105 (bit_xor @0 @1))
1106
1107/* (x | y) & (~x ^ y) -> x & y */
1108(simplify
1109 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
1110 (bit_and @0 @1))
1111
1112/* (~x | y) & (x | ~y) -> ~(x ^ y) */
1113(simplify
1114 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1115 (bit_not (bit_xor @0 @1)))
1116
1117/* (~x | y) ^ (x | ~y) -> x ^ y */
1118(simplify
1119 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1120 (bit_xor @0 @1))
1121
1122/* ~x & ~y -> ~(x | y)
1123   ~x | ~y -> ~(x & y) */
1124(for op (bit_and bit_ior)
1125     rop (bit_ior bit_and)
1126 (simplify
1127  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1128  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1129       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1130   (bit_not (rop (convert @0) (convert @1))))))
1131
1132/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
1133   with a constant, and the two constants have no bits in common,
1134   we should treat this as a BIT_IOR_EXPR since this may produce more
1135   simplifications.  */
1136(for op (bit_xor plus)
1137 (simplify
1138  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1139      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1140  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1141       && tree_nop_conversion_p (type, TREE_TYPE (@2))
1142       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
1143   (bit_ior (convert @4) (convert @5)))))
1144
1145/* (X | Y) ^ X -> Y & ~ X*/
1146(simplify
1147 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
1148 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1149  (convert (bit_and @1 (bit_not @0)))))
1150
1151/* Convert ~X ^ ~Y to X ^ Y.  */
1152(simplify
1153 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1154 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1155      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1156  (bit_xor (convert @0) (convert @1))))
1157
1158/* Convert ~X ^ C to X ^ ~C.  */
1159(simplify
1160 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
1161 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1162  (bit_xor (convert @0) (bit_not @1))))
1163
1164/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
1165(for opo (bit_and bit_xor)
1166     opi (bit_xor bit_and)
1167 (simplify
1168  (opo:c (opi:cs @0 @1) @1)
1169  (bit_and (bit_not @0) @1)))
1170
1171/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1172   operands are another bit-wise operation with a common input.  If so,
1173   distribute the bit operations to save an operation and possibly two if
1174   constants are involved.  For example, convert
1175     (A | B) & (A | C) into A | (B & C)
1176   Further simplification will occur if B and C are constants.  */
1177(for op (bit_and bit_ior bit_xor)
1178     rop (bit_ior bit_and bit_and)
1179 (simplify
1180  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
1181  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1182       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1183   (rop (convert @0) (op (convert @1) (convert @2))))))
1184
1185/* Some simple reassociation for bit operations, also handled in reassoc.  */
1186/* (X & Y) & Y -> X & Y
1187   (X | Y) | Y -> X | Y  */
1188(for op (bit_and bit_ior)
1189 (simplify
1190  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
1191  @2))
1192/* (X ^ Y) ^ Y -> X  */
1193(simplify
1194 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
1195 (convert @0))
1196/* (X & Y) & (X & Z) -> (X & Y) & Z
1197   (X | Y) | (X | Z) -> (X | Y) | Z  */
1198(for op (bit_and bit_ior)
1199 (simplify
1200  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
1201  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1202       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1203   (if (single_use (@5) && single_use (@6))
1204    (op @3 (convert @2))
1205    (if (single_use (@3) && single_use (@4))
1206     (op (convert @1) @5))))))
1207/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
1208(simplify
1209 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1210 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1211      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1212  (bit_xor (convert @1) (convert @2))))
1213
1214/* Convert abs (abs (X)) into abs (X).
1215   also absu (absu (X)) into absu (X).  */
1216(simplify
1217 (abs (abs@1 @0))
1218 @1)
1219
1220(simplify
1221 (absu (convert@2 (absu@1 @0)))
1222 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1223  @1))
1224
1225/* Convert abs[u] (-X) -> abs[u] (X).  */
1226(simplify
1227 (abs (negate @0))
1228 (abs @0))
1229
1230(simplify
1231 (absu (negate @0))
1232 (absu @0))
1233
1234/* Convert abs[u] (X)  where X is nonnegative -> (X).  */
1235(simplify
1236 (abs tree_expr_nonnegative_p@0)
1237 @0)
1238
1239(simplify
1240 (absu tree_expr_nonnegative_p@0)
1241 (convert @0))
1242
1243/* A few cases of fold-const.c negate_expr_p predicate.  */
1244(match negate_expr_p
1245 INTEGER_CST
1246 (if ((INTEGRAL_TYPE_P (type)
1247       && TYPE_UNSIGNED (type))
1248      || (!TYPE_OVERFLOW_SANITIZED (type)
1249	  && may_negate_without_overflow_p (t)))))
1250(match negate_expr_p
1251 FIXED_CST)
1252(match negate_expr_p
1253 (negate @0)
1254 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1255(match negate_expr_p
1256 REAL_CST
1257 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
1258/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
1259   ways.  */
1260(match negate_expr_p
1261 VECTOR_CST
1262 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
1263(match negate_expr_p
1264 (minus @0 @1)
1265 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1266      || (FLOAT_TYPE_P (type)
1267	  && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1268	  && !HONOR_SIGNED_ZEROS (type)))))
1269
1270/* (-A) * (-B) -> A * B  */
1271(simplify
1272 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1273  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1274       && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1275   (mult (convert @0) (convert (negate @1)))))
1276
1277/* -(A + B) -> (-B) - A.  */
1278(simplify
1279 (negate (plus:c @0 negate_expr_p@1))
1280 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
1281      && !HONOR_SIGNED_ZEROS (element_mode (type)))
1282  (minus (negate @1) @0)))
1283
1284/* -(A - B) -> B - A.  */
1285(simplify
1286 (negate (minus @0 @1))
1287 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1288      || (FLOAT_TYPE_P (type)
1289	  && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1290	  && !HONOR_SIGNED_ZEROS (type)))
1291  (minus @1 @0)))
1292(simplify
1293 (negate (pointer_diff @0 @1))
1294 (if (TYPE_OVERFLOW_UNDEFINED (type))
1295  (pointer_diff @1 @0)))
1296
1297/* A - B -> A + (-B) if B is easily negatable.  */
1298(simplify
1299 (minus @0 negate_expr_p@1)
1300 (if (!FIXED_POINT_TYPE_P (type))
1301 (plus @0 (negate @1))))
1302
1303/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1304   when profitable.
1305   For bitwise binary operations apply operand conversions to the
1306   binary operation result instead of to the operands.  This allows
1307   to combine successive conversions and bitwise binary operations.
1308   We combine the above two cases by using a conditional convert.  */
1309(for bitop (bit_and bit_ior bit_xor)
1310 (simplify
1311  (bitop (convert @0) (convert? @1))
1312  (if (((TREE_CODE (@1) == INTEGER_CST
1313	 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1314	 && int_fits_type_p (@1, TREE_TYPE (@0)))
1315	|| types_match (@0, @1))
1316       /* ???  This transform conflicts with fold-const.c doing
1317	  Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1318	  constants (if x has signed type, the sign bit cannot be set
1319	  in c).  This folds extension into the BIT_AND_EXPR.
1320	  Restrict it to GIMPLE to avoid endless recursions.  */
1321       && (bitop != BIT_AND_EXPR || GIMPLE)
1322       && (/* That's a good idea if the conversion widens the operand, thus
1323	      after hoisting the conversion the operation will be narrower.  */
1324	   TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1325	   /* It's also a good idea if the conversion is to a non-integer
1326	      mode.  */
1327	   || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1328	   /* Or if the precision of TO is not the same as the precision
1329	      of its mode.  */
1330	   || !type_has_mode_precision_p (type)))
1331   (convert (bitop @0 (convert @1))))))
1332
1333(for bitop (bit_and bit_ior)
1334     rbitop (bit_ior bit_and)
1335  /* (x | y) & x -> x */
1336  /* (x & y) | x -> x */
1337 (simplify
1338  (bitop:c (rbitop:c @0 @1) @0)
1339  @0)
1340 /* (~x | y) & x -> x & y */
1341 /* (~x & y) | x -> x | y */
1342 (simplify
1343  (bitop:c (rbitop:c (bit_not @0) @1) @0)
1344  (bitop @0 @1)))
1345
1346/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1347(simplify
1348  (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1349  (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1350
1351/* Combine successive equal operations with constants.  */
1352(for bitop (bit_and bit_ior bit_xor)
1353 (simplify
1354  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1355  (if (!CONSTANT_CLASS_P (@0))
1356   /* This is the canonical form regardless of whether (bitop @1 @2) can be
1357      folded to a constant.  */
1358   (bitop @0 (bitop @1 @2))
1359   /* In this case we have three constants and (bitop @0 @1) doesn't fold
1360      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
1361      the values involved are such that the operation can't be decided at
1362      compile time.  Try folding one of @0 or @1 with @2 to see whether
1363      that combination can be decided at compile time.
1364
1365      Keep the existing form if both folds fail, to avoid endless
1366      oscillation.  */
1367   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1368    (if (cst1)
1369     (bitop @1 { cst1; })
1370     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1371      (if (cst2)
1372       (bitop @0 { cst2; }))))))))
1373
1374/* Try simple folding for X op !X, and X op X with the help
1375   of the truth_valued_p and logical_inverted_value predicates.  */
1376(match truth_valued_p
1377 @0
1378 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1379(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1380 (match truth_valued_p
1381  (op @0 @1)))
1382(match truth_valued_p
1383  (truth_not @0))
1384
1385(match (logical_inverted_value @0)
1386 (truth_not @0))
1387(match (logical_inverted_value @0)
1388 (bit_not truth_valued_p@0))
1389(match (logical_inverted_value @0)
1390 (eq @0 integer_zerop))
1391(match (logical_inverted_value @0)
1392 (ne truth_valued_p@0 integer_truep))
1393(match (logical_inverted_value @0)
1394 (bit_xor truth_valued_p@0 integer_truep))
1395
1396/* X & !X -> 0.  */
1397(simplify
1398 (bit_and:c @0 (logical_inverted_value @0))
1399 { build_zero_cst (type); })
1400/* X | !X and X ^ !X -> 1, , if X is truth-valued.  */
1401(for op (bit_ior bit_xor)
1402 (simplify
1403  (op:c truth_valued_p@0 (logical_inverted_value @0))
1404  { constant_boolean_node (true, type); }))
1405/* X ==/!= !X is false/true.  */
1406(for op (eq ne)
1407 (simplify
1408  (op:c truth_valued_p@0 (logical_inverted_value @0))
1409  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1410
1411/* ~~x -> x */
1412(simplify
1413  (bit_not (bit_not @0))
1414  @0)
1415
1416/* Convert ~ (-A) to A - 1.  */
1417(simplify
1418 (bit_not (convert? (negate @0)))
1419 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1420      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1421  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
1422
1423/* Convert - (~A) to A + 1.  */
1424(simplify
1425 (negate (nop_convert? (bit_not @0)))
1426 (plus (view_convert @0) { build_each_one_cst (type); }))
1427
1428/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
1429(simplify
1430 (bit_not (convert? (minus @0 integer_each_onep)))
1431 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1432      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1433  (convert (negate @0))))
1434(simplify
1435 (bit_not (convert? (plus @0 integer_all_onesp)))
1436 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1437      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1438  (convert (negate @0))))
1439
1440/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
1441(simplify
1442 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1443 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1444  (convert (bit_xor @0 (bit_not @1)))))
1445(simplify
1446 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1447 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1448  (convert (bit_xor @0 @1))))
1449
1450/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical.  */
1451(simplify
1452 (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1)
1453 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1454  (bit_not (bit_xor (view_convert @0) @1))))
1455
1456/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1457(simplify
1458 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1459 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
1460
1461/* Fold A - (A & B) into ~B & A.  */
1462(simplify
1463 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1464 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1465      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1466  (convert (bit_and (bit_not @1) @0))))
1467
1468/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0  */
1469(for cmp (gt lt ge le)
1470(simplify
1471 (mult (convert (cmp @0 @1)) @2)
1472  (if (GIMPLE || !TREE_SIDE_EFFECTS (@2))
1473   (cond (cmp @0 @1) @2 { build_zero_cst (type); }))))
1474
1475/* For integral types with undefined overflow and C != 0 fold
1476   x * C EQ/NE y * C into x EQ/NE y.  */
1477(for cmp (eq ne)
1478 (simplify
1479  (cmp (mult:c @0 @1) (mult:c @2 @1))
1480  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1481       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1482       && tree_expr_nonzero_p (@1))
1483   (cmp @0 @2))))
1484
1485/* For integral types with wrapping overflow and C odd fold
1486   x * C EQ/NE y * C into x EQ/NE y.  */
1487(for cmp (eq ne)
1488 (simplify
1489  (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1490  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1491       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1492       && (TREE_INT_CST_LOW (@1) & 1) != 0)
1493   (cmp @0 @2))))
1494
1495/* For integral types with undefined overflow and C != 0 fold
1496   x * C RELOP y * C into:
1497
1498   x RELOP y for nonnegative C
1499   y RELOP x for negative C  */
1500(for cmp (lt gt le ge)
1501 (simplify
1502  (cmp (mult:c @0 @1) (mult:c @2 @1))
1503  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1504       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1505   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1506    (cmp @0 @2)
1507   (if (TREE_CODE (@1) == INTEGER_CST
1508	&& wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
1509    (cmp @2 @0))))))
1510
1511/* (X - 1U) <= INT_MAX-1U into (int) X > 0.  */
1512(for cmp (le gt)
1513     icmp (gt le)
1514 (simplify
1515  (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1516   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1517	&& TYPE_UNSIGNED (TREE_TYPE (@0))
1518	&& TYPE_PRECISION (TREE_TYPE (@0)) > 1
1519	&& (wi::to_wide (@2)
1520	    == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
1521    (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1522     (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
1523
1524/* X / 4 < Y / 4 iff X < Y when the division is known to be exact.  */
1525(for cmp (simple_comparison)
1526 (simplify
1527  (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
1528  (if (element_precision (@3) >= element_precision (@0)
1529       && types_match (@0, @1))
1530   (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
1531    (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
1532     (cmp @1 @0)
1533     (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
1534      (with
1535       {
1536	tree utype = unsigned_type_for (TREE_TYPE (@0));
1537       }
1538       (cmp (convert:utype @1) (convert:utype @0)))))
1539    (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
1540     (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
1541      (cmp @0 @1)
1542      (with
1543       {
1544	tree utype = unsigned_type_for (TREE_TYPE (@0));
1545       }
1546       (cmp (convert:utype @0) (convert:utype @1)))))))))
1547
1548/* X / C1 op C2 into a simple range test.  */
1549(for cmp (simple_comparison)
1550 (simplify
1551  (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1552  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1553       && integer_nonzerop (@1)
1554       && !TREE_OVERFLOW (@1)
1555       && !TREE_OVERFLOW (@2))
1556   (with { tree lo, hi; bool neg_overflow;
1557	   enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1558						   &neg_overflow); }
1559    (switch
1560     (if (code == LT_EXPR || code == GE_EXPR)
1561       (if (TREE_OVERFLOW (lo))
1562	{ build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1563	(if (code == LT_EXPR)
1564	 (lt @0 { lo; })
1565	 (ge @0 { lo; }))))
1566     (if (code == LE_EXPR || code == GT_EXPR)
1567       (if (TREE_OVERFLOW (hi))
1568	{ build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1569	(if (code == LE_EXPR)
1570	 (le @0 { hi; })
1571	 (gt @0 { hi; }))))
1572     (if (!lo && !hi)
1573      { build_int_cst (type, code == NE_EXPR); })
1574     (if (code == EQ_EXPR && !hi)
1575      (ge @0 { lo; }))
1576     (if (code == EQ_EXPR && !lo)
1577      (le @0 { hi; }))
1578     (if (code == NE_EXPR && !hi)
1579      (lt @0 { lo; }))
1580     (if (code == NE_EXPR && !lo)
1581      (gt @0 { hi; }))
1582     (if (GENERIC)
1583      { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1584			   lo, hi); })
1585     (with
1586      {
1587	tree etype = range_check_type (TREE_TYPE (@0));
1588	if (etype)
1589	  {
1590	    hi = fold_convert (etype, hi);
1591	    lo = fold_convert (etype, lo);
1592	    hi = const_binop (MINUS_EXPR, etype, hi, lo);
1593	  }
1594      }
1595      (if (etype && hi && !TREE_OVERFLOW (hi))
1596       (if (code == EQ_EXPR)
1597	(le (minus (convert:etype @0) { lo; }) { hi; })
1598	(gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
1599
1600/* X + Z < Y + Z is the same as X < Y when there is no overflow.  */
1601(for op (lt le ge gt)
1602 (simplify
1603  (op (plus:c @0 @2) (plus:c @1 @2))
1604  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1605       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1606   (op @0 @1))))
1607/* For equality and subtraction, this is also true with wrapping overflow.  */
1608(for op (eq ne minus)
1609 (simplify
1610  (op (plus:c @0 @2) (plus:c @1 @2))
1611  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1612       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1613	   || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1614   (op @0 @1))))
1615
1616/* X - Z < Y - Z is the same as X < Y when there is no overflow.  */
1617(for op (lt le ge gt)
1618 (simplify
1619  (op (minus @0 @2) (minus @1 @2))
1620  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1621       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1622   (op @0 @1))))
1623/* For equality and subtraction, this is also true with wrapping overflow.  */
1624(for op (eq ne minus)
1625 (simplify
1626  (op (minus @0 @2) (minus @1 @2))
1627  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1628       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1629	   || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1630   (op @0 @1))))
1631/* And for pointers...  */
1632(for op (simple_comparison)
1633 (simplify
1634  (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1635  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1636   (op @0 @1))))
1637(simplify
1638 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1639 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1640      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1641  (pointer_diff @0 @1)))
1642
1643/* Z - X < Z - Y is the same as Y < X when there is no overflow.  */
1644(for op (lt le ge gt)
1645 (simplify
1646  (op (minus @2 @0) (minus @2 @1))
1647  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1648       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1649   (op @1 @0))))
1650/* For equality and subtraction, this is also true with wrapping overflow.  */
1651(for op (eq ne minus)
1652 (simplify
1653  (op (minus @2 @0) (minus @2 @1))
1654  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1655       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1656	   || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1657   (op @1 @0))))
1658/* And for pointers...  */
1659(for op (simple_comparison)
1660 (simplify
1661  (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1662  (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1663   (op @1 @0))))
1664(simplify
1665 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1666 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1667      && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1668  (pointer_diff @1 @0)))
1669
1670/* X + Y < Y is the same as X < 0 when there is no overflow.  */
1671(for op (lt le gt ge)
1672 (simplify
1673  (op:c (plus:c@2 @0 @1) @1)
1674  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1675       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1676       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
1677       && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1678   (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1679/* For equality, this is also true with wrapping overflow.  */
1680(for op (eq ne)
1681 (simplify
1682  (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1683  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1684       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1685	   || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1686       && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1687       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1688       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1689   (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1690 (simplify
1691  (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1692  (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1693       && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1694       && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1695   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1696
1697/* X - Y < X is the same as Y > 0 when there is no overflow.
1698   For equality, this is also true with wrapping overflow.  */
1699(for op (simple_comparison)
1700 (simplify
1701  (op:c @0 (minus@2 @0 @1))
1702  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1703       && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1704	   || ((op == EQ_EXPR || op == NE_EXPR)
1705	       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1706       && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1707   (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1708
1709/* Transform:
1710   (X / Y) == 0 -> X < Y if X, Y are unsigned.
1711   (X / Y) != 0 -> X >= Y, if X, Y are unsigned.  */
1712(for cmp (eq ne)
1713     ocmp (lt ge)
1714 (simplify
1715  (cmp (trunc_div @0 @1) integer_zerop)
1716  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1717       /* Complex ==/!= is allowed, but not </>=.  */
1718       && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1719       && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1720   (ocmp @0 @1))))
1721
1722/* X == C - X can never be true if C is odd.  */
1723(for cmp (eq ne)
1724 (simplify
1725  (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1726  (if (TREE_INT_CST_LOW (@1) & 1)
1727   { constant_boolean_node (cmp == NE_EXPR, type); })))
1728
1729/* Arguments on which one can call get_nonzero_bits to get the bits
1730   possibly set.  */
1731(match with_possible_nonzero_bits
1732 INTEGER_CST@0)
1733(match with_possible_nonzero_bits
1734 SSA_NAME@0
1735 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1736/* Slightly extended version, do not make it recursive to keep it cheap.  */
1737(match (with_possible_nonzero_bits2 @0)
1738 with_possible_nonzero_bits@0)
1739(match (with_possible_nonzero_bits2 @0)
1740 (bit_and:c with_possible_nonzero_bits@0 @2))
1741
1742/* Same for bits that are known to be set, but we do not have
1743   an equivalent to get_nonzero_bits yet.  */
1744(match (with_certain_nonzero_bits2 @0)
1745 INTEGER_CST@0)
1746(match (with_certain_nonzero_bits2 @0)
1747 (bit_ior @1 INTEGER_CST@0))
1748
1749/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0.  */
1750(for cmp (eq ne)
1751 (simplify
1752  (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1753  (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1754   { constant_boolean_node (cmp == NE_EXPR, type); })))
1755
1756/* ((X inner_op C0) outer_op C1)
1757   With X being a tree where value_range has reasoned certain bits to always be
1758   zero throughout its computed value range,
1759   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1760   where zero_mask has 1's for all bits that are sure to be 0 in
1761   and 0's otherwise.
1762   if (inner_op == '^') C0 &= ~C1;
1763   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
1764   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
1765*/
1766(for inner_op (bit_ior bit_xor)
1767     outer_op (bit_xor bit_ior)
1768(simplify
1769 (outer_op
1770  (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1771 (with
1772  {
1773    bool fail = false;
1774    wide_int zero_mask_not;
1775    wide_int C0;
1776    wide_int cst_emit;
1777
1778    if (TREE_CODE (@2) == SSA_NAME)
1779      zero_mask_not = get_nonzero_bits (@2);
1780    else
1781      fail = true;
1782
1783    if (inner_op == BIT_XOR_EXPR)
1784      {
1785	C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1786	cst_emit = C0 | wi::to_wide (@1);
1787      }
1788    else
1789      {
1790	C0 = wi::to_wide (@0);
1791	cst_emit = C0 ^ wi::to_wide (@1);
1792      }
1793  }
1794  (if (!fail && (C0 & zero_mask_not) == 0)
1795   (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1796   (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1797    (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1798
1799/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
1800(simplify
1801  (pointer_plus (pointer_plus:s @0 @1) @3)
1802  (pointer_plus @0 (plus @1 @3)))
1803
1804/* Pattern match
1805     tem1 = (long) ptr1;
1806     tem2 = (long) ptr2;
1807     tem3 = tem2 - tem1;
1808     tem4 = (unsigned long) tem3;
1809     tem5 = ptr1 + tem4;
1810   and produce
1811     tem5 = ptr2;  */
1812(simplify
1813  (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1814  /* Conditionally look through a sign-changing conversion.  */
1815  (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1816       && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1817	    || (GENERIC && type == TREE_TYPE (@1))))
1818   @1))
1819(simplify
1820  (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1821  (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1822   (convert @1)))
1823
1824/* Pattern match
1825     tem = (sizetype) ptr;
1826     tem = tem & algn;
1827     tem = -tem;
1828     ... = ptr p+ tem;
1829   and produce the simpler and easier to analyze with respect to alignment
1830     ... = ptr & ~algn;  */
1831(simplify
1832  (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1833  (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1834   (bit_and @0 { algn; })))
1835
1836/* Try folding difference of addresses.  */
1837(simplify
1838 (minus (convert ADDR_EXPR@0) (convert @1))
1839 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1840  (with { poly_int64 diff; }
1841   (if (ptr_difference_const (@0, @1, &diff))
1842    { build_int_cst_type (type, diff); }))))
1843(simplify
1844 (minus (convert @0) (convert ADDR_EXPR@1))
1845 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1846  (with { poly_int64 diff; }
1847   (if (ptr_difference_const (@0, @1, &diff))
1848    { build_int_cst_type (type, diff); }))))
1849(simplify
1850 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1851 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1852      && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1853  (with { poly_int64 diff; }
1854   (if (ptr_difference_const (@0, @1, &diff))
1855    { build_int_cst_type (type, diff); }))))
1856(simplify
1857 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1858 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1859      && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1860  (with { poly_int64 diff; }
1861   (if (ptr_difference_const (@0, @1, &diff))
1862    { build_int_cst_type (type, diff); }))))
1863
1864/* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst].  */
1865(simplify
1866 (convert (pointer_diff @0 INTEGER_CST@1))
1867 (if (POINTER_TYPE_P (type))
1868  { build_fold_addr_expr_with_type
1869      (build2 (MEM_REF, char_type_node, @0,
1870	       wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))),
1871	       type); }))
1872
1873/* If arg0 is derived from the address of an object or function, we may
1874   be able to fold this expression using the object or function's
1875   alignment.  */
1876(simplify
1877 (bit_and (convert? @0) INTEGER_CST@1)
1878 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1879      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1880  (with
1881   {
1882     unsigned int align;
1883     unsigned HOST_WIDE_INT bitpos;
1884     get_pointer_alignment_1 (@0, &align, &bitpos);
1885   }
1886   (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1887    { wide_int_to_tree (type, (wi::to_wide (@1)
1888			       & (bitpos / BITS_PER_UNIT))); }))))
1889
1890(match min_value
1891 INTEGER_CST
1892 (if (INTEGRAL_TYPE_P (type)
1893      && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))
1894
1895(match max_value
1896 INTEGER_CST
1897 (if (INTEGRAL_TYPE_P (type)
1898      && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))
1899
1900/* x >  y  &&  x != XXX_MIN  -->  x > y
1901   x >  y  &&  x == XXX_MIN  -->  false . */
1902(for eqne (eq ne)
1903 (simplify
1904  (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
1905   (switch
1906    (if (eqne == EQ_EXPR)
1907     { constant_boolean_node (false, type); })
1908    (if (eqne == NE_EXPR)
1909     @2)
1910    )))
1911
1912/* x <  y  &&  x != XXX_MAX  -->  x < y
1913   x <  y  &&  x == XXX_MAX  -->  false.  */
1914(for eqne (eq ne)
1915 (simplify
1916  (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
1917   (switch
1918    (if (eqne == EQ_EXPR)
1919     { constant_boolean_node (false, type); })
1920    (if (eqne == NE_EXPR)
1921     @2)
1922    )))
1923
1924/* x <=  y  &&  x == XXX_MIN  -->  x == XXX_MIN.  */
1925(simplify
1926 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
1927  @2)
1928
1929/* x >=  y  &&  x == XXX_MAX  -->  x == XXX_MAX.  */
1930(simplify
1931 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
1932  @2)
1933
1934/* x >  y  ||  x != XXX_MIN   -->  x != XXX_MIN.  */
1935(simplify
1936 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
1937  @2)
1938
1939/* x <=  y  ||  x != XXX_MIN   -->  true.  */
1940(simplify
1941 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
1942  { constant_boolean_node (true, type); })
1943
1944/* x <=  y  ||  x == XXX_MIN   -->  x <= y.  */
1945(simplify
1946 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
1947  @2)
1948
1949/* x <  y  ||  x != XXX_MAX   -->  x != XXX_MAX.  */
1950(simplify
1951 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
1952  @2)
1953
1954/* x >=  y  ||  x != XXX_MAX   -->  true
1955   x >=  y  ||  x == XXX_MAX   -->  x >= y.  */
1956(for eqne (eq ne)
1957 (simplify
1958  (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
1959   (switch
1960    (if (eqne == EQ_EXPR)
1961     @2)
1962    (if (eqne == NE_EXPR)
1963     { constant_boolean_node (true, type); }))))
1964
1965/* Convert (X == CST1) && (X OP2 CST2) to a known value
1966   based on CST1 OP2 CST2.  Similarly for (X != CST1).  */
1967
1968(for code1 (eq ne)
1969 (for code2 (eq ne lt gt le ge)
1970  (simplify
1971   (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
1972    (with
1973     {
1974      int cmp = tree_int_cst_compare (@1, @2);
1975      bool val;
1976      switch (code2)
1977	 {
1978	case EQ_EXPR: val = (cmp == 0); break;
1979	case NE_EXPR: val = (cmp != 0); break;
1980	case LT_EXPR: val = (cmp < 0); break;
1981	case GT_EXPR: val = (cmp > 0); break;
1982	case LE_EXPR: val = (cmp <= 0); break;
1983	case GE_EXPR: val = (cmp >= 0); break;
1984	default: gcc_unreachable ();
1985	}
1986     }
1987     (switch
1988      (if (code1 == EQ_EXPR && val) @3)
1989      (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
1990      (if (code1 == NE_EXPR && !val) @4))))))
1991
1992/* Convert (X OP1 CST1) && (X OP2 CST2).  */
1993
1994(for code1 (lt le gt ge)
1995 (for code2 (lt le gt ge)
1996  (simplify
1997  (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2))
1998   (with
1999    {
2000     int cmp = tree_int_cst_compare (@1, @2);
2001    }
2002    (switch
2003     /* Choose the more restrictive of two < or <= comparisons.  */
2004     (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2005	  && (code2 == LT_EXPR || code2 == LE_EXPR))
2006      (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2007       @3
2008       @4))
2009     /* Likewise chose the more restrictive of two > or >= comparisons.  */
2010     (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2011	  && (code2 == GT_EXPR || code2 == GE_EXPR))
2012      (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2013       @3
2014       @4))
2015     /* Check for singleton ranges.  */
2016     (if (cmp == 0
2017	  && ((code1 == LE_EXPR && code2 == GE_EXPR)
2018	    || (code1 == GE_EXPR && code2 == LE_EXPR)))
2019      (eq @0 @1))
2020     /* Check for disjoint ranges.  */
2021     (if (cmp <= 0
2022	  && (code1 == LT_EXPR || code1 == LE_EXPR)
2023	  && (code2 == GT_EXPR || code2 == GE_EXPR))
2024      { constant_boolean_node (false, type); })
2025     (if (cmp >= 0
2026	  && (code1 == GT_EXPR || code1 == GE_EXPR)
2027	  && (code2 == LT_EXPR || code2 == LE_EXPR))
2028      { constant_boolean_node (false, type); })
2029     )))))
2030
2031/* Convert (X == CST1) || (X OP2 CST2) to a known value
2032   based on CST1 OP2 CST2.  Similarly for (X != CST1).  */
2033
2034(for code1 (eq ne)
2035 (for code2 (eq ne lt gt le ge)
2036  (simplify
2037   (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2038    (with
2039     {
2040      int cmp = tree_int_cst_compare (@1, @2);
2041      bool val;
2042      switch (code2)
2043	{
2044	case EQ_EXPR: val = (cmp == 0); break;
2045	case NE_EXPR: val = (cmp != 0); break;
2046	case LT_EXPR: val = (cmp < 0); break;
2047	case GT_EXPR: val = (cmp > 0); break;
2048	case LE_EXPR: val = (cmp <= 0); break;
2049	case GE_EXPR: val = (cmp >= 0); break;
2050	default: gcc_unreachable ();
2051	}
2052     }
2053     (switch
2054      (if (code1 == EQ_EXPR && val) @4)
2055      (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); })
2056      (if (code1 == NE_EXPR && !val) @3))))))
2057
2058/* Convert (X OP1 CST1) || (X OP2 CST2).  */
2059
2060(for code1 (lt le gt ge)
2061 (for code2 (lt le gt ge)
2062  (simplify
2063  (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2064   (with
2065    {
2066     int cmp = tree_int_cst_compare (@1, @2);
2067    }
2068    (switch
2069     /* Choose the more restrictive of two < or <= comparisons.  */
2070     (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2071	  && (code2 == LT_EXPR || code2 == LE_EXPR))
2072      (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2073       @4
2074       @3))
2075     /* Likewise chose the more restrictive of two > or >= comparisons.  */
2076     (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2077	  && (code2 == GT_EXPR || code2 == GE_EXPR))
2078      (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2079       @4
2080       @3))
2081     /* Check for singleton ranges.  */
2082     (if (cmp == 0
2083	  && ((code1 == LT_EXPR && code2 == GT_EXPR)
2084	      || (code1 == GT_EXPR && code2 == LT_EXPR)))
2085      (ne @0 @2))
2086     /* Check for disjoint ranges.  */
2087     (if (cmp >= 0
2088	  && (code1 == LT_EXPR || code1 == LE_EXPR)
2089	  && (code2 == GT_EXPR || code2 == GE_EXPR))
2090      { constant_boolean_node (true, type); })
2091     (if (cmp <= 0
2092	  && (code1 == GT_EXPR || code1 == GE_EXPR)
2093	  && (code2 == LT_EXPR || code2 == LE_EXPR))
2094      { constant_boolean_node (true, type); })
2095     )))))
2096
2097/* We can't reassociate at all for saturating types.  */
2098(if (!TYPE_SATURATING (type))
2099
2100 /* Contract negates.  */
2101 /* A + (-B) -> A - B */
2102 (simplify
2103  (plus:c @0 (convert? (negate @1)))
2104  /* Apply STRIP_NOPS on the negate.  */
2105  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2106       && !TYPE_OVERFLOW_SANITIZED (type))
2107   (with
2108    {
2109     tree t1 = type;
2110     if (INTEGRAL_TYPE_P (type)
2111	 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2112       t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2113    }
2114    (convert (minus (convert:t1 @0) (convert:t1 @1))))))
2115 /* A - (-B) -> A + B */
2116 (simplify
2117  (minus @0 (convert? (negate @1)))
2118  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2119       && !TYPE_OVERFLOW_SANITIZED (type))
2120   (with
2121    {
2122     tree t1 = type;
2123     if (INTEGRAL_TYPE_P (type)
2124	 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2125       t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2126    }
2127    (convert (plus (convert:t1 @0) (convert:t1 @1))))))
2128 /* -(T)(-A) -> (T)A
2129    Sign-extension is ok except for INT_MIN, which thankfully cannot
2130    happen without overflow.  */
2131 (simplify
2132  (negate (convert (negate @1)))
2133  (if (INTEGRAL_TYPE_P (type)
2134       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
2135	   || (!TYPE_UNSIGNED (TREE_TYPE (@1))
2136	       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2137       && !TYPE_OVERFLOW_SANITIZED (type)
2138       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2139   (convert @1)))
2140 (simplify
2141  (negate (convert negate_expr_p@1))
2142  (if (SCALAR_FLOAT_TYPE_P (type)
2143       && ((DECIMAL_FLOAT_TYPE_P (type)
2144	    == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
2145	    && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
2146	   || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
2147   (convert (negate @1))))
2148 (simplify
2149  (negate (nop_convert? (negate @1)))
2150  (if (!TYPE_OVERFLOW_SANITIZED (type)
2151       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2152   (view_convert @1)))
2153
2154 /* We can't reassociate floating-point unless -fassociative-math
2155    or fixed-point plus or minus because of saturation to +-Inf.  */
2156 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
2157      && !FIXED_POINT_TYPE_P (type))
2158
2159  /* Match patterns that allow contracting a plus-minus pair
2160     irrespective of overflow issues.  */
2161  /* (A +- B) - A       ->  +- B */
2162  /* (A +- B) -+ B      ->  A */
2163  /* A - (A +- B)       -> -+ B */
2164  /* A +- (B -+ A)      ->  +- B */
2165  (simplify
2166   (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
2167   (view_convert @1))
2168  (simplify
2169   (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
2170   (if (!ANY_INTEGRAL_TYPE_P (type)
2171	|| TYPE_OVERFLOW_WRAPS (type))
2172   (negate (view_convert @1))
2173   (view_convert (negate @1))))
2174  (simplify
2175   (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
2176   (view_convert @0))
2177  (simplify
2178   (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
2179    (if (!ANY_INTEGRAL_TYPE_P (type)
2180	 || TYPE_OVERFLOW_WRAPS (type))
2181     (negate (view_convert @1))
2182     (view_convert (negate @1))))
2183  (simplify
2184   (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
2185   (view_convert @1))
2186  /* (A +- B) + (C - A)   -> C +- B */
2187  /* (A +  B) - (A - C)   -> B + C */
2188  /* More cases are handled with comparisons.  */
2189  (simplify
2190   (plus:c (plus:c @0 @1) (minus @2 @0))
2191   (plus @2 @1))
2192  (simplify
2193   (plus:c (minus @0 @1) (minus @2 @0))
2194   (minus @2 @1))
2195  (simplify
2196   (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
2197   (if (TYPE_OVERFLOW_UNDEFINED (type)
2198	&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
2199    (pointer_diff @2 @1)))
2200  (simplify
2201   (minus (plus:c @0 @1) (minus @0 @2))
2202   (plus @1 @2))
2203
2204  /* (A +- CST1) +- CST2 -> A + CST3
2205     Use view_convert because it is safe for vectors and equivalent for
2206     scalars.  */
2207  (for outer_op (plus minus)
2208   (for inner_op (plus minus)
2209	neg_inner_op (minus plus)
2210    (simplify
2211     (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
2212	       CONSTANT_CLASS_P@2)
2213     /* If one of the types wraps, use that one.  */
2214     (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2215      /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2216	 forever if something doesn't simplify into a constant.  */
2217      (if (!CONSTANT_CLASS_P (@0))
2218       (if (outer_op == PLUS_EXPR)
2219	(plus (view_convert @0) (inner_op @2 (view_convert @1)))
2220	(minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
2221      (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2222	   || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2223       (if (outer_op == PLUS_EXPR)
2224	(view_convert (plus @0 (inner_op (view_convert @2) @1)))
2225	(view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
2226       /* If the constant operation overflows we cannot do the transform
2227	  directly as we would introduce undefined overflow, for example
2228	  with (a - 1) + INT_MIN.  */
2229       (if (types_match (type, @0))
2230	(with { tree cst = const_binop (outer_op == inner_op
2231					? PLUS_EXPR : MINUS_EXPR,
2232					type, @1, @2); }
2233	 (if (cst && !TREE_OVERFLOW (cst))
2234	  (inner_op @0 { cst; } )
2235	  /* X+INT_MAX+1 is X-INT_MIN.  */
2236	  (if (INTEGRAL_TYPE_P (type) && cst
2237	       && wi::to_wide (cst) == wi::min_value (type))
2238	   (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
2239	   /* Last resort, use some unsigned type.  */
2240	   (with { tree utype = unsigned_type_for (type); }
2241	    (if (utype)
2242	     (view_convert (inner_op
2243			    (view_convert:utype @0)
2244			    (view_convert:utype
2245			     { drop_tree_overflow (cst); }))))))))))))))
2246
2247  /* (CST1 - A) +- CST2 -> CST3 - A  */
2248  (for outer_op (plus minus)
2249   (simplify
2250    (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
2251    /* If one of the types wraps, use that one.  */
2252    (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2253     /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2254	forever if something doesn't simplify into a constant.  */
2255     (if (!CONSTANT_CLASS_P (@0))
2256      (minus (outer_op (view_convert @1) @2) (view_convert @0)))
2257     (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2258	  || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2259      (view_convert (minus (outer_op @1 (view_convert @2)) @0))
2260      (if (types_match (type, @0))
2261       (with { tree cst = const_binop (outer_op, type, @1, @2); }
2262	(if (cst && !TREE_OVERFLOW (cst))
2263	 (minus { cst; } @0))))))))
2264
2265  /* CST1 - (CST2 - A) -> CST3 + A
2266     Use view_convert because it is safe for vectors and equivalent for
2267     scalars.  */
2268  (simplify
2269   (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
2270   /* If one of the types wraps, use that one.  */
2271   (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2272    /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2273      forever if something doesn't simplify into a constant.  */
2274    (if (!CONSTANT_CLASS_P (@0))
2275     (plus (view_convert @0) (minus @1 (view_convert @2))))
2276    (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2277	 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2278     (view_convert (plus @0 (minus (view_convert @1) @2)))
2279     (if (types_match (type, @0))
2280      (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
2281       (if (cst && !TREE_OVERFLOW (cst))
2282	(plus { cst; } @0)))))))
2283
2284/* ((T)(A)) + CST -> (T)(A + CST)  */
2285#if GIMPLE
2286  (simplify
2287   (plus (convert SSA_NAME@0) INTEGER_CST@1)
2288    (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2289         && TREE_CODE (type) == INTEGER_TYPE
2290         && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2291         && int_fits_type_p (@1, TREE_TYPE (@0)))
2292     /* Perform binary operation inside the cast if the constant fits
2293        and (A + CST)'s range does not overflow.  */
2294     (with
2295      {
2296	wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
2297			  max_ovf = wi::OVF_OVERFLOW;
2298        tree inner_type = TREE_TYPE (@0);
2299
2300	wide_int w1
2301	  = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
2302			    TYPE_SIGN (inner_type));
2303
2304        wide_int wmin0, wmax0;
2305        if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
2306          {
2307            wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
2308            wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
2309          }
2310      }
2311     (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
2312      (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
2313     )))
2314#endif
2315
2316/* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2  */
2317#if GIMPLE
2318  (for op (plus minus)
2319   (simplify
2320    (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2)
2321     (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2322	  && TREE_CODE (type) == INTEGER_TYPE
2323	  && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2324	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2325	  && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
2326	  && TYPE_OVERFLOW_WRAPS (type))
2327       (plus (convert @0) (op @2 (convert @1))))))
2328#endif
2329
2330  /* ~A + A -> -1 */
2331  (simplify
2332   (plus:c (bit_not @0) @0)
2333   (if (!TYPE_OVERFLOW_TRAPS (type))
2334    { build_all_ones_cst (type); }))
2335
2336  /* ~A + 1 -> -A */
2337  (simplify
2338   (plus (convert? (bit_not @0)) integer_each_onep)
2339   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2340    (negate (convert @0))))
2341
2342  /* -A - 1 -> ~A */
2343  (simplify
2344   (minus (convert? (negate @0)) integer_each_onep)
2345   (if (!TYPE_OVERFLOW_TRAPS (type)
2346	&& tree_nop_conversion_p (type, TREE_TYPE (@0)))
2347    (bit_not (convert @0))))
2348
2349  /* -1 - A -> ~A */
2350  (simplify
2351   (minus integer_all_onesp @0)
2352   (bit_not @0))
2353
2354  /* (T)(P + A) - (T)P -> (T) A */
2355  (simplify
2356   (minus (convert (plus:c @@0 @1))
2357    (convert? @0))
2358   (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2359	/* For integer types, if A has a smaller type
2360	   than T the result depends on the possible
2361	   overflow in P + A.
2362	   E.g. T=size_t, A=(unsigned)429497295, P>0.
2363	   However, if an overflow in P + A would cause
2364	   undefined behavior, we can assume that there
2365	   is no overflow.  */
2366	|| (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2367	    && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2368    (convert @1)))
2369  (simplify
2370   (minus (convert (pointer_plus @@0 @1))
2371    (convert @0))
2372   (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2373	/* For pointer types, if the conversion of A to the
2374	   final type requires a sign- or zero-extension,
2375	   then we have to punt - it is not defined which
2376	   one is correct.  */
2377	|| (POINTER_TYPE_P (TREE_TYPE (@0))
2378	    && TREE_CODE (@1) == INTEGER_CST
2379	    && tree_int_cst_sign_bit (@1) == 0))
2380    (convert @1)))
2381   (simplify
2382    (pointer_diff (pointer_plus @@0 @1) @0)
2383    /* The second argument of pointer_plus must be interpreted as signed, and
2384       thus sign-extended if necessary.  */
2385    (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2386     /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2387	second arg is unsigned even when we need to consider it as signed,
2388	we don't want to diagnose overflow here.  */
2389     (convert (view_convert:stype @1))))
2390
2391  /* (T)P - (T)(P + A) -> -(T) A */
2392  (simplify
2393   (minus (convert? @0)
2394    (convert (plus:c @@0 @1)))
2395   (if (INTEGRAL_TYPE_P (type)
2396	&& TYPE_OVERFLOW_UNDEFINED (type)
2397	&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
2398    (with { tree utype = unsigned_type_for (type); }
2399     (convert (negate (convert:utype @1))))
2400    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2401	 /* For integer types, if A has a smaller type
2402	    than T the result depends on the possible
2403	    overflow in P + A.
2404	    E.g. T=size_t, A=(unsigned)429497295, P>0.
2405	    However, if an overflow in P + A would cause
2406	    undefined behavior, we can assume that there
2407	    is no overflow.  */
2408	 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2409	     && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2410     (negate (convert @1)))))
2411  (simplify
2412   (minus (convert @0)
2413    (convert (pointer_plus @@0 @1)))
2414   (if (INTEGRAL_TYPE_P (type)
2415	&& TYPE_OVERFLOW_UNDEFINED (type)
2416	&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
2417    (with { tree utype = unsigned_type_for (type); }
2418     (convert (negate (convert:utype @1))))
2419    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2420	 /* For pointer types, if the conversion of A to the
2421	    final type requires a sign- or zero-extension,
2422	    then we have to punt - it is not defined which
2423	    one is correct.  */
2424	 || (POINTER_TYPE_P (TREE_TYPE (@0))
2425	     && TREE_CODE (@1) == INTEGER_CST
2426	     && tree_int_cst_sign_bit (@1) == 0))
2427     (negate (convert @1)))))
2428   (simplify
2429    (pointer_diff @0 (pointer_plus @@0 @1))
2430    /* The second argument of pointer_plus must be interpreted as signed, and
2431       thus sign-extended if necessary.  */
2432    (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2433     /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2434	second arg is unsigned even when we need to consider it as signed,
2435	we don't want to diagnose overflow here.  */
2436     (negate (convert (view_convert:stype @1)))))
2437
2438  /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2439  (simplify
2440   (minus (convert (plus:c @@0 @1))
2441    (convert (plus:c @0 @2)))
2442   (if (INTEGRAL_TYPE_P (type)
2443	&& TYPE_OVERFLOW_UNDEFINED (type)
2444	&& element_precision (type) <= element_precision (TREE_TYPE (@1))
2445	&& element_precision (type) <= element_precision (TREE_TYPE (@2)))
2446    (with { tree utype = unsigned_type_for (type); }
2447     (convert (minus (convert:utype @1) (convert:utype @2))))
2448    (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2449	  == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2450	 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2451	     /* For integer types, if A has a smaller type
2452		than T the result depends on the possible
2453		overflow in P + A.
2454		E.g. T=size_t, A=(unsigned)429497295, P>0.
2455		However, if an overflow in P + A would cause
2456		undefined behavior, we can assume that there
2457		is no overflow.  */
2458	     || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2459		 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2460		 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2461		 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2462     (minus (convert @1) (convert @2)))))
2463  (simplify
2464   (minus (convert (pointer_plus @@0 @1))
2465    (convert (pointer_plus @0 @2)))
2466   (if (INTEGRAL_TYPE_P (type)
2467	&& TYPE_OVERFLOW_UNDEFINED (type)
2468	&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
2469    (with { tree utype = unsigned_type_for (type); }
2470     (convert (minus (convert:utype @1) (convert:utype @2))))
2471    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2472	 /* For pointer types, if the conversion of A to the
2473	    final type requires a sign- or zero-extension,
2474	    then we have to punt - it is not defined which
2475	    one is correct.  */
2476	 || (POINTER_TYPE_P (TREE_TYPE (@0))
2477	     && TREE_CODE (@1) == INTEGER_CST
2478	     && tree_int_cst_sign_bit (@1) == 0
2479	     && TREE_CODE (@2) == INTEGER_CST
2480	     && tree_int_cst_sign_bit (@2) == 0))
2481     (minus (convert @1) (convert @2)))))
2482   (simplify
2483    (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2484    /* The second argument of pointer_plus must be interpreted as signed, and
2485       thus sign-extended if necessary.  */
2486    (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2487     /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2488	second arg is unsigned even when we need to consider it as signed,
2489	we don't want to diagnose overflow here.  */
2490     (minus (convert (view_convert:stype @1))
2491	    (convert (view_convert:stype @2)))))))
2492
2493/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2494    Modeled after fold_plusminus_mult_expr.  */
2495(if (!TYPE_SATURATING (type)
2496     && (!FLOAT_TYPE_P (type) || flag_associative_math))
2497 (for plusminus (plus minus)
2498  (simplify
2499   (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2500   (if ((!ANY_INTEGRAL_TYPE_P (type)
2501	 || TYPE_OVERFLOW_WRAPS (type)
2502	 || (INTEGRAL_TYPE_P (type)
2503	     && tree_expr_nonzero_p (@0)
2504	     && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2505	/* If @1 +- @2 is constant require a hard single-use on either
2506	   original operand (but not on both).  */
2507	&& (single_use (@3) || single_use (@4)))
2508    (mult (plusminus @1 @2) @0)))
2509  /* We cannot generate constant 1 for fract.  */
2510  (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2511   (simplify
2512    (plusminus @0 (mult:c@3 @0 @2))
2513    (if ((!ANY_INTEGRAL_TYPE_P (type)
2514	  || TYPE_OVERFLOW_WRAPS (type)
2515	  /* For @0 + @0*@2 this transformation would introduce UB
2516	     (where there was none before) for @0 in [-1,0] and @2 max.
2517	     For @0 - @0*@2 this transformation would introduce UB
2518	     for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1.  */
2519	  || (INTEGRAL_TYPE_P (type)
2520	      && ((tree_expr_nonzero_p (@0)
2521		   && expr_not_equal_to (@0,
2522				wi::minus_one (TYPE_PRECISION (type))))
2523		  || (plusminus == PLUS_EXPR
2524		      ? expr_not_equal_to (@2,
2525			    wi::max_value (TYPE_PRECISION (type), SIGNED))
2526		      /* Let's ignore the @0 -1 and @2 min case.  */
2527		      : (expr_not_equal_to (@2,
2528			    wi::min_value (TYPE_PRECISION (type), SIGNED))
2529			 && expr_not_equal_to (@2,
2530				wi::min_value (TYPE_PRECISION (type), SIGNED)
2531				+ 1))))))
2532	 && single_use (@3))
2533     (mult (plusminus { build_one_cst (type); } @2) @0)))
2534   (simplify
2535    (plusminus (mult:c@3 @0 @2) @0)
2536    (if ((!ANY_INTEGRAL_TYPE_P (type)
2537	  || TYPE_OVERFLOW_WRAPS (type)
2538	  /* For @0*@2 + @0 this transformation would introduce UB
2539	     (where there was none before) for @0 in [-1,0] and @2 max.
2540	     For @0*@2 - @0 this transformation would introduce UB
2541	     for @0 0 and @2 min.  */
2542	  || (INTEGRAL_TYPE_P (type)
2543	      && ((tree_expr_nonzero_p (@0)
2544		   && (plusminus == MINUS_EXPR
2545		       || expr_not_equal_to (@0,
2546				wi::minus_one (TYPE_PRECISION (type)))))
2547		  || expr_not_equal_to (@2,
2548			(plusminus == PLUS_EXPR
2549			 ? wi::max_value (TYPE_PRECISION (type), SIGNED)
2550			 : wi::min_value (TYPE_PRECISION (type), SIGNED))))))
2551	 && single_use (@3))
2552     (mult (plusminus @2 { build_one_cst (type); }) @0))))))
2553
2554/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax().  */
2555
2556(for minmax (min max FMIN_ALL FMAX_ALL)
2557 (simplify
2558  (minmax @0 @0)
2559  @0))
2560/* min(max(x,y),y) -> y.  */
2561(simplify
2562 (min:c (max:c @0 @1) @1)
2563 @1)
2564/* max(min(x,y),y) -> y.  */
2565(simplify
2566 (max:c (min:c @0 @1) @1)
2567 @1)
2568/* max(a,-a) -> abs(a).  */
2569(simplify
2570 (max:c @0 (negate @0))
2571 (if (TREE_CODE (type) != COMPLEX_TYPE
2572      && (! ANY_INTEGRAL_TYPE_P (type)
2573	  || TYPE_OVERFLOW_UNDEFINED (type)))
2574  (abs @0)))
2575/* min(a,-a) -> -abs(a).  */
2576(simplify
2577 (min:c @0 (negate @0))
2578 (if (TREE_CODE (type) != COMPLEX_TYPE
2579      && (! ANY_INTEGRAL_TYPE_P (type)
2580	  || TYPE_OVERFLOW_UNDEFINED (type)))
2581  (negate (abs @0))))
2582(simplify
2583 (min @0 @1)
2584 (switch
2585  (if (INTEGRAL_TYPE_P (type)
2586       && TYPE_MIN_VALUE (type)
2587       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2588   @1)
2589  (if (INTEGRAL_TYPE_P (type)
2590       && TYPE_MAX_VALUE (type)
2591       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2592   @0)))
2593(simplify
2594 (max @0 @1)
2595 (switch
2596  (if (INTEGRAL_TYPE_P (type)
2597       && TYPE_MAX_VALUE (type)
2598       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2599   @1)
2600  (if (INTEGRAL_TYPE_P (type)
2601       && TYPE_MIN_VALUE (type)
2602       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2603   @0)))
2604
2605/* max (a, a + CST) -> a + CST where CST is positive.  */
2606/* max (a, a + CST) -> a where CST is negative.  */
2607(simplify
2608 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2609  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2610   (if (tree_int_cst_sgn (@1) > 0)
2611    @2
2612    @0)))
2613
2614/* min (a, a + CST) -> a where CST is positive.  */
2615/* min (a, a + CST) -> a + CST where CST is negative. */
2616(simplify
2617 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2618  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2619   (if (tree_int_cst_sgn (@1) > 0)
2620    @0
2621    @2)))
2622
2623/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
2624   and the outer convert demotes the expression back to x's type.  */
2625(for minmax (min max)
2626 (simplify
2627  (convert (minmax@0 (convert @1) INTEGER_CST@2))
2628  (if (INTEGRAL_TYPE_P (type)
2629       && types_match (@1, type) && int_fits_type_p (@2, type)
2630       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2631       && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2632   (minmax @1 (convert @2)))))
2633
2634(for minmax (FMIN_ALL FMAX_ALL)
2635 /* If either argument is NaN, return the other one.  Avoid the
2636    transformation if we get (and honor) a signalling NaN.  */
2637 (simplify
2638  (minmax:c @0 REAL_CST@1)
2639  (if (real_isnan (TREE_REAL_CST_PTR (@1))
2640       && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2641   @0)))
2642/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR.  C99 requires these
2643   functions to return the numeric arg if the other one is NaN.
2644   MIN and MAX don't honor that, so only transform if -ffinite-math-only
2645   is set.  C99 doesn't require -0.0 to be handled, so we don't have to
2646   worry about it either.  */
2647(if (flag_finite_math_only)
2648 (simplify
2649  (FMIN_ALL @0 @1)
2650  (min @0 @1))
2651 (simplify
2652  (FMAX_ALL @0 @1)
2653  (max @0 @1)))
2654/* min (-A, -B) -> -max (A, B)  */
2655(for minmax (min max FMIN_ALL FMAX_ALL)
2656     maxmin (max min FMAX_ALL FMIN_ALL)
2657 (simplify
2658  (minmax (negate:s@2 @0) (negate:s@3 @1))
2659  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2660       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2661           && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2662   (negate (maxmin @0 @1)))))
2663/* MIN (~X, ~Y) -> ~MAX (X, Y)
2664   MAX (~X, ~Y) -> ~MIN (X, Y)  */
2665(for minmax (min max)
2666 maxmin (max min)
2667 (simplify
2668  (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2669  (bit_not (maxmin @0 @1))))
2670
2671/* MIN (X, Y) == X -> X <= Y  */
2672(for minmax (min min max max)
2673     cmp    (eq  ne  eq  ne )
2674     out    (le  gt  ge  lt )
2675 (simplify
2676  (cmp:c (minmax:c @0 @1) @0)
2677  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2678   (out @0 @1))))
2679/* MIN (X, 5) == 0 -> X == 0
2680   MIN (X, 5) == 7 -> false  */
2681(for cmp (eq ne)
2682 (simplify
2683  (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2684  (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2685		 TYPE_SIGN (TREE_TYPE (@0))))
2686   { constant_boolean_node (cmp == NE_EXPR, type); }
2687   (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2688		  TYPE_SIGN (TREE_TYPE (@0))))
2689    (cmp @0 @2)))))
2690(for cmp (eq ne)
2691 (simplify
2692  (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2693  (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2694		 TYPE_SIGN (TREE_TYPE (@0))))
2695   { constant_boolean_node (cmp == NE_EXPR, type); }
2696   (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2697		  TYPE_SIGN (TREE_TYPE (@0))))
2698    (cmp @0 @2)))))
2699/* MIN (X, C1) < C2 -> X < C2 || C1 < C2  */
2700(for minmax (min     min     max     max     min     min     max     max    )
2701     cmp    (lt      le      gt      ge      gt      ge      lt      le     )
2702     comb   (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2703 (simplify
2704  (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2705  (comb (cmp @0 @2) (cmp @1 @2))))
2706
2707/* Undo fancy way of writing max/min or other ?: expressions,
2708   like a - ((a - b) & -(a < b)), in this case into (a < b) ? b : a.
2709   People normally use ?: and that is what we actually try to optimize.  */
2710(for cmp (simple_comparison)
2711 (simplify
2712  (minus @0 (bit_and:c (minus @0 @1)
2713		       (convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2714  (if (INTEGRAL_TYPE_P (type)
2715       && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2716       && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2717       && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2718       && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2719	   || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2720       && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2721   (cond (cmp @2 @3) @1 @0)))
2722 (simplify
2723  (plus:c @0 (bit_and:c (minus @1 @0)
2724			(convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2725  (if (INTEGRAL_TYPE_P (type)
2726       && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2727       && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2728       && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2729       && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2730	   || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2731       && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2732   (cond (cmp @2 @3) @1 @0))))
2733
2734/* Simplifications of shift and rotates.  */
2735
2736(for rotate (lrotate rrotate)
2737 (simplify
2738  (rotate integer_all_onesp@0 @1)
2739  @0))
2740
2741/* Optimize -1 >> x for arithmetic right shifts.  */
2742(simplify
2743 (rshift integer_all_onesp@0 @1)
2744 (if (!TYPE_UNSIGNED (type)
2745      && tree_expr_nonnegative_p (@1))
2746  @0))
2747
2748/* Optimize (x >> c) << c into x & (-1<<c).  */
2749(simplify
2750 (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1)
2751 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2752  /* It doesn't matter if the right shift is arithmetic or logical.  */
2753  (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1))))
2754
2755(simplify
2756 (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1)
2757 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))
2758      /* Allow intermediate conversion to integral type with whatever sign, as
2759	 long as the low TYPE_PRECISION (type)
2760	 - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved.  */
2761      && INTEGRAL_TYPE_P (type)
2762      && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2763      && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2764      && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
2765      && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type)
2766	  || wi::geu_p (wi::to_wide (@1),
2767			TYPE_PRECISION (type)
2768			- TYPE_PRECISION (TREE_TYPE (@2)))))
2769  (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1))))
2770
2771/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2772   types.  */
2773(simplify
2774 (rshift (lshift @0 INTEGER_CST@1) @1)
2775 (if (TYPE_UNSIGNED (type)
2776      && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2777  (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2778
2779(for shiftrotate (lrotate rrotate lshift rshift)
2780 (simplify
2781  (shiftrotate @0 integer_zerop)
2782  (non_lvalue @0))
2783 (simplify
2784  (shiftrotate integer_zerop@0 @1)
2785  @0)
2786 /* Prefer vector1 << scalar to vector1 << vector2
2787    if vector2 is uniform.  */
2788 (for vec (VECTOR_CST CONSTRUCTOR)
2789  (simplify
2790   (shiftrotate @0 vec@1)
2791   (with { tree tem = uniform_vector_p (@1); }
2792    (if (tem)
2793     (shiftrotate @0 { tem; }))))))
2794
2795/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
2796   Y is 0.  Similarly for X >> Y.  */
2797#if GIMPLE
2798(for shift (lshift rshift)
2799 (simplify
2800  (shift @0 SSA_NAME@1)
2801   (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2802    (with {
2803      int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2804      int prec = TYPE_PRECISION (TREE_TYPE (@1));
2805     }
2806     (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2807      @0)))))
2808#endif
2809
2810/* Rewrite an LROTATE_EXPR by a constant into an
2811   RROTATE_EXPR by a new constant.  */
2812(simplify
2813 (lrotate @0 INTEGER_CST@1)
2814 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2815			    build_int_cst (TREE_TYPE (@1),
2816					   element_precision (type)), @1); }))
2817
2818/* Turn (a OP c1) OP c2 into a OP (c1+c2).  */
2819(for op (lrotate rrotate rshift lshift)
2820 (simplify
2821  (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2822  (with { unsigned int prec = element_precision (type); }
2823   (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2824        && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2825        && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2826	&& wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2827    (with { unsigned int low = (tree_to_uhwi (@1)
2828				+ tree_to_uhwi (@2)); }
2829     /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2830        being well defined.  */
2831     (if (low >= prec)
2832      (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2833       (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2834       (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2835        { build_zero_cst (type); }
2836        (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2837      (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
2838
2839
2840/* ((1 << A) & 1) != 0 -> A == 0
2841   ((1 << A) & 1) == 0 -> A != 0 */
2842(for cmp (ne eq)
2843     icmp (eq ne)
2844 (simplify
2845  (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2846  (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2847
2848/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2849   (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2850   if CST2 != 0.  */
2851(for cmp (ne eq)
2852 (simplify
2853  (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2854  (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2855   (if (cand < 0
2856	|| (!integer_zerop (@2)
2857	    && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2858    { constant_boolean_node (cmp == NE_EXPR, type); }
2859    (if (!integer_zerop (@2)
2860	 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2861     (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
2862
2863/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2864        (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2865   if the new mask might be further optimized.  */
2866(for shift (lshift rshift)
2867 (simplify
2868  (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2869           INTEGER_CST@2)
2870   (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2871	&& TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2872	&& tree_fits_uhwi_p (@1)
2873	&& tree_to_uhwi (@1) > 0
2874	&& tree_to_uhwi (@1) < TYPE_PRECISION (type))
2875    (with
2876     {
2877       unsigned int shiftc = tree_to_uhwi (@1);
2878       unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2879       unsigned HOST_WIDE_INT newmask, zerobits = 0;
2880       tree shift_type = TREE_TYPE (@3);
2881       unsigned int prec;
2882
2883       if (shift == LSHIFT_EXPR)
2884	 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2885       else if (shift == RSHIFT_EXPR
2886		&& type_has_mode_precision_p (shift_type))
2887	 {
2888	   prec = TYPE_PRECISION (TREE_TYPE (@3));
2889	   tree arg00 = @0;
2890	   /* See if more bits can be proven as zero because of
2891	      zero extension.  */
2892	   if (@3 != @0
2893	       && TYPE_UNSIGNED (TREE_TYPE (@0)))
2894	     {
2895	       tree inner_type = TREE_TYPE (@0);
2896	       if (type_has_mode_precision_p (inner_type)
2897		   && TYPE_PRECISION (inner_type) < prec)
2898		 {
2899		   prec = TYPE_PRECISION (inner_type);
2900		   /* See if we can shorten the right shift.  */
2901		   if (shiftc < prec)
2902		     shift_type = inner_type;
2903		   /* Otherwise X >> C1 is all zeros, so we'll optimize
2904		      it into (X, 0) later on by making sure zerobits
2905		      is all ones.  */
2906		 }
2907	     }
2908	   zerobits = HOST_WIDE_INT_M1U;
2909	   if (shiftc < prec)
2910	     {
2911	       zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2912	       zerobits <<= prec - shiftc;
2913	     }
2914	   /* For arithmetic shift if sign bit could be set, zerobits
2915	      can contain actually sign bits, so no transformation is
2916	      possible, unless MASK masks them all away.  In that
2917	      case the shift needs to be converted into logical shift.  */
2918	   if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2919	       && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2920	     {
2921	       if ((mask & zerobits) == 0)
2922		 shift_type = unsigned_type_for (TREE_TYPE (@3));
2923	       else
2924		 zerobits = 0;
2925	     }
2926	 }
2927     }
2928     /* ((X << 16) & 0xff00) is (X, 0).  */
2929     (if ((mask & zerobits) == mask)
2930      { build_int_cst (type, 0); }
2931      (with { newmask = mask | zerobits; }
2932       (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2933        (with
2934	 {
2935	   /* Only do the transformation if NEWMASK is some integer
2936	      mode's mask.  */
2937	   for (prec = BITS_PER_UNIT;
2938	        prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2939	     if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2940	       break;
2941	 }
2942	 (if (prec < HOST_BITS_PER_WIDE_INT
2943	      || newmask == HOST_WIDE_INT_M1U)
2944	  (with
2945	   { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2946	   (if (!tree_int_cst_equal (newmaskt, @2))
2947	    (if (shift_type != TREE_TYPE (@3))
2948	     (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2949	     (bit_and @4 { newmaskt; })))))))))))))
2950
2951/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2952   (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1).  */
2953(for shift (lshift rshift)
2954 (for bit_op (bit_and bit_xor bit_ior)
2955  (simplify
2956   (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2957   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2958    (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2959     (bit_op (shift (convert @0) @1) { mask; }))))))
2960
2961/* ~(~X >> Y) -> X >> Y (for arithmetic shift).  */
2962(simplify
2963 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2964  (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2965       && (element_precision (TREE_TYPE (@0))
2966	   <= element_precision (TREE_TYPE (@1))
2967	   || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2968   (with
2969    { tree shift_type = TREE_TYPE (@0); }
2970     (convert (rshift (convert:shift_type @1) @2)))))
2971
2972/* ~(~X >>r Y) -> X >>r Y
2973   ~(~X <<r Y) -> X <<r Y */
2974(for rotate (lrotate rrotate)
2975 (simplify
2976  (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2977   (if ((element_precision (TREE_TYPE (@0))
2978	 <= element_precision (TREE_TYPE (@1))
2979	 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2980        && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2981	    || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2982    (with
2983     { tree rotate_type = TREE_TYPE (@0); }
2984      (convert (rotate (convert:rotate_type @1) @2))))))
2985
2986/* Simplifications of conversions.  */
2987
2988/* Basic strip-useless-type-conversions / strip_nops.  */
2989(for cvt (convert view_convert float fix_trunc)
2990 (simplify
2991  (cvt @0)
2992  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2993       || (GENERIC && type == TREE_TYPE (@0)))
2994   @0)))
2995
2996/* Contract view-conversions.  */
2997(simplify
2998  (view_convert (view_convert @0))
2999  (view_convert @0))
3000
3001/* For integral conversions with the same precision or pointer
3002   conversions use a NOP_EXPR instead.  */
3003(simplify
3004  (view_convert @0)
3005  (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3006       && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3007       && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
3008   (convert @0)))
3009
3010/* Strip inner integral conversions that do not change precision or size, or
3011   zero-extend while keeping the same size (for bool-to-char).  */
3012(simplify
3013  (view_convert (convert@0 @1))
3014  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3015       && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3016       && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
3017       && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
3018	   || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
3019	       && TYPE_UNSIGNED (TREE_TYPE (@1)))))
3020   (view_convert @1)))
3021
3022/* Simplify a view-converted empty constructor.  */
3023(simplify
3024  (view_convert CONSTRUCTOR@0)
3025  (if (TREE_CODE (@0) != SSA_NAME
3026       && CONSTRUCTOR_NELTS (@0) == 0)
3027   { build_zero_cst (type); }))
3028
3029/* Re-association barriers around constants and other re-association
3030   barriers can be removed.  */
3031(simplify
3032 (paren CONSTANT_CLASS_P@0)
3033 @0)
3034(simplify
3035 (paren (paren@1 @0))
3036 @1)
3037
3038/* Handle cases of two conversions in a row.  */
3039(for ocvt (convert float fix_trunc)
3040 (for icvt (convert float)
3041  (simplify
3042   (ocvt (icvt@1 @0))
3043   (with
3044    {
3045      tree inside_type = TREE_TYPE (@0);
3046      tree inter_type = TREE_TYPE (@1);
3047      int inside_int = INTEGRAL_TYPE_P (inside_type);
3048      int inside_ptr = POINTER_TYPE_P (inside_type);
3049      int inside_float = FLOAT_TYPE_P (inside_type);
3050      int inside_vec = VECTOR_TYPE_P (inside_type);
3051      unsigned int inside_prec = TYPE_PRECISION (inside_type);
3052      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
3053      int inter_int = INTEGRAL_TYPE_P (inter_type);
3054      int inter_ptr = POINTER_TYPE_P (inter_type);
3055      int inter_float = FLOAT_TYPE_P (inter_type);
3056      int inter_vec = VECTOR_TYPE_P (inter_type);
3057      unsigned int inter_prec = TYPE_PRECISION (inter_type);
3058      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
3059      int final_int = INTEGRAL_TYPE_P (type);
3060      int final_ptr = POINTER_TYPE_P (type);
3061      int final_float = FLOAT_TYPE_P (type);
3062      int final_vec = VECTOR_TYPE_P (type);
3063      unsigned int final_prec = TYPE_PRECISION (type);
3064      int final_unsignedp = TYPE_UNSIGNED (type);
3065    }
3066   (switch
3067    /* In addition to the cases of two conversions in a row
3068       handled below, if we are converting something to its own
3069       type via an object of identical or wider precision, neither
3070       conversion is needed.  */
3071    (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
3072	  || (GENERIC
3073	      && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
3074	 && (((inter_int || inter_ptr) && final_int)
3075	     || (inter_float && final_float))
3076	 && inter_prec >= final_prec)
3077     (ocvt @0))
3078
3079    /* Likewise, if the intermediate and initial types are either both
3080       float or both integer, we don't need the middle conversion if the
3081       former is wider than the latter and doesn't change the signedness
3082       (for integers).  Avoid this if the final type is a pointer since
3083       then we sometimes need the middle conversion.  */
3084    (if (((inter_int && inside_int) || (inter_float && inside_float))
3085	 && (final_int || final_float)
3086	 && inter_prec >= inside_prec
3087	 && (inter_float || inter_unsignedp == inside_unsignedp))
3088     (ocvt @0))
3089
3090    /* If we have a sign-extension of a zero-extended value, we can
3091       replace that by a single zero-extension.  Likewise if the
3092       final conversion does not change precision we can drop the
3093       intermediate conversion.  */
3094    (if (inside_int && inter_int && final_int
3095	 && ((inside_prec < inter_prec && inter_prec < final_prec
3096	      && inside_unsignedp && !inter_unsignedp)
3097	     || final_prec == inter_prec))
3098     (ocvt @0))
3099
3100    /* Two conversions in a row are not needed unless:
3101	- some conversion is floating-point (overstrict for now), or
3102	- some conversion is a vector (overstrict for now), or
3103	- the intermediate type is narrower than both initial and
3104	  final, or
3105	- the intermediate type and innermost type differ in signedness,
3106	  and the outermost type is wider than the intermediate, or
3107	- the initial type is a pointer type and the precisions of the
3108	  intermediate and final types differ, or
3109	- the final type is a pointer type and the precisions of the
3110	  initial and intermediate types differ.  */
3111    (if (! inside_float && ! inter_float && ! final_float
3112	 && ! inside_vec && ! inter_vec && ! final_vec
3113	 && (inter_prec >= inside_prec || inter_prec >= final_prec)
3114	 && ! (inside_int && inter_int
3115	       && inter_unsignedp != inside_unsignedp
3116	       && inter_prec < final_prec)
3117	 && ((inter_unsignedp && inter_prec > inside_prec)
3118	     == (final_unsignedp && final_prec > inter_prec))
3119	 && ! (inside_ptr && inter_prec != final_prec)
3120	 && ! (final_ptr && inside_prec != inter_prec))
3121     (ocvt @0))
3122
3123    /* A truncation to an unsigned type (a zero-extension) should be
3124       canonicalized as bitwise and of a mask.  */
3125    (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion.  */
3126	 && final_int && inter_int && inside_int
3127	 && final_prec == inside_prec
3128	 && final_prec > inter_prec
3129	 && inter_unsignedp)
3130     (convert (bit_and @0 { wide_int_to_tree
3131	                      (inside_type,
3132			       wi::mask (inter_prec, false,
3133					 TYPE_PRECISION (inside_type))); })))
3134
3135    /* If we are converting an integer to a floating-point that can
3136       represent it exactly and back to an integer, we can skip the
3137       floating-point conversion.  */
3138    (if (GIMPLE /* PR66211 */
3139	 && inside_int && inter_float && final_int &&
3140	 (unsigned) significand_size (TYPE_MODE (inter_type))
3141	 >= inside_prec - !inside_unsignedp)
3142     (convert @0)))))))
3143
3144/* If we have a narrowing conversion to an integral type that is fed by a
3145   BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
3146   masks off bits outside the final type (and nothing else).  */
3147(simplify
3148  (convert (bit_and @0 INTEGER_CST@1))
3149  (if (INTEGRAL_TYPE_P (type)
3150       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3151       && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
3152       && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
3153						    TYPE_PRECISION (type)), 0))
3154   (convert @0)))
3155
3156
3157/* (X /[ex] A) * A -> X.  */
3158(simplify
3159  (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
3160  (convert @0))
3161
3162/* Simplify (A / B) * B + (A % B) -> A.  */
3163(for div (trunc_div ceil_div floor_div round_div)
3164     mod (trunc_mod ceil_mod floor_mod round_mod)
3165  (simplify
3166   (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
3167   @0))
3168
3169/* ((X /[ex] A) +- B) * A  -->  X +- A * B.  */
3170(for op (plus minus)
3171 (simplify
3172  (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
3173  (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
3174       && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
3175   (with
3176     {
3177       wi::overflow_type overflow;
3178       wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
3179			       TYPE_SIGN (type), &overflow);
3180     }
3181     (if (types_match (type, TREE_TYPE (@2))
3182 	 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
3183      (op @0 { wide_int_to_tree (type, mul); })
3184      (with { tree utype = unsigned_type_for (type); }
3185       (convert (op (convert:utype @0)
3186		    (mult (convert:utype @1) (convert:utype @2))))))))))
3187
3188/* Canonicalization of binary operations.  */
3189
3190/* Convert X + -C into X - C.  */
3191(simplify
3192 (plus @0 REAL_CST@1)
3193 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3194  (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
3195   (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
3196    (minus @0 { tem; })))))
3197
3198/* Convert x+x into x*2.  */
3199(simplify
3200 (plus @0 @0)
3201 (if (SCALAR_FLOAT_TYPE_P (type))
3202  (mult @0 { build_real (type, dconst2); })
3203  (if (INTEGRAL_TYPE_P (type))
3204   (mult @0 { build_int_cst (type, 2); }))))
3205
3206/* 0 - X  ->  -X.  */
3207(simplify
3208 (minus integer_zerop @1)
3209 (negate @1))
3210(simplify
3211 (pointer_diff integer_zerop @1)
3212 (negate (convert @1)))
3213
3214/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0).  So check whether
3215   ARG0 is zero and X + ARG0 reduces to X, since that would mean
3216   (-ARG1 + ARG0) reduces to -ARG1.  */
3217(simplify
3218 (minus real_zerop@0 @1)
3219 (if (fold_real_zero_addition_p (type, @0, 0))
3220  (negate @1)))
3221
3222/* Transform x * -1 into -x.  */
3223(simplify
3224 (mult @0 integer_minus_onep)
3225 (negate @0))
3226
3227/* Reassociate (X * CST) * Y to (X * Y) * CST.  This does not introduce
3228   signed overflow for CST != 0 && CST != -1.  */
3229(simplify
3230 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
3231 (if (TREE_CODE (@2) != INTEGER_CST
3232      && single_use (@3)
3233      && !integer_zerop (@1) && !integer_minus_onep (@1))
3234  (mult (mult @0 @2) @1)))
3235
3236/* True if we can easily extract the real and imaginary parts of a complex
3237   number.  */
3238(match compositional_complex
3239 (convert? (complex @0 @1)))
3240
3241/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations.  */
3242(simplify
3243 (complex (realpart @0) (imagpart @0))
3244 @0)
3245(simplify
3246 (realpart (complex @0 @1))
3247 @0)
3248(simplify
3249 (imagpart (complex @0 @1))
3250 @1)
3251
3252/* Sometimes we only care about half of a complex expression.  */
3253(simplify
3254 (realpart (convert?:s (conj:s @0)))
3255 (convert (realpart @0)))
3256(simplify
3257 (imagpart (convert?:s (conj:s @0)))
3258 (convert (negate (imagpart @0))))
3259(for part (realpart imagpart)
3260 (for op (plus minus)
3261  (simplify
3262   (part (convert?:s@2 (op:s @0 @1)))
3263   (convert (op (part @0) (part @1))))))
3264(simplify
3265 (realpart (convert?:s (CEXPI:s @0)))
3266 (convert (COS @0)))
3267(simplify
3268 (imagpart (convert?:s (CEXPI:s @0)))
3269 (convert (SIN @0)))
3270
3271/* conj(conj(x)) -> x  */
3272(simplify
3273 (conj (convert? (conj @0)))
3274 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
3275  (convert @0)))
3276
3277/* conj({x,y}) -> {x,-y}  */
3278(simplify
3279 (conj (convert?:s (complex:s @0 @1)))
3280 (with { tree itype = TREE_TYPE (type); }
3281  (complex (convert:itype @0) (negate (convert:itype @1)))))
3282
3283/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c.  */
3284(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
3285 (simplify
3286  (bswap (bswap @0))
3287  @0)
3288 (simplify
3289  (bswap (bit_not (bswap @0)))
3290  (bit_not @0))
3291 (for bitop (bit_xor bit_ior bit_and)
3292  (simplify
3293   (bswap (bitop:c (bswap @0) @1))
3294   (bitop @0 (bswap @1)))))
3295
3296
3297/* Combine COND_EXPRs and VEC_COND_EXPRs.  */
3298
3299/* Simplify constant conditions.
3300   Only optimize constant conditions when the selected branch
3301   has the same type as the COND_EXPR.  This avoids optimizing
3302   away "c ? x : throw", where the throw has a void type.
3303   Note that we cannot throw away the fold-const.c variant nor
3304   this one as we depend on doing this transform before possibly
3305   A ? B : B -> B triggers and the fold-const.c one can optimize
3306   0 ? A : B to B even if A has side-effects.  Something
3307   genmatch cannot handle.  */
3308(simplify
3309 (cond INTEGER_CST@0 @1 @2)
3310 (if (integer_zerop (@0))
3311  (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
3312   @2)
3313  (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
3314   @1)))
3315(simplify
3316 (vec_cond VECTOR_CST@0 @1 @2)
3317 (if (integer_all_onesp (@0))
3318  @1
3319  (if (integer_zerop (@0))
3320   @2)))
3321
3322/* Sink unary operations to constant branches, but only if we do fold it to
3323   constants.  */
3324(for op (negate bit_not abs absu)
3325 (simplify
3326  (op (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2))
3327  (with
3328   {
3329     tree cst1, cst2;
3330     cst1 = const_unop (op, type, @1);
3331     if (cst1)
3332       cst2 = const_unop (op, type, @2);
3333   }
3334   (if (cst1 && cst2)
3335    (vec_cond @0 { cst1; } { cst2; })))))
3336
3337/* Simplification moved from fold_cond_expr_with_comparison.  It may also
3338   be extended.  */
3339/* This pattern implements two kinds simplification:
3340
3341   Case 1)
3342   (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
3343     1) Conversions are type widening from smaller type.
3344     2) Const c1 equals to c2 after canonicalizing comparison.
3345     3) Comparison has tree code LT, LE, GT or GE.
3346   This specific pattern is needed when (cmp (convert x) c) may not
3347   be simplified by comparison patterns because of multiple uses of
3348   x.  It also makes sense here because simplifying across multiple
3349   referred var is always benefitial for complicated cases.
3350
3351   Case 2)
3352   (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2).  */
3353(for cmp (lt le gt ge eq)
3354 (simplify
3355  (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
3356  (with
3357   {
3358     tree from_type = TREE_TYPE (@1);
3359     tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
3360     enum tree_code code = ERROR_MARK;
3361
3362     if (INTEGRAL_TYPE_P (from_type)
3363	 && int_fits_type_p (@2, from_type)
3364	 && (types_match (c1_type, from_type)
3365	     || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
3366		 && (TYPE_UNSIGNED (from_type)
3367		     || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
3368	 && (types_match (c2_type, from_type)
3369	     || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
3370		 && (TYPE_UNSIGNED (from_type)
3371		     || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
3372       {
3373	 if (cmp != EQ_EXPR)
3374	   {
3375	     if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
3376	       {
3377		 /* X <= Y - 1 equals to X < Y.  */
3378		 if (cmp == LE_EXPR)
3379		   code = LT_EXPR;
3380		 /* X > Y - 1 equals to X >= Y.  */
3381		 if (cmp == GT_EXPR)
3382		   code = GE_EXPR;
3383	       }
3384	     if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
3385	       {
3386		 /* X < Y + 1 equals to X <= Y.  */
3387		 if (cmp == LT_EXPR)
3388		   code = LE_EXPR;
3389		 /* X >= Y + 1 equals to X > Y.  */
3390		 if (cmp == GE_EXPR)
3391		   code = GT_EXPR;
3392	       }
3393	     if (code != ERROR_MARK
3394		 || wi::to_widest (@2) == wi::to_widest (@3))
3395	       {
3396		 if (cmp == LT_EXPR || cmp == LE_EXPR)
3397		   code = MIN_EXPR;
3398		 if (cmp == GT_EXPR || cmp == GE_EXPR)
3399		   code = MAX_EXPR;
3400	       }
3401	   }
3402	 /* Can do A == C1 ? A : C2  ->  A == C1 ? C1 : C2?  */
3403	 else if (int_fits_type_p (@3, from_type))
3404	   code = EQ_EXPR;
3405       }
3406   }
3407   (if (code == MAX_EXPR)
3408    (convert (max @1 (convert @2)))
3409    (if (code == MIN_EXPR)
3410     (convert (min @1 (convert @2)))
3411     (if (code == EQ_EXPR)
3412      (convert (cond (eq @1 (convert @3))
3413		     (convert:from_type @3) (convert:from_type @2)))))))))
3414
3415/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
3416
3417     1) OP is PLUS or MINUS.
3418     2) CMP is LT, LE, GT or GE.
3419     3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
3420
3421   This pattern also handles special cases like:
3422
3423     A) Operand x is a unsigned to signed type conversion and c1 is
3424	integer zero.  In this case,
3425	  (signed type)x  < 0  <=>  x  > MAX_VAL(signed type)
3426	  (signed type)x >= 0  <=>  x <= MAX_VAL(signed type)
3427     B) Const c1 may not equal to (C3 op' C2).  In this case we also
3428	check equality for (c1+1) and (c1-1) by adjusting comparison
3429	code.
3430
3431   TODO: Though signed type is handled by this pattern, it cannot be
3432   simplified at the moment because C standard requires additional
3433   type promotion.  In order to match&simplify it here, the IR needs
3434   to be cleaned up by other optimizers, i.e, VRP.  */
3435(for op (plus minus)
3436 (for cmp (lt le gt ge)
3437  (simplify
3438   (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
3439   (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
3440    (if (types_match (from_type, to_type)
3441	 /* Check if it is special case A).  */
3442	 || (TYPE_UNSIGNED (from_type)
3443	     && !TYPE_UNSIGNED (to_type)
3444	     && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
3445	     && integer_zerop (@1)
3446	     && (cmp == LT_EXPR || cmp == GE_EXPR)))
3447     (with
3448      {
3449	wi::overflow_type overflow = wi::OVF_NONE;
3450	enum tree_code code, cmp_code = cmp;
3451	wide_int real_c1;
3452	wide_int c1 = wi::to_wide (@1);
3453	wide_int c2 = wi::to_wide (@2);
3454	wide_int c3 = wi::to_wide (@3);
3455	signop sgn = TYPE_SIGN (from_type);
3456
3457	/* Handle special case A), given x of unsigned type:
3458	    ((signed type)x  < 0) <=> (x  > MAX_VAL(signed type))
3459	    ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type))  */
3460	if (!types_match (from_type, to_type))
3461	  {
3462	    if (cmp_code == LT_EXPR)
3463	      cmp_code = GT_EXPR;
3464	    if (cmp_code == GE_EXPR)
3465	      cmp_code = LE_EXPR;
3466	    c1 = wi::max_value (to_type);
3467	  }
3468	/* To simplify this pattern, we require c3 = (c1 op c2).  Here we
3469	   compute (c3 op' c2) and check if it equals to c1 with op' being
3470	   the inverted operator of op.  Make sure overflow doesn't happen
3471	   if it is undefined.  */
3472	if (op == PLUS_EXPR)
3473	  real_c1 = wi::sub (c3, c2, sgn, &overflow);
3474	else
3475	  real_c1 = wi::add (c3, c2, sgn, &overflow);
3476
3477	code = cmp_code;
3478	if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3479	  {
3480	    /* Check if c1 equals to real_c1.  Boundary condition is handled
3481	       by adjusting comparison operation if necessary.  */
3482	    if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3483		&& !overflow)
3484	      {
3485		/* X <= Y - 1 equals to X < Y.  */
3486		if (cmp_code == LE_EXPR)
3487		  code = LT_EXPR;
3488		/* X > Y - 1 equals to X >= Y.  */
3489		if (cmp_code == GT_EXPR)
3490		  code = GE_EXPR;
3491	      }
3492	    if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3493		&& !overflow)
3494	      {
3495		/* X < Y + 1 equals to X <= Y.  */
3496		if (cmp_code == LT_EXPR)
3497		  code = LE_EXPR;
3498		/* X >= Y + 1 equals to X > Y.  */
3499		if (cmp_code == GE_EXPR)
3500		  code = GT_EXPR;
3501	      }
3502	    if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3503	      {
3504		if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3505		  code = MIN_EXPR;
3506		if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3507		  code = MAX_EXPR;
3508	      }
3509	  }
3510      }
3511      (if (code == MAX_EXPR)
3512       (op (max @X { wide_int_to_tree (from_type, real_c1); })
3513	   { wide_int_to_tree (from_type, c2); })
3514       (if (code == MIN_EXPR)
3515	(op (min @X { wide_int_to_tree (from_type, real_c1); })
3516	    { wide_int_to_tree (from_type, c2); })))))))))
3517
3518(for cnd (cond vec_cond)
3519 /* A ? B : (A ? X : C) -> A ? B : C.  */
3520 (simplify
3521  (cnd @0 (cnd @0 @1 @2) @3)
3522  (cnd @0 @1 @3))
3523 (simplify
3524  (cnd @0 @1 (cnd @0 @2 @3))
3525  (cnd @0 @1 @3))
3526 /* A ? B : (!A ? C : X) -> A ? B : C.  */
3527 /* ???  This matches embedded conditions open-coded because genmatch
3528    would generate matching code for conditions in separate stmts only.
3529    The following is still important to merge then and else arm cases
3530    from if-conversion.  */
3531 (simplify
3532  (cnd @0 @1 (cnd @2 @3 @4))
3533  (if (inverse_conditions_p (@0, @2))
3534   (cnd @0 @1 @3)))
3535 (simplify
3536  (cnd @0 (cnd @1 @2 @3) @4)
3537  (if (inverse_conditions_p (@0, @1))
3538   (cnd @0 @3 @4)))
3539
3540 /* A ? B : B -> B.  */
3541 (simplify
3542  (cnd @0 @1 @1)
3543  @1)
3544
3545 /* !A ? B : C -> A ? C : B.  */
3546 (simplify
3547  (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3548  (cnd @0 @2 @1)))
3549
3550/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3551   return all -1 or all 0 results.  */
3552/* ??? We could instead convert all instances of the vec_cond to negate,
3553   but that isn't necessarily a win on its own.  */
3554(simplify
3555 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3556 (if (VECTOR_TYPE_P (type)
3557      && known_eq (TYPE_VECTOR_SUBPARTS (type),
3558		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3559      && (TYPE_MODE (TREE_TYPE (type))
3560          == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3561  (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3562
3563/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0).  */
3564(simplify
3565 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3566 (if (VECTOR_TYPE_P (type)
3567      && known_eq (TYPE_VECTOR_SUBPARTS (type),
3568		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3569      && (TYPE_MODE (TREE_TYPE (type))
3570          == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3571  (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3572
3573
3574/* Simplifications of comparisons.  */
3575
3576/* See if we can reduce the magnitude of a constant involved in a
3577   comparison by changing the comparison code.  This is a canonicalization
3578   formerly done by maybe_canonicalize_comparison_1.  */
3579(for cmp  (le gt)
3580     acmp (lt ge)
3581 (simplify
3582  (cmp @0 uniform_integer_cst_p@1)
3583  (with { tree cst = uniform_integer_cst_p (@1); }
3584   (if (tree_int_cst_sgn (cst) == -1)
3585     (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3586				   wide_int_to_tree (TREE_TYPE (cst),
3587						     wi::to_wide (cst)
3588						     + 1)); })))))
3589(for cmp  (ge lt)
3590     acmp (gt le)
3591 (simplify
3592  (cmp @0 uniform_integer_cst_p@1)
3593  (with { tree cst = uniform_integer_cst_p (@1); }
3594   (if (tree_int_cst_sgn (cst) == 1)
3595    (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3596				  wide_int_to_tree (TREE_TYPE (cst),
3597				  wi::to_wide (cst) - 1)); })))))
3598
3599/* We can simplify a logical negation of a comparison to the
3600   inverted comparison.  As we cannot compute an expression
3601   operator using invert_tree_comparison we have to simulate
3602   that with expression code iteration.  */
3603(for cmp (tcc_comparison)
3604     icmp (inverted_tcc_comparison)
3605     ncmp (inverted_tcc_comparison_with_nans)
3606 /* Ideally we'd like to combine the following two patterns
3607    and handle some more cases by using
3608      (logical_inverted_value (cmp @0 @1))
3609    here but for that genmatch would need to "inline" that.
3610    For now implement what forward_propagate_comparison did.  */
3611 (simplify
3612  (bit_not (cmp @0 @1))
3613  (if (VECTOR_TYPE_P (type)
3614       || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3615   /* Comparison inversion may be impossible for trapping math,
3616      invert_tree_comparison will tell us.  But we can't use
3617      a computed operator in the replacement tree thus we have
3618      to play the trick below.  */
3619   (with { enum tree_code ic = invert_tree_comparison
3620             (cmp, HONOR_NANS (@0)); }
3621    (if (ic == icmp)
3622     (icmp @0 @1)
3623     (if (ic == ncmp)
3624      (ncmp @0 @1))))))
3625 (simplify
3626  (bit_xor (cmp @0 @1) integer_truep)
3627  (with { enum tree_code ic = invert_tree_comparison
3628            (cmp, HONOR_NANS (@0)); }
3629   (if (ic == icmp)
3630    (icmp @0 @1)
3631    (if (ic == ncmp)
3632     (ncmp @0 @1))))))
3633
3634/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3635   ??? The transformation is valid for the other operators if overflow
3636   is undefined for the type, but performing it here badly interacts
3637   with the transformation in fold_cond_expr_with_comparison which
3638   attempts to synthetize ABS_EXPR.  */
3639(for cmp (eq ne)
3640 (for sub (minus pointer_diff)
3641  (simplify
3642   (cmp (sub@2 @0 @1) integer_zerop)
3643   (if (single_use (@2))
3644    (cmp @0 @1)))))
3645
3646/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3647   signed arithmetic case.  That form is created by the compiler
3648   often enough for folding it to be of value.  One example is in
3649   computing loop trip counts after Operator Strength Reduction.  */
3650(for cmp (simple_comparison)
3651     scmp (swapped_simple_comparison)
3652 (simplify
3653  (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3654  /* Handle unfolded multiplication by zero.  */
3655  (if (integer_zerop (@1))
3656   (cmp @1 @2)
3657   (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3658	&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3659	&& single_use (@3))
3660    /* If @1 is negative we swap the sense of the comparison.  */
3661    (if (tree_int_cst_sgn (@1) < 0)
3662     (scmp @0 @2)
3663     (cmp @0 @2))))))
3664
3665/* Simplify comparison of something with itself.  For IEEE
3666   floating-point, we can only do some of these simplifications.  */
3667(for cmp (eq ge le)
3668 (simplify
3669  (cmp @0 @0)
3670  (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3671       || ! HONOR_NANS (@0))
3672   { constant_boolean_node (true, type); }
3673   (if (cmp != EQ_EXPR)
3674    (eq @0 @0)))))
3675(for cmp (ne gt lt)
3676 (simplify
3677  (cmp @0 @0)
3678  (if (cmp != NE_EXPR
3679       || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3680       || ! HONOR_NANS (@0))
3681   { constant_boolean_node (false, type); })))
3682(for cmp (unle unge uneq)
3683 (simplify
3684  (cmp @0 @0)
3685  { constant_boolean_node (true, type); }))
3686(for cmp (unlt ungt)
3687 (simplify
3688  (cmp @0 @0)
3689  (unordered @0 @0)))
3690(simplify
3691 (ltgt @0 @0)
3692 (if (!flag_trapping_math)
3693  { constant_boolean_node (false, type); }))
3694
3695/* Fold ~X op ~Y as Y op X.  */
3696(for cmp (simple_comparison)
3697 (simplify
3698  (cmp (bit_not@2 @0) (bit_not@3 @1))
3699  (if (single_use (@2) && single_use (@3))
3700   (cmp @1 @0))))
3701
3702/* Fold ~X op C as X op' ~C, where op' is the swapped comparison.  */
3703(for cmp (simple_comparison)
3704     scmp (swapped_simple_comparison)
3705 (simplify
3706  (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3707  (if (single_use (@2)
3708       && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3709   (scmp @0 (bit_not @1)))))
3710
3711(for cmp (simple_comparison)
3712 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2.  */
3713 (simplify
3714  (cmp (convert@2 @0) (convert? @1))
3715  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3716       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3717	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3718       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3719	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3720   (with
3721    {
3722      tree type1 = TREE_TYPE (@1);
3723      if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3724        {
3725	  REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3726	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3727	      && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3728	    type1 = float_type_node;
3729	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3730	      && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3731	    type1 = double_type_node;
3732        }
3733      tree newtype
3734        = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3735	   ? TREE_TYPE (@0) : type1);
3736    }
3737    (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3738     (cmp (convert:newtype @0) (convert:newtype @1))))))
3739
3740 (simplify
3741  (cmp @0 REAL_CST@1)
3742  /* IEEE doesn't distinguish +0 and -0 in comparisons.  */
3743  (switch
3744   /* a CMP (-0) -> a CMP 0  */
3745   (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3746    (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3747   /* x != NaN is always true, other ops are always false.  */
3748   (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3749	&& ! HONOR_SNANS (@1))
3750    { constant_boolean_node (cmp == NE_EXPR, type); })
3751   /* Fold comparisons against infinity.  */
3752   (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3753	&& MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3754    (with
3755     {
3756       REAL_VALUE_TYPE max;
3757       enum tree_code code = cmp;
3758       bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3759       if (neg)
3760         code = swap_tree_comparison (code);
3761     }
3762     (switch
3763      /* x > +Inf is always false, if we ignore NaNs or exceptions.  */
3764      (if (code == GT_EXPR
3765	   && !(HONOR_NANS (@0) && flag_trapping_math))
3766       { constant_boolean_node (false, type); })
3767      (if (code == LE_EXPR)
3768       /* x <= +Inf is always true, if we don't care about NaNs.  */
3769       (if (! HONOR_NANS (@0))
3770	{ constant_boolean_node (true, type); }
3771	/* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3772	   an "invalid" exception.  */
3773	(if (!flag_trapping_math)
3774	 (eq @0 @0))))
3775      /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3776	 for == this introduces an exception for x a NaN.  */
3777      (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3778	   || code == GE_EXPR)
3779       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3780	(if (neg)
3781	 (lt @0 { build_real (TREE_TYPE (@0), max); })
3782	 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3783      /* x < +Inf is always equal to x <= DBL_MAX.  */
3784      (if (code == LT_EXPR)
3785       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3786	(if (neg)
3787	 (ge @0 { build_real (TREE_TYPE (@0), max); })
3788	 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3789      /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3790	 an exception for x a NaN so use an unordered comparison.  */
3791      (if (code == NE_EXPR)
3792       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3793	(if (! HONOR_NANS (@0))
3794	 (if (neg)
3795	  (ge @0 { build_real (TREE_TYPE (@0), max); })
3796	  (le @0 { build_real (TREE_TYPE (@0), max); }))
3797	 (if (neg)
3798	  (unge @0 { build_real (TREE_TYPE (@0), max); })
3799	  (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
3800
3801 /* If this is a comparison of a real constant with a PLUS_EXPR
3802    or a MINUS_EXPR of a real constant, we can convert it into a
3803    comparison with a revised real constant as long as no overflow
3804    occurs when unsafe_math_optimizations are enabled.  */
3805 (if (flag_unsafe_math_optimizations)
3806  (for op (plus minus)
3807   (simplify
3808    (cmp (op @0 REAL_CST@1) REAL_CST@2)
3809    (with
3810     {
3811       tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3812			       TREE_TYPE (@1), @2, @1);
3813     }
3814     (if (tem && !TREE_OVERFLOW (tem))
3815      (cmp @0 { tem; }))))))
3816
3817 /* Likewise, we can simplify a comparison of a real constant with
3818    a MINUS_EXPR whose first operand is also a real constant, i.e.
3819    (c1 - x) < c2 becomes x > c1-c2.  Reordering is allowed on
3820    floating-point types only if -fassociative-math is set.  */
3821 (if (flag_associative_math)
3822  (simplify
3823   (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3824   (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3825    (if (tem && !TREE_OVERFLOW (tem))
3826     (cmp { tem; } @1)))))
3827
3828 /* Fold comparisons against built-in math functions.  */
3829 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
3830  (for sq (SQRT)
3831   (simplify
3832    (cmp (sq @0) REAL_CST@1)
3833    (switch
3834     (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3835      (switch
3836       /* sqrt(x) < y is always false, if y is negative.  */
3837       (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3838	{ constant_boolean_node (false, type); })
3839       /* sqrt(x) > y is always true, if y is negative and we
3840	  don't care about NaNs, i.e. negative values of x.  */
3841       (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3842	{ constant_boolean_node (true, type); })
3843       /* sqrt(x) > y is the same as x >= 0, if y is negative.  */
3844       (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3845     (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3846      (switch
3847       /* sqrt(x) < 0 is always false.  */
3848       (if (cmp == LT_EXPR)
3849	{ constant_boolean_node (false, type); })
3850       /* sqrt(x) >= 0 is always true if we don't care about NaNs.  */
3851       (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3852	{ constant_boolean_node (true, type); })
3853       /* sqrt(x) <= 0 -> x == 0.  */
3854       (if (cmp == LE_EXPR)
3855	(eq @0 @1))
3856       /* Otherwise sqrt(x) cmp 0 -> x cmp 0.  Here cmp can be >=, >,
3857          == or !=.  In the last case:
3858
3859	    (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3860
3861	  if x is negative or NaN.  Due to -funsafe-math-optimizations,
3862	  the results for other x follow from natural arithmetic.  */
3863       (cmp @0 @1)))
3864     (if ((cmp == LT_EXPR
3865	   || cmp == LE_EXPR
3866	   || cmp == GT_EXPR
3867	   || cmp == GE_EXPR)
3868	  && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3869	  /* Give up for -frounding-math.  */
3870	  && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
3871      (with
3872       {
3873	 REAL_VALUE_TYPE c2;
3874	 enum tree_code ncmp = cmp;
3875	 const real_format *fmt
3876	   = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
3877	 real_arithmetic (&c2, MULT_EXPR,
3878			  &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3879	 real_convert (&c2, fmt, &c2);
3880	 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
3881	    then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR.  */
3882	 if (!REAL_VALUE_ISINF (c2))
3883	   {
3884	     tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
3885					build_real (TREE_TYPE (@0), c2));
3886	     if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
3887	       ncmp = ERROR_MARK;
3888	     else if ((cmp == LT_EXPR || cmp == GE_EXPR)
3889		      && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
3890	       ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
3891	     else if ((cmp == LE_EXPR || cmp == GT_EXPR)
3892		      && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
3893	       ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
3894	     else
3895	       {
3896		 /* With rounding to even, sqrt of up to 3 different values
3897		    gives the same normal result, so in some cases c2 needs
3898		    to be adjusted.  */
3899		 REAL_VALUE_TYPE c2alt, tow;
3900		 if (cmp == LT_EXPR || cmp == GE_EXPR)
3901		   tow = dconst0;
3902		 else
3903		   real_inf (&tow);
3904		 real_nextafter (&c2alt, fmt, &c2, &tow);
3905		 real_convert (&c2alt, fmt, &c2alt);
3906		 if (REAL_VALUE_ISINF (c2alt))
3907		   ncmp = ERROR_MARK;
3908		 else
3909		   {
3910		     c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
3911					   build_real (TREE_TYPE (@0), c2alt));
3912		     if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
3913		       ncmp = ERROR_MARK;
3914		     else if (real_equal (&TREE_REAL_CST (c3),
3915					  &TREE_REAL_CST (@1)))
3916		       c2 = c2alt;
3917		   }
3918	       }
3919	   }
3920       }
3921       (if (cmp == GT_EXPR || cmp == GE_EXPR)
3922	(if (REAL_VALUE_ISINF (c2))
3923	 /* sqrt(x) > y is x == +Inf, when y is very large.  */
3924	 (if (HONOR_INFINITIES (@0))
3925	  (eq @0 { build_real (TREE_TYPE (@0), c2); })
3926	  { constant_boolean_node (false, type); })
3927	 /* sqrt(x) > c is the same as x > c*c.  */
3928	 (if (ncmp != ERROR_MARK)
3929	  (if (ncmp == GE_EXPR)
3930	   (ge @0 { build_real (TREE_TYPE (@0), c2); })
3931	   (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
3932	/* else if (cmp == LT_EXPR || cmp == LE_EXPR)  */
3933	(if (REAL_VALUE_ISINF (c2))
3934	 (switch
3935	  /* sqrt(x) < y is always true, when y is a very large
3936	     value and we don't care about NaNs or Infinities.  */
3937	  (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3938	   { constant_boolean_node (true, type); })
3939	  /* sqrt(x) < y is x != +Inf when y is very large and we
3940	     don't care about NaNs.  */
3941	  (if (! HONOR_NANS (@0))
3942	   (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3943	  /* sqrt(x) < y is x >= 0 when y is very large and we
3944	     don't care about Infinities.  */
3945	  (if (! HONOR_INFINITIES (@0))
3946	   (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3947	  /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large.  */
3948	  (if (GENERIC)
3949	   (truth_andif
3950	    (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3951	    (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3952	 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs.  */
3953	 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
3954	  (if (ncmp == LT_EXPR)
3955	   (lt @0 { build_real (TREE_TYPE (@0), c2); })
3956	   (le @0 { build_real (TREE_TYPE (@0), c2); }))
3957	  /* sqrt(x) < c is the same as x >= 0 && x < c*c.  */
3958	  (if (ncmp != ERROR_MARK && GENERIC)
3959	   (if (ncmp == LT_EXPR)
3960	    (truth_andif
3961	     (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3962	     (lt @0 { build_real (TREE_TYPE (@0), c2); }))
3963	    (truth_andif
3964	     (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3965	     (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
3966   /* Transform sqrt(x) cmp sqrt(y) -> x cmp y.  */
3967   (simplify
3968    (cmp (sq @0) (sq @1))
3969      (if (! HONOR_NANS (@0))
3970	(cmp @0 @1))))))
3971
3972/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M.  */
3973(for cmp  (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3974     icmp (lt le eq ne ge gt unordered ordered lt   le   gt   ge   eq   ne)
3975 (simplify
3976  (cmp (float@0 @1) (float @2))
3977   (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3978	&& ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3979    (with
3980     {
3981       format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3982       tree type1 = TREE_TYPE (@1);
3983       bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3984       tree type2 = TREE_TYPE (@2);
3985       bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3986     }
3987     (if (fmt.can_represent_integral_type_p (type1)
3988	  && fmt.can_represent_integral_type_p (type2))
3989      (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3990       { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3991       (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3992            && type1_signed_p >= type2_signed_p)
3993        (icmp @1 (convert @2))
3994        (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3995             && type1_signed_p <= type2_signed_p)
3996         (icmp (convert:type2 @1) @2)
3997         (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3998              && type1_signed_p == type2_signed_p)
3999	  (icmp @1 @2))))))))))
4000
4001/* Optimize various special cases of (FTYPE) N CMP CST.  */
4002(for cmp  (lt le eq ne ge gt)
4003     icmp (le le eq ne ge ge)
4004 (simplify
4005  (cmp (float @0) REAL_CST@1)
4006   (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
4007	&& ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
4008    (with
4009     {
4010       tree itype = TREE_TYPE (@0);
4011       format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
4012       const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
4013       /* Be careful to preserve any potential exceptions due to
4014	  NaNs.  qNaNs are ok in == or != context.
4015	  TODO: relax under -fno-trapping-math or
4016	  -fno-signaling-nans.  */
4017       bool exception_p
4018         = real_isnan (cst) && (cst->signalling
4019				|| (cmp != EQ_EXPR && cmp != NE_EXPR));
4020     }
4021     /* TODO: allow non-fitting itype and SNaNs when
4022	-fno-trapping-math.  */
4023     (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
4024      (with
4025       {
4026	 signop isign = TYPE_SIGN (itype);
4027	 REAL_VALUE_TYPE imin, imax;
4028	 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
4029	 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
4030
4031	 REAL_VALUE_TYPE icst;
4032	 if (cmp == GT_EXPR || cmp == GE_EXPR)
4033	   real_ceil (&icst, fmt, cst);
4034	 else if (cmp == LT_EXPR || cmp == LE_EXPR)
4035	   real_floor (&icst, fmt, cst);
4036	 else
4037	   real_trunc (&icst, fmt, cst);
4038
4039	 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
4040
4041	 bool overflow_p = false;
4042	 wide_int icst_val
4043	   = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
4044       }
4045       (switch
4046	/* Optimize cases when CST is outside of ITYPE's range.  */
4047	(if (real_compare (LT_EXPR, cst, &imin))
4048	 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
4049				  type); })
4050	(if (real_compare (GT_EXPR, cst, &imax))
4051	 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
4052				  type); })
4053	/* Remove cast if CST is an integer representable by ITYPE.  */
4054	(if (cst_int_p)
4055	 (cmp @0 { gcc_assert (!overflow_p);
4056		   wide_int_to_tree (itype, icst_val); })
4057	)
4058	/* When CST is fractional, optimize
4059	    (FTYPE) N == CST -> 0
4060	    (FTYPE) N != CST -> 1.  */
4061	(if (cmp == EQ_EXPR || cmp == NE_EXPR)
4062	 { constant_boolean_node (cmp == NE_EXPR, type); })
4063	/* Otherwise replace with sensible integer constant.  */
4064	(with
4065	 {
4066	   gcc_checking_assert (!overflow_p);
4067	 }
4068	 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
4069
4070/* Fold A /[ex] B CMP C to A CMP B * C.  */
4071(for cmp (eq ne)
4072 (simplify
4073  (cmp (exact_div @0 @1) INTEGER_CST@2)
4074  (if (!integer_zerop (@1))
4075   (if (wi::to_wide (@2) == 0)
4076    (cmp @0 @2)
4077    (if (TREE_CODE (@1) == INTEGER_CST)
4078     (with
4079      {
4080	wi::overflow_type ovf;
4081	wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
4082				 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
4083      }
4084      (if (ovf)
4085       { constant_boolean_node (cmp == NE_EXPR, type); }
4086       (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
4087(for cmp (lt le gt ge)
4088 (simplify
4089  (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
4090  (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
4091   (with
4092    {
4093      wi::overflow_type ovf;
4094      wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
4095			       TYPE_SIGN (TREE_TYPE (@1)), &ovf);
4096    }
4097    (if (ovf)
4098     { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
4099					TYPE_SIGN (TREE_TYPE (@2)))
4100			      != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
4101     (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
4102
4103/* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
4104
4105   For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
4106   For large C (more than min/B+2^size), this is also true, with the
4107   multiplication computed modulo 2^size.
4108   For intermediate C, this just tests the sign of A.  */
4109(for cmp  (lt le gt ge)
4110     cmp2 (ge ge lt lt)
4111 (simplify
4112  (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
4113  (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
4114       && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
4115       && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
4116   (with
4117    {
4118      tree utype = TREE_TYPE (@2);
4119      wide_int denom = wi::to_wide (@1);
4120      wide_int right = wi::to_wide (@2);
4121      wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
4122      wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
4123      bool small = wi::leu_p (right, smax);
4124      bool large = wi::geu_p (right, smin);
4125    }
4126    (if (small || large)
4127     (cmp (convert:utype @0) (mult @2 (convert @1)))
4128     (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
4129
4130/* Unordered tests if either argument is a NaN.  */
4131(simplify
4132 (bit_ior (unordered @0 @0) (unordered @1 @1))
4133 (if (types_match (@0, @1))
4134  (unordered @0 @1)))
4135(simplify
4136 (bit_and (ordered @0 @0) (ordered @1 @1))
4137 (if (types_match (@0, @1))
4138  (ordered @0 @1)))
4139(simplify
4140 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
4141 @2)
4142(simplify
4143 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
4144 @2)
4145
4146/* Simple range test simplifications.  */
4147/* A < B || A >= B -> true.  */
4148(for test1 (lt le le le ne ge)
4149     test2 (ge gt ge ne eq ne)
4150 (simplify
4151  (bit_ior:c (test1 @0 @1) (test2 @0 @1))
4152  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4153       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
4154   { constant_boolean_node (true, type); })))
4155/* A < B && A >= B -> false.  */
4156(for test1 (lt lt lt le ne eq)
4157     test2 (ge gt eq gt eq gt)
4158 (simplify
4159  (bit_and:c (test1 @0 @1) (test2 @0 @1))
4160  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4161       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
4162   { constant_boolean_node (false, type); })))
4163
4164/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
4165   A & (2**N - 1) >  2**K - 1 -> A & (2**N - 2**K) != 0
4166
4167   Note that comparisons
4168     A & (2**N - 1) <  2**K   -> A & (2**N - 2**K) == 0
4169     A & (2**N - 1) >= 2**K   -> A & (2**N - 2**K) != 0
4170   will be canonicalized to above so there's no need to
4171   consider them here.
4172 */
4173
4174(for cmp (le gt)
4175     eqcmp (eq ne)
4176 (simplify
4177  (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
4178  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4179   (with
4180    {
4181     tree ty = TREE_TYPE (@0);
4182     unsigned prec = TYPE_PRECISION (ty);
4183     wide_int mask = wi::to_wide (@2, prec);
4184     wide_int rhs = wi::to_wide (@3, prec);
4185     signop sgn = TYPE_SIGN (ty);
4186    }
4187    (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
4188	 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
4189      (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
4190	     { build_zero_cst (ty); }))))))
4191
4192/* -A CMP -B -> B CMP A.  */
4193(for cmp (tcc_comparison)
4194     scmp (swapped_tcc_comparison)
4195 (simplify
4196  (cmp (negate @0) (negate @1))
4197  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4198       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4199	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
4200   (scmp @0 @1)))
4201 (simplify
4202  (cmp (negate @0) CONSTANT_CLASS_P@1)
4203  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4204       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4205	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
4206   (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
4207    (if (tem && !TREE_OVERFLOW (tem))
4208     (scmp @0 { tem; }))))))
4209
4210/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0.  */
4211(for op (eq ne)
4212 (simplify
4213  (op (abs @0) zerop@1)
4214  (op @0 @1)))
4215
4216/* From fold_sign_changed_comparison and fold_widened_comparison.
4217   FIXME: the lack of symmetry is disturbing.  */
4218(for cmp (simple_comparison)
4219 (simplify
4220  (cmp (convert@0 @00) (convert?@1 @10))
4221  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4222       /* Disable this optimization if we're casting a function pointer
4223	  type on targets that require function pointer canonicalization.  */
4224       && !(targetm.have_canonicalize_funcptr_for_compare ()
4225	    && ((POINTER_TYPE_P (TREE_TYPE (@00))
4226		 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
4227		|| (POINTER_TYPE_P (TREE_TYPE (@10))
4228		    && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
4229       && single_use (@0))
4230   (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
4231	&& (TREE_CODE (@10) == INTEGER_CST
4232	    || @1 != @10)
4233	&& (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
4234	    || cmp == NE_EXPR
4235	    || cmp == EQ_EXPR)
4236	&& !POINTER_TYPE_P (TREE_TYPE (@00)))
4237    /* ???  The special-casing of INTEGER_CST conversion was in the original
4238       code and here to avoid a spurious overflow flag on the resulting
4239       constant which fold_convert produces.  */
4240    (if (TREE_CODE (@1) == INTEGER_CST)
4241     (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
4242				TREE_OVERFLOW (@1)); })
4243     (cmp @00 (convert @1)))
4244
4245    (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
4246     /* If possible, express the comparison in the shorter mode.  */
4247     (if ((cmp == EQ_EXPR || cmp == NE_EXPR
4248	   || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
4249	   || (!TYPE_UNSIGNED (TREE_TYPE (@0))
4250	       && TYPE_UNSIGNED (TREE_TYPE (@00))))
4251	  && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
4252	      || ((TYPE_PRECISION (TREE_TYPE (@00))
4253		   >= TYPE_PRECISION (TREE_TYPE (@10)))
4254		  && (TYPE_UNSIGNED (TREE_TYPE (@00))
4255		      == TYPE_UNSIGNED (TREE_TYPE (@10))))
4256	      || (TREE_CODE (@10) == INTEGER_CST
4257		  && INTEGRAL_TYPE_P (TREE_TYPE (@00))
4258		  && int_fits_type_p (@10, TREE_TYPE (@00)))))
4259      (cmp @00 (convert @10))
4260      (if (TREE_CODE (@10) == INTEGER_CST
4261	   && INTEGRAL_TYPE_P (TREE_TYPE (@00))
4262	   && !int_fits_type_p (@10, TREE_TYPE (@00)))
4263       (with
4264	{
4265	  tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4266	  tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4267	  bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
4268	  bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
4269	}
4270	(if (above || below)
4271	 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
4272	  { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
4273	  (if (cmp == LT_EXPR || cmp == LE_EXPR)
4274	   { constant_boolean_node (above ? true : false, type); }
4275	   (if (cmp == GT_EXPR || cmp == GE_EXPR)
4276	    { constant_boolean_node (above ? false : true, type); }))))))))))))
4277
4278(for cmp (eq ne)
4279 (simplify
4280  /* SSA names are canonicalized to 2nd place.  */
4281  (cmp addr@0 SSA_NAME@1)
4282  (with
4283   { poly_int64 off; tree base; }
4284   /* A local variable can never be pointed to by
4285      the default SSA name of an incoming parameter.  */
4286   (if (SSA_NAME_IS_DEFAULT_DEF (@1)
4287	&& TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL
4288	&& (base = get_base_address (TREE_OPERAND (@0, 0)))
4289	&& TREE_CODE (base) == VAR_DECL
4290	&& auto_var_in_fn_p (base, current_function_decl))
4291    (if (cmp == NE_EXPR)
4292     { constant_boolean_node (true, type); }
4293     { constant_boolean_node (false, type); })
4294    /* If the address is based on @1 decide using the offset.  */
4295    (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off))
4296	 && TREE_CODE (base) == MEM_REF
4297	 && TREE_OPERAND (base, 0) == @1)
4298     (with { off += mem_ref_offset (base).force_shwi (); }
4299      (if (known_ne (off, 0))
4300       { constant_boolean_node (cmp == NE_EXPR, type); }
4301       (if (known_eq (off, 0))
4302        { constant_boolean_node (cmp == EQ_EXPR, type); }))))))))
4303
4304/* Equality compare simplifications from fold_binary  */
4305(for cmp (eq ne)
4306
4307 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
4308    Similarly for NE_EXPR.  */
4309 (simplify
4310  (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
4311  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
4312       && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
4313   { constant_boolean_node (cmp == NE_EXPR, type); }))
4314
4315 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y.  */
4316 (simplify
4317  (cmp (bit_xor @0 @1) integer_zerop)
4318  (cmp @0 @1))
4319
4320 /* (X ^ Y) == Y becomes X == 0.
4321    Likewise (X ^ Y) == X becomes Y == 0.  */
4322 (simplify
4323  (cmp:c (bit_xor:c @0 @1) @0)
4324  (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
4325
4326 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2).  */
4327 (simplify
4328  (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
4329  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
4330   (cmp @0 (bit_xor @1 (convert @2)))))
4331
4332 (simplify
4333  (cmp (convert? addr@0) integer_zerop)
4334  (if (tree_single_nonzero_warnv_p (@0, NULL))
4335   { constant_boolean_node (cmp == NE_EXPR, type); })))
4336
4337/* If we have (A & C) == C where C is a power of 2, convert this into
4338   (A & C) != 0.  Similarly for NE_EXPR.  */
4339(for cmp (eq ne)
4340     icmp (ne eq)
4341 (simplify
4342  (cmp (bit_and@2 @0 integer_pow2p@1) @1)
4343  (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
4344
4345/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
4346   convert this into a shift followed by ANDing with D.  */
4347(simplify
4348 (cond
4349  (ne (bit_and @0 integer_pow2p@1) integer_zerop)
4350  INTEGER_CST@2 integer_zerop)
4351 (if (integer_pow2p (@2))
4352  (with {
4353     int shift = (wi::exact_log2 (wi::to_wide (@2))
4354		  - wi::exact_log2 (wi::to_wide (@1)));
4355   }
4356   (if (shift > 0)
4357    (bit_and
4358     (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
4359    (bit_and
4360     (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
4361     @2)))))
4362
4363/* If we have (A & C) != 0 where C is the sign bit of A, convert
4364   this into A < 0.  Similarly for (A & C) == 0 into A >= 0.  */
4365(for cmp (eq ne)
4366     ncmp (ge lt)
4367 (simplify
4368  (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
4369  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4370       && type_has_mode_precision_p (TREE_TYPE (@0))
4371       && element_precision (@2) >= element_precision (@0)
4372       && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
4373   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
4374    (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
4375
4376/* If we have A < 0 ? C : 0 where C is a power of 2, convert
4377   this into a right shift or sign extension followed by ANDing with C.  */
4378(simplify
4379 (cond
4380  (lt @0 integer_zerop)
4381  INTEGER_CST@1 integer_zerop)
4382 (if (integer_pow2p (@1)
4383      && !TYPE_UNSIGNED (TREE_TYPE (@0)))
4384  (with {
4385    int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
4386   }
4387   (if (shift >= 0)
4388    (bit_and
4389     (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
4390     @1)
4391    /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
4392       sign extension followed by AND with C will achieve the effect.  */
4393    (bit_and (convert @0) @1)))))
4394
4395/* When the addresses are not directly of decls compare base and offset.
4396   This implements some remaining parts of fold_comparison address
4397   comparisons but still no complete part of it.  Still it is good
4398   enough to make fold_stmt not regress when not dispatching to fold_binary.  */
4399(for cmp (simple_comparison)
4400 (simplify
4401  (cmp (convert1?@2 addr@0) (convert2? addr@1))
4402  (with
4403   {
4404     poly_int64 off0, off1;
4405     tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
4406     tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
4407     if (base0 && TREE_CODE (base0) == MEM_REF)
4408       {
4409	 off0 += mem_ref_offset (base0).force_shwi ();
4410         base0 = TREE_OPERAND (base0, 0);
4411       }
4412     if (base1 && TREE_CODE (base1) == MEM_REF)
4413       {
4414	 off1 += mem_ref_offset (base1).force_shwi ();
4415         base1 = TREE_OPERAND (base1, 0);
4416       }
4417   }
4418   (if (base0 && base1)
4419    (with
4420     {
4421       int equal = 2;
4422       /* Punt in GENERIC on variables with value expressions;
4423	  the value expressions might point to fields/elements
4424	  of other vars etc.  */
4425       if (GENERIC
4426	   && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
4427	       || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
4428	 ;
4429       else if (decl_in_symtab_p (base0)
4430		&& decl_in_symtab_p (base1))
4431         equal = symtab_node::get_create (base0)
4432	           ->equal_address_to (symtab_node::get_create (base1));
4433       else if ((DECL_P (base0)
4434		 || TREE_CODE (base0) == SSA_NAME
4435		 || TREE_CODE (base0) == STRING_CST)
4436		&& (DECL_P (base1)
4437		    || TREE_CODE (base1) == SSA_NAME
4438		    || TREE_CODE (base1) == STRING_CST))
4439         equal = (base0 == base1);
4440       if (equal == 0)
4441	 {
4442	   HOST_WIDE_INT ioff0 = -1, ioff1 = -1;
4443	   off0.is_constant (&ioff0);
4444	   off1.is_constant (&ioff1);
4445	   if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST)
4446	       || (TREE_CODE (base0) == STRING_CST && DECL_P (base1))
4447	       || (TREE_CODE (base0) == STRING_CST
4448		   && TREE_CODE (base1) == STRING_CST
4449		   && ioff0 >= 0 && ioff1 >= 0
4450		   && ioff0 < TREE_STRING_LENGTH (base0)
4451		   && ioff1 < TREE_STRING_LENGTH (base1)
4452		   /* This is a too conservative test that the STRING_CSTs
4453		      will not end up being string-merged.  */
4454		   && strncmp (TREE_STRING_POINTER (base0) + ioff0,
4455			       TREE_STRING_POINTER (base1) + ioff1,
4456			       MIN (TREE_STRING_LENGTH (base0) - ioff0,
4457				    TREE_STRING_LENGTH (base1) - ioff1)) != 0))
4458	     ;
4459	   else if (!DECL_P (base0) || !DECL_P (base1))
4460	     equal = 2;
4461	   else if (cmp != EQ_EXPR && cmp != NE_EXPR)
4462	     equal = 2;
4463	   /* If this is a pointer comparison, ignore for now even
4464	      valid equalities where one pointer is the offset zero
4465	      of one object and the other to one past end of another one.  */
4466	   else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2)))
4467	     ;
4468	   /* Assume that automatic variables can't be adjacent to global
4469	      variables.  */
4470	   else if (is_global_var (base0) != is_global_var (base1))
4471	     ;
4472	   else
4473	     {
4474	       tree sz0 = DECL_SIZE_UNIT (base0);
4475	       tree sz1 = DECL_SIZE_UNIT (base1);
4476	       /* If sizes are unknown, e.g. VLA or not representable,
4477		  punt.  */
4478	       if (!tree_fits_poly_int64_p (sz0)
4479		   || !tree_fits_poly_int64_p (sz1))
4480		 equal = 2;
4481	       else
4482		 {
4483		   poly_int64 size0 = tree_to_poly_int64 (sz0);
4484		   poly_int64 size1 = tree_to_poly_int64 (sz1);
4485		   /* If one offset is pointing (or could be) to the beginning
4486		      of one object and the other is pointing to one past the
4487		      last byte of the other object, punt.  */
4488		   if (maybe_eq (off0, 0) && maybe_eq (off1, size1))
4489		     equal = 2;
4490		   else if (maybe_eq (off1, 0) && maybe_eq (off0, size0))
4491		     equal = 2;
4492		   /* If both offsets are the same, there are some cases
4493		      we know that are ok.  Either if we know they aren't
4494		      zero, or if we know both sizes are no zero.  */
4495		   if (equal == 2
4496		       && known_eq (off0, off1)
4497		       && (known_ne (off0, 0)
4498			   || (known_ne (size0, 0) && known_ne (size1, 0))))
4499		     equal = 0;
4500		 }
4501	     }
4502	 }
4503     }
4504     (if (equal == 1
4505	  && (cmp == EQ_EXPR || cmp == NE_EXPR
4506	      /* If the offsets are equal we can ignore overflow.  */
4507	      || known_eq (off0, off1)
4508	      || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
4509		 /* Or if we compare using pointers to decls or strings.  */
4510	      || (POINTER_TYPE_P (TREE_TYPE (@2))
4511		  && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
4512      (switch
4513       (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4514	{ constant_boolean_node (known_eq (off0, off1), type); })
4515       (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4516	{ constant_boolean_node (known_ne (off0, off1), type); })
4517       (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
4518	{ constant_boolean_node (known_lt (off0, off1), type); })
4519       (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
4520	{ constant_boolean_node (known_le (off0, off1), type); })
4521       (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
4522	{ constant_boolean_node (known_ge (off0, off1), type); })
4523       (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
4524	{ constant_boolean_node (known_gt (off0, off1), type); }))
4525      (if (equal == 0)
4526	(switch
4527	 (if (cmp == EQ_EXPR)
4528	  { constant_boolean_node (false, type); })
4529	 (if (cmp == NE_EXPR)
4530	  { constant_boolean_node (true, type); })))))))))
4531
4532/* Simplify pointer equality compares using PTA.  */
4533(for neeq (ne eq)
4534 (simplify
4535  (neeq @0 @1)
4536  (if (POINTER_TYPE_P (TREE_TYPE (@0))
4537       && ptrs_compare_unequal (@0, @1))
4538   { constant_boolean_node (neeq != EQ_EXPR, type); })))
4539
4540/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
4541   and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
4542   Disable the transform if either operand is pointer to function.
4543   This broke pr22051-2.c for arm where function pointer
4544   canonicalizaion is not wanted.  */
4545
4546(for cmp (ne eq)
4547 (simplify
4548  (cmp (convert @0) INTEGER_CST@1)
4549  (if (((POINTER_TYPE_P (TREE_TYPE (@0))
4550	 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
4551	 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4552	|| (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4553	    && POINTER_TYPE_P (TREE_TYPE (@1))
4554	    && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
4555       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
4556   (cmp @0 (convert @1)))))
4557
4558/* Non-equality compare simplifications from fold_binary  */
4559(for cmp (lt gt le ge)
4560 /* Comparisons with the highest or lowest possible integer of
4561    the specified precision will have known values.  */
4562 (simplify
4563  (cmp (convert?@2 @0) uniform_integer_cst_p@1)
4564  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
4565	|| POINTER_TYPE_P (TREE_TYPE (@1))
4566	|| VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
4567       && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
4568   (with
4569    {
4570      tree cst = uniform_integer_cst_p (@1);
4571      tree arg1_type = TREE_TYPE (cst);
4572      unsigned int prec = TYPE_PRECISION (arg1_type);
4573      wide_int max = wi::max_value (arg1_type);
4574      wide_int signed_max = wi::max_value (prec, SIGNED);
4575      wide_int min = wi::min_value (arg1_type);
4576    }
4577    (switch
4578     (if (wi::to_wide (cst) == max)
4579      (switch
4580       (if (cmp == GT_EXPR)
4581	{ constant_boolean_node (false, type); })
4582       (if (cmp == GE_EXPR)
4583	(eq @2 @1))
4584       (if (cmp == LE_EXPR)
4585	{ constant_boolean_node (true, type); })
4586       (if (cmp == LT_EXPR)
4587	(ne @2 @1))))
4588     (if (wi::to_wide (cst) == min)
4589      (switch
4590       (if (cmp == LT_EXPR)
4591        { constant_boolean_node (false, type); })
4592       (if (cmp == LE_EXPR)
4593        (eq @2 @1))
4594       (if (cmp == GE_EXPR)
4595        { constant_boolean_node (true, type); })
4596       (if (cmp == GT_EXPR)
4597        (ne @2 @1))))
4598     (if (wi::to_wide (cst) == max - 1)
4599      (switch
4600       (if (cmp == GT_EXPR)
4601	(eq @2 { build_uniform_cst (TREE_TYPE (@1),
4602				    wide_int_to_tree (TREE_TYPE (cst),
4603						      wi::to_wide (cst)
4604						      + 1)); }))
4605       (if (cmp == LE_EXPR)
4606	(ne @2 { build_uniform_cst (TREE_TYPE (@1),
4607				    wide_int_to_tree (TREE_TYPE (cst),
4608						      wi::to_wide (cst)
4609						      + 1)); }))))
4610     (if (wi::to_wide (cst) == min + 1)
4611      (switch
4612       (if (cmp == GE_EXPR)
4613        (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4614				    wide_int_to_tree (TREE_TYPE (cst),
4615						      wi::to_wide (cst)
4616						      - 1)); }))
4617       (if (cmp == LT_EXPR)
4618        (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4619				    wide_int_to_tree (TREE_TYPE (cst),
4620						      wi::to_wide (cst)
4621						      - 1)); }))))
4622     (if (wi::to_wide (cst) == signed_max
4623	  && TYPE_UNSIGNED (arg1_type)
4624	  /* We will flip the signedness of the comparison operator
4625	     associated with the mode of @1, so the sign bit is
4626	     specified by this mode.  Check that @1 is the signed
4627	     max associated with this sign bit.  */
4628	  && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
4629	  /* signed_type does not work on pointer types.  */
4630	  && INTEGRAL_TYPE_P (arg1_type))
4631      /* The following case also applies to X < signed_max+1
4632	 and X >= signed_max+1 because previous transformations.  */
4633      (if (cmp == LE_EXPR || cmp == GT_EXPR)
4634       (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4635       	(switch
4636	 (if (cst == @1 && cmp == LE_EXPR)
4637	  (ge (convert:st @0) { build_zero_cst (st); }))
4638	 (if (cst == @1 && cmp == GT_EXPR)
4639	  (lt (convert:st @0) { build_zero_cst (st); }))
4640	 (if (cmp == LE_EXPR)
4641	  (ge (view_convert:st @0) { build_zero_cst (st); }))
4642	 (if (cmp == GT_EXPR)
4643	  (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
4644
4645(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4646 /* If the second operand is NaN, the result is constant.  */
4647 (simplify
4648  (cmp @0 REAL_CST@1)
4649  (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4650       && (cmp != LTGT_EXPR || ! flag_trapping_math))
4651   { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
4652			    ? false : true, type); })))
4653
4654/* bool_var != 0 becomes bool_var.  */
4655(simplify
4656 (ne @0 integer_zerop)
4657 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4658      && types_match (type, TREE_TYPE (@0)))
4659  (non_lvalue @0)))
4660/* bool_var == 1 becomes bool_var.  */
4661(simplify
4662 (eq @0 integer_onep)
4663 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4664      && types_match (type, TREE_TYPE (@0)))
4665  (non_lvalue @0)))
4666/* Do not handle
4667   bool_var == 0 becomes !bool_var or
4668   bool_var != 1 becomes !bool_var
4669   here because that only is good in assignment context as long
4670   as we require a tcc_comparison in GIMPLE_CONDs where we'd
4671   replace if (x == 0) with tem = ~x; if (tem != 0) which is
4672   clearly less optimal and which we'll transform again in forwprop.  */
4673
4674/* When one argument is a constant, overflow detection can be simplified.
4675   Currently restricted to single use so as not to interfere too much with
4676   ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4677   A + CST CMP A  ->  A CMP' CST' */
4678(for cmp (lt le ge gt)
4679     out (gt gt le le)
4680 (simplify
4681  (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
4682  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4683       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
4684       && wi::to_wide (@1) != 0
4685       && single_use (@2))
4686   (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4687    (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4688			        wi::max_value (prec, UNSIGNED)
4689				- wi::to_wide (@1)); })))))
4690
4691/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4692   However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4693   expects the long form, so we restrict the transformation for now.  */
4694(for cmp (gt le)
4695 (simplify
4696  (cmp:c (minus@2 @0 @1) @0)
4697  (if (single_use (@2)
4698       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4699       && TYPE_UNSIGNED (TREE_TYPE (@0))
4700       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4701   (cmp @1 @0))))
4702
4703/* Testing for overflow is unnecessary if we already know the result.  */
4704/* A - B > A  */
4705(for cmp (gt le)
4706     out (ne eq)
4707 (simplify
4708  (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
4709  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4710       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4711   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4712/* A + B < A  */
4713(for cmp (lt ge)
4714     out (ne eq)
4715 (simplify
4716  (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
4717  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4718       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4719   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4720
4721/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
4722   Simplify it to __builtin_mul_overflow (A, B, <unused>).  */
4723(for cmp (lt ge)
4724     out (ne eq)
4725 (simplify
4726  (cmp:c (trunc_div:s integer_all_onesp @1) @0)
4727  (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4728   (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4729    (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
4730
4731/* Simplification of math builtins.  These rules must all be optimizations
4732   as well as IL simplifications.  If there is a possibility that the new
4733   form could be a pessimization, the rule should go in the canonicalization
4734   section that follows this one.
4735
4736   Rules can generally go in this section if they satisfy one of
4737   the following:
4738
4739   - the rule describes an identity
4740
4741   - the rule replaces calls with something as simple as addition or
4742     multiplication
4743
4744   - the rule contains unary calls only and simplifies the surrounding
4745     arithmetic.  (The idea here is to exclude non-unary calls in which
4746     one operand is constant and in which the call is known to be cheap
4747     when the operand has that value.)  */
4748
4749(if (flag_unsafe_math_optimizations)
4750 /* Simplify sqrt(x) * sqrt(x) -> x.  */
4751 (simplify
4752  (mult (SQRT_ALL@1 @0) @1)
4753  (if (!HONOR_SNANS (type))
4754   @0))
4755
4756 (for op (plus minus)
4757  /* Simplify (A / C) +- (B / C) -> (A +- B) / C.  */
4758  (simplify
4759   (op (rdiv @0 @1)
4760       (rdiv @2 @1))
4761   (rdiv (op @0 @2) @1)))
4762
4763 (for cmp (lt le gt ge)
4764      neg_cmp (gt ge lt le)
4765  /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0.  */
4766  (simplify
4767   (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4768   (with
4769    { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4770    (if (tem
4771	 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4772	      || (real_zerop (tem) && !real_zerop (@1))))
4773     (switch
4774      (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4775       (cmp @0 { tem; }))
4776      (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4777       (neg_cmp @0 { tem; })))))))
4778
4779 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y).  */
4780 (for root (SQRT CBRT)
4781  (simplify
4782   (mult (root:s @0) (root:s @1))
4783    (root (mult @0 @1))))
4784
4785 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4786 (for exps (EXP EXP2 EXP10 POW10)
4787  (simplify
4788   (mult (exps:s @0) (exps:s @1))
4789    (exps (plus @0 @1))))
4790
4791 /* Simplify a/root(b/c) into a*root(c/b).  */
4792 (for root (SQRT CBRT)
4793  (simplify
4794   (rdiv @0 (root:s (rdiv:s @1 @2)))
4795    (mult @0 (root (rdiv @2 @1)))))
4796
4797 /* Simplify x/expN(y) into x*expN(-y).  */
4798 (for exps (EXP EXP2 EXP10 POW10)
4799  (simplify
4800   (rdiv @0 (exps:s @1))
4801    (mult @0 (exps (negate @1)))))
4802
4803 (for logs (LOG LOG2 LOG10 LOG10)
4804      exps (EXP EXP2 EXP10 POW10)
4805  /* logN(expN(x)) -> x.  */
4806  (simplify
4807   (logs (exps @0))
4808   @0)
4809  /* expN(logN(x)) -> x.  */
4810  (simplify
4811   (exps (logs @0))
4812   @0))
4813
4814 /* Optimize logN(func()) for various exponential functions.  We
4815    want to determine the value "x" and the power "exponent" in
4816    order to transform logN(x**exponent) into exponent*logN(x).  */
4817 (for logs (LOG  LOG   LOG   LOG2 LOG2  LOG2  LOG10 LOG10)
4818      exps (EXP2 EXP10 POW10 EXP  EXP10 POW10 EXP   EXP2)
4819  (simplify
4820   (logs (exps @0))
4821   (if (SCALAR_FLOAT_TYPE_P (type))
4822    (with {
4823      tree x;
4824      switch (exps)
4825	{
4826	CASE_CFN_EXP:
4827	  /* Prepare to do logN(exp(exponent)) -> exponent*logN(e).  */
4828	  x = build_real_truncate (type, dconst_e ());
4829	  break;
4830	CASE_CFN_EXP2:
4831	  /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2).  */
4832	  x = build_real (type, dconst2);
4833	  break;
4834	CASE_CFN_EXP10:
4835	CASE_CFN_POW10:
4836	  /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10).  */
4837	  {
4838	    REAL_VALUE_TYPE dconst10;
4839	    real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4840	    x = build_real (type, dconst10);
4841	  }
4842	  break;
4843	default:
4844	  gcc_unreachable ();
4845	}
4846      }
4847     (mult (logs { x; }) @0)))))
4848
4849 (for logs (LOG LOG
4850            LOG2 LOG2
4851	    LOG10 LOG10)
4852      exps (SQRT CBRT)
4853  (simplify
4854   (logs (exps @0))
4855   (if (SCALAR_FLOAT_TYPE_P (type))
4856    (with {
4857      tree x;
4858      switch (exps)
4859	{
4860	CASE_CFN_SQRT:
4861	  /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x).  */
4862	  x = build_real (type, dconsthalf);
4863	  break;
4864	CASE_CFN_CBRT:
4865	  /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x).  */
4866	  x = build_real_truncate (type, dconst_third ());
4867	  break;
4868	default:
4869	  gcc_unreachable ();
4870	}
4871      }
4872     (mult { x; } (logs @0))))))
4873
4874 /* logN(pow(x,exponent)) -> exponent*logN(x).  */
4875 (for logs (LOG LOG2 LOG10)
4876      pows (POW)
4877  (simplify
4878   (logs (pows @0 @1))
4879   (mult @1 (logs @0))))
4880
4881 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4882    or if C is a positive power of 2,
4883    pow(C,x) -> exp2(log2(C)*x).  */
4884#if GIMPLE
4885 (for pows (POW)
4886      exps (EXP)
4887      logs (LOG)
4888      exp2s (EXP2)
4889      log2s (LOG2)
4890  (simplify
4891   (pows REAL_CST@0 @1)
4892   (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4893	&& real_isfinite (TREE_REAL_CST_PTR (@0))
4894	/* As libmvec doesn't have a vectorized exp2, defer optimizing
4895	   the use_exp2 case until after vectorization.  It seems actually
4896	   beneficial for all constants to postpone this until later,
4897	   because exp(log(C)*x), while faster, will have worse precision
4898	   and if x folds into a constant too, that is unnecessary
4899	   pessimization.  */
4900	&& canonicalize_math_after_vectorization_p ())
4901    (with {
4902       const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4903       bool use_exp2 = false;
4904       if (targetm.libc_has_function (function_c99_misc)
4905	   && value->cl == rvc_normal)
4906	 {
4907	   REAL_VALUE_TYPE frac_rvt = *value;
4908	   SET_REAL_EXP (&frac_rvt, 1);
4909	   if (real_equal (&frac_rvt, &dconst1))
4910	     use_exp2 = true;
4911	 }
4912     }
4913     (if (!use_exp2)
4914      (if (optimize_pow_to_exp (@0, @1))
4915       (exps (mult (logs @0) @1)))
4916      (exp2s (mult (log2s @0) @1)))))))
4917#endif
4918
4919 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0.  */
4920 (for pows (POW)
4921      exps (EXP EXP2 EXP10 POW10)
4922      logs (LOG LOG2 LOG10 LOG10)
4923  (simplify
4924   (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4925   (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4926	&& real_isfinite (TREE_REAL_CST_PTR (@0)))
4927    (exps (plus (mult (logs @0) @1) @2)))))
4928
4929 (for sqrts (SQRT)
4930      cbrts (CBRT)
4931      pows (POW)
4932      exps (EXP EXP2 EXP10 POW10)
4933  /* sqrt(expN(x)) -> expN(x*0.5).  */
4934  (simplify
4935   (sqrts (exps @0))
4936   (exps (mult @0 { build_real (type, dconsthalf); })))
4937  /* cbrt(expN(x)) -> expN(x/3).  */
4938  (simplify
4939   (cbrts (exps @0))
4940   (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4941  /* pow(expN(x), y) -> expN(x*y).  */
4942  (simplify
4943   (pows (exps @0) @1)
4944   (exps (mult @0 @1))))
4945
4946 /* tan(atan(x)) -> x.  */
4947 (for tans (TAN)
4948      atans (ATAN)
4949  (simplify
4950   (tans (atans @0))
4951   @0)))
4952
4953 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4954 (for sins (SIN)
4955      atans (ATAN)
4956      sqrts (SQRT)
4957      copysigns (COPYSIGN)
4958  (simplify
4959   (sins (atans:s @0))
4960   (with
4961     {
4962      REAL_VALUE_TYPE r_cst;
4963      build_sinatan_real (&r_cst, type);
4964      tree t_cst = build_real (type, r_cst);
4965      tree t_one = build_one_cst (type);
4966     }
4967    (if (SCALAR_FLOAT_TYPE_P (type))
4968     (cond (lt (abs @0) { t_cst; })
4969      (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4970      (copysigns { t_one; } @0))))))
4971
4972/* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4973 (for coss (COS)
4974      atans (ATAN)
4975      sqrts (SQRT)
4976      copysigns (COPYSIGN)
4977  (simplify
4978   (coss (atans:s @0))
4979   (with
4980     {
4981      REAL_VALUE_TYPE r_cst;
4982      build_sinatan_real (&r_cst, type);
4983      tree t_cst = build_real (type, r_cst);
4984      tree t_one = build_one_cst (type);
4985      tree t_zero = build_zero_cst (type);
4986     }
4987    (if (SCALAR_FLOAT_TYPE_P (type))
4988     (cond (lt (abs @0) { t_cst; })
4989      (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4990      (copysigns { t_zero; } @0))))))
4991
4992 (if (!flag_errno_math)
4993  /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
4994  (for sinhs (SINH)
4995       atanhs (ATANH)
4996       sqrts (SQRT)
4997   (simplify
4998    (sinhs (atanhs:s @0))
4999    (with { tree t_one = build_one_cst (type); }
5000    (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
5001
5002  /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
5003  (for coshs (COSH)
5004       atanhs (ATANH)
5005       sqrts (SQRT)
5006   (simplify
5007    (coshs (atanhs:s @0))
5008    (with { tree t_one = build_one_cst (type); }
5009    (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
5010
5011/* cabs(x+0i) or cabs(0+xi) -> abs(x).  */
5012(simplify
5013 (CABS (complex:C @0 real_zerop@1))
5014 (abs @0))
5015
5016/* trunc(trunc(x)) -> trunc(x), etc.  */
5017(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
5018 (simplify
5019  (fns (fns @0))
5020  (fns @0)))
5021/* f(x) -> x if x is integer valued and f does nothing for such values.  */
5022(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
5023 (simplify
5024  (fns integer_valued_real_p@0)
5025  @0))
5026
5027/* hypot(x,0) and hypot(0,x) -> abs(x).  */
5028(simplify
5029 (HYPOT:c @0 real_zerop@1)
5030 (abs @0))
5031
5032/* pow(1,x) -> 1.  */
5033(simplify
5034 (POW real_onep@0 @1)
5035 @0)
5036
5037(simplify
5038 /* copysign(x,x) -> x.  */
5039 (COPYSIGN_ALL @0 @0)
5040 @0)
5041
5042(simplify
5043 /* copysign(x,y) -> fabs(x) if y is nonnegative.  */
5044 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
5045 (abs @0))
5046
5047(for scale (LDEXP SCALBN SCALBLN)
5048 /* ldexp(0, x) -> 0.  */
5049 (simplify
5050  (scale real_zerop@0 @1)
5051  @0)
5052 /* ldexp(x, 0) -> x.  */
5053 (simplify
5054  (scale @0 integer_zerop@1)
5055  @0)
5056 /* ldexp(x, y) -> x if x is +-Inf or NaN.  */
5057 (simplify
5058  (scale REAL_CST@0 @1)
5059  (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
5060   @0)))
5061
5062/* Canonicalization of sequences of math builtins.  These rules represent
5063   IL simplifications but are not necessarily optimizations.
5064
5065   The sincos pass is responsible for picking "optimal" implementations
5066   of math builtins, which may be more complicated and can sometimes go
5067   the other way, e.g. converting pow into a sequence of sqrts.
5068   We only want to do these canonicalizations before the pass has run.  */
5069
5070(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
5071 /* Simplify tan(x) * cos(x) -> sin(x). */
5072 (simplify
5073  (mult:c (TAN:s @0) (COS:s @0))
5074   (SIN @0))
5075
5076 /* Simplify x * pow(x,c) -> pow(x,c+1). */
5077 (simplify
5078  (mult:c @0 (POW:s @0 REAL_CST@1))
5079  (if (!TREE_OVERFLOW (@1))
5080   (POW @0 (plus @1 { build_one_cst (type); }))))
5081
5082 /* Simplify sin(x) / cos(x) -> tan(x). */
5083 (simplify
5084  (rdiv (SIN:s @0) (COS:s @0))
5085   (TAN @0))
5086
5087 /* Simplify sinh(x) / cosh(x) -> tanh(x). */
5088 (simplify
5089  (rdiv (SINH:s @0) (COSH:s @0))
5090   (TANH @0))
5091
5092 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
5093 (simplify
5094  (rdiv (COS:s @0) (SIN:s @0))
5095   (rdiv { build_one_cst (type); } (TAN @0)))
5096
5097 /* Simplify sin(x) / tan(x) -> cos(x). */
5098 (simplify
5099  (rdiv (SIN:s @0) (TAN:s @0))
5100  (if (! HONOR_NANS (@0)
5101       && ! HONOR_INFINITIES (@0))
5102   (COS @0)))
5103
5104 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
5105 (simplify
5106  (rdiv (TAN:s @0) (SIN:s @0))
5107  (if (! HONOR_NANS (@0)
5108       && ! HONOR_INFINITIES (@0))
5109   (rdiv { build_one_cst (type); } (COS @0))))
5110
5111 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
5112 (simplify
5113  (mult (POW:s @0 @1) (POW:s @0 @2))
5114   (POW @0 (plus @1 @2)))
5115
5116 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
5117 (simplify
5118  (mult (POW:s @0 @1) (POW:s @2 @1))
5119   (POW (mult @0 @2) @1))
5120
5121 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
5122 (simplify
5123  (mult (POWI:s @0 @1) (POWI:s @2 @1))
5124   (POWI (mult @0 @2) @1))
5125
5126 /* Simplify pow(x,c) / x -> pow(x,c-1). */
5127 (simplify
5128  (rdiv (POW:s @0 REAL_CST@1) @0)
5129  (if (!TREE_OVERFLOW (@1))
5130   (POW @0 (minus @1 { build_one_cst (type); }))))
5131
5132 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
5133 (simplify
5134  (rdiv @0 (POW:s @1 @2))
5135   (mult @0 (POW @1 (negate @2))))
5136
5137 (for sqrts (SQRT)
5138      cbrts (CBRT)
5139      pows (POW)
5140  /* sqrt(sqrt(x)) -> pow(x,1/4).  */
5141  (simplify
5142   (sqrts (sqrts @0))
5143   (pows @0 { build_real (type, dconst_quarter ()); }))
5144  /* sqrt(cbrt(x)) -> pow(x,1/6).  */
5145  (simplify
5146   (sqrts (cbrts @0))
5147   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
5148  /* cbrt(sqrt(x)) -> pow(x,1/6).  */
5149  (simplify
5150   (cbrts (sqrts @0))
5151   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
5152  /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative.  */
5153  (simplify
5154   (cbrts (cbrts tree_expr_nonnegative_p@0))
5155   (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
5156  /* sqrt(pow(x,y)) -> pow(|x|,y*0.5).  */
5157  (simplify
5158   (sqrts (pows @0 @1))
5159   (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
5160  /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative.  */
5161  (simplify
5162   (cbrts (pows tree_expr_nonnegative_p@0 @1))
5163   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
5164  /* pow(sqrt(x),y) -> pow(x,y*0.5).  */
5165  (simplify
5166   (pows (sqrts @0) @1)
5167   (pows @0 (mult @1 { build_real (type, dconsthalf); })))
5168  /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative.  */
5169  (simplify
5170   (pows (cbrts tree_expr_nonnegative_p@0) @1)
5171   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
5172  /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative.  */
5173  (simplify
5174   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
5175   (pows @0 (mult @1 @2))))
5176
5177 /* cabs(x+xi) -> fabs(x)*sqrt(2).  */
5178 (simplify
5179  (CABS (complex @0 @0))
5180  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
5181
5182 /* hypot(x,x) -> fabs(x)*sqrt(2).  */
5183 (simplify
5184  (HYPOT @0 @0)
5185  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
5186
5187 /* cexp(x+yi) -> exp(x)*cexpi(y).  */
5188 (for cexps (CEXP)
5189      exps (EXP)
5190      cexpis (CEXPI)
5191  (simplify
5192   (cexps compositional_complex@0)
5193   (if (targetm.libc_has_function (function_c99_math_complex))
5194    (complex
5195     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
5196     (mult @1 (imagpart @2)))))))
5197
5198(if (canonicalize_math_p ())
5199 /* floor(x) -> trunc(x) if x is nonnegative.  */
5200 (for floors (FLOOR_ALL)
5201      truncs (TRUNC_ALL)
5202  (simplify
5203   (floors tree_expr_nonnegative_p@0)
5204   (truncs @0))))
5205
5206(match double_value_p
5207 @0
5208 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
5209(for froms (BUILT_IN_TRUNCL
5210	    BUILT_IN_FLOORL
5211	    BUILT_IN_CEILL
5212	    BUILT_IN_ROUNDL
5213	    BUILT_IN_NEARBYINTL
5214	    BUILT_IN_RINTL)
5215     tos (BUILT_IN_TRUNC
5216	  BUILT_IN_FLOOR
5217	  BUILT_IN_CEIL
5218	  BUILT_IN_ROUND
5219	  BUILT_IN_NEARBYINT
5220	  BUILT_IN_RINT)
5221 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double.  */
5222 (if (optimize && canonicalize_math_p ())
5223  (simplify
5224   (froms (convert double_value_p@0))
5225   (convert (tos @0)))))
5226
5227(match float_value_p
5228 @0
5229 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
5230(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
5231	    BUILT_IN_FLOORL BUILT_IN_FLOOR
5232	    BUILT_IN_CEILL BUILT_IN_CEIL
5233	    BUILT_IN_ROUNDL BUILT_IN_ROUND
5234	    BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
5235	    BUILT_IN_RINTL BUILT_IN_RINT)
5236     tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
5237	  BUILT_IN_FLOORF BUILT_IN_FLOORF
5238	  BUILT_IN_CEILF BUILT_IN_CEILF
5239	  BUILT_IN_ROUNDF BUILT_IN_ROUNDF
5240	  BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
5241	  BUILT_IN_RINTF BUILT_IN_RINTF)
5242 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
5243    if x is a float.  */
5244 (if (optimize && canonicalize_math_p ()
5245      && targetm.libc_has_function (function_c99_misc))
5246  (simplify
5247   (froms (convert float_value_p@0))
5248   (convert (tos @0)))))
5249
5250(for froms (XFLOORL XCEILL XROUNDL XRINTL)
5251     tos (XFLOOR XCEIL XROUND XRINT)
5252 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double.  */
5253 (if (optimize && canonicalize_math_p ())
5254  (simplify
5255   (froms (convert double_value_p@0))
5256   (tos @0))))
5257
5258(for froms (XFLOORL XCEILL XROUNDL XRINTL
5259	    XFLOOR XCEIL XROUND XRINT)
5260     tos (XFLOORF XCEILF XROUNDF XRINTF)
5261 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
5262    if x is a float.  */
5263 (if (optimize && canonicalize_math_p ())
5264  (simplify
5265   (froms (convert float_value_p@0))
5266   (tos @0))))
5267
5268(if (canonicalize_math_p ())
5269 /* xfloor(x) -> fix_trunc(x) if x is nonnegative.  */
5270 (for floors (IFLOOR LFLOOR LLFLOOR)
5271  (simplify
5272   (floors tree_expr_nonnegative_p@0)
5273   (fix_trunc @0))))
5274
5275(if (canonicalize_math_p ())
5276 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued.  */
5277 (for fns (IFLOOR LFLOOR LLFLOOR
5278	   ICEIL LCEIL LLCEIL
5279	   IROUND LROUND LLROUND)
5280  (simplify
5281   (fns integer_valued_real_p@0)
5282   (fix_trunc @0)))
5283 (if (!flag_errno_math)
5284  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued.  */
5285  (for rints (IRINT LRINT LLRINT)
5286   (simplify
5287    (rints integer_valued_real_p@0)
5288    (fix_trunc @0)))))
5289
5290(if (canonicalize_math_p ())
5291 (for ifn (IFLOOR ICEIL IROUND IRINT)
5292      lfn (LFLOOR LCEIL LROUND LRINT)
5293      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
5294  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
5295     sizeof (int) == sizeof (long).  */
5296  (if (TYPE_PRECISION (integer_type_node)
5297       == TYPE_PRECISION (long_integer_type_node))
5298   (simplify
5299    (ifn @0)
5300    (lfn:long_integer_type_node @0)))
5301  /* Canonicalize llround (x) to lround (x) on LP64 targets where
5302     sizeof (long long) == sizeof (long).  */
5303  (if (TYPE_PRECISION (long_long_integer_type_node)
5304       == TYPE_PRECISION (long_integer_type_node))
5305   (simplify
5306    (llfn @0)
5307    (lfn:long_integer_type_node @0)))))
5308
5309/* cproj(x) -> x if we're ignoring infinities.  */
5310(simplify
5311 (CPROJ @0)
5312 (if (!HONOR_INFINITIES (type))
5313   @0))
5314
5315/* If the real part is inf and the imag part is known to be
5316   nonnegative, return (inf + 0i).  */
5317(simplify
5318 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
5319 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
5320  { build_complex_inf (type, false); }))
5321
5322/* If the imag part is inf, return (inf+I*copysign(0,imag)).  */
5323(simplify
5324 (CPROJ (complex @0 REAL_CST@1))
5325 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
5326  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
5327
5328(for pows (POW)
5329     sqrts (SQRT)
5330     cbrts (CBRT)
5331 (simplify
5332  (pows @0 REAL_CST@1)
5333  (with {
5334    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
5335    REAL_VALUE_TYPE tmp;
5336   }
5337   (switch
5338    /* pow(x,0) -> 1.  */
5339    (if (real_equal (value, &dconst0))
5340     { build_real (type, dconst1); })
5341    /* pow(x,1) -> x.  */
5342    (if (real_equal (value, &dconst1))
5343     @0)
5344    /* pow(x,-1) -> 1/x.  */
5345    (if (real_equal (value, &dconstm1))
5346     (rdiv { build_real (type, dconst1); } @0))
5347    /* pow(x,0.5) -> sqrt(x).  */
5348    (if (flag_unsafe_math_optimizations
5349	 && canonicalize_math_p ()
5350	 && real_equal (value, &dconsthalf))
5351     (sqrts @0))
5352    /* pow(x,1/3) -> cbrt(x).  */
5353    (if (flag_unsafe_math_optimizations
5354	 && canonicalize_math_p ()
5355	 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
5356	     real_equal (value, &tmp)))
5357     (cbrts @0))))))
5358
5359/* powi(1,x) -> 1.  */
5360(simplify
5361 (POWI real_onep@0 @1)
5362 @0)
5363
5364(simplify
5365 (POWI @0 INTEGER_CST@1)
5366 (switch
5367  /* powi(x,0) -> 1.  */
5368  (if (wi::to_wide (@1) == 0)
5369   { build_real (type, dconst1); })
5370  /* powi(x,1) -> x.  */
5371  (if (wi::to_wide (@1) == 1)
5372   @0)
5373  /* powi(x,-1) -> 1/x.  */
5374  (if (wi::to_wide (@1) == -1)
5375   (rdiv { build_real (type, dconst1); } @0))))
5376
5377/* Narrowing of arithmetic and logical operations.
5378
5379   These are conceptually similar to the transformations performed for
5380   the C/C++ front-ends by shorten_binary_op and shorten_compare.  Long
5381   term we want to move all that code out of the front-ends into here.  */
5382
5383/* Convert (outertype)((innertype0)a+(innertype1)b)
5384   into ((newtype)a+(newtype)b) where newtype
5385   is the widest mode from all of these.  */
5386(for op (plus minus mult rdiv)
5387 (simplify
5388   (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
5389   /* If we have a narrowing conversion of an arithmetic operation where
5390      both operands are widening conversions from the same type as the outer
5391      narrowing conversion.  Then convert the innermost operands to a
5392      suitable unsigned type (to avoid introducing undefined behavior),
5393      perform the operation and convert the result to the desired type.  */
5394   (if (INTEGRAL_TYPE_P (type)
5395	&& op != MULT_EXPR
5396	&& op != RDIV_EXPR
5397	/* We check for type compatibility between @0 and @1 below,
5398	   so there's no need to check that @2/@4 are integral types.  */
5399	&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
5400	&& INTEGRAL_TYPE_P (TREE_TYPE (@3))
5401	/* The precision of the type of each operand must match the
5402	   precision of the mode of each operand, similarly for the
5403	   result.  */
5404	&& type_has_mode_precision_p (TREE_TYPE (@1))
5405	&& type_has_mode_precision_p (TREE_TYPE (@2))
5406	&& type_has_mode_precision_p (type)
5407	/* The inner conversion must be a widening conversion.  */
5408	&& TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
5409	&& types_match (@1, type)
5410	&& (types_match (@1, @2)
5411	    /* Or the second operand is const integer or converted const
5412	       integer from valueize.  */
5413	    || TREE_CODE (@2) == INTEGER_CST))
5414     (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
5415       (op @1 (convert @2))
5416       (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
5417	(convert (op (convert:utype @1)
5418		     (convert:utype @2)))))
5419     (if (FLOAT_TYPE_P (type)
5420	  && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
5421	       == DECIMAL_FLOAT_TYPE_P (type))
5422      (with { tree arg0 = strip_float_extensions (@1);
5423	      tree arg1 = strip_float_extensions (@2);
5424	      tree itype = TREE_TYPE (@0);
5425	      tree ty1 = TREE_TYPE (arg0);
5426	      tree ty2 = TREE_TYPE (arg1);
5427	      enum tree_code code = TREE_CODE (itype); }
5428	(if (FLOAT_TYPE_P (ty1)
5429	     && FLOAT_TYPE_P (ty2))
5430	 (with { tree newtype = type;
5431		 if (TYPE_MODE (ty1) == SDmode
5432		     || TYPE_MODE (ty2) == SDmode
5433		     || TYPE_MODE (type) == SDmode)
5434		   newtype = dfloat32_type_node;
5435		 if (TYPE_MODE (ty1) == DDmode
5436		     || TYPE_MODE (ty2) == DDmode
5437		     || TYPE_MODE (type) == DDmode)
5438		   newtype = dfloat64_type_node;
5439		 if (TYPE_MODE (ty1) == TDmode
5440		     || TYPE_MODE (ty2) == TDmode
5441		     || TYPE_MODE (type) == TDmode)
5442		   newtype = dfloat128_type_node; }
5443	  (if ((newtype == dfloat32_type_node
5444		|| newtype == dfloat64_type_node
5445		|| newtype == dfloat128_type_node)
5446	      && newtype == type
5447	      && types_match (newtype, type))
5448	    (op (convert:newtype @1) (convert:newtype @2))
5449	    (with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype))
5450		      newtype = ty1;
5451		    if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype))
5452		      newtype = ty2; }
5453	       /* Sometimes this transformation is safe (cannot
5454		  change results through affecting double rounding
5455		  cases) and sometimes it is not.  If NEWTYPE is
5456		  wider than TYPE, e.g. (float)((long double)double
5457		  + (long double)double) converted to
5458		  (float)(double + double), the transformation is
5459		  unsafe regardless of the details of the types
5460		  involved; double rounding can arise if the result
5461		  of NEWTYPE arithmetic is a NEWTYPE value half way
5462		  between two representable TYPE values but the
5463		  exact value is sufficiently different (in the
5464		  right direction) for this difference to be
5465		  visible in ITYPE arithmetic.  If NEWTYPE is the
5466		  same as TYPE, however, the transformation may be
5467		  safe depending on the types involved: it is safe
5468		  if the ITYPE has strictly more than twice as many
5469		  mantissa bits as TYPE, can represent infinities
5470		  and NaNs if the TYPE can, and has sufficient
5471		  exponent range for the product or ratio of two
5472		  values representable in the TYPE to be within the
5473		  range of normal values of ITYPE.  */
5474	      (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
5475		   && (flag_unsafe_math_optimizations
5476		       || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
5477			   && real_can_shorten_arithmetic (TYPE_MODE (itype),
5478							   TYPE_MODE (type))
5479			   && !excess_precision_type (newtype)))
5480		   && !types_match (itype, newtype))
5481		 (convert:type (op (convert:newtype @1)
5482				   (convert:newtype @2)))
5483	 )))) )
5484   ))
5485)))
5486
5487/* This is another case of narrowing, specifically when there's an outer
5488   BIT_AND_EXPR which masks off bits outside the type of the innermost
5489   operands.   Like the previous case we have to convert the operands
5490   to unsigned types to avoid introducing undefined behavior for the
5491   arithmetic operation.  */
5492(for op (minus plus)
5493 (simplify
5494  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
5495  (if (INTEGRAL_TYPE_P (type)
5496       /* We check for type compatibility between @0 and @1 below,
5497	  so there's no need to check that @1/@3 are integral types.  */
5498       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
5499       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
5500       /* The precision of the type of each operand must match the
5501	  precision of the mode of each operand, similarly for the
5502	  result.  */
5503       && type_has_mode_precision_p (TREE_TYPE (@0))
5504       && type_has_mode_precision_p (TREE_TYPE (@1))
5505       && type_has_mode_precision_p (type)
5506       /* The inner conversion must be a widening conversion.  */
5507       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
5508       && types_match (@0, @1)
5509       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
5510	   <= TYPE_PRECISION (TREE_TYPE (@0)))
5511       && (wi::to_wide (@4)
5512	   & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
5513		       true, TYPE_PRECISION (type))) == 0)
5514   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
5515    (with { tree ntype = TREE_TYPE (@0); }
5516     (convert (bit_and (op @0 @1) (convert:ntype @4))))
5517    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
5518     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
5519	       (convert:utype @4))))))))
5520
5521/* Transform (@0 < @1 and @0 < @2) to use min,
5522   (@0 > @1 and @0 > @2) to use max */
5523(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
5524     op    (lt      le      gt      ge      lt      le      gt      ge     )
5525     ext   (min     min     max     max     max     max     min     min    )
5526 (simplify
5527  (logic (op:cs @0 @1) (op:cs @0 @2))
5528  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5529       && TREE_CODE (@0) != INTEGER_CST)
5530   (op @0 (ext @1 @2)))))
5531
5532(simplify
5533 /* signbit(x) -> 0 if x is nonnegative.  */
5534 (SIGNBIT tree_expr_nonnegative_p@0)
5535 { integer_zero_node; })
5536
5537(simplify
5538 /* signbit(x) -> x<0 if x doesn't have signed zeros.  */
5539 (SIGNBIT @0)
5540 (if (!HONOR_SIGNED_ZEROS (@0))
5541  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
5542
5543/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1.  */
5544(for cmp (eq ne)
5545 (for op (plus minus)
5546      rop (minus plus)
5547  (simplify
5548   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5549   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5550	&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
5551	&& !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
5552	&& !TYPE_SATURATING (TREE_TYPE (@0)))
5553    (with { tree res = int_const_binop (rop, @2, @1); }
5554     (if (TREE_OVERFLOW (res)
5555	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5556      { constant_boolean_node (cmp == NE_EXPR, type); }
5557      (if (single_use (@3))
5558       (cmp @0 { TREE_OVERFLOW (res)
5559		 ? drop_tree_overflow (res) : res; }))))))))
5560(for cmp (lt le gt ge)
5561 (for op (plus minus)
5562      rop (minus plus)
5563  (simplify
5564   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5565   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5566	&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5567    (with { tree res = int_const_binop (rop, @2, @1); }
5568     (if (TREE_OVERFLOW (res))
5569      {
5570	fold_overflow_warning (("assuming signed overflow does not occur "
5571				"when simplifying conditional to constant"),
5572			       WARN_STRICT_OVERFLOW_CONDITIONAL);
5573        bool less = cmp == LE_EXPR || cmp == LT_EXPR;
5574	/* wi::ges_p (@2, 0) should be sufficient for a signed type.  */
5575	bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
5576				  TYPE_SIGN (TREE_TYPE (@1)))
5577			!= (op == MINUS_EXPR);
5578	constant_boolean_node (less == ovf_high, type);
5579      }
5580      (if (single_use (@3))
5581       (with
5582	{
5583	  fold_overflow_warning (("assuming signed overflow does not occur "
5584				  "when changing X +- C1 cmp C2 to "
5585				  "X cmp C2 -+ C1"),
5586				 WARN_STRICT_OVERFLOW_COMPARISON);
5587	}
5588	(cmp @0 { res; })))))))))
5589
5590/* Canonicalizations of BIT_FIELD_REFs.  */
5591
5592(simplify
5593 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
5594 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
5595
5596(simplify
5597 (BIT_FIELD_REF (view_convert @0) @1 @2)
5598 (BIT_FIELD_REF @0 @1 @2))
5599
5600(simplify
5601 (BIT_FIELD_REF @0 @1 integer_zerop)
5602 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
5603  (view_convert @0)))
5604
5605(simplify
5606 (BIT_FIELD_REF @0 @1 @2)
5607 (switch
5608  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
5609       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5610   (switch
5611    (if (integer_zerop (@2))
5612     (view_convert (realpart @0)))
5613    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5614     (view_convert (imagpart @0)))))
5615  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5616       && INTEGRAL_TYPE_P (type)
5617       /* On GIMPLE this should only apply to register arguments.  */
5618       && (! GIMPLE || is_gimple_reg (@0))
5619       /* A bit-field-ref that referenced the full argument can be stripped.  */
5620       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
5621	    && integer_zerop (@2))
5622	   /* Low-parts can be reduced to integral conversions.
5623	      ???  The following doesn't work for PDP endian.  */
5624	   || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
5625	       /* Don't even think about BITS_BIG_ENDIAN.  */
5626	       && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
5627	       && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
5628	       && compare_tree_int (@2, (BYTES_BIG_ENDIAN
5629					 ? (TYPE_PRECISION (TREE_TYPE (@0))
5630					    - TYPE_PRECISION (type))
5631					 : 0)) == 0)))
5632   (convert @0))))
5633
5634/* Simplify vector extracts.  */
5635
5636(simplify
5637 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
5638 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
5639      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
5640          || (VECTOR_TYPE_P (type)
5641	      && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
5642  (with
5643   {
5644     tree ctor = (TREE_CODE (@0) == SSA_NAME
5645		  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
5646     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
5647     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
5648     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
5649     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
5650   }
5651   (if (n != 0
5652	&& (idx % width) == 0
5653	&& (n % width) == 0
5654	&& known_le ((idx + n) / width,
5655		     TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
5656    (with
5657     {
5658       idx = idx / width;
5659       n = n / width;
5660       /* Constructor elements can be subvectors.  */
5661       poly_uint64 k = 1;
5662       if (CONSTRUCTOR_NELTS (ctor) != 0)
5663         {
5664           tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
5665	   if (TREE_CODE (cons_elem) == VECTOR_TYPE)
5666	     k = TYPE_VECTOR_SUBPARTS (cons_elem);
5667	 }
5668       unsigned HOST_WIDE_INT elt, count, const_k;
5669     }
5670     (switch
5671      /* We keep an exact subset of the constructor elements.  */
5672      (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
5673       (if (CONSTRUCTOR_NELTS (ctor) == 0)
5674        { build_constructor (type, NULL); }
5675	(if (count == 1)
5676	 (if (elt < CONSTRUCTOR_NELTS (ctor))
5677	  (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
5678	  { build_zero_cst (type); })
5679	 /* We don't want to emit new CTORs unless the old one goes away.
5680	    ???  Eventually allow this if the CTOR ends up constant or
5681	    uniform.  */
5682	 (if (single_use (@0))
5683	  {
5684	    vec<constructor_elt, va_gc> *vals;
5685	    vec_alloc (vals, count);
5686	    for (unsigned i = 0;
5687		 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
5688	      CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
5689				      CONSTRUCTOR_ELT (ctor, elt + i)->value);
5690	    build_constructor (type, vals);
5691	  }))))
5692      /* The bitfield references a single constructor element.  */
5693      (if (k.is_constant (&const_k)
5694	   && idx + n <= (idx / const_k + 1) * const_k)
5695       (switch
5696	(if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
5697	 { build_zero_cst (type); })
5698	(if (n == const_k)
5699	 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
5700	(BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
5701		       @1 { bitsize_int ((idx % const_k) * width); })))))))))
5702
5703/* Simplify a bit extraction from a bit insertion for the cases with
5704   the inserted element fully covering the extraction or the insertion
5705   not touching the extraction.  */
5706(simplify
5707 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
5708 (with
5709  {
5710    unsigned HOST_WIDE_INT isize;
5711    if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
5712      isize = TYPE_PRECISION (TREE_TYPE (@1));
5713    else
5714      isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
5715  }
5716  (switch
5717   (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
5718	&& wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
5719		      wi::to_wide (@ipos) + isize))
5720    (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
5721                                                 wi::to_wide (@rpos)
5722						 - wi::to_wide (@ipos)); }))
5723   (if (wi::geu_p (wi::to_wide (@ipos),
5724		   wi::to_wide (@rpos) + wi::to_wide (@rsize))
5725	|| wi::geu_p (wi::to_wide (@rpos),
5726		      wi::to_wide (@ipos) + isize))
5727    (BIT_FIELD_REF @0 @rsize @rpos)))))
5728
5729(if (canonicalize_math_after_vectorization_p ())
5730 (for fmas (FMA)
5731  (simplify
5732   (fmas:c (negate @0) @1 @2)
5733   (IFN_FNMA @0 @1 @2))
5734  (simplify
5735   (fmas @0 @1 (negate @2))
5736   (IFN_FMS @0 @1 @2))
5737  (simplify
5738   (fmas:c (negate @0) @1 (negate @2))
5739   (IFN_FNMS @0 @1 @2))
5740  (simplify
5741   (negate (fmas@3 @0 @1 @2))
5742   (if (single_use (@3))
5743    (IFN_FNMS @0 @1 @2))))
5744
5745 (simplify
5746  (IFN_FMS:c (negate @0) @1 @2)
5747  (IFN_FNMS @0 @1 @2))
5748 (simplify
5749  (IFN_FMS @0 @1 (negate @2))
5750  (IFN_FMA @0 @1 @2))
5751 (simplify
5752  (IFN_FMS:c (negate @0) @1 (negate @2))
5753  (IFN_FNMA @0 @1 @2))
5754 (simplify
5755  (negate (IFN_FMS@3 @0 @1 @2))
5756   (if (single_use (@3))
5757    (IFN_FNMA @0 @1 @2)))
5758
5759 (simplify
5760  (IFN_FNMA:c (negate @0) @1 @2)
5761  (IFN_FMA @0 @1 @2))
5762 (simplify
5763  (IFN_FNMA @0 @1 (negate @2))
5764  (IFN_FNMS @0 @1 @2))
5765 (simplify
5766  (IFN_FNMA:c (negate @0) @1 (negate @2))
5767  (IFN_FMS @0 @1 @2))
5768 (simplify
5769  (negate (IFN_FNMA@3 @0 @1 @2))
5770  (if (single_use (@3))
5771   (IFN_FMS @0 @1 @2)))
5772
5773 (simplify
5774  (IFN_FNMS:c (negate @0) @1 @2)
5775  (IFN_FMS @0 @1 @2))
5776 (simplify
5777  (IFN_FNMS @0 @1 (negate @2))
5778  (IFN_FNMA @0 @1 @2))
5779 (simplify
5780  (IFN_FNMS:c (negate @0) @1 (negate @2))
5781  (IFN_FMA @0 @1 @2))
5782 (simplify
5783  (negate (IFN_FNMS@3 @0 @1 @2))
5784  (if (single_use (@3))
5785   (IFN_FMA @0 @1 @2))))
5786
5787/* POPCOUNT simplifications.  */
5788(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5789	       BUILT_IN_POPCOUNTIMAX)
5790  /* popcount(X&1) is nop_expr(X&1).  */
5791  (simplify
5792    (popcount @0)
5793    (if (tree_nonzero_bits (@0) == 1)
5794      (convert @0)))
5795  /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero.  */
5796  (simplify
5797    (plus (popcount:s @0) (popcount:s @1))
5798    (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5799      (popcount (bit_ior @0 @1))))
5800  /* popcount(X) == 0 is X == 0, and related (in)equalities.  */
5801  (for cmp (le eq ne gt)
5802       rep (eq eq ne ne)
5803    (simplify
5804      (cmp (popcount @0) integer_zerop)
5805      (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
5806
5807#if GIMPLE
5808/* 64- and 32-bits branchless implementations of popcount are detected:
5809
5810   int popcount64c (uint64_t x)
5811   {
5812     x -= (x >> 1) & 0x5555555555555555ULL;
5813     x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
5814     x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
5815     return (x * 0x0101010101010101ULL) >> 56;
5816   }
5817
5818   int popcount32c (uint32_t x)
5819   {
5820     x -= (x >> 1) & 0x55555555;
5821     x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
5822     x = (x + (x >> 4)) & 0x0f0f0f0f;
5823     return (x * 0x01010101) >> 24;
5824   }  */
5825(simplify
5826 (rshift
5827  (mult
5828   (bit_and
5829    (plus:c
5830     (rshift @8 INTEGER_CST@5)
5831      (plus:c@8
5832       (bit_and @6 INTEGER_CST@7)
5833	(bit_and
5834	 (rshift
5835	  (minus@6 @0
5836	   (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11))
5837	  INTEGER_CST@10)
5838	 INTEGER_CST@9)))
5839    INTEGER_CST@3)
5840   INTEGER_CST@2)
5841  INTEGER_CST@1)
5842  /* Check constants and optab.  */
5843  (with { unsigned prec = TYPE_PRECISION (type);
5844	  int shift = (64 - prec) & 63;
5845	  unsigned HOST_WIDE_INT c1
5846	    = HOST_WIDE_INT_UC (0x0101010101010101) >> shift;
5847	  unsigned HOST_WIDE_INT c2
5848	    = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift;
5849	  unsigned HOST_WIDE_INT c3
5850	    = HOST_WIDE_INT_UC (0x3333333333333333) >> shift;
5851	  unsigned HOST_WIDE_INT c4
5852	    = HOST_WIDE_INT_UC (0x5555555555555555) >> shift;
5853   }
5854   (if (prec >= 16
5855	&& prec <= 64
5856	&& pow2p_hwi (prec)
5857	&& TYPE_UNSIGNED (type)
5858	&& integer_onep (@4)
5859	&& wi::to_widest (@10) == 2
5860	&& wi::to_widest (@5) == 4
5861	&& wi::to_widest (@1) == prec - 8
5862	&& tree_to_uhwi (@2) == c1
5863	&& tree_to_uhwi (@3) == c2
5864	&& tree_to_uhwi (@9) == c3
5865	&& tree_to_uhwi (@7) == c3
5866	&& tree_to_uhwi (@11) == c4
5867	&& direct_internal_fn_supported_p (IFN_POPCOUNT, type,
5868					   OPTIMIZE_FOR_BOTH))
5869    (convert (IFN_POPCOUNT:type @0)))))
5870#endif
5871
5872/* Simplify:
5873
5874     a = a1 op a2
5875     r = c ? a : b;
5876
5877   to:
5878
5879     r = c ? a1 op a2 : b;
5880
5881   if the target can do it in one go.  This makes the operation conditional
5882   on c, so could drop potentially-trapping arithmetic, but that's a valid
5883   simplification if the result of the operation isn't needed.
5884
5885   Avoid speculatively generating a stand-alone vector comparison
5886   on targets that might not support them.  Any target implementing
5887   conditional internal functions must support the same comparisons
5888   inside and outside a VEC_COND_EXPR.  */
5889
5890#if GIMPLE
5891(for uncond_op (UNCOND_BINARY)
5892     cond_op (COND_BINARY)
5893 (simplify
5894  (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
5895  (with { tree op_type = TREE_TYPE (@4); }
5896   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5897	&& element_precision (type) == element_precision (op_type))
5898    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
5899 (simplify
5900  (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
5901  (with { tree op_type = TREE_TYPE (@4); }
5902   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5903	&& element_precision (type) == element_precision (op_type))
5904    (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
5905
5906/* Same for ternary operations.  */
5907(for uncond_op (UNCOND_TERNARY)
5908     cond_op (COND_TERNARY)
5909 (simplify
5910  (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
5911  (with { tree op_type = TREE_TYPE (@5); }
5912   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5913	&& element_precision (type) == element_precision (op_type))
5914    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
5915 (simplify
5916  (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
5917  (with { tree op_type = TREE_TYPE (@5); }
5918   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5919	&& element_precision (type) == element_precision (op_type))
5920    (view_convert (cond_op (bit_not @0) @2 @3 @4
5921		  (view_convert:op_type @1)))))))
5922#endif
5923
5924/* Detect cases in which a VEC_COND_EXPR effectively replaces the
5925   "else" value of an IFN_COND_*.  */
5926(for cond_op (COND_BINARY)
5927 (simplify
5928  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
5929  (with { tree op_type = TREE_TYPE (@3); }
5930   (if (element_precision (type) == element_precision (op_type))
5931    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
5932 (simplify
5933  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
5934  (with { tree op_type = TREE_TYPE (@5); }
5935   (if (inverse_conditions_p (@0, @2)
5936        && element_precision (type) == element_precision (op_type))
5937    (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
5938
5939/* Same for ternary operations.  */
5940(for cond_op (COND_TERNARY)
5941 (simplify
5942  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
5943  (with { tree op_type = TREE_TYPE (@4); }
5944   (if (element_precision (type) == element_precision (op_type))
5945    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
5946 (simplify
5947  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
5948  (with { tree op_type = TREE_TYPE (@6); }
5949   (if (inverse_conditions_p (@0, @2)
5950        && element_precision (type) == element_precision (op_type))
5951    (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
5952
5953/* For pointers @0 and @2 and nonnegative constant offset @1, look for
5954   expressions like:
5955
5956   A: (@0 + @1 < @2) | (@2 + @1 < @0)
5957   B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
5958
5959   If pointers are known not to wrap, B checks whether @1 bytes starting
5960   at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
5961   bytes.  A is more efficiently tested as:
5962
5963   A: (sizetype) (@0 + @1 - @2) > @1 * 2
5964
5965   The equivalent expression for B is given by replacing @1 with @1 - 1:
5966
5967   B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
5968
5969   @0 and @2 can be swapped in both expressions without changing the result.
5970
5971   The folds rely on sizetype's being unsigned (which is always true)
5972   and on its being the same width as the pointer (which we have to check).
5973
5974   The fold replaces two pointer_plus expressions, two comparisons and
5975   an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
5976   the best case it's a saving of two operations.  The A fold retains one
5977   of the original pointer_pluses, so is a win even if both pointer_pluses
5978   are used elsewhere.  The B fold is a wash if both pointer_pluses are
5979   used elsewhere, since all we end up doing is replacing a comparison with
5980   a pointer_plus.  We do still apply the fold under those circumstances
5981   though, in case applying it to other conditions eventually makes one of the
5982   pointer_pluses dead.  */
5983(for ior (truth_orif truth_or bit_ior)
5984 (for cmp (le lt)
5985  (simplify
5986   (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
5987	(cmp:cs (pointer_plus@4 @2 @1) @0))
5988   (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5989	&& TYPE_OVERFLOW_WRAPS (sizetype)
5990	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
5991    /* Calculate the rhs constant.  */
5992    (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
5993	    offset_int rhs = off * 2; }
5994     /* Always fails for negative values.  */
5995     (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
5996      /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
5997	 pick a canonical order.  This increases the chances of using the
5998	 same pointer_plus in multiple checks.  */
5999      (with { bool swap_p = tree_swap_operands_p (@0, @2);
6000	      tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
6001       (if (cmp == LT_EXPR)
6002	(gt (convert:sizetype
6003	     (pointer_diff:ssizetype { swap_p ? @4 : @3; }
6004				     { swap_p ? @0 : @2; }))
6005	    { rhs_tree; })
6006	(gt (convert:sizetype
6007	     (pointer_diff:ssizetype
6008	      (pointer_plus { swap_p ? @2 : @0; }
6009			    { wide_int_to_tree (sizetype, off); })
6010	      { swap_p ? @0 : @2; }))
6011	    { rhs_tree; })))))))))
6012
6013/* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
6014   element of @1.  */
6015(for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
6016 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
6017  (with { int i = single_nonzero_element (@1); }
6018   (if (i >= 0)
6019    (with { tree elt = vector_cst_elt (@1, i);
6020	    tree elt_type = TREE_TYPE (elt);
6021	    unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
6022	    tree size = bitsize_int (elt_bits);
6023	    tree pos = bitsize_int (elt_bits * i); }
6024     (view_convert
6025      (bit_and:elt_type
6026       (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
6027       { elt; })))))))
6028
6029(simplify
6030 (vec_perm @0 @1 VECTOR_CST@2)
6031 (with
6032  {
6033    tree op0 = @0, op1 = @1, op2 = @2;
6034
6035    /* Build a vector of integers from the tree mask.  */
6036    vec_perm_builder builder;
6037    if (!tree_to_vec_perm_builder (&builder, op2))
6038      return NULL_TREE;
6039
6040    /* Create a vec_perm_indices for the integer vector.  */
6041    poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
6042    bool single_arg = (op0 == op1);
6043    vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
6044  }
6045  (if (sel.series_p (0, 1, 0, 1))
6046   { op0; }
6047   (if (sel.series_p (0, 1, nelts, 1))
6048    { op1; }
6049    (with
6050     {
6051       if (!single_arg)
6052         {
6053	   if (sel.all_from_input_p (0))
6054	     op1 = op0;
6055	   else if (sel.all_from_input_p (1))
6056	     {
6057	       op0 = op1;
6058	       sel.rotate_inputs (1);
6059	     }
6060	   else if (known_ge (poly_uint64 (sel[0]), nelts))
6061	     {
6062	       std::swap (op0, op1);
6063	       sel.rotate_inputs (1);
6064	     }
6065         }
6066       gassign *def;
6067       tree cop0 = op0, cop1 = op1;
6068       if (TREE_CODE (op0) == SSA_NAME
6069           && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
6070	   && gimple_assign_rhs_code (def) == CONSTRUCTOR)
6071	 cop0 = gimple_assign_rhs1 (def);
6072       if (TREE_CODE (op1) == SSA_NAME
6073           && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
6074	   && gimple_assign_rhs_code (def) == CONSTRUCTOR)
6075	 cop1 = gimple_assign_rhs1 (def);
6076
6077       tree t;
6078    }
6079    (if ((TREE_CODE (cop0) == VECTOR_CST
6080	  || TREE_CODE (cop0) == CONSTRUCTOR)
6081	 && (TREE_CODE (cop1) == VECTOR_CST
6082	     || TREE_CODE (cop1) == CONSTRUCTOR)
6083	 && (t = fold_vec_perm (type, cop0, cop1, sel)))
6084     { t; }
6085     (with
6086      {
6087	bool changed = (op0 == op1 && !single_arg);
6088	tree ins = NULL_TREE;
6089	unsigned at = 0;
6090
6091	/* See if the permutation is performing a single element
6092	   insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
6093	   in that case.  But only if the vector mode is supported,
6094	   otherwise this is invalid GIMPLE.  */
6095        if (TYPE_MODE (type) != BLKmode
6096	    && (TREE_CODE (cop0) == VECTOR_CST
6097		|| TREE_CODE (cop0) == CONSTRUCTOR
6098		|| TREE_CODE (cop1) == VECTOR_CST
6099		|| TREE_CODE (cop1) == CONSTRUCTOR))
6100          {
6101	    bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1);
6102	    if (insert_first_p)
6103	      {
6104	        /* After canonicalizing the first elt to come from the
6105		   first vector we only can insert the first elt from
6106		   the first vector.  */
6107	        at = 0;
6108		if ((ins = fold_read_from_vector (cop0, sel[0])))
6109		  op0 = op1;
6110	      }
6111	    /* The above can fail for two-element vectors which always
6112	       appear to insert the first element, so try inserting
6113	       into the second lane as well.  For more than two
6114	       elements that's wasted time.  */
6115	    if (!insert_first_p || (!ins && maybe_eq (nelts, 2u)))
6116	      {
6117	        unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
6118		for (at = 0; at < encoded_nelts; ++at)
6119		  if (maybe_ne (sel[at], at))
6120		    break;
6121		if (at < encoded_nelts
6122		    && (known_eq (at + 1, nelts)
6123			|| sel.series_p (at + 1, 1, at + 1, 1)))
6124		  {
6125		    if (known_lt (poly_uint64 (sel[at]), nelts))
6126		      ins = fold_read_from_vector (cop0, sel[at]);
6127		    else
6128		      ins = fold_read_from_vector (cop1, sel[at] - nelts);
6129		  }
6130	      }
6131	  }
6132
6133	/* Generate a canonical form of the selector.  */
6134	if (!ins && sel.encoding () != builder)
6135	  {
6136	    /* Some targets are deficient and fail to expand a single
6137	       argument permutation while still allowing an equivalent
6138	       2-argument version.  */
6139	    tree oldop2 = op2;
6140	    if (sel.ninputs () == 2
6141	       || can_vec_perm_const_p (TYPE_MODE (type), sel, false))
6142	      op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
6143	    else
6144	      {
6145	        vec_perm_indices sel2 (builder, 2, nelts);
6146	        if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false))
6147	          op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
6148	        else
6149	          /* Not directly supported with either encoding,
6150		     so use the preferred form.  */
6151		  op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
6152	      }
6153	    if (!operand_equal_p (op2, oldop2, 0))
6154	      changed = true;
6155	  }
6156      }
6157      (if (ins)
6158       (bit_insert { op0; } { ins; }
6159         { bitsize_int (at * tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))); })
6160       (if (changed)
6161        (vec_perm { op0; } { op1; } { op2; }))))))))))
6162
6163/* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element.  */
6164
6165(match vec_same_elem_p
6166 @0
6167 (if (uniform_vector_p (@0))))
6168
6169(match vec_same_elem_p
6170 (vec_duplicate @0))
6171
6172(simplify
6173 (vec_perm vec_same_elem_p@0 @0 @1)
6174 @0)
6175
6176/* Match count trailing zeroes for simplify_count_trailing_zeroes in fwprop.
6177   The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
6178   constant which when multiplied by a power of 2 contains a unique value
6179   in the top 5 or 6 bits.  This is then indexed into a table which maps it
6180   to the number of trailing zeroes.  */
6181(match (ctz_table_index @1 @2 @3)
6182  (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))
6183