/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2020 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   initializer_each_zero_or_onep
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   uniform_integer_cst_p
   HONOR_NANS
   uniform_vector_p)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
				 BUILT_IN_L##FN##F \
				 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
			      BUILT_IN_L##FN \
			      BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
				 BUILT_IN_L##FN##L \
				 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)

/* Binary operations and their associated IFN_COND_* function.
   */
(define_operator_list UNCOND_BINARY
  plus minus
  mult trunc_div trunc_mod rdiv
  min max
  bit_and bit_ior bit_xor
  lshift rshift)
(define_operator_list COND_BINARY
  IFN_COND_ADD IFN_COND_SUB
  IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
  IFN_COND_MIN IFN_COND_MAX
  IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
  IFN_COND_SHL IFN_COND_SHR)

/* Same for ternary operations.  */
(define_operator_list UNCOND_TERNARY
  IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
(define_operator_list COND_TERNARY
  IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)

/* With nop_convert? combine convert? and view_convert? in one pattern
   plus conditionalize on tree_nop_conversion_p conversions.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))

/* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
   ABSU_EXPR returns unsigned absolute value of the operand and the operand
   of the ABSU_EXPR will have the corresponding signed type.  */
(simplify (abs (convert @0))
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && element_precision (type) > element_precision (TREE_TYPE (@0)))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (convert (absu:utype @0)))))


/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.
   */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Even if the fold_real_zero_addition_p can't simplify X + 0.0
   into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
   or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
   if not -frounding-math.  For sNaNs the first operation would raise
   exceptions but turn the result into qNaN, so the second operation
   would not raise it.  */
(for inner_op (plus minus)
 (for outer_op (plus minus)
  (simplify
   (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
   (if (real_zerop (@1)
	&& real_zerop (@2)
	&& !HONOR_SIGN_DEPENDENT_ROUNDING (type))
    (with { bool inner_plus = ((inner_op == PLUS_EXPR)
			       ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
	    bool outer_plus
	      = ((outer_op == PLUS_EXPR)
		 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
     (if (outer_plus && !inner_plus)
      (outer_op @0 @2)
      @3))))))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
	  || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
	  || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 } */
(simplify
 (mult SSA_NAME@1 SSA_NAME@2)
 (if (INTEGRAL_TYPE_P (type)
      && get_nonzero_bits (@1) == 1
      && get_nonzero_bits (@2) == 1)
  (bit_and @1 @2)))

/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
   unless the target has native support for the former but not the latter.  */
(simplify
 (mult @0 VECTOR_CST@1)
 (if (initializer_each_zero_or_onep (@1)
      && !HONOR_SNANS (type)
      && !HONOR_SIGNED_ZEROS (type))
  (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
   (if (itype
	&& (!VECTOR_MODE_P (TYPE_MODE (type))
	    || (VECTOR_MODE_P (TYPE_MODE (itype))
		&& optab_handler (and_optab,
				  TYPE_MODE (itype)) != CODE_FOR_nothing)))
    (view_convert (bit_and:itype (view_convert @0)
				 (ne @1 { build_zero_cst (type); })))))))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform X * (X > 0.0 ? 1.0 : -1.0) into abs(X). */
 /* Transform X * (X >= 0.0 ? 1.0 : -1.0) into abs(X). */
 /* Transform X * (X < 0.0 ? 1.0 : -1.0) into -abs(X). */
 /* Transform X * (X <= 0.0 ? 1.0 : -1.0) into -abs(X). */
 (simplify
  (mult:c @0 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep))
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
   (outp (abs @0))))
 /* Transform X * (X > 0.0 ? -1.0 : 1.0) into -abs(X). */
 /* Transform X * (X >= 0.0 ? -1.0 : 1.0) into -abs(X). */
 /* Transform X * (X < 0.0 ? -1.0 : 1.0) into abs(X). */
 /* Transform X * (X <= 0.0 ? -1.0 : 1.0) into abs(X). */
 (simplify
  (mult:c @0 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1))
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
   (outn (abs @0)))))

/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))

/* X * 1, X / 1 -> X.
   */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) != -1 >> B.
   Also handle widening conversions, like:
   (A / (unsigned long long) (1U << B)) -> (A >> B)
   or
   (A / (unsigned long long) (1 << B)) -> (A >> B).
   If the left shift is signed, it can be done only if the upper bits
   of A starting from shift's type sign bit are zero, as
   (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
   so it is valid only if A >> 31 is zero.  */
(simplify
 (trunc_div @0 (convert? (lshift integer_onep@1 @2)))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
	  || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
	  || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
      && (useless_type_conversion_p (type, TREE_TYPE (@1))
	  || (element_precision (type) >= element_precision (TREE_TYPE (@1))
	      && (TYPE_UNSIGNED (TREE_TYPE (@1))
		  || (element_precision (type)
		      == element_precision (TREE_TYPE (@1)))
		  || (INTEGRAL_TYPE_P (type)
		      && (tree_nonzero_bits (@0)
			  & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
				      true,
				      element_precision (type))) == 0)))))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
	 { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  Similarly
   for MOD instead of DIV.  */
(for floor_divmod (floor_div floor_mod)
     trunc_divmod (trunc_div trunc_mod)
 (simplify
  (floor_divmod @0 @1)
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_UNSIGNED (type))
   (trunc_divmod @0 @1))))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    wi::overflow_type overflow;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			    TYPE_SIGN (type), &overflow);
   }
   (if (div == EXACT_DIV_EXPR
	|| optimize_successive_divisions_p (@2, @3))
    (if (!overflow)
     (div @0 { wide_int_to_tree (type, mul); })
     (if (TYPE_UNSIGNED (type)
	  || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
      { build_zero_cst (type); }))))))

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   wi::overflow_type overflow;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			   TYPE_SIGN (type), &overflow);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
 (if (SCALAR_FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  (switch
   (if (types_match (type, float_type_node))
    (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
   (if (types_match (type, double_type_node))
    (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
   (if (types_match (type, long_double_type_node))
    (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).
    */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

(if (flag_unsafe_math_optimizations)
 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
    Since C / x may underflow to zero, do this only for unsafe math.  */
 (for op (lt le gt ge)
      neg_op (gt ge lt le)
  (simplify
   (op (rdiv REAL_CST@0 @1) real_zerop@2)
   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
    (switch
     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
      (op @1 @2))
     /* For C < 0, use the inverted operator.  */
     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
      (neg_op @1 @2)))))))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
	   { build_int_cst (integer_type_node,
			    wi::exact_log2 (wi::to_wide (@2))); }))))

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
	&& !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
			     TYPE_SIGN (type)))
   { build_zero_cst (type); }))
 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
    modulo and comparison, since it is simpler and equivalent.  */
 (for cmp (eq ne)
  (simplify
   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))

/* X % -C is the same as X % C.
   */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
	 Y might be -1, because we would then change valid
	 INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
	  || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
						   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert? (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
       /* Allow any integral conversions of the divisor, except
	  conversion from narrower signed to wider unsigned type
	  where if @1 would be negative power of two, the divisor
	  would not be a power of two.  */
       && INTEGRAL_TYPE_P (type)
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
	   || TYPE_UNSIGNED (TREE_TYPE (@1))
	   || !TYPE_UNSIGNED (type))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (with { tree utype = TREE_TYPE (@1);
	   if (!TYPE_OVERFLOW_WRAPS (utype))
	     utype = unsigned_type_for (utype); }
    (bit_and @0 (convert (minus (convert:utype @1)
				{ build_one_cst (utype); })))))))

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
		(type, wi::mask (TYPE_PRECISION (type)
				 - wi::exact_log2 (wi::to_wide (@1)),
				 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.
    */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* Convert absu(x)*absu(x) -> x*x.  */
(simplify
 (mult (absu@1 @0) @1)
 (mult (convert@2 @0) @2))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x). */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.
    */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (~a & b) ^ a  -->  (a | b)  */
(simplify
 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
 (bit_ior @0 @1))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* ~(~a | b) --> a & ~b */
(simplify
 (bit_not (bit_ior:cs (bit_not @0) @1))
 (bit_and @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.
   */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.  */
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
				      @3, @4, @1, ERROR_MARK, NULL_TREE,
				      NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
			   (convert:utype { pmop[1]; }))
		       (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
				      NULL_TREE, NULL_TREE, @1, bitop, @3,
				      @4, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
			   (convert:utype { pmop[1]; }))
		       (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
				     NULL_TREE, NULL_TREE, @1, ERROR_MARK,
				     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (op (convert:utype { pmop[0]; })
			  (convert:utype { pmop[1]; }))
		      (convert:utype @2)))))))
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
				     bitop, @2, @3, NULL_TREE, ERROR_MARK,
				     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (negate (convert:utype { pmop[0]; }))
		      (convert:utype @1)))))))

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (~x & y) | ~(x | y) -> ~x */
(simplify
 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
 @2)

/* (x | y) ^ (x | ~y) -> ~x */
(simplify
 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
 (bit_not @0))

/* (x & y) | ~(x | y) -> ~(x ^ y) */
(simplify
 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x ^ y) -> x | ~y */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
 (bit_ior @0 (bit_not @1)))

/* (x ^ y) | ~(x | y) -> ~(x & y) */
(simplify
 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_and @0 @1)))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))
1075 1076/* (x & y) + (x | y) -> x + y */ 1077(simplify 1078 (plus:c (bit_and @0 @1) (bit_ior @0 @1)) 1079 (plus @0 @1)) 1080 1081/* (x + y) - (x | y) -> x & y */ 1082(simplify 1083 (minus (plus @0 @1) (bit_ior @0 @1)) 1084 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) 1085 && !TYPE_SATURATING (type)) 1086 (bit_and @0 @1))) 1087 1088/* (x + y) - (x & y) -> x | y */ 1089(simplify 1090 (minus (plus @0 @1) (bit_and @0 @1)) 1091 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) 1092 && !TYPE_SATURATING (type)) 1093 (bit_ior @0 @1))) 1094 1095/* (x | y) - (x ^ y) -> x & y */ 1096(simplify 1097 (minus (bit_ior @0 @1) (bit_xor @0 @1)) 1098 (bit_and @0 @1)) 1099 1100/* (x | y) - (x & y) -> x ^ y */ 1101(simplify 1102 (minus (bit_ior @0 @1) (bit_and @0 @1)) 1103 (bit_xor @0 @1)) 1104 1105/* (x | y) & ~(x & y) -> x ^ y */ 1106(simplify 1107 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1))) 1108 (bit_xor @0 @1)) 1109 1110/* (x | y) & (~x ^ y) -> x & y */ 1111(simplify 1112 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0))) 1113 (bit_and @0 @1)) 1114 1115/* (~x | y) & (x | ~y) -> ~(x ^ y) */ 1116(simplify 1117 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1))) 1118 (bit_not (bit_xor @0 @1))) 1119 1120/* (~x | y) ^ (x | ~y) -> x ^ y */ 1121(simplify 1122 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1))) 1123 (bit_xor @0 @1)) 1124 1125/* ~x & ~y -> ~(x | y) 1126 ~x | ~y -> ~(x & y) */ 1127(for op (bit_and bit_ior) 1128 rop (bit_ior bit_and) 1129 (simplify 1130 (op (convert1? (bit_not @0)) (convert2? 
(bit_not @1))) 1131 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1132 && element_precision (type) <= element_precision (TREE_TYPE (@1))) 1133 (bit_not (rop (convert @0) (convert @1)))))) 1134 1135/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing 1136 with a constant, and the two constants have no bits in common, 1137 we should treat this as a BIT_IOR_EXPR since this may produce more 1138 simplifications. */ 1139(for op (bit_xor plus) 1140 (simplify 1141 (op (convert1? (bit_and@4 @0 INTEGER_CST@1)) 1142 (convert2? (bit_and@5 @2 INTEGER_CST@3))) 1143 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1144 && tree_nop_conversion_p (type, TREE_TYPE (@2)) 1145 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0) 1146 (bit_ior (convert @4) (convert @5))))) 1147 1148/* (X | Y) ^ X -> Y & ~ X*/ 1149(simplify 1150 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0)) 1151 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1152 (convert (bit_and @1 (bit_not @0))))) 1153 1154/* Convert ~X ^ ~Y to X ^ Y. */ 1155(simplify 1156 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1))) 1157 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1158 && element_precision (type) <= element_precision (TREE_TYPE (@1))) 1159 (bit_xor (convert @0) (convert @1)))) 1160 1161/* Convert ~X ^ C to X ^ ~C. */ 1162(simplify 1163 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1) 1164 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1165 (bit_xor (convert @0) (bit_not @1)))) 1166 1167/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */ 1168(for opo (bit_and bit_xor) 1169 opi (bit_xor bit_and) 1170 (simplify 1171 (opo:c (opi:cs @0 @1) @1) 1172 (bit_and (bit_not @0) @1))) 1173 1174/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both 1175 operands are another bit-wise operation with a common input. If so, 1176 distribute the bit operations to save an operation and possibly two if 1177 constants are involved. 
For example, convert 1178 (A | B) & (A | C) into A | (B & C) 1179 Further simplification will occur if B and C are constants. */ 1180(for op (bit_and bit_ior bit_xor) 1181 rop (bit_ior bit_and bit_and) 1182 (simplify 1183 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2))) 1184 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1185 && tree_nop_conversion_p (type, TREE_TYPE (@2))) 1186 (rop (convert @0) (op (convert @1) (convert @2)))))) 1187 1188/* Some simple reassociation for bit operations, also handled in reassoc. */ 1189/* (X & Y) & Y -> X & Y 1190 (X | Y) | Y -> X | Y */ 1191(for op (bit_and bit_ior) 1192 (simplify 1193 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1)) 1194 @2)) 1195/* (X ^ Y) ^ Y -> X */ 1196(simplify 1197 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1)) 1198 (convert @0)) 1199/* (X & Y) & (X & Z) -> (X & Y) & Z 1200 (X | Y) | (X | Z) -> (X | Y) | Z */ 1201(for op (bit_and bit_ior) 1202 (simplify 1203 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2))) 1204 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1205 && tree_nop_conversion_p (type, TREE_TYPE (@2))) 1206 (if (single_use (@5) && single_use (@6)) 1207 (op @3 (convert @2)) 1208 (if (single_use (@3) && single_use (@4)) 1209 (op (convert @1) @5)))))) 1210/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */ 1211(simplify 1212 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2))) 1213 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1214 && tree_nop_conversion_p (type, TREE_TYPE (@2))) 1215 (bit_xor (convert @1) (convert @2)))) 1216 1217/* Convert abs (abs (X)) into abs (X). 1218 also absu (absu (X)) into absu (X). */ 1219(simplify 1220 (abs (abs@1 @0)) 1221 @1) 1222 1223(simplify 1224 (absu (convert@2 (absu@1 @0))) 1225 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1))) 1226 @1)) 1227 1228/* Convert abs[u] (-X) -> abs[u] (X). 
*/ 1229(simplify 1230 (abs (negate @0)) 1231 (abs @0)) 1232 1233(simplify 1234 (absu (negate @0)) 1235 (absu @0)) 1236 1237/* Convert abs[u] (X) where X is nonnegative -> (X). */ 1238(simplify 1239 (abs tree_expr_nonnegative_p@0) 1240 @0) 1241 1242(simplify 1243 (absu tree_expr_nonnegative_p@0) 1244 (convert @0)) 1245 1246/* A few cases of fold-const.c negate_expr_p predicate. */ 1247(match negate_expr_p 1248 INTEGER_CST 1249 (if ((INTEGRAL_TYPE_P (type) 1250 && TYPE_UNSIGNED (type)) 1251 || (!TYPE_OVERFLOW_SANITIZED (type) 1252 && may_negate_without_overflow_p (t))))) 1253(match negate_expr_p 1254 FIXED_CST) 1255(match negate_expr_p 1256 (negate @0) 1257 (if (!TYPE_OVERFLOW_SANITIZED (type)))) 1258(match negate_expr_p 1259 REAL_CST 1260 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t))))) 1261/* VECTOR_CST handling of non-wrapping types would recurse in unsupported 1262 ways. */ 1263(match negate_expr_p 1264 VECTOR_CST 1265 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type)))) 1266(match negate_expr_p 1267 (minus @0 @1) 1268 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)) 1269 || (FLOAT_TYPE_P (type) 1270 && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 1271 && !HONOR_SIGNED_ZEROS (type))))) 1272 1273/* (-A) * (-B) -> A * B */ 1274(simplify 1275 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1)) 1276 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1277 && tree_nop_conversion_p (type, TREE_TYPE (@1))) 1278 (mult (convert @0) (convert (negate @1))))) 1279 1280/* -(A + B) -> (-B) - A. */ 1281(simplify 1282 (negate (plus:c @0 negate_expr_p@1)) 1283 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type)) 1284 && !HONOR_SIGNED_ZEROS (element_mode (type))) 1285 (minus (negate @1) @0))) 1286 1287/* -(A - B) -> B - A. 
*/ 1288(simplify 1289 (negate (minus @0 @1)) 1290 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type)) 1291 || (FLOAT_TYPE_P (type) 1292 && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 1293 && !HONOR_SIGNED_ZEROS (type))) 1294 (minus @1 @0))) 1295(simplify 1296 (negate (pointer_diff @0 @1)) 1297 (if (TYPE_OVERFLOW_UNDEFINED (type)) 1298 (pointer_diff @1 @0))) 1299 1300/* A - B -> A + (-B) if B is easily negatable. */ 1301(simplify 1302 (minus @0 negate_expr_p@1) 1303 (if (!FIXED_POINT_TYPE_P (type)) 1304 (plus @0 (negate @1)))) 1305 1306/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST)) 1307 when profitable. 1308 For bitwise binary operations apply operand conversions to the 1309 binary operation result instead of to the operands. This allows 1310 to combine successive conversions and bitwise binary operations. 1311 We combine the above two cases by using a conditional convert. */ 1312(for bitop (bit_and bit_ior bit_xor) 1313 (simplify 1314 (bitop (convert @0) (convert? @1)) 1315 (if (((TREE_CODE (@1) == INTEGER_CST 1316 && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1317 && int_fits_type_p (@1, TREE_TYPE (@0))) 1318 || types_match (@0, @1)) 1319 /* ??? This transform conflicts with fold-const.c doing 1320 Convert (T)(x & c) into (T)x & (T)c, if c is an integer 1321 constants (if x has signed type, the sign bit cannot be set 1322 in c). This folds extension into the BIT_AND_EXPR. 1323 Restrict it to GIMPLE to avoid endless recursions. */ 1324 && (bitop != BIT_AND_EXPR || GIMPLE) 1325 && (/* That's a good idea if the conversion widens the operand, thus 1326 after hoisting the conversion the operation will be narrower. */ 1327 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type) 1328 /* It's also a good idea if the conversion is to a non-integer 1329 mode. */ 1330 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT 1331 /* Or if the precision of TO is not the same as the precision 1332 of its mode. 
*/ 1333 || !type_has_mode_precision_p (type))) 1334 (convert (bitop @0 (convert @1)))))) 1335 1336(for bitop (bit_and bit_ior) 1337 rbitop (bit_ior bit_and) 1338 /* (x | y) & x -> x */ 1339 /* (x & y) | x -> x */ 1340 (simplify 1341 (bitop:c (rbitop:c @0 @1) @0) 1342 @0) 1343 /* (~x | y) & x -> x & y */ 1344 /* (~x & y) | x -> x | y */ 1345 (simplify 1346 (bitop:c (rbitop:c (bit_not @0) @1) @0) 1347 (bitop @0 @1))) 1348 1349/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */ 1350(simplify 1351 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 1352 (bit_ior (bit_and @0 @2) (bit_and @1 @2))) 1353 1354/* Combine successive equal operations with constants. */ 1355(for bitop (bit_and bit_ior bit_xor) 1356 (simplify 1357 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 1358 (if (!CONSTANT_CLASS_P (@0)) 1359 /* This is the canonical form regardless of whether (bitop @1 @2) can be 1360 folded to a constant. */ 1361 (bitop @0 (bitop @1 @2)) 1362 /* In this case we have three constants and (bitop @0 @1) doesn't fold 1363 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if 1364 the values involved are such that the operation can't be decided at 1365 compile time. Try folding one of @0 or @1 with @2 to see whether 1366 that combination can be decided at compile time. 1367 1368 Keep the existing form if both folds fail, to avoid endless 1369 oscillation. */ 1370 (with { tree cst1 = const_binop (bitop, type, @0, @2); } 1371 (if (cst1) 1372 (bitop @1 { cst1; }) 1373 (with { tree cst2 = const_binop (bitop, type, @1, @2); } 1374 (if (cst2) 1375 (bitop @0 { cst2; })))))))) 1376 1377/* Try simple folding for X op !X, and X op X with the help 1378 of the truth_valued_p and logical_inverted_value predicates. 
*/ 1379(match truth_valued_p 1380 @0 1381 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))) 1382(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor) 1383 (match truth_valued_p 1384 (op @0 @1))) 1385(match truth_valued_p 1386 (truth_not @0)) 1387 1388(match (logical_inverted_value @0) 1389 (truth_not @0)) 1390(match (logical_inverted_value @0) 1391 (bit_not truth_valued_p@0)) 1392(match (logical_inverted_value @0) 1393 (eq @0 integer_zerop)) 1394(match (logical_inverted_value @0) 1395 (ne truth_valued_p@0 integer_truep)) 1396(match (logical_inverted_value @0) 1397 (bit_xor truth_valued_p@0 integer_truep)) 1398 1399/* X & !X -> 0. */ 1400(simplify 1401 (bit_and:c @0 (logical_inverted_value @0)) 1402 { build_zero_cst (type); }) 1403/* X | !X and X ^ !X -> 1, , if X is truth-valued. */ 1404(for op (bit_ior bit_xor) 1405 (simplify 1406 (op:c truth_valued_p@0 (logical_inverted_value @0)) 1407 { constant_boolean_node (true, type); })) 1408/* X ==/!= !X is false/true. */ 1409(for op (eq ne) 1410 (simplify 1411 (op:c truth_valued_p@0 (logical_inverted_value @0)) 1412 { constant_boolean_node (op == NE_EXPR ? true : false, type); })) 1413 1414/* ~~x -> x */ 1415(simplify 1416 (bit_not (bit_not @0)) 1417 @0) 1418 1419/* Convert ~ (-A) to A - 1. */ 1420(simplify 1421 (bit_not (convert? (negate @0))) 1422 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1423 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1424 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); })))) 1425 1426/* Convert - (~A) to A + 1. */ 1427(simplify 1428 (negate (nop_convert? (bit_not @0))) 1429 (plus (view_convert @0) { build_each_one_cst (type); })) 1430 1431/* Convert ~ (A - 1) or ~ (A + -1) to -A. */ 1432(simplify 1433 (bit_not (convert? (minus @0 integer_each_onep))) 1434 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1435 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1436 (convert (negate @0)))) 1437(simplify 1438 (bit_not (convert? 
(plus @0 integer_all_onesp))) 1439 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1440 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1441 (convert (negate @0)))) 1442 1443/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */ 1444(simplify 1445 (bit_not (convert? (bit_xor @0 INTEGER_CST@1))) 1446 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1447 (convert (bit_xor @0 (bit_not @1))))) 1448(simplify 1449 (bit_not (convert? (bit_xor:c (bit_not @0) @1))) 1450 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1451 (convert (bit_xor @0 @1)))) 1452 1453/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */ 1454(simplify 1455 (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1) 1456 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1457 (bit_not (bit_xor (view_convert @0) @1)))) 1458 1459/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */ 1460(simplify 1461 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2)) 1462 (bit_xor (bit_and (bit_xor @0 @1) @2) @0)) 1463 1464/* Fold A - (A & B) into ~B & A. */ 1465(simplify 1466 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1))) 1467 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1468 && tree_nop_conversion_p (type, TREE_TYPE (@1))) 1469 (convert (bit_and (bit_not @1) @0)))) 1470 1471/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */ 1472(for cmp (gt lt ge le) 1473(simplify 1474 (mult (convert (cmp @0 @1)) @2) 1475 (if (GIMPLE || !TREE_SIDE_EFFECTS (@2)) 1476 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))) 1477 1478/* For integral types with undefined overflow and C != 0 fold 1479 x * C EQ/NE y * C into x EQ/NE y. */ 1480(for cmp (eq ne) 1481 (simplify 1482 (cmp (mult:c @0 @1) (mult:c @2 @1)) 1483 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1484 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1485 && tree_expr_nonzero_p (@1)) 1486 (cmp @0 @2)))) 1487 1488/* For integral types with wrapping overflow and C odd fold 1489 x * C EQ/NE y * C into x EQ/NE y. 
*/ 1490(for cmp (eq ne) 1491 (simplify 1492 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1)) 1493 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1494 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) 1495 && (TREE_INT_CST_LOW (@1) & 1) != 0) 1496 (cmp @0 @2)))) 1497 1498/* For integral types with undefined overflow and C != 0 fold 1499 x * C RELOP y * C into: 1500 1501 x RELOP y for nonnegative C 1502 y RELOP x for negative C */ 1503(for cmp (lt gt le ge) 1504 (simplify 1505 (cmp (mult:c @0 @1) (mult:c @2 @1)) 1506 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1507 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1508 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1)) 1509 (cmp @0 @2) 1510 (if (TREE_CODE (@1) == INTEGER_CST 1511 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1)))) 1512 (cmp @2 @0)))))) 1513 1514/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */ 1515(for cmp (le gt) 1516 icmp (gt le) 1517 (simplify 1518 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2) 1519 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1520 && TYPE_UNSIGNED (TREE_TYPE (@0)) 1521 && TYPE_PRECISION (TREE_TYPE (@0)) > 1 1522 && (wi::to_wide (@2) 1523 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1)) 1524 (with { tree stype = signed_type_for (TREE_TYPE (@0)); } 1525 (icmp (convert:stype @0) { build_int_cst (stype, 0); }))))) 1526 1527/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */ 1528(for cmp (simple_comparison) 1529 (simplify 1530 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? 
(exact_div @1 @2))) 1531 (if (element_precision (@3) >= element_precision (@0) 1532 && types_match (@0, @1)) 1533 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))) 1534 (if (!TYPE_UNSIGNED (TREE_TYPE (@3))) 1535 (cmp @1 @0) 1536 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1)) 1537 (with 1538 { 1539 tree utype = unsigned_type_for (TREE_TYPE (@0)); 1540 } 1541 (cmp (convert:utype @1) (convert:utype @0))))) 1542 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2)))) 1543 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3))) 1544 (cmp @0 @1) 1545 (with 1546 { 1547 tree utype = unsigned_type_for (TREE_TYPE (@0)); 1548 } 1549 (cmp (convert:utype @0) (convert:utype @1))))))))) 1550 1551/* X / C1 op C2 into a simple range test. */ 1552(for cmp (simple_comparison) 1553 (simplify 1554 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2) 1555 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1556 && integer_nonzerop (@1) 1557 && !TREE_OVERFLOW (@1) 1558 && !TREE_OVERFLOW (@2)) 1559 (with { tree lo, hi; bool neg_overflow; 1560 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi, 1561 &neg_overflow); } 1562 (switch 1563 (if (code == LT_EXPR || code == GE_EXPR) 1564 (if (TREE_OVERFLOW (lo)) 1565 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); } 1566 (if (code == LT_EXPR) 1567 (lt @0 { lo; }) 1568 (ge @0 { lo; })))) 1569 (if (code == LE_EXPR || code == GT_EXPR) 1570 (if (TREE_OVERFLOW (hi)) 1571 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); } 1572 (if (code == LE_EXPR) 1573 (le @0 { hi; }) 1574 (gt @0 { hi; })))) 1575 (if (!lo && !hi) 1576 { build_int_cst (type, code == NE_EXPR); }) 1577 (if (code == EQ_EXPR && !hi) 1578 (ge @0 { lo; })) 1579 (if (code == EQ_EXPR && !lo) 1580 (le @0 { hi; })) 1581 (if (code == NE_EXPR && !hi) 1582 (lt @0 { lo; })) 1583 (if (code == NE_EXPR && !lo) 1584 (gt @0 { hi; })) 1585 (if (GENERIC) 1586 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR, 1587 lo, hi); }) 
1588 (with 1589 { 1590 tree etype = range_check_type (TREE_TYPE (@0)); 1591 if (etype) 1592 { 1593 hi = fold_convert (etype, hi); 1594 lo = fold_convert (etype, lo); 1595 hi = const_binop (MINUS_EXPR, etype, hi, lo); 1596 } 1597 } 1598 (if (etype && hi && !TREE_OVERFLOW (hi)) 1599 (if (code == EQ_EXPR) 1600 (le (minus (convert:etype @0) { lo; }) { hi; }) 1601 (gt (minus (convert:etype @0) { lo; }) { hi; }))))))))) 1602 1603/* X + Z < Y + Z is the same as X < Y when there is no overflow. */ 1604(for op (lt le ge gt) 1605 (simplify 1606 (op (plus:c @0 @2) (plus:c @1 @2)) 1607 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1608 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1609 (op @0 @1)))) 1610/* For equality and subtraction, this is also true with wrapping overflow. */ 1611(for op (eq ne minus) 1612 (simplify 1613 (op (plus:c @0 @2) (plus:c @1 @2)) 1614 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1615 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1616 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1617 (op @0 @1)))) 1618 1619/* X - Z < Y - Z is the same as X < Y when there is no overflow. */ 1620(for op (lt le ge gt) 1621 (simplify 1622 (op (minus @0 @2) (minus @1 @2)) 1623 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1624 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1625 (op @0 @1)))) 1626/* For equality and subtraction, this is also true with wrapping overflow. */ 1627(for op (eq ne minus) 1628 (simplify 1629 (op (minus @0 @2) (minus @1 @2)) 1630 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1631 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1632 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1633 (op @0 @1)))) 1634/* And for pointers... 
*/ 1635(for op (simple_comparison) 1636 (simplify 1637 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) 1638 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1639 (op @0 @1)))) 1640(simplify 1641 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) 1642 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) 1643 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1644 (pointer_diff @0 @1))) 1645 1646/* Z - X < Z - Y is the same as Y < X when there is no overflow. */ 1647(for op (lt le ge gt) 1648 (simplify 1649 (op (minus @2 @0) (minus @2 @1)) 1650 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1651 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1652 (op @1 @0)))) 1653/* For equality and subtraction, this is also true with wrapping overflow. */ 1654(for op (eq ne minus) 1655 (simplify 1656 (op (minus @2 @0) (minus @2 @1)) 1657 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1658 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1659 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1660 (op @1 @0)))) 1661/* And for pointers... */ 1662(for op (simple_comparison) 1663 (simplify 1664 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) 1665 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1666 (op @1 @0)))) 1667(simplify 1668 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) 1669 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) 1670 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1671 (pointer_diff @1 @0))) 1672 1673/* X + Y < Y is the same as X < 0 when there is no overflow. */ 1674(for op (lt le gt ge) 1675 (simplify 1676 (op:c (plus:c@2 @0 @1) @1) 1677 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1678 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1679 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) 1680 && (CONSTANT_CLASS_P (@0) || single_use (@2))) 1681 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))) 1682/* For equality, this is also true with wrapping overflow. */ 1683(for op (eq ne) 1684 (simplify 1685 (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? 
@1)) 1686 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1687 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1688 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 1689 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3))) 1690 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2)) 1691 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) 1692 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))) 1693 (simplify 1694 (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0)) 1695 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) 1696 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)) 1697 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3)))) 1698 (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) 1699 1700/* X - Y < X is the same as Y > 0 when there is no overflow. 1701 For equality, this is also true with wrapping overflow. */ 1702(for op (simple_comparison) 1703 (simplify 1704 (op:c @0 (minus@2 @0 @1)) 1705 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1706 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1707 || ((op == EQ_EXPR || op == NE_EXPR) 1708 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1709 && (CONSTANT_CLASS_P (@1) || single_use (@2))) 1710 (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) 1711 1712/* Transform: 1713 (X / Y) == 0 -> X < Y if X, Y are unsigned. 1714 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */ 1715(for cmp (eq ne) 1716 ocmp (lt ge) 1717 (simplify 1718 (cmp (trunc_div @0 @1) integer_zerop) 1719 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) 1720 /* Complex ==/!= is allowed, but not </>=. */ 1721 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE 1722 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0)))) 1723 (ocmp @0 @1)))) 1724 1725/* X == C - X can never be true if C is odd. */ 1726(for cmp (eq ne) 1727 (simplify 1728 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? 
@0))))
  (if (TREE_INT_CST_LOW (@1) & 1)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* Arguments on which one can call get_nonzero_bits to get the bits
   possibly set.  */
(match with_possible_nonzero_bits
 INTEGER_CST@0)
(match with_possible_nonzero_bits
 SSA_NAME@0
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
/* Slightly extended version, do not make it recursive to keep it cheap.  */
(match (with_possible_nonzero_bits2 @0)
 with_possible_nonzero_bits@0)
(match (with_possible_nonzero_bits2 @0)
 (bit_and:c with_possible_nonzero_bits@0 @2))

/* Same for bits that are known to be set, but we do not have
   an equivalent to get_nonzero_bits yet.  */
(match (with_certain_nonzero_bits2 @0)
 INTEGER_CST@0)
(match (with_certain_nonzero_bits2 @0)
 (bit_ior @1 INTEGER_CST@0))

/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0.  */
(for cmp (eq ne)
 (simplify
  (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
  (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* ((X inner_op C0) outer_op C1)
   With X being a tree where value_range has reasoned certain bits to always be
   zero throughout its computed value range,
   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
   where zero_mask has 1's for all bits that are sure to be 0 in X
   and 0's otherwise.
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
*/
(for inner_op (bit_ior bit_xor)
     outer_op (bit_xor bit_ior)
(simplify
 (outer_op
  (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
 (with
  {
    bool fail = false;
    wide_int zero_mask_not;
    wide_int C0;
    wide_int cst_emit;

    /* Only SSA names carry value-range / nonzero-bits information;
       punt for anything else.  */
    if (TREE_CODE (@2) == SSA_NAME)
      zero_mask_not = get_nonzero_bits (@2);
    else
      fail = true;

    if (inner_op == BIT_XOR_EXPR)
      {
        C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
        cst_emit = C0 | wi::to_wide (@1);
      }
    else
      {
        C0 = wi::to_wide (@0);
        cst_emit = C0 ^ wi::to_wide (@1);
      }
  }
  (if (!fail && (C0 & zero_mask_not) == 0)
   (outer_op @2 { wide_int_to_tree (type, cst_emit); })
   (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
    (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))

/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
  (pointer_plus (pointer_plus:s @0 @1) @3)
  (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
  (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
  /* Conditionally look through a sign-changing conversion.  */
  (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
       && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
            || (GENERIC && type == TREE_TYPE (@1))))
   @1))
(simplify
  (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
  (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
   (convert @1)))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
  (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
  (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
   (bit_and @0 { algn; })))

/* Try folding difference of addresses.  */
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst].  */
(simplify
 (convert (pointer_diff @0 INTEGER_CST@1))
 (if (POINTER_TYPE_P (type))
  { build_fold_addr_expr_with_type
      (build2 (MEM_REF, char_type_node, @0,
               wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))),
       type); }))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert? @0) INTEGER_CST@1)
 (if (POINTER_TYPE_P (TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with
   {
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     get_pointer_alignment_1 (@0, &align, &bitpos);
   }
   (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
    { wide_int_to_tree (type, (wi::to_wide (@1)
                               & (bitpos / BITS_PER_UNIT))); }))))

/* Match an INTEGER_CST that is the minimum representable value of its
   integral type.  */
(match min_value
 INTEGER_CST
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))

/* Likewise for the maximum representable value.  */
(match max_value
 INTEGER_CST
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))

/* x > y && x != XXX_MIN --> x > y
   x > y && x == XXX_MIN --> false . */
(for eqne (eq ne)
 (simplify
  (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
   (switch
    (if (eqne == EQ_EXPR)
     { constant_boolean_node (false, type); })
    (if (eqne == NE_EXPR)
     @2)
    )))

/* x < y && x != XXX_MAX --> x < y
   x < y && x == XXX_MAX --> false.  */
(for eqne (eq ne)
 (simplify
  (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
   (switch
    (if (eqne == EQ_EXPR)
     { constant_boolean_node (false, type); })
    (if (eqne == NE_EXPR)
     @2)
    )))

/* x <= y && x == XXX_MIN --> x == XXX_MIN.  */
(simplify
 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
 @2)

/* x >= y && x == XXX_MAX --> x == XXX_MAX.  */
(simplify
 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
 @2)

/* x > y || x != XXX_MIN --> x != XXX_MIN.  */
(simplify
 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
 @2)

/* x <= y || x != XXX_MIN --> true.  */
(simplify
 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
 { constant_boolean_node (true, type); })

/* x <= y || x == XXX_MIN --> x <= y.  */
(simplify
 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
 @2)

/* x < y || x != XXX_MAX --> x != XXX_MAX.  */
(simplify
 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
 @2)

/* x >= y || x != XXX_MAX --> true
   x >= y || x == XXX_MAX --> x >= y.  */
(for eqne (eq ne)
 (simplify
  (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
  (switch
   (if (eqne == EQ_EXPR)
    @2)
   (if (eqne == NE_EXPR)
    { constant_boolean_node (true, type); }))))

/* Convert (X == CST1) && (X OP2 CST2) to a known value
   based on CST1 OP2 CST2.  Similarly for (X != CST1).  */

(for code1 (eq ne)
 (for code2 (eq ne lt gt le ge)
  (simplify
   (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
   (with
    {
     int cmp = tree_int_cst_compare (@1, @2);
     bool val;
     switch (code2)
       {
       case EQ_EXPR: val = (cmp == 0); break;
       case NE_EXPR: val = (cmp != 0); break;
       case LT_EXPR: val = (cmp < 0); break;
       case GT_EXPR: val = (cmp > 0); break;
       case LE_EXPR: val = (cmp <= 0); break;
       case GE_EXPR: val = (cmp >= 0); break;
       default: gcc_unreachable ();
       }
    }
    (switch
     (if (code1 == EQ_EXPR && val) @3)
     (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
     (if (code1 == NE_EXPR && !val) @4))))))

/* Convert (X OP1 CST1) && (X OP2 CST2).  */

(for code1 (lt le gt ge)
 (for code2 (lt le gt ge)
  (simplify
   (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2))
   (with
    {
     int cmp = tree_int_cst_compare (@1, @2);
    }
    (switch
     /* Choose the more restrictive of two < or <= comparisons.  */
     (if ((code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
       @3
       @4))
     /* Likewise choose the more restrictive of two > or >= comparisons.  */
     (if ((code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
       @3
       @4))
     /* Check for singleton ranges.  */
     (if (cmp == 0
          && ((code1 == LE_EXPR && code2 == GE_EXPR)
              || (code1 == GE_EXPR && code2 == LE_EXPR)))
      (eq @0 @1))
     /* Check for disjoint ranges.  */
     (if (cmp <= 0
          && (code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      { constant_boolean_node (false, type); })
     (if (cmp >= 0
          && (code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      { constant_boolean_node (false, type); })
     )))))

/* Convert (X == CST1) || (X OP2 CST2) to a known value
   based on CST1 OP2 CST2.  Similarly for (X != CST1).  */

(for code1 (eq ne)
 (for code2 (eq ne lt gt le ge)
  (simplify
   (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
   (with
    {
     int cmp = tree_int_cst_compare (@1, @2);
     bool val;
     switch (code2)
       {
       case EQ_EXPR: val = (cmp == 0); break;
       case NE_EXPR: val = (cmp != 0); break;
       case LT_EXPR: val = (cmp < 0); break;
       case GT_EXPR: val = (cmp > 0); break;
       case LE_EXPR: val = (cmp <= 0); break;
       case GE_EXPR: val = (cmp >= 0); break;
       default: gcc_unreachable ();
       }
    }
    (switch
     (if (code1 == EQ_EXPR && val) @4)
     (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); })
     (if (code1 == NE_EXPR && !val) @3))))))

/* Convert (X OP1 CST1) || (X OP2 CST2).  */

(for code1 (lt le gt ge)
 (for code2 (lt le gt ge)
  (simplify
   (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
   (with
    {
     int cmp = tree_int_cst_compare (@1, @2);
    }
    (switch
     /* Choose the more restrictive of two < or <= comparisons.  */
     (if ((code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
       @4
       @3))
     /* Likewise choose the more restrictive of two > or >= comparisons.  */
     (if ((code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
       @4
       @3))
     /* Check for singleton ranges.  */
     (if (cmp == 0
          && ((code1 == LT_EXPR && code2 == GT_EXPR)
              || (code1 == GT_EXPR && code2 == LT_EXPR)))
      (ne @0 @2))
     /* Check for disjoint ranges.  */
     (if (cmp >= 0
          && (code1 == LT_EXPR || code1 == LE_EXPR)
          && (code2 == GT_EXPR || code2 == GE_EXPR))
      { constant_boolean_node (true, type); })
     (if (cmp <= 0
          && (code1 == GT_EXPR || code1 == GE_EXPR)
          && (code2 == LT_EXPR || code2 == LE_EXPR))
      { constant_boolean_node (true, type); })
     )))))

/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c @0 (convert? (negate @1)))
  /* Apply STRIP_NOPS on the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
      tree t1 = type;
      if (INTEGRAL_TYPE_P (type)
          && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
        t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (minus (convert:t1 @0) (convert:t1 @1))))))
 /* A - (-B) -> A + B */
 (simplify
  (minus @0 (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
      tree t1 = type;
      if (INTEGRAL_TYPE_P (type)
          && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
        t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (plus (convert:t1 @0) (convert:t1 @1))))))
 /* -(T)(-A) -> (T)A
    Sign-extension is ok except for INT_MIN, which thankfully cannot
    happen without overflow.
*/
 (simplify
  (negate (convert (negate @1)))
  (if (INTEGRAL_TYPE_P (type)
       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
           || (!TYPE_UNSIGNED (TREE_TYPE (@1))
               && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
       && !TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (convert @1)))
 (simplify
  (negate (convert negate_expr_p@1))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ((DECIMAL_FLOAT_TYPE_P (type)
            == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
            && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
           || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
   (convert (negate @1))))
 (simplify
  (negate (nop_convert? (negate @1)))
  (if (!TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (view_convert @1)))

 /* We can't reassociate floating-point unless -fassociative-math
    or fixed-point plus or minus because of saturation to +-Inf.  */
 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
      && !FIXED_POINT_TYPE_P (type))

  /* Match patterns that allow contracting a plus-minus pair
     irrespective of overflow issues.  The five simplifys below
     implement these identities in order.  */
  /* (A +- B) - A       ->  +- B */
  /* (A +- B) -+ B      ->  A */
  /* A - (A +- B)       -> -+ B */
  /* A +- (B -+ A)      ->  +- B */
  (simplify
   (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
   (view_convert @1))
  (simplify
   (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
   (if (!ANY_INTEGRAL_TYPE_P (type)
        || TYPE_OVERFLOW_WRAPS (type))
   (negate (view_convert @1))
   (view_convert (negate @1))))
  (simplify
   (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
   (view_convert @0))
  (simplify
   (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
   (if (!ANY_INTEGRAL_TYPE_P (type)
        || TYPE_OVERFLOW_WRAPS (type))
   (negate (view_convert @1))
   (view_convert (negate @1))))
  (simplify
   (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
   (view_convert @1))
  /* (A +- B) + (C - A)   -> C +- B */
  /* (A +  B) - (A - C)   -> B + C */
  /* More cases are handled with comparisons.  */
  (simplify
   (plus:c (plus:c @0 @1) (minus @2 @0))
   (plus @2 @1))
  (simplify
   (plus:c (minus @0 @1) (minus @2 @0))
   (minus @2 @1))
  (simplify
   (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
   (if (TYPE_OVERFLOW_UNDEFINED (type)
        && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
    (pointer_diff @2 @1)))
  (simplify
   (minus (plus:c @0 @1) (minus @0 @2))
   (plus @1 @2))

  /* (A +- CST1) +- CST2 -> A + CST3
     Use view_convert because it is safe for vectors and equivalent for
     scalars.  */
  (for outer_op (plus minus)
   (for inner_op (plus minus)
        neg_inner_op (minus plus)
    (simplify
     (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
               CONSTANT_CLASS_P@2)
     /* If one of the types wraps, use that one.  */
     (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
      /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
         forever if something doesn't simplify into a constant.  */
      (if (!CONSTANT_CLASS_P (@0))
       (if (outer_op == PLUS_EXPR)
        (plus (view_convert @0) (inner_op @2 (view_convert @1)))
        (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
      (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
           || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       (if (outer_op == PLUS_EXPR)
        (view_convert (plus @0 (inner_op (view_convert @2) @1)))
        (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
       /* If the constant operation overflows we cannot do the transform
          directly as we would introduce undefined overflow, for example
          with (a - 1) + INT_MIN.  */
       (if (types_match (type, @0))
        (with { tree cst = const_binop (outer_op == inner_op
                                        ? PLUS_EXPR : MINUS_EXPR,
                                        type, @1, @2); }
         (if (cst && !TREE_OVERFLOW (cst))
          (inner_op @0 { cst; } )
          /* X+INT_MAX+1 is X-INT_MIN.  */
          (if (INTEGRAL_TYPE_P (type) && cst
               && wi::to_wide (cst) == wi::min_value (type))
           (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
           /* Last resort, use some unsigned type.  */
           (with { tree utype = unsigned_type_for (type); }
            (if (utype)
             (view_convert (inner_op
                            (view_convert:utype @0)
                            (view_convert:utype
                             { drop_tree_overflow (cst); }))))))))))))))

  /* (CST1 - A) +- CST2 -> CST3 - A  */
  (for outer_op (plus minus)
   (simplify
    (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
    /* If one of the types wraps, use that one.  */
    (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
     /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
        forever if something doesn't simplify into a constant.  */
     (if (!CONSTANT_CLASS_P (@0))
      (minus (outer_op (view_convert @1) @2) (view_convert @0)))
     (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
          || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
      (view_convert (minus (outer_op @1 (view_convert @2)) @0))
      (if (types_match (type, @0))
       (with { tree cst = const_binop (outer_op, type, @1, @2); }
        (if (cst && !TREE_OVERFLOW (cst))
         (minus { cst; } @0))))))))

  /* CST1 - (CST2 - A) -> CST3 + A
     Use view_convert because it is safe for vectors and equivalent for
     scalars.  */
  (simplify
   (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
   /* If one of the types wraps, use that one.  */
   (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
    /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
       forever if something doesn't simplify into a constant.  */
    (if (!CONSTANT_CLASS_P (@0))
     (plus (view_convert @0) (minus @1 (view_convert @2))))
    (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
         || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
     (view_convert (plus @0 (minus (view_convert @1) @2)))
     (if (types_match (type, @0))
      (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
       (if (cst && !TREE_OVERFLOW (cst))
        (plus { cst; } @0)))))))

/* ((T)(A)) + CST -> (T)(A + CST)  */
#if GIMPLE
  (simplify
   (plus (convert SSA_NAME@0) INTEGER_CST@1)
    (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
         && TREE_CODE (type) == INTEGER_TYPE
         && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
     /* Perform binary operation inside the cast if the constant fits
        and (A + CST)'s range does not overflow.
*/
     (with
      {
        wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
                          max_ovf = wi::OVF_OVERFLOW;
        tree inner_type = TREE_TYPE (@0);

        wide_int w1
          = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
                            TYPE_SIGN (inner_type));

        wide_int wmin0, wmax0;
        if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
          {
            wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
            wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
          }
      }
     (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
      (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
     )))
#endif

/* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2  */
#if GIMPLE
  (for op (plus minus)
   (simplify
    (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2)
     (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
          && TREE_CODE (type) == INTEGER_TYPE
          && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
          && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
          && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
          && TYPE_OVERFLOW_WRAPS (type))
       (plus (convert @0) (op @2 (convert @1))))))
#endif

  /* ~A + A -> -1 */
  (simplify
   (plus:c (bit_not @0) @0)
   (if (!TYPE_OVERFLOW_TRAPS (type))
    { build_all_ones_cst (type); }))

  /* ~A + 1 -> -A */
  (simplify
   (plus (convert? (bit_not @0)) integer_each_onep)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (negate (convert @0))))

  /* -A - 1 -> ~A */
  (simplify
   (minus (convert? (negate @0)) integer_each_onep)
   (if (!TYPE_OVERFLOW_TRAPS (type)
        && TREE_CODE (type) != COMPLEX_TYPE
        && tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (bit_not (convert @0))))

  /* -1 - A -> ~A */
  (simplify
   (minus integer_all_onesp @0)
   (if (TREE_CODE (type) != COMPLEX_TYPE)
    (bit_not @0)))

  /* (T)(P + A) - (T)P -> (T) A */
  (simplify
   (minus (convert (plus:c @@0 @1))
    (convert? @0))
   (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
        /* For integer types, if A has a smaller type
           than T the result depends on the possible
           overflow in P + A.
           E.g. T=size_t, A=(unsigned)4294967295, P>0.
           However, if an overflow in P + A would cause
           undefined behavior, we can assume that there
           is no overflow.  */
        || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
            && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
    (convert @1)))
  (simplify
   (minus (convert (pointer_plus @@0 @1))
    (convert @0))
   (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
        /* For pointer types, if the conversion of A to the
           final type requires a sign- or zero-extension,
           then we have to punt - it is not defined which
           one is correct.  */
        || (POINTER_TYPE_P (TREE_TYPE (@0))
            && TREE_CODE (@1) == INTEGER_CST
            && tree_int_cst_sign_bit (@1) == 0))
    (convert @1)))
   (simplify
    (pointer_diff (pointer_plus @@0 @1) @0)
    /* The second argument of pointer_plus must be interpreted as signed, and
       thus sign-extended if necessary.  */
    (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
     /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
        second arg is unsigned even when we need to consider it as signed,
        we don't want to diagnose overflow here.  */
     (convert (view_convert:stype @1))))

  /* (T)P - (T)(P + A) -> -(T) A */
  (simplify
   (minus (convert? @0)
    (convert (plus:c @@0 @1)))
   (if (INTEGRAL_TYPE_P (type)
        && TYPE_OVERFLOW_UNDEFINED (type)
        && element_precision (type) <= element_precision (TREE_TYPE (@1)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (negate (convert:utype @1))))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)4294967295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
     (negate (convert @1)))))
  (simplify
   (minus (convert @0)
    (convert (pointer_plus @@0 @1)))
   (if (INTEGRAL_TYPE_P (type)
        && TYPE_OVERFLOW_UNDEFINED (type)
        && element_precision (type) <= element_precision (TREE_TYPE (@1)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (negate (convert:utype @1))))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0))
     (negate (convert @1)))))
   (simplify
    (pointer_diff @0 (pointer_plus @@0 @1))
    /* The second argument of pointer_plus must be interpreted as signed, and
       thus sign-extended if necessary.  */
    (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
     /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
        second arg is unsigned even when we need to consider it as signed,
        we don't want to diagnose overflow here.  */
     (negate (convert (view_convert:stype @1)))))

  /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
  (simplify
   (minus (convert (plus:c @@0 @1))
    (convert (plus:c @0 @2)))
   (if (INTEGRAL_TYPE_P (type)
        && TYPE_OVERFLOW_UNDEFINED (type)
        && element_precision (type) <= element_precision (TREE_TYPE (@1))
        && element_precision (type) <= element_precision (TREE_TYPE (@2)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (minus (convert:utype @1) (convert:utype @2))))
    (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
          == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
         && (element_precision (type) <= element_precision (TREE_TYPE (@1))
             /* For integer types, if A has a smaller type
                than T the result depends on the possible
                overflow in P + A.
                E.g. T=size_t, A=(unsigned)4294967295, P>0.
                However, if an overflow in P + A would cause
                undefined behavior, we can assume that there
                is no overflow.  */
             || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
                 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
                 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
                 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
     (minus (convert @1) (convert @2)))))
  (simplify
   (minus (convert (pointer_plus @@0 @1))
    (convert (pointer_plus @0 @2)))
   (if (INTEGRAL_TYPE_P (type)
        && TYPE_OVERFLOW_UNDEFINED (type)
        && element_precision (type) <= element_precision (TREE_TYPE (@1)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (minus (convert:utype @1) (convert:utype @2))))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0
             && TREE_CODE (@2) == INTEGER_CST
             && tree_int_cst_sign_bit (@2) == 0))
     (minus (convert @1) (convert @2)))))
   (simplify
    (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
    /* The second argument of pointer_plus must be interpreted as signed, and
       thus sign-extended if necessary.  */
    (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
     /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
        second arg is unsigned even when we need to consider it as signed,
        we don't want to diagnose overflow here.  */
     (minus (convert (view_convert:stype @1))
            (convert (view_convert:stype @2)))))))

/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
    Modeled after fold_plusminus_mult_expr.  */
(if (!TYPE_SATURATING (type)
     && (!FLOAT_TYPE_P (type) || flag_associative_math))
 (for plusminus (plus minus)
  (simplify
   (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
   (if ((!ANY_INTEGRAL_TYPE_P (type)
         || TYPE_OVERFLOW_WRAPS (type)
         || (INTEGRAL_TYPE_P (type)
             && tree_expr_nonzero_p (@0)
             && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
        /* If @1 +- @2 is constant require a hard single-use on either
           original operand (but not on both).  */
        && (single_use (@3) || single_use (@4)))
    (mult (plusminus @1 @2) @0)))
 /* We cannot generate constant 1 for fract.  */
 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
  (simplify
   (plusminus @0 (mult:c@3 @0 @2))
   (if ((!ANY_INTEGRAL_TYPE_P (type)
         || TYPE_OVERFLOW_WRAPS (type)
         /* For @0 + @0*@2 this transformation would introduce UB
            (where there was none before) for @0 in [-1,0] and @2 max.
            For @0 - @0*@2 this transformation would introduce UB
            for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1.
*/ 2524 || (INTEGRAL_TYPE_P (type) 2525 && ((tree_expr_nonzero_p (@0) 2526 && expr_not_equal_to (@0, 2527 wi::minus_one (TYPE_PRECISION (type)))) 2528 || (plusminus == PLUS_EXPR 2529 ? expr_not_equal_to (@2, 2530 wi::max_value (TYPE_PRECISION (type), SIGNED)) 2531 /* Let's ignore the @0 -1 and @2 min case. */ 2532 : (expr_not_equal_to (@2, 2533 wi::min_value (TYPE_PRECISION (type), SIGNED)) 2534 && expr_not_equal_to (@2, 2535 wi::min_value (TYPE_PRECISION (type), SIGNED) 2536 + 1)))))) 2537 && single_use (@3)) 2538 (mult (plusminus { build_one_cst (type); } @2) @0))) 2539 (simplify 2540 (plusminus (mult:c@3 @0 @2) @0) 2541 (if ((!ANY_INTEGRAL_TYPE_P (type) 2542 || TYPE_OVERFLOW_WRAPS (type) 2543 /* For @0*@2 + @0 this transformation would introduce UB 2544 (where there was none before) for @0 in [-1,0] and @2 max. 2545 For @0*@2 - @0 this transformation would introduce UB 2546 for @0 0 and @2 min. */ 2547 || (INTEGRAL_TYPE_P (type) 2548 && ((tree_expr_nonzero_p (@0) 2549 && (plusminus == MINUS_EXPR 2550 || expr_not_equal_to (@0, 2551 wi::minus_one (TYPE_PRECISION (type))))) 2552 || expr_not_equal_to (@2, 2553 (plusminus == PLUS_EXPR 2554 ? wi::max_value (TYPE_PRECISION (type), SIGNED) 2555 : wi::min_value (TYPE_PRECISION (type), SIGNED)))))) 2556 && single_use (@3)) 2557 (mult (plusminus @2 { build_one_cst (type); }) @0)))))) 2558 2559/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */ 2560 2561(for minmax (min max FMIN_ALL FMAX_ALL) 2562 (simplify 2563 (minmax @0 @0) 2564 @0)) 2565/* min(max(x,y),y) -> y. */ 2566(simplify 2567 (min:c (max:c @0 @1) @1) 2568 @1) 2569/* max(min(x,y),y) -> y. */ 2570(simplify 2571 (max:c (min:c @0 @1) @1) 2572 @1) 2573/* max(a,-a) -> abs(a). */ 2574(simplify 2575 (max:c @0 (negate @0)) 2576 (if (TREE_CODE (type) != COMPLEX_TYPE 2577 && (! ANY_INTEGRAL_TYPE_P (type) 2578 || TYPE_OVERFLOW_UNDEFINED (type))) 2579 (abs @0))) 2580/* min(a,-a) -> -abs(a). 
   */
(simplify
 (min:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_UNDEFINED (type)))
  (negate (abs @0))))
/* min (x, TYPE_MIN) -> TYPE_MIN and min (x, TYPE_MAX) -> x.  */
(simplify
 (min @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @0)))
/* max (x, TYPE_MAX) -> TYPE_MAX and max (x, TYPE_MIN) -> x.  */
(simplify
 (max @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @0)))

/* max (a, a + CST) -> a + CST where CST is positive.  */
/* max (a, a + CST) -> a where CST is negative.  */
(simplify
 (max:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @2
    @0)))

/* min (a, a + CST) -> a where CST is positive.  */
/* min (a, a + CST) -> a + CST where CST is negative.  */
(simplify
 (min:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @0
    @2)))

/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
   and the outer convert demotes the expression back to x's type.  */
(for minmax (min max)
 (simplify
  (convert (minmax@0 (convert @1) INTEGER_CST@2))
  (if (INTEGRAL_TYPE_P (type)
       && types_match (@1, type) && int_fits_type_p (@2, type)
       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
       && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
   (minmax @1 (convert @2)))))

(for minmax (FMIN_ALL FMAX_ALL)
 /* If either argument is NaN, return the other one.  Avoid the
    transformation if we get (and honor) a signalling NaN.  */
 (simplify
  (minmax:c @0 REAL_CST@1)
  (if (real_isnan (TREE_REAL_CST_PTR (@1))
       && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
   @0)))
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR.  C99 requires these
   functions to return the numeric arg if the other one is NaN.
   MIN and MAX don't honor that, so only transform if -ffinite-math-only
   is set.  C99 doesn't require -0.0 to be handled, so we don't have to
   worry about it either.  */
(if (flag_finite_math_only)
 (simplify
  (FMIN_ALL @0 @1)
  (min @0 @1))
 (simplify
  (FMAX_ALL @0 @1)
  (max @0 @1)))
/* min (-A, -B) -> -max (A, B)  */
(for minmax (min max FMIN_ALL FMAX_ALL)
     maxmin (max min FMAX_ALL FMIN_ALL)
 (simplify
  (minmax (negate:s@2 @0) (negate:s@3 @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (negate (maxmin @0 @1)))))
/* MIN (~X, ~Y) -> ~MAX (X, Y)
   MAX (~X, ~Y) -> ~MIN (X, Y)  */
(for minmax (min max)
     maxmin (max min)
 (simplify
  (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
  (bit_not (maxmin @0 @1))))

/* MIN (X, Y) == X -> X <= Y  */
(for minmax (min min max max)
     cmp    (eq  ne  eq  ne )
     out    (le  gt  ge  lt )
 (simplify
  (cmp:c (minmax:c @0 @1) @0)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (out @0 @1))))
/* MIN (X, 5) == 0 -> X == 0
   MIN (X, 5) == 7 ->
false */ 2686(for cmp (eq ne) 2687 (simplify 2688 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2) 2689 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2), 2690 TYPE_SIGN (TREE_TYPE (@0)))) 2691 { constant_boolean_node (cmp == NE_EXPR, type); } 2692 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2), 2693 TYPE_SIGN (TREE_TYPE (@0)))) 2694 (cmp @0 @2))))) 2695(for cmp (eq ne) 2696 (simplify 2697 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2) 2698 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2), 2699 TYPE_SIGN (TREE_TYPE (@0)))) 2700 { constant_boolean_node (cmp == NE_EXPR, type); } 2701 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2), 2702 TYPE_SIGN (TREE_TYPE (@0)))) 2703 (cmp @0 @2))))) 2704/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */ 2705(for minmax (min min max max min min max max ) 2706 cmp (lt le gt ge gt ge lt le ) 2707 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and) 2708 (simplify 2709 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2) 2710 (comb (cmp @0 @2) (cmp @1 @2)))) 2711 2712/* Undo fancy way of writing max/min or other ?: expressions, 2713 like a - ((a - b) & -(a < b)), in this case into (a < b) ? b : a. 2714 People normally use ?: and that is what we actually try to optimize. */ 2715(for cmp (simple_comparison) 2716 (simplify 2717 (minus @0 (bit_and:c (minus @0 @1) 2718 (convert? (negate@4 (convert? (cmp@5 @2 @3)))))) 2719 (if (INTEGRAL_TYPE_P (type) 2720 && INTEGRAL_TYPE_P (TREE_TYPE (@4)) 2721 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE 2722 && INTEGRAL_TYPE_P (TREE_TYPE (@5)) 2723 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type) 2724 || !TYPE_UNSIGNED (TREE_TYPE (@4))) 2725 && (GIMPLE || !TREE_SIDE_EFFECTS (@1))) 2726 (cond (cmp @2 @3) @1 @0))) 2727 (simplify 2728 (plus:c @0 (bit_and:c (minus @1 @0) 2729 (convert? (negate@4 (convert? 
(cmp@5 @2 @3)))))) 2730 (if (INTEGRAL_TYPE_P (type) 2731 && INTEGRAL_TYPE_P (TREE_TYPE (@4)) 2732 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE 2733 && INTEGRAL_TYPE_P (TREE_TYPE (@5)) 2734 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type) 2735 || !TYPE_UNSIGNED (TREE_TYPE (@4))) 2736 && (GIMPLE || !TREE_SIDE_EFFECTS (@1))) 2737 (cond (cmp @2 @3) @1 @0)))) 2738 2739/* Simplifications of shift and rotates. */ 2740 2741(for rotate (lrotate rrotate) 2742 (simplify 2743 (rotate integer_all_onesp@0 @1) 2744 @0)) 2745 2746/* Optimize -1 >> x for arithmetic right shifts. */ 2747(simplify 2748 (rshift integer_all_onesp@0 @1) 2749 (if (!TYPE_UNSIGNED (type) 2750 && tree_expr_nonnegative_p (@1)) 2751 @0)) 2752 2753/* Optimize (x >> c) << c into x & (-1<<c). */ 2754(simplify 2755 (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1) 2756 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))) 2757 /* It doesn't matter if the right shift is arithmetic or logical. */ 2758 (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1)))) 2759 2760(simplify 2761 (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1) 2762 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)) 2763 /* Allow intermediate conversion to integral type with whatever sign, as 2764 long as the low TYPE_PRECISION (type) 2765 - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved. */ 2766 && INTEGRAL_TYPE_P (type) 2767 && INTEGRAL_TYPE_P (TREE_TYPE (@2)) 2768 && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 2769 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)) 2770 && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type) 2771 || wi::geu_p (wi::to_wide (@1), 2772 TYPE_PRECISION (type) 2773 - TYPE_PRECISION (TREE_TYPE (@2))))) 2774 (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1)))) 2775 2776/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned 2777 types. 
   */
(simplify
 (rshift (lshift @0 INTEGER_CST@1) @1)
 (if (TYPE_UNSIGNED (type)
      && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
  (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))

(for shiftrotate (lrotate rrotate lshift rshift)
 (simplify
  (shiftrotate @0 integer_zerop)
  (non_lvalue @0))
 (simplify
  (shiftrotate integer_zerop@0 @1)
  @0)
 /* Prefer vector1 << scalar to vector1 << vector2
    if vector2 is uniform.  */
 (for vec (VECTOR_CST CONSTRUCTOR)
  (simplify
   (shiftrotate @0 vec@1)
   (with { tree tem = uniform_vector_p (@1); }
    (if (tem)
     (shiftrotate @0 { tem; }))))))

/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
   Y is 0.  Similarly for X >> Y.  */
#if GIMPLE
(for shift (lshift rshift)
 (simplify
  (shift @0 SSA_NAME@1)
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
    (with {
      /* width is the number of bits needed to represent any valid shift
	 amount; if those bits of @1 are known zero the shift count is 0.  */
      int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
      int prec = TYPE_PRECISION (TREE_TYPE (@1));
     }
     (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
      @0)))))
#endif

/* Rewrite an LROTATE_EXPR by a constant into an
   RROTATE_EXPR by a new constant.  */
(simplify
 (lrotate @0 INTEGER_CST@1)
 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
			    build_int_cst (TREE_TYPE (@1),
					   element_precision (type)), @1); }))

/* Turn (a OP c1) OP c2 into a OP (c1+c2).  */
(for op (lrotate rrotate rshift lshift)
 (simplify
  (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
  (with { unsigned int prec = element_precision (type); }
   (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
	&& wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
	&& wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
	&& wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
    (with { unsigned int low = (tree_to_uhwi (@1)
				+ tree_to_uhwi (@2)); }
     /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
	being well defined.  */
     (if (low >= prec)
      (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
       (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
       (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
	{ build_zero_cst (type); }
	(op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
      (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))


/* ((1 << A) & 1) != 0 -> A == 0
   ((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
     icmp (eq ne)
 (simplify
  (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
  (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))

/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
   (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
   if CST2 != 0.
   */
(for cmp (ne eq)
 (simplify
  (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
  (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
   /* A negative cand, or a mismatch after re-shifting, means the equality
      can never hold, so the comparison folds to a constant.  */
   (if (cand < 0
	|| (!integer_zerop (@2)
	    && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
    { constant_boolean_node (cmp == NE_EXPR, type); }
    (if (!integer_zerop (@2)
	 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
     (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))

/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
	(X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
   if the new mask might be further optimized.  */
(for shift (lshift rshift)
 (simplify
  (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
           INTEGER_CST@2)
   (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
	&& TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
	&& tree_fits_uhwi_p (@1)
	&& tree_to_uhwi (@1) > 0
	&& tree_to_uhwi (@1) < TYPE_PRECISION (type))
    (with
     {
       unsigned int shiftc = tree_to_uhwi (@1);
       unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
       unsigned HOST_WIDE_INT newmask, zerobits = 0;
       tree shift_type = TREE_TYPE (@3);
       unsigned int prec;

       if (shift == LSHIFT_EXPR)
	 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
       else if (shift == RSHIFT_EXPR
		&& type_has_mode_precision_p (shift_type))
	 {
	   prec = TYPE_PRECISION (TREE_TYPE (@3));
	   /* NOTE(review): arg00 appears unused below — looks like a
	      leftover from the fold-const.c version of this transform.  */
	   tree arg00 = @0;
	   /* See if more bits can be proven as zero because of
	      zero extension.  */
	   if (@3 != @0
	       && TYPE_UNSIGNED (TREE_TYPE (@0)))
	     {
	       tree inner_type = TREE_TYPE (@0);
	       if (type_has_mode_precision_p (inner_type)
		   && TYPE_PRECISION (inner_type) < prec)
		 {
		   prec = TYPE_PRECISION (inner_type);
		   /* See if we can shorten the right shift.  */
		   if (shiftc < prec)
		     shift_type = inner_type;
		   /* Otherwise X >> C1 is all zeros, so we'll optimize
		      it into (X, 0) later on by making sure zerobits
		      is all ones.  */
		 }
	     }
	   zerobits = HOST_WIDE_INT_M1U;
	   if (shiftc < prec)
	     {
	       zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
	       zerobits <<= prec - shiftc;
	     }
	   /* For arithmetic shift if sign bit could be set, zerobits
	      can contain actually sign bits, so no transformation is
	      possible, unless MASK masks them all away.  In that
	      case the shift needs to be converted into logical shift.  */
	   if (!TYPE_UNSIGNED (TREE_TYPE (@3))
	       && prec == TYPE_PRECISION (TREE_TYPE (@3)))
	     {
	       if ((mask & zerobits) == 0)
		 shift_type = unsigned_type_for (TREE_TYPE (@3));
	       else
		 zerobits = 0;
	     }
	 }
     }
     /* ((X << 16) & 0xff00) is (X, 0).  */
     (if ((mask & zerobits) == mask)
      { build_int_cst (type, 0); }
      (with { newmask = mask | zerobits; }
       (if (newmask != mask && (newmask & (newmask + 1)) == 0)
	(with
	 {
	   /* Only do the transformation if NEWMASK is some integer
	      mode's mask.  */
	   for (prec = BITS_PER_UNIT;
	        prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
	     if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
	       break;
	 }
	 (if (prec < HOST_BITS_PER_WIDE_INT
	      || newmask == HOST_WIDE_INT_M1U)
	  (with
	   { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
	   (if (!tree_int_cst_equal (newmaskt, @2))
	    (if (shift_type != TREE_TYPE (@3))
	     (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
	     (bit_and @4 { newmaskt; })))))))))))))

/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
        (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1).
   */
(for shift (lshift rshift)
 (for bit_op (bit_and bit_xor bit_ior)
  (simplify
   (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
     (bit_op (shift (convert @0) @1) { mask; }))))))

/* ~(~X >> Y) -> X >> Y (for arithmetic shift).  */
(simplify
 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
      && (element_precision (TREE_TYPE (@0))
	  <= element_precision (TREE_TYPE (@1))
	  || !TYPE_UNSIGNED (TREE_TYPE (@1))))
  (with
   { tree shift_type = TREE_TYPE (@0); }
    (convert (rshift (convert:shift_type @1) @2)))))

/* ~(~X >>r Y) -> X >>r Y
   ~(~X <<r Y) -> X <<r Y */
(for rotate (lrotate rrotate)
 (simplify
  (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
   (if ((element_precision (TREE_TYPE (@0))
	 <= element_precision (TREE_TYPE (@1))
	 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
        && (element_precision (type) <= element_precision (TREE_TYPE (@0))
	    || !TYPE_UNSIGNED (TREE_TYPE (@0))))
    (with
     { tree rotate_type = TREE_TYPE (@0); }
      (convert (rotate (convert:rotate_type @1) @2))))))

/* Simplifications of conversions.  */

/* Basic strip-useless-type-conversions / strip_nops.  */
(for cvt (convert view_convert float fix_trunc)
 (simplify
  (cvt @0)
  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
       || (GENERIC && type == TREE_TYPE (@0)))
   @0)))

/* Contract view-conversions.  */
(simplify
  (view_convert (view_convert @0))
  (view_convert @0))

/* For integral conversions with the same precision or pointer
   conversions use a NOP_EXPR instead.  */
(simplify
  (view_convert @0)
  (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
   (convert @0)))

/* Strip inner integral conversions that do not change precision or size, or
   zero-extend while keeping the same size (for bool-to-char).  */
(simplify
  (view_convert (convert@0 @1))
  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
       && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
       && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
	   || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
	       && TYPE_UNSIGNED (TREE_TYPE (@1)))))
   (view_convert @1)))

/* Simplify a view-converted empty constructor.  */
(simplify
  (view_convert CONSTRUCTOR@0)
  (if (TREE_CODE (@0) != SSA_NAME
       && CONSTRUCTOR_NELTS (@0) == 0)
   { build_zero_cst (type); }))

/* Re-association barriers around constants and other re-association
   barriers can be removed.  */
(simplify
  (paren CONSTANT_CLASS_P@0)
  @0)
(simplify
  (paren (paren@1 @0))
  @1)

/* Handle cases of two conversions in a row.
   */
(for ocvt (convert float fix_trunc)
 (for icvt (convert float)
  (simplify
   (ocvt (icvt@1 @0))
   (with
    {
      tree inside_type = TREE_TYPE (@0);
      tree inter_type = TREE_TYPE (@1);
      int inside_int = INTEGRAL_TYPE_P (inside_type);
      int inside_ptr = POINTER_TYPE_P (inside_type);
      int inside_float = FLOAT_TYPE_P (inside_type);
      int inside_vec = VECTOR_TYPE_P (inside_type);
      unsigned int inside_prec = TYPE_PRECISION (inside_type);
      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
      int inter_int = INTEGRAL_TYPE_P (inter_type);
      int inter_ptr = POINTER_TYPE_P (inter_type);
      int inter_float = FLOAT_TYPE_P (inter_type);
      int inter_vec = VECTOR_TYPE_P (inter_type);
      unsigned int inter_prec = TYPE_PRECISION (inter_type);
      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
      int final_int = INTEGRAL_TYPE_P (type);
      int final_ptr = POINTER_TYPE_P (type);
      int final_float = FLOAT_TYPE_P (type);
      int final_vec = VECTOR_TYPE_P (type);
      unsigned int final_prec = TYPE_PRECISION (type);
      int final_unsignedp = TYPE_UNSIGNED (type);
    }
   (switch
    /* In addition to the cases of two conversions in a row
       handled below, if we are converting something to its own
       type via an object of identical or wider precision, neither
       conversion is needed.  */
    (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
	  || (GENERIC
	      && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
	 && (((inter_int || inter_ptr) && final_int)
	     || (inter_float && final_float))
	 && inter_prec >= final_prec)
     (ocvt @0))

    /* Likewise, if the intermediate and initial types are either both
       float or both integer, we don't need the middle conversion if the
       former is wider than the latter and doesn't change the signedness
       (for integers).  Avoid this if the final type is a pointer since
       then we sometimes need the middle conversion.  */
    (if (((inter_int && inside_int) || (inter_float && inside_float))
	 && (final_int || final_float)
	 && inter_prec >= inside_prec
	 && (inter_float || inter_unsignedp == inside_unsignedp))
     (ocvt @0))

    /* If we have a sign-extension of a zero-extended value, we can
       replace that by a single zero-extension.  Likewise if the
       final conversion does not change precision we can drop the
       intermediate conversion.  */
    (if (inside_int && inter_int && final_int
	 && ((inside_prec < inter_prec && inter_prec < final_prec
	      && inside_unsignedp && !inter_unsignedp)
	     || final_prec == inter_prec))
     (ocvt @0))

    /* Two conversions in a row are not needed unless:
	- some conversion is floating-point (overstrict for now), or
	- some conversion is a vector (overstrict for now), or
	- the intermediate type is narrower than both initial and
	  final, or
	- the intermediate type and innermost type differ in signedness,
	  and the outermost type is wider than the intermediate, or
	- the initial type is a pointer type and the precisions of the
	  intermediate and final types differ, or
	- the final type is a pointer type and the precisions of the
	  initial and intermediate types differ.  */
    (if (! inside_float && ! inter_float && ! final_float
	 && ! inside_vec && ! inter_vec && ! final_vec
	 && (inter_prec >= inside_prec || inter_prec >= final_prec)
	 && ! (inside_int && inter_int
	       && inter_unsignedp != inside_unsignedp
	       && inter_prec < final_prec)
	 && ((inter_unsignedp && inter_prec > inside_prec)
	     == (final_unsignedp && final_prec > inter_prec))
	 && ! (inside_ptr && inter_prec != final_prec)
	 && ! (final_ptr && inside_prec != inter_prec))
     (ocvt @0))

    /* A truncation to an unsigned type (a zero-extension) should be
       canonicalized as bitwise and of a mask.  */
    (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion.  */
	 && final_int && inter_int && inside_int
	 && final_prec == inside_prec
	 && final_prec > inter_prec
	 && inter_unsignedp)
     (convert (bit_and @0 { wide_int_to_tree
			      (inside_type,
			       wi::mask (inter_prec, false,
					 TYPE_PRECISION (inside_type))); })))

    /* If we are converting an integer to a floating-point that can
       represent it exactly and back to an integer, we can skip the
       floating-point conversion.  */
    (if (GIMPLE /* PR66211 */
	 && inside_int && inter_float && final_int &&
	 (unsigned) significand_size (TYPE_MODE (inter_type))
	 >= inside_prec - !inside_unsignedp)
     (convert @0)))))))

/* If we have a narrowing conversion to an integral type that is fed by a
   BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
   masks off bits outside the final type (and nothing else).  */
(simplify
  (convert (bit_and @0 INTEGER_CST@1))
  (if (INTEGRAL_TYPE_P (type)
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
       && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
						    TYPE_PRECISION (type)), 0))
   (convert @0)))


/* (X /[ex] A) * A -> X.  */
(simplify
  (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
  (convert @0))

/* Simplify (A / B) * B + (A % B) -> A.  */
(for div (trunc_div ceil_div floor_div round_div)
     mod (trunc_mod ceil_mod floor_mod round_mod)
 (simplify
  (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
  @0))

/* ((X /[ex] A) +- B) * A  -->  X +- A * B.  */
(for op (plus minus)
 (simplify
  (mult (convert1? (op (convert2?
			(exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
  (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
       && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
   (with
     {
       wi::overflow_type overflow;
       wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			       TYPE_SIGN (type), &overflow);
     }
     (if (types_match (type, TREE_TYPE (@2))
	 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
      (op @0 { wide_int_to_tree (type, mul); })
      /* If A * B would overflow, perform the arithmetic in the
	 corresponding unsigned type where overflow wraps.  */
      (with { tree utype = unsigned_type_for (type); }
       (convert (op (convert:utype @0)
		    (mult (convert:utype @1) (convert:utype @2))))))))))

/* Canonicalization of binary operations.  */

/* Convert X + -C into X - C.  */
(simplify
 (plus @0 REAL_CST@1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
  (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
   (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
    (minus @0 { tem; })))))

/* Convert x+x into x*2.  */
(simplify
 (plus @0 @0)
 (if (SCALAR_FLOAT_TYPE_P (type))
  (mult @0 { build_real (type, dconst2); })
  (if (INTEGRAL_TYPE_P (type))
   (mult @0 { build_int_cst (type, 2); }))))

/* 0 - X  ->  -X.  */
(simplify
 (minus integer_zerop @1)
 (negate @1))
(simplify
 (pointer_diff integer_zerop @1)
 (negate (convert @1)))

/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0).  So check whether
   ARG0 is zero and X + ARG0 reduces to X, since that would mean
   (-ARG1 + ARG0) reduces to -ARG1.  */
(simplify
 (minus real_zerop@0 @1)
 (if (fold_real_zero_addition_p (type, @0, 0))
  (negate @1)))

/* Transform x * -1 into -x.  */
(simplify
 (mult @0 integer_minus_onep)
 (negate @0))

/* Reassociate (X * CST) * Y to (X * Y) * CST.  This does not introduce
   signed overflow for CST != 0 && CST != -1.  */
(simplify
 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
 (if (TREE_CODE (@2) != INTEGER_CST
      && single_use (@3)
      && !integer_zerop (@1) && !integer_minus_onep (@1))
  (mult (mult @0 @2) @1)))

/* True if we can easily extract the real and imaginary parts of a complex
   number.  */
(match compositional_complex
 (convert? (complex @0 @1)))

/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations.  */
(simplify
 (complex (realpart @0) (imagpart @0))
 @0)
(simplify
 (realpart (complex @0 @1))
 @0)
(simplify
 (imagpart (complex @0 @1))
 @1)

/* Sometimes we only care about half of a complex expression.  */
(simplify
 (realpart (convert?:s (conj:s @0)))
 (convert (realpart @0)))
(simplify
 (imagpart (convert?:s (conj:s @0)))
 (convert (negate (imagpart @0))))
(for part (realpart imagpart)
 (for op (plus minus)
  (simplify
   (part (convert?:s@2 (op:s @0 @1)))
   (convert (op (part @0) (part @1))))))
(simplify
 (realpart (convert?:s (CEXPI:s @0)))
 (convert (COS @0)))
(simplify
 (imagpart (convert?:s (CEXPI:s @0)))
 (convert (SIN @0)))

/* conj(conj(x)) -> x  */
(simplify
 (conj (convert? (conj @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
  (convert @0)))

/* conj({x,y}) -> {x,-y}  */
(simplify
 (conj (convert?:s (complex:s @0 @1)))
 (with { tree itype = TREE_TYPE (type); }
  (complex (convert:itype @0) (negate (convert:itype @1)))))

/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c.
   */
(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
 (simplify
  (bswap (bswap @0))
  @0)
 (simplify
  (bswap (bit_not (bswap @0)))
  (bit_not @0))
 (for bitop (bit_xor bit_ior bit_and)
  (simplify
   (bswap (bitop:c (bswap @0) @1))
   (bitop @0 (bswap @1)))))


/* Combine COND_EXPRs and VEC_COND_EXPRs.  */

/* Simplify constant conditions.
   Only optimize constant conditions when the selected branch
   has the same type as the COND_EXPR.  This avoids optimizing
   away "c ? x : throw", where the throw has a void type.
   Note that we cannot throw away the fold-const.c variant nor
   this one as we depend on doing this transform before possibly
   A ? B : B -> B triggers and the fold-const.c one can optimize
   0 ? A : B to B even if A has side-effects.  Something
   genmatch cannot handle.  */
(simplify
 (cond INTEGER_CST@0 @1 @2)
 (if (integer_zerop (@0))
  (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
   @2)
  (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
   @1)))
(simplify
 (vec_cond VECTOR_CST@0 @1 @2)
 (if (integer_all_onesp (@0))
  @1
  (if (integer_zerop (@0))
   @2)))

/* Sink unary operations to constant branches, but only if we do fold it to
   constants.  */
(for op (negate bit_not abs absu)
 (simplify
  (op (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2))
  (with
   {
     tree cst1, cst2;
     cst1 = const_unop (op, type, @1);
     if (cst1)
       cst2 = const_unop (op, type, @2);
   }
   (if (cst1 && cst2)
    (vec_cond @0 { cst1; } { cst2; })))))

/* Simplification moved from fold_cond_expr_with_comparison.  It may also
   be extended.  */
/* This pattern implements two kinds simplification:

   Case 1)
   (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
     1) Conversions are type widening from smaller type.
     2) Const c1 equals to c2 after canonicalizing comparison.
     3) Comparison has tree code LT, LE, GT or GE.
   This specific pattern is needed when (cmp (convert x) c) may not
   be simplified by comparison patterns because of multiple uses of
   x.  It also makes sense here because simplifying across multiple
   referred var is always benefitial for complicated cases.

   Case 2)
   (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2).  */
(for cmp (lt le gt ge eq)
 (simplify
  (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
  (with
   {
     tree from_type = TREE_TYPE (@1);
     tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
     enum tree_code code = ERROR_MARK;

     if (INTEGRAL_TYPE_P (from_type)
	 && int_fits_type_p (@2, from_type)
	 && (types_match (c1_type, from_type)
	     || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
		 && (TYPE_UNSIGNED (from_type)
		     || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
	 && (types_match (c2_type, from_type)
	     || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
		 && (TYPE_UNSIGNED (from_type)
		     || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
       {
	 if (cmp != EQ_EXPR)
	   {
	     if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
	       {
		 /* X <= Y - 1 equals to X < Y.  */
		 if (cmp == LE_EXPR)
		   code = LT_EXPR;
		 /* X > Y - 1 equals to X >= Y.  */
		 if (cmp == GT_EXPR)
		   code = GE_EXPR;
	       }
	     if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
	       {
		 /* X < Y + 1 equals to X <= Y.  */
		 if (cmp == LT_EXPR)
		   code = LE_EXPR;
		 /* X >= Y + 1 equals to X > Y.
*/ 3395 if (cmp == GE_EXPR) 3396 code = GT_EXPR; 3397 } 3398 if (code != ERROR_MARK 3399 || wi::to_widest (@2) == wi::to_widest (@3)) 3400 { 3401 if (cmp == LT_EXPR || cmp == LE_EXPR) 3402 code = MIN_EXPR; 3403 if (cmp == GT_EXPR || cmp == GE_EXPR) 3404 code = MAX_EXPR; 3405 } 3406 } 3407 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */ 3408 else if (int_fits_type_p (@3, from_type)) 3409 code = EQ_EXPR; 3410 } 3411 } 3412 (if (code == MAX_EXPR) 3413 (convert (max @1 (convert @2))) 3414 (if (code == MIN_EXPR) 3415 (convert (min @1 (convert @2))) 3416 (if (code == EQ_EXPR) 3417 (convert (cond (eq @1 (convert @3)) 3418 (convert:from_type @3) (convert:from_type @2))))))))) 3419 3420/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if: 3421 3422 1) OP is PLUS or MINUS. 3423 2) CMP is LT, LE, GT or GE. 3424 3) C3 == (C1 op C2), and computation doesn't have undefined behavior. 3425 3426 This pattern also handles special cases like: 3427 3428 A) Operand x is a unsigned to signed type conversion and c1 is 3429 integer zero. In this case, 3430 (signed type)x < 0 <=> x > MAX_VAL(signed type) 3431 (signed type)x >= 0 <=> x <= MAX_VAL(signed type) 3432 B) Const c1 may not equal to (C3 op' C2). In this case we also 3433 check equality for (c1+1) and (c1-1) by adjusting comparison 3434 code. 3435 3436 TODO: Though signed type is handled by this pattern, it cannot be 3437 simplified at the moment because C standard requires additional 3438 type promotion. In order to match&simplify it here, the IR needs 3439 to be cleaned up by other optimizers, i.e, VRP. */ 3440(for op (plus minus) 3441 (for cmp (lt le gt ge) 3442 (simplify 3443 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3) 3444 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); } 3445 (if (types_match (from_type, to_type) 3446 /* Check if it is special case A). 
*/ 3447 || (TYPE_UNSIGNED (from_type) 3448 && !TYPE_UNSIGNED (to_type) 3449 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type) 3450 && integer_zerop (@1) 3451 && (cmp == LT_EXPR || cmp == GE_EXPR))) 3452 (with 3453 { 3454 wi::overflow_type overflow = wi::OVF_NONE; 3455 enum tree_code code, cmp_code = cmp; 3456 wide_int real_c1; 3457 wide_int c1 = wi::to_wide (@1); 3458 wide_int c2 = wi::to_wide (@2); 3459 wide_int c3 = wi::to_wide (@3); 3460 signop sgn = TYPE_SIGN (from_type); 3461 3462 /* Handle special case A), given x of unsigned type: 3463 ((signed type)x < 0) <=> (x > MAX_VAL(signed type)) 3464 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */ 3465 if (!types_match (from_type, to_type)) 3466 { 3467 if (cmp_code == LT_EXPR) 3468 cmp_code = GT_EXPR; 3469 if (cmp_code == GE_EXPR) 3470 cmp_code = LE_EXPR; 3471 c1 = wi::max_value (to_type); 3472 } 3473 /* To simplify this pattern, we require c3 = (c1 op c2). Here we 3474 compute (c3 op' c2) and check if it equals to c1 with op' being 3475 the inverted operator of op. Make sure overflow doesn't happen 3476 if it is undefined. */ 3477 if (op == PLUS_EXPR) 3478 real_c1 = wi::sub (c3, c2, sgn, &overflow); 3479 else 3480 real_c1 = wi::add (c3, c2, sgn, &overflow); 3481 3482 code = cmp_code; 3483 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type)) 3484 { 3485 /* Check if c1 equals to real_c1. Boundary condition is handled 3486 by adjusting comparison operation if necessary. */ 3487 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn) 3488 && !overflow) 3489 { 3490 /* X <= Y - 1 equals to X < Y. */ 3491 if (cmp_code == LE_EXPR) 3492 code = LT_EXPR; 3493 /* X > Y - 1 equals to X >= Y. */ 3494 if (cmp_code == GT_EXPR) 3495 code = GE_EXPR; 3496 } 3497 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn) 3498 && !overflow) 3499 { 3500 /* X < Y + 1 equals to X <= Y. */ 3501 if (cmp_code == LT_EXPR) 3502 code = LE_EXPR; 3503 /* X >= Y + 1 equals to X > Y. 
		   */
		if (cmp_code == GE_EXPR)
		  code = GT_EXPR;
	      }
	    /* If the adjusted comparison matched, or c1 matched exactly,
	       the whole expression collapses to a MIN/MAX.  */
	    if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
	      {
		if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
		  code = MIN_EXPR;
		if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
		  code = MAX_EXPR;
	      }
	  }
      }
      (if (code == MAX_EXPR)
       (op (max @X { wide_int_to_tree (from_type, real_c1); })
	   { wide_int_to_tree (from_type, c2); })
       (if (code == MIN_EXPR)
	(op (min @X { wide_int_to_tree (from_type, real_c1); })
	    { wide_int_to_tree (from_type, c2); })))))))))

/* Merge nested and inverted COND_EXPR/VEC_COND_EXPR arms.  */
(for cnd (cond vec_cond)
 /* A ? B : (A ? X : C) -> A ? B : C.  */
 (simplify
  (cnd @0 (cnd @0 @1 @2) @3)
  (cnd @0 @1 @3))
 (simplify
  (cnd @0 @1 (cnd @0 @2 @3))
  (cnd @0 @1 @3))
 /* A ? B : (!A ? C : X) -> A ? B : C.  */
 /* ??? This matches embedded conditions open-coded because genmatch
    would generate matching code for conditions in separate stmts only.
    The following is still important to merge then and else arm cases
    from if-conversion.  */
 (simplify
  (cnd @0 @1 (cnd @2 @3 @4))
  (if (inverse_conditions_p (@0, @2))
   (cnd @0 @1 @3)))
 (simplify
  (cnd @0 (cnd @1 @2 @3) @4)
  (if (inverse_conditions_p (@0, @1))
   (cnd @0 @3 @4)))

 /* A ? B : B -> B.  */
 (simplify
  (cnd @0 @1 @1)
  @1)

 /* !A ? B : C -> A ? C : B.  */
 (simplify
  (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
  (cnd @0 @2 @1)))

/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
   return all -1 or all 0 results.  */
/* ??? We could instead convert all instances of the vec_cond to negate,
   but that isn't necessarily a win on its own.  */
(simplify
 (plus:c @3 (view_convert?
	     (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
 (if (VECTOR_TYPE_P (type)
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
      && (TYPE_MODE (TREE_TYPE (type))
	  == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
  (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))

/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0).  */
(simplify
 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
 (if (VECTOR_TYPE_P (type)
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
      && (TYPE_MODE (TREE_TYPE (type))
	  == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
  (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))


/* Simplifications of comparisons.  */

/* See if we can reduce the magnitude of a constant involved in a
   comparison by changing the comparison code.  This is a canonicalization
   formerly done by maybe_canonicalize_comparison_1.
   E.g. X <= -C becomes X < -C+1 for negative C, and X >= C becomes
   X > C-1 for positive C, so the canonical constant is the one
   closer to zero.  */
(for cmp  (le gt)
     acmp (lt ge)
 (simplify
  (cmp @0 uniform_integer_cst_p@1)
  (with { tree cst = uniform_integer_cst_p (@1); }
   (if (tree_int_cst_sgn (cst) == -1)
     (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
				   wide_int_to_tree (TREE_TYPE (cst),
						     wi::to_wide (cst)
						     + 1)); })))))
(for cmp  (ge lt)
     acmp (gt le)
 (simplify
  (cmp @0 uniform_integer_cst_p@1)
  (with { tree cst = uniform_integer_cst_p (@1); }
   (if (tree_int_cst_sgn (cst) == 1)
    (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
				  wide_int_to_tree (TREE_TYPE (cst),
						    wi::to_wide (cst) - 1)); })))))

/* We can simplify a logical negation of a comparison to the
   inverted comparison.  As we cannot compute an expression
   operator using invert_tree_comparison we have to simulate
   that with expression code iteration.
   */
(for cmp (tcc_comparison)
     icmp (inverted_tcc_comparison)
     ncmp (inverted_tcc_comparison_with_nans)
 /* Ideally we'd like to combine the following two patterns
    and handle some more cases by using
      (logical_inverted_value (cmp @0 @1))
    here but for that genmatch would need to "inline" that.
    For now implement what forward_propagate_comparison did.  */
 (simplify
  (bit_not (cmp @0 @1))
  (if (VECTOR_TYPE_P (type)
       || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
   /* Comparison inversion may be impossible for trapping math,
      invert_tree_comparison will tell us.  But we can't use
      a computed operator in the replacement tree thus we have
      to play the trick below.  */
   (with { enum tree_code ic = invert_tree_comparison
             (cmp, HONOR_NANS (@0)); }
    (if (ic == icmp)
     (icmp @0 @1)
     (if (ic == ncmp)
      (ncmp @0 @1))))))
 /* ~cmp via XOR with all-ones truth value, same inversion trick.  */
 (simplify
  (bit_xor (cmp @0 @1) integer_truep)
  (with { enum tree_code ic = invert_tree_comparison
            (cmp, HONOR_NANS (@0)); }
   (if (ic == icmp)
    (icmp @0 @1)
    (if (ic == ncmp)
     (ncmp @0 @1))))))

/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
   ??? The transformation is valid for the other operators if overflow
   is undefined for the type, but performing it here badly interacts
   with the transformation in fold_cond_expr_with_comparison which
   attempts to synthesize ABS_EXPR.  */
(for cmp (eq ne)
 (for sub (minus pointer_diff)
  (simplify
   (cmp (sub@2 @0 @1) integer_zerop)
   (if (single_use (@2))
    (cmp @0 @1)))))

/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
   signed arithmetic case.  That form is created by the compiler
   often enough for folding it to be of value.  One example is in
   computing loop trip counts after Operator Strength Reduction.
   */
(for cmp (simple_comparison)
     scmp (swapped_simple_comparison)
 (simplify
  (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
  /* Handle unfolded multiplication by zero.  */
  (if (integer_zerop (@1))
   (cmp @1 @2)
   (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
	&& single_use (@3))
    /* If @1 is negative we swap the sense of the comparison.  */
    (if (tree_int_cst_sgn (@1) < 0)
     (scmp @0 @2)
     (cmp @0 @2))))))

/* Simplify comparison of something with itself.  For IEEE
   floating-point, we can only do some of these simplifications.  */
(for cmp (eq ge le)
 (simplify
  (cmp @0 @0)
  (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
       || ! HONOR_NANS (@0))
   { constant_boolean_node (true, type); }
   /* With NaNs, >= and <= reduce to an equality test (false for NaN).  */
   (if (cmp != EQ_EXPR)
    (eq @0 @0)))))
(for cmp (ne gt lt)
 (simplify
  (cmp @0 @0)
  (if (cmp != NE_EXPR
       || ! FLOAT_TYPE_P (TREE_TYPE (@0))
       || ! HONOR_NANS (@0))
   { constant_boolean_node (false, type); })))
(for cmp (unle unge uneq)
 (simplify
  (cmp @0 @0)
  { constant_boolean_node (true, type); }))
(for cmp (unlt ungt)
 (simplify
  (cmp @0 @0)
  (unordered @0 @0)))
(simplify
 (ltgt @0 @0)
 (if (!flag_trapping_math)
  { constant_boolean_node (false, type); }))

/* Fold ~X op ~Y as Y op X.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (bit_not@2 @0) (bit_not@3 @1))
  (if (single_use (@2) && single_use (@3))
   (cmp @1 @0))))

/* Fold ~X op C as X op' ~C, where op' is the swapped comparison.  */
(for cmp (simple_comparison)
     scmp (swapped_simple_comparison)
 (simplify
  (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
  (if (single_use (@2)
       && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
   (scmp @0 (bit_not @1)))))

(for cmp (simple_comparison)
 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2.
    */
 (simplify
  (cmp (convert@2 @0) (convert? @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
   (with
    {
      tree type1 = TREE_TYPE (@1);
      /* A real constant may be demoted to the narrowest type that still
	 represents it exactly (checked with exact_real_truncate).  */
      if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
	{
	  REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
	      && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
	    type1 = float_type_node;
	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
	      && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
	    type1 = double_type_node;
	}
      /* The comparison can be done in the wider of the operand types.  */
      tree newtype
	= (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
	   ? TREE_TYPE (@0) : type1);
    }
    (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
     (cmp (convert:newtype @0) (convert:newtype @1))))))

 (simplify
  (cmp @0 REAL_CST@1)
  /* IEEE doesn't distinguish +0 and -0 in comparisons.  */
  (switch
   /* a CMP (-0) -> a CMP 0  */
   (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
    (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
   /* x != NaN is always true, other ops are always false.  */
   (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
	&& ! HONOR_SNANS (@1))
    { constant_boolean_node (cmp == NE_EXPR, type); })
   /* Fold comparisons against infinity.  */
   (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
	&& MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
    (with
     {
       REAL_VALUE_TYPE max;
       enum tree_code code = cmp;
       bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
       /* Canonicalize x CMP -Inf to (-x) CMP' +Inf by swapping.  */
       if (neg)
	 code = swap_tree_comparison (code);
     }
     (switch
      /* x > +Inf is always false, if we ignore NaNs or exceptions.
	 */
      (if (code == GT_EXPR
	   && !(HONOR_NANS (@0) && flag_trapping_math))
       { constant_boolean_node (false, type); })
      (if (code == LE_EXPR)
       /* x <= +Inf is always true, if we don't care about NaNs.  */
       (if (! HONOR_NANS (@0))
	{ constant_boolean_node (true, type); }
	/* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
	   an "invalid" exception.  */
	(if (!flag_trapping_math)
	 (eq @0 @0))))
      /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
	 for == this introduces an exception for x a NaN.  */
      (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
	   || code == GE_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (neg)
	 (lt @0 { build_real (TREE_TYPE (@0), max); })
	 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
      /* x < +Inf is always equal to x <= DBL_MAX.  */
      (if (code == LT_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (neg)
	 (ge @0 { build_real (TREE_TYPE (@0), max); })
	 (le @0 { build_real (TREE_TYPE (@0), max); }))))
      /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
	 an exception for x a NaN so use an unordered comparison.  */
      (if (code == NE_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (! HONOR_NANS (@0))
	 (if (neg)
	  (ge @0 { build_real (TREE_TYPE (@0), max); })
	  (le @0 { build_real (TREE_TYPE (@0), max); }))
	 (if (neg)
	  (unge @0 { build_real (TREE_TYPE (@0), max); })
	  (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))

 /* If this is a comparison of a real constant with a PLUS_EXPR
    or a MINUS_EXPR of a real constant, we can convert it into a
    comparison with a revised real constant as long as no overflow
    occurs when unsafe_math_optimizations are enabled.
    */
 (if (flag_unsafe_math_optimizations)
  (for op (plus minus)
   (simplify
    (cmp (op @0 REAL_CST@1) REAL_CST@2)
    /* (x op c1) CMP c2 -> x CMP (c2 op' c1) with op' the inverse of op.  */
    (with
     {
       tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
			       TREE_TYPE (@1), @2, @1);
     }
     (if (tem && !TREE_OVERFLOW (tem))
      (cmp @0 { tem; }))))))

 /* Likewise, we can simplify a comparison of a real constant with
    a MINUS_EXPR whose first operand is also a real constant, i.e.
    (c1 - x) < c2 becomes x > c1-c2.  Reordering is allowed on
    floating-point types only if -fassociative-math is set.  */
 (if (flag_associative_math)
  (simplify
   (cmp (minus REAL_CST@0 @1) REAL_CST@2)
   (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
    (if (tem && !TREE_OVERFLOW (tem))
     (cmp { tem; } @1)))))

 /* Fold comparisons against built-in math functions.  */
 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
  (for sq (SQRT)
   (simplify
    (cmp (sq @0) REAL_CST@1)
    (switch
     (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
      (switch
       /* sqrt(x) < y is always false, if y is negative.  */
       (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
	{ constant_boolean_node (false, type); })
       /* sqrt(x) > y is always true, if y is negative and we
	  don't care about NaNs, i.e. negative values of x.  */
       (if (cmp == NE_EXPR || !HONOR_NANS (@0))
	{ constant_boolean_node (true, type); })
       /* sqrt(x) > y is the same as x >= 0, if y is negative.  */
       (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
     (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
      (switch
       /* sqrt(x) < 0 is always false.  */
       (if (cmp == LT_EXPR)
	{ constant_boolean_node (false, type); })
       /* sqrt(x) >= 0 is always true if we don't care about NaNs.  */
       (if (cmp == GE_EXPR && !HONOR_NANS (@0))
	{ constant_boolean_node (true, type); })
       /* sqrt(x) <= 0 -> x == 0.
	  */
       (if (cmp == LE_EXPR)
	(eq @0 @1))
       /* Otherwise sqrt(x) cmp 0 -> x cmp 0.  Here cmp can be >=, >,
	  == or !=.  In the last case:

	    (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)

	  if x is negative or NaN.  Due to -funsafe-math-optimizations,
	  the results for other x follow from natural arithmetic.  */
       (cmp @0 @1)))
     (if ((cmp == LT_EXPR
	   || cmp == LE_EXPR
	   || cmp == GT_EXPR
	   || cmp == GE_EXPR)
	  && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
	  /* Give up for -frounding-math.  */
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
      (with
       {
	 /* Compute c2 = c*c, rounded in the type's format; ncmp is the
	    comparison to use against c2 (or ERROR_MARK if unusable).  */
	 REAL_VALUE_TYPE c2;
	 enum tree_code ncmp = cmp;
	 const real_format *fmt
	   = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
	 real_arithmetic (&c2, MULT_EXPR,
			  &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
	 real_convert (&c2, fmt, &c2);
	 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
	    then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR.  */
	 if (!REAL_VALUE_ISINF (c2))
	   {
	     tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
					build_real (TREE_TYPE (@0), c2));
	     if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
	       ncmp = ERROR_MARK;
	     else if ((cmp == LT_EXPR || cmp == GE_EXPR)
		      && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
	       ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
	     else if ((cmp == LE_EXPR || cmp == GT_EXPR)
		      && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
	       ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
	     else
	       {
		 /* With rounding to even, sqrt of up to 3 different values
		    gives the same normal result, so in some cases c2 needs
		    to be adjusted.
		    */
		 REAL_VALUE_TYPE c2alt, tow;
		 /* Step c2 one ulp towards 0 (for <, >=) or towards
		    infinity (for <=, >) and accept the neighbour if its
		    sqrt still equals c.  */
		 if (cmp == LT_EXPR || cmp == GE_EXPR)
		   tow = dconst0;
		 else
		   real_inf (&tow);
		 real_nextafter (&c2alt, fmt, &c2, &tow);
		 real_convert (&c2alt, fmt, &c2alt);
		 if (REAL_VALUE_ISINF (c2alt))
		   ncmp = ERROR_MARK;
		 else
		   {
		     c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
					   build_real (TREE_TYPE (@0), c2alt));
		     if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
		       ncmp = ERROR_MARK;
		     else if (real_equal (&TREE_REAL_CST (c3),
					  &TREE_REAL_CST (@1)))
		       c2 = c2alt;
		   }
	       }
	   }
       }
       (if (cmp == GT_EXPR || cmp == GE_EXPR)
	(if (REAL_VALUE_ISINF (c2))
	 /* sqrt(x) > y is x == +Inf, when y is very large.  */
	 (if (HONOR_INFINITIES (@0))
	  (eq @0 { build_real (TREE_TYPE (@0), c2); })
	  { constant_boolean_node (false, type); })
	 /* sqrt(x) > c is the same as x > c*c.  */
	 (if (ncmp != ERROR_MARK)
	  (if (ncmp == GE_EXPR)
	   (ge @0 { build_real (TREE_TYPE (@0), c2); })
	   (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
	/* else if (cmp == LT_EXPR || cmp == LE_EXPR)  */
	(if (REAL_VALUE_ISINF (c2))
	 (switch
	  /* sqrt(x) < y is always true, when y is a very large
	     value and we don't care about NaNs or Infinities.  */
	  (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
	   { constant_boolean_node (true, type); })
	  /* sqrt(x) < y is x != +Inf when y is very large and we
	     don't care about NaNs.  */
	  (if (! HONOR_NANS (@0))
	   (ne @0 { build_real (TREE_TYPE (@0), c2); }))
	  /* sqrt(x) < y is x >= 0 when y is very large and we
	     don't care about Infinities.  */
	  (if (! HONOR_INFINITIES (@0))
	   (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
	  /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large.
	     */
	  (if (GENERIC)
	   (truth_andif
	    (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	    (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
	 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs.  */
	 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
	  (if (ncmp == LT_EXPR)
	   (lt @0 { build_real (TREE_TYPE (@0), c2); })
	   (le @0 { build_real (TREE_TYPE (@0), c2); }))
	  /* sqrt(x) < c is the same as x >= 0 && x < c*c.  */
	  (if (ncmp != ERROR_MARK && GENERIC)
	   (if (ncmp == LT_EXPR)
	    (truth_andif
	     (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	     (lt @0 { build_real (TREE_TYPE (@0), c2); }))
	    (truth_andif
	     (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	     (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
   /* Transform sqrt(x) cmp sqrt(y) -> x cmp y.  */
   (simplify
    (cmp (sq @0) (sq @1))
      (if (! HONOR_NANS (@0))
	(cmp @0 @1))))))

/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M.  */
(for cmp  (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
     icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
 (simplify
  (cmp (float@0 @1) (float @2))
   (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
	&& !
	   DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
    (with
     {
       format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
       tree type1 = TREE_TYPE (@1);
       bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
       tree type2 = TREE_TYPE (@2);
       bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
     }
     /* Both integer operands must be exactly representable in the
	float format, otherwise the conversions are not invertible.  */
     (if (fmt.can_represent_integral_type_p (type1)
	  && fmt.can_represent_integral_type_p (type2))
      /* The floats are never NaN here, so (un)ordered is a constant.  */
      (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
       { constant_boolean_node (cmp == ORDERED_EXPR, type); }
       (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
	    && type1_signed_p >= type2_signed_p)
	(icmp @1 (convert @2))
	(if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
	     && type1_signed_p <= type2_signed_p)
	 (icmp (convert:type2 @1) @2)
	 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
	      && type1_signed_p == type2_signed_p)
	  (icmp @1 @2))))))))))

/* Optimize various special cases of (FTYPE) N CMP CST.  */
(for cmp  (lt le eq ne ge gt)
     icmp (le le eq ne ge ge)
 (simplify
  (cmp (float @0) REAL_CST@1)
   (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
	&& ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
    (with
     {
       tree itype = TREE_TYPE (@0);
       format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
       const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
       /* Be careful to preserve any potential exceptions due to
	  NaNs.  qNaNs are ok in == or != context.
	  TODO: relax under -fno-trapping-math or
	  -fno-signaling-nans.  */
       bool exception_p
	 = real_isnan (cst) && (cst->signalling
				|| (cmp != EQ_EXPR && cmp != NE_EXPR));
     }
     /* TODO: allow non-fitting itype and SNaNs when
	-fno-trapping-math.  */
     (if (fmt.can_represent_integral_type_p (itype) && !
	 exception_p)
      (with
       {
	 signop isign = TYPE_SIGN (itype);
	 REAL_VALUE_TYPE imin, imax;
	 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
	 real_from_integer (&imax, fmt, wi::max_value (itype), isign);

	 /* Round CST towards the integer side that keeps the comparison
	    equivalent: up for >/>=, down for </<=, truncate for ==/!=.  */
	 REAL_VALUE_TYPE icst;
	 if (cmp == GT_EXPR || cmp == GE_EXPR)
	   real_ceil (&icst, fmt, cst);
	 else if (cmp == LT_EXPR || cmp == LE_EXPR)
	   real_floor (&icst, fmt, cst);
	 else
	   real_trunc (&icst, fmt, cst);

	 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);

	 bool overflow_p = false;
	 wide_int icst_val
	   = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
       }
       (switch
	/* Optimize cases when CST is outside of ITYPE's range.  */
	(if (real_compare (LT_EXPR, cst, &imin))
	 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
				  type); })
	(if (real_compare (GT_EXPR, cst, &imax))
	 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
				  type); })
	/* Remove cast if CST is an integer representable by ITYPE.  */
	(if (cst_int_p)
	 (cmp @0 { gcc_assert (!overflow_p);
		   wide_int_to_tree (itype, icst_val); })
	)
	/* When CST is fractional, optimize
	    (FTYPE) N == CST -> 0
	    (FTYPE) N != CST -> 1.  */
	(if (cmp == EQ_EXPR || cmp == NE_EXPR)
	 { constant_boolean_node (cmp == NE_EXPR, type); })
	/* Otherwise replace with sensible integer constant.  */
	(with
	 {
	   gcc_checking_assert (!overflow_p);
	 }
	 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))

/* Fold A /[ex] B CMP C to A CMP B * C.
   */
(for cmp (eq ne)
 (simplify
  (cmp (exact_div @0 @1) INTEGER_CST@2)
  (if (!integer_zerop (@1))
   (if (wi::to_wide (@2) == 0)
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST)
     (with
      {
	/* /[ex] divides exactly, so A == B*C unless B*C overflows, in
	   which case no A can match and the result is constant.  */
	wi::overflow_type ovf;
	wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
				 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
      }
      (if (ovf)
       { constant_boolean_node (cmp == NE_EXPR, type); }
       (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
(for cmp (lt le gt ge)
 (simplify
  (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
   (with
    {
      wi::overflow_type ovf;
      wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
			       TYPE_SIGN (TREE_TYPE (@1)), &ovf);
    }
    (if (ovf)
     /* On overflow the result depends only on the sign of C.  */
     { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
					TYPE_SIGN (TREE_TYPE (@2)))
			      != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
     (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))

/* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.

   For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
   For large C (more than min/B+2^size), this is also true, with the
   multiplication computed modulo 2^size.
   For intermediate C, this just tests the sign of A.
   */
(for cmp  (lt le gt ge)
     cmp2 (ge ge lt lt)
 (simplify
  (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
       && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
       && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
   (with
    {
      tree utype = TREE_TYPE (@2);
      wide_int denom = wi::to_wide (@1);
      wide_int right = wi::to_wide (@2);
      wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
      wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
      bool small = wi::leu_p (right, smax);
      bool large = wi::geu_p (right, smin);
    }
    (if (small || large)
     (cmp (convert:utype @0) (mult @2 (convert @1)))
     /* Intermediate C: the comparison reduces to the sign of A.  */
     (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))

/* Unordered tests if either argument is a NaN.  */
(simplify
 (bit_ior (unordered @0 @0) (unordered @1 @1))
 (if (types_match (@0, @1))
  (unordered @0 @1)))
(simplify
 (bit_and (ordered @0 @0) (ordered @1 @1))
 (if (types_match (@0, @1))
  (ordered @0 @1)))
(simplify
 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
 @2)
(simplify
 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
 @2)

/* Simple range test simplifications.  */
/* A < B || A >= B -> true.  */
(for test1 (lt le le le ne ge)
     test2 (ge gt ge ne eq ne)
 (simplify
  (bit_ior:c (test1 @0 @1) (test2 @0 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
   { constant_boolean_node (true, type); })))
/* A < B && A >= B -> false.
   */
(for test1 (lt lt lt le ne eq)
     test2 (ge gt eq gt eq gt)
 (simplify
  (bit_and:c (test1 @0 @1) (test2 @0 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
   { constant_boolean_node (false, type); })))

/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
   A & (2**N - 1) >  2**K - 1 -> A & (2**N - 2**K) != 0

   Note that comparisons
     A & (2**N - 1) <  2**K   -> A & (2**N - 2**K) == 0
     A & (2**N - 1) >= 2**K   -> A & (2**N - 2**K) != 0
   will be canonicalized to above so there's no need to
   consider them here.
 */

(for cmp (le gt)
     eqcmp (eq ne)
 (simplify
  (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (with
    {
      tree ty = TREE_TYPE (@0);
      unsigned prec = TYPE_PRECISION (ty);
      wide_int mask = wi::to_wide (@2, prec);
      wide_int rhs = wi::to_wide (@3, prec);
      signop sgn = TYPE_SIGN (ty);
    }
    /* mask & (mask + 1) == 0 checks that mask is 2**N - 1; likewise
       for rhs being 2**K - 1.  */
    (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
	 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
     (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
	    { build_zero_cst (ty); }))))))

/* -A CMP -B -> B CMP A.  */
(for cmp (tcc_comparison)
     scmp (swapped_tcc_comparison)
 (simplify
  (cmp (negate @0) (negate @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (scmp @0 @1)))
 /* -A CMP C -> A CMP' -C with the swapped comparison.  */
 (simplify
  (cmp (negate @0) CONSTANT_CLASS_P@1)
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
    (if (tem && !TREE_OVERFLOW (tem))
     (scmp @0 { tem; }))))))

/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0.
   */
(for op (eq ne)
 (simplify
  (op (abs @0) zerop@1)
  (op @0 @1)))

/* From fold_sign_changed_comparison and fold_widened_comparison.
   FIXME: the lack of symmetry is disturbing.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (convert@0 @00) (convert?@1 @10))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       /* Disable this optimization if we're casting a function pointer
	  type on targets that require function pointer canonicalization.  */
       && !(targetm.have_canonicalize_funcptr_for_compare ()
	    && ((POINTER_TYPE_P (TREE_TYPE (@00))
		 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
		|| (POINTER_TYPE_P (TREE_TYPE (@10))
		    && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
       && single_use (@0))
   /* Sign-change case: the conversion only changes signedness, not
      precision, so the comparison can be done on the inner operands.  */
   (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
	&& (TREE_CODE (@10) == INTEGER_CST
	    || @1 != @10)
	&& (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
	    || cmp == NE_EXPR
	    || cmp == EQ_EXPR)
	&& !POINTER_TYPE_P (TREE_TYPE (@00)))
    /* ??? The special-casing of INTEGER_CST conversion was in the original
       code and here to avoid a spurious overflow flag on the resulting
       constant which fold_convert produces.  */
    (if (TREE_CODE (@1) == INTEGER_CST)
     (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
				TREE_OVERFLOW (@1)); })
     (cmp @00 (convert @1)))

    (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
     /* If possible, express the comparison in the shorter mode.
	*/
     (if ((cmp == EQ_EXPR || cmp == NE_EXPR
	   || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
	   || (!TYPE_UNSIGNED (TREE_TYPE (@0))
	       && TYPE_UNSIGNED (TREE_TYPE (@00))))
	  && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
	      || ((TYPE_PRECISION (TREE_TYPE (@00))
		   >= TYPE_PRECISION (TREE_TYPE (@10)))
		  && (TYPE_UNSIGNED (TREE_TYPE (@00))
		      == TYPE_UNSIGNED (TREE_TYPE (@10))))
	      || (TREE_CODE (@10) == INTEGER_CST
		  && INTEGRAL_TYPE_P (TREE_TYPE (@00))
		  && int_fits_type_p (@10, TREE_TYPE (@00)))))
      (cmp @00 (convert @10))
      /* The constant does not fit in the shorter type: the comparison
	 is decided by @10 being above or below the type's range.  */
      (if (TREE_CODE (@10) == INTEGER_CST
	   && INTEGRAL_TYPE_P (TREE_TYPE (@00))
	   && !int_fits_type_p (@10, TREE_TYPE (@00)))
       (with
	{
	  tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
	  tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
	  bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
	  bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
	}
	(if (above || below)
	 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
	  { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
	  (if (cmp == LT_EXPR || cmp == LE_EXPR)
	   { constant_boolean_node (above ? true : false, type); }
	   (if (cmp == GT_EXPR || cmp == GE_EXPR)
	    { constant_boolean_node (above ? false : true, type); })))))))))))

(for cmp (eq ne)
 (simplify
  /* SSA names are canonicalized to 2nd place.  */
  (cmp addr@0 SSA_NAME@1)
  (with
   { poly_int64 off; tree base; }
   /* A local variable can never be pointed to by
      the default SSA name of an incoming parameter.
      */
   (if (SSA_NAME_IS_DEFAULT_DEF (@1)
	&& TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL
	&& (base = get_base_address (TREE_OPERAND (@0, 0)))
	&& TREE_CODE (base) == VAR_DECL
	&& auto_var_in_fn_p (base, current_function_decl))
    (if (cmp == NE_EXPR)
     { constant_boolean_node (true, type); }
     { constant_boolean_node (false, type); })
    /* If the address is based on @1 decide using the offset.  */
    (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off))
	 && TREE_CODE (base) == MEM_REF
	 && TREE_OPERAND (base, 0) == @1)
     (with { off += mem_ref_offset (base).force_shwi (); }
      (if (known_ne (off, 0))
       { constant_boolean_node (cmp == NE_EXPR, type); }
       (if (known_eq (off, 0))
	{ constant_boolean_node (cmp == EQ_EXPR, type); }))))))))

/* Equality compare simplifications from fold_binary  */
(for cmp (eq ne)

 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
    Similarly for NE_EXPR.  */
 (simplify
  (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); }))

 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y.  */
 (simplify
  (cmp (bit_xor @0 @1) integer_zerop)
  (cmp @0 @1))

 /* (X ^ Y) == Y becomes X == 0.
    Likewise (X ^ Y) == X becomes Y == 0.  */
 (simplify
  (cmp:c (bit_xor:c @0 @1) @0)
  (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))

 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2).  */
 (simplify
  (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
   (cmp @0 (bit_xor @1 (convert @2)))))

 /* A provably non-NULL address compared against zero is constant.  */
 (simplify
  (cmp (convert?
	addr@0) integer_zerop)
  (if (tree_single_nonzero_warnv_p (@0, NULL))
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* If we have (A & C) == C where C is a power of 2, convert this into
   (A & C) != 0.  Similarly for NE_EXPR.  */
(for cmp (eq ne)
     icmp (ne eq)
 (simplify
  (cmp (bit_and@2 @0 integer_pow2p@1) @1)
  (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))

/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
   convert this into a shift followed by ANDing with D.  */
(simplify
 (cond
  (ne (bit_and @0 integer_pow2p@1) integer_zerop)
  INTEGER_CST@2 integer_zerop)
 (if (integer_pow2p (@2))
  (with {
     /* Shift distance between the tested bit and the wanted bit;
	positive means shift left, negative means shift right.  */
     int shift = (wi::exact_log2 (wi::to_wide (@2))
		  - wi::exact_log2 (wi::to_wide (@1)));
   }
   (if (shift > 0)
    (bit_and
     (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
    (bit_and
     (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
     @2)))))

/* If we have (A & C) != 0 where C is the sign bit of A, convert
   this into A < 0.  Similarly for (A & C) == 0 into A >= 0.  */
(for cmp (eq ne)
     ncmp (ge lt)
 (simplify
  (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && type_has_mode_precision_p (TREE_TYPE (@0))
       && element_precision (@2) >= element_precision (@0)
       && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
    (ncmp (convert:stype @0) { build_zero_cst (stype); })))))

/* If we have A < 0 ? C : 0 where C is a power of 2, convert
   this into a right shift or sign extension followed by ANDing with C.
*/ 4383(simplify 4384 (cond 4385 (lt @0 integer_zerop) 4386 INTEGER_CST@1 integer_zerop) 4387 (if (integer_pow2p (@1) 4388 && !TYPE_UNSIGNED (TREE_TYPE (@0))) 4389 (with { 4390 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1; 4391 } 4392 (if (shift >= 0) 4393 (bit_and 4394 (convert (rshift @0 { build_int_cst (integer_type_node, shift); })) 4395 @1) 4396 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure 4397 sign extension followed by AND with C will achieve the effect. */ 4398 (bit_and (convert @0) @1))))) 4399 4400/* When the addresses are not directly of decls compare base and offset. 4401 This implements some remaining parts of fold_comparison address 4402 comparisons but still no complete part of it. Still it is good 4403 enough to make fold_stmt not regress when not dispatching to fold_binary. */ 4404(for cmp (simple_comparison) 4405 (simplify 4406 (cmp (convert1?@2 addr@0) (convert2? addr@1)) 4407 (with 4408 { 4409 poly_int64 off0, off1; 4410 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0); 4411 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1); 4412 if (base0 && TREE_CODE (base0) == MEM_REF) 4413 { 4414 off0 += mem_ref_offset (base0).force_shwi (); 4415 base0 = TREE_OPERAND (base0, 0); 4416 } 4417 if (base1 && TREE_CODE (base1) == MEM_REF) 4418 { 4419 off1 += mem_ref_offset (base1).force_shwi (); 4420 base1 = TREE_OPERAND (base1, 0); 4421 } 4422 } 4423 (if (base0 && base1) 4424 (with 4425 { 4426 int equal = 2; 4427 /* Punt in GENERIC on variables with value expressions; 4428 the value expressions might point to fields/elements 4429 of other vars etc. 
*/ 4430 if (GENERIC 4431 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0)) 4432 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1)))) 4433 ; 4434 else if (decl_in_symtab_p (base0) 4435 && decl_in_symtab_p (base1)) 4436 equal = symtab_node::get_create (base0) 4437 ->equal_address_to (symtab_node::get_create (base1)); 4438 else if ((DECL_P (base0) 4439 || TREE_CODE (base0) == SSA_NAME 4440 || TREE_CODE (base0) == STRING_CST) 4441 && (DECL_P (base1) 4442 || TREE_CODE (base1) == SSA_NAME 4443 || TREE_CODE (base1) == STRING_CST)) 4444 equal = (base0 == base1); 4445 if (equal == 0) 4446 { 4447 HOST_WIDE_INT ioff0 = -1, ioff1 = -1; 4448 off0.is_constant (&ioff0); 4449 off1.is_constant (&ioff1); 4450 if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST) 4451 || (TREE_CODE (base0) == STRING_CST && DECL_P (base1)) 4452 || (TREE_CODE (base0) == STRING_CST 4453 && TREE_CODE (base1) == STRING_CST 4454 && ioff0 >= 0 && ioff1 >= 0 4455 && ioff0 < TREE_STRING_LENGTH (base0) 4456 && ioff1 < TREE_STRING_LENGTH (base1) 4457 /* This is a too conservative test that the STRING_CSTs 4458 will not end up being string-merged. */ 4459 && strncmp (TREE_STRING_POINTER (base0) + ioff0, 4460 TREE_STRING_POINTER (base1) + ioff1, 4461 MIN (TREE_STRING_LENGTH (base0) - ioff0, 4462 TREE_STRING_LENGTH (base1) - ioff1)) != 0)) 4463 ; 4464 else if (!DECL_P (base0) || !DECL_P (base1)) 4465 equal = 2; 4466 else if (cmp != EQ_EXPR && cmp != NE_EXPR) 4467 equal = 2; 4468 /* If this is a pointer comparison, ignore for now even 4469 valid equalities where one pointer is the offset zero 4470 of one object and the other to one past end of another one. */ 4471 else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2))) 4472 ; 4473 /* Assume that automatic variables can't be adjacent to global 4474 variables. */ 4475 else if (is_global_var (base0) != is_global_var (base1)) 4476 ; 4477 else 4478 { 4479 tree sz0 = DECL_SIZE_UNIT (base0); 4480 tree sz1 = DECL_SIZE_UNIT (base1); 4481 /* If sizes are unknown, e.g. 
VLA or not representable, 4482 punt. */ 4483 if (!tree_fits_poly_int64_p (sz0) 4484 || !tree_fits_poly_int64_p (sz1)) 4485 equal = 2; 4486 else 4487 { 4488 poly_int64 size0 = tree_to_poly_int64 (sz0); 4489 poly_int64 size1 = tree_to_poly_int64 (sz1); 4490 /* If one offset is pointing (or could be) to the beginning 4491 of one object and the other is pointing to one past the 4492 last byte of the other object, punt. */ 4493 if (maybe_eq (off0, 0) && maybe_eq (off1, size1)) 4494 equal = 2; 4495 else if (maybe_eq (off1, 0) && maybe_eq (off0, size0)) 4496 equal = 2; 4497 /* If both offsets are the same, there are some cases 4498 we know that are ok. Either if we know they aren't 4499 zero, or if we know both sizes are no zero. */ 4500 if (equal == 2 4501 && known_eq (off0, off1) 4502 && (known_ne (off0, 0) 4503 || (known_ne (size0, 0) && known_ne (size1, 0)))) 4504 equal = 0; 4505 } 4506 } 4507 } 4508 } 4509 (if (equal == 1 4510 && (cmp == EQ_EXPR || cmp == NE_EXPR 4511 /* If the offsets are equal we can ignore overflow. */ 4512 || known_eq (off0, off1) 4513 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 4514 /* Or if we compare using pointers to decls or strings. 
*/ 4515 || (POINTER_TYPE_P (TREE_TYPE (@2)) 4516 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST)))) 4517 (switch 4518 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1))) 4519 { constant_boolean_node (known_eq (off0, off1), type); }) 4520 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1))) 4521 { constant_boolean_node (known_ne (off0, off1), type); }) 4522 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1))) 4523 { constant_boolean_node (known_lt (off0, off1), type); }) 4524 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1))) 4525 { constant_boolean_node (known_le (off0, off1), type); }) 4526 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1))) 4527 { constant_boolean_node (known_ge (off0, off1), type); }) 4528 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1))) 4529 { constant_boolean_node (known_gt (off0, off1), type); })) 4530 (if (equal == 0) 4531 (switch 4532 (if (cmp == EQ_EXPR) 4533 { constant_boolean_node (false, type); }) 4534 (if (cmp == NE_EXPR) 4535 { constant_boolean_node (true, type); }))))))))) 4536 4537/* Simplify pointer equality compares using PTA. */ 4538(for neeq (ne eq) 4539 (simplify 4540 (neeq @0 @1) 4541 (if (POINTER_TYPE_P (TREE_TYPE (@0)) 4542 && ptrs_compare_unequal (@0, @1)) 4543 { constant_boolean_node (neeq != EQ_EXPR, type); }))) 4544 4545/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST. 4546 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST. 4547 Disable the transform if either operand is pointer to function. 4548 This broke pr22051-2.c for arm where function pointer 4549 canonicalizaion is not wanted. 
*/ 4550 4551(for cmp (ne eq) 4552 (simplify 4553 (cmp (convert @0) INTEGER_CST@1) 4554 (if (((POINTER_TYPE_P (TREE_TYPE (@0)) 4555 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0))) 4556 && INTEGRAL_TYPE_P (TREE_TYPE (@1)) 4557 /* Don't perform this optimization in GENERIC if @0 has reference 4558 type when sanitizing. See PR101210. */ 4559 && !(GENERIC 4560 && TREE_CODE (TREE_TYPE (@0)) == REFERENCE_TYPE 4561 && (flag_sanitize & (SANITIZE_NULL | SANITIZE_ALIGNMENT)))) 4562 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 4563 && POINTER_TYPE_P (TREE_TYPE (@1)) 4564 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1))))) 4565 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))) 4566 (cmp @0 (convert @1))))) 4567 4568/* Non-equality compare simplifications from fold_binary */ 4569(for cmp (lt gt le ge) 4570 /* Comparisons with the highest or lowest possible integer of 4571 the specified precision will have known values. */ 4572 (simplify 4573 (cmp (convert?@2 @0) uniform_integer_cst_p@1) 4574 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) 4575 || POINTER_TYPE_P (TREE_TYPE (@1)) 4576 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1))) 4577 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))) 4578 (with 4579 { 4580 tree cst = uniform_integer_cst_p (@1); 4581 tree arg1_type = TREE_TYPE (cst); 4582 unsigned int prec = TYPE_PRECISION (arg1_type); 4583 wide_int max = wi::max_value (arg1_type); 4584 wide_int signed_max = wi::max_value (prec, SIGNED); 4585 wide_int min = wi::min_value (arg1_type); 4586 } 4587 (switch 4588 (if (wi::to_wide (cst) == max) 4589 (switch 4590 (if (cmp == GT_EXPR) 4591 { constant_boolean_node (false, type); }) 4592 (if (cmp == GE_EXPR) 4593 (eq @2 @1)) 4594 (if (cmp == LE_EXPR) 4595 { constant_boolean_node (true, type); }) 4596 (if (cmp == LT_EXPR) 4597 (ne @2 @1)))) 4598 (if (wi::to_wide (cst) == min) 4599 (switch 4600 (if (cmp == LT_EXPR) 4601 { constant_boolean_node (false, type); }) 4602 (if (cmp == LE_EXPR) 4603 (eq @2 @1)) 4604 (if (cmp == 
GE_EXPR) 4605 { constant_boolean_node (true, type); }) 4606 (if (cmp == GT_EXPR) 4607 (ne @2 @1)))) 4608 (if (wi::to_wide (cst) == max - 1) 4609 (switch 4610 (if (cmp == GT_EXPR) 4611 (eq @2 { build_uniform_cst (TREE_TYPE (@1), 4612 wide_int_to_tree (TREE_TYPE (cst), 4613 wi::to_wide (cst) 4614 + 1)); })) 4615 (if (cmp == LE_EXPR) 4616 (ne @2 { build_uniform_cst (TREE_TYPE (@1), 4617 wide_int_to_tree (TREE_TYPE (cst), 4618 wi::to_wide (cst) 4619 + 1)); })))) 4620 (if (wi::to_wide (cst) == min + 1) 4621 (switch 4622 (if (cmp == GE_EXPR) 4623 (ne @2 { build_uniform_cst (TREE_TYPE (@1), 4624 wide_int_to_tree (TREE_TYPE (cst), 4625 wi::to_wide (cst) 4626 - 1)); })) 4627 (if (cmp == LT_EXPR) 4628 (eq @2 { build_uniform_cst (TREE_TYPE (@1), 4629 wide_int_to_tree (TREE_TYPE (cst), 4630 wi::to_wide (cst) 4631 - 1)); })))) 4632 (if (wi::to_wide (cst) == signed_max 4633 && TYPE_UNSIGNED (arg1_type) 4634 /* We will flip the signedness of the comparison operator 4635 associated with the mode of @1, so the sign bit is 4636 specified by this mode. Check that @1 is the signed 4637 max associated with this sign bit. */ 4638 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type)) 4639 /* signed_type does not work on pointer types. */ 4640 && INTEGRAL_TYPE_P (arg1_type)) 4641 /* The following case also applies to X < signed_max+1 4642 and X >= signed_max+1 because previous transformations. 
*/ 4643 (if (cmp == LE_EXPR || cmp == GT_EXPR) 4644 (with { tree st = signed_type_for (TREE_TYPE (@1)); } 4645 (switch 4646 (if (cst == @1 && cmp == LE_EXPR) 4647 (ge (convert:st @0) { build_zero_cst (st); })) 4648 (if (cst == @1 && cmp == GT_EXPR) 4649 (lt (convert:st @0) { build_zero_cst (st); })) 4650 (if (cmp == LE_EXPR) 4651 (ge (view_convert:st @0) { build_zero_cst (st); })) 4652 (if (cmp == GT_EXPR) 4653 (lt (view_convert:st @0) { build_zero_cst (st); }))))))))))) 4654 4655(for cmp (unordered ordered unlt unle ungt unge uneq ltgt) 4656 /* If the second operand is NaN, the result is constant. */ 4657 (simplify 4658 (cmp @0 REAL_CST@1) 4659 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1)) 4660 && (cmp != LTGT_EXPR || ! flag_trapping_math)) 4661 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR 4662 ? false : true, type); }))) 4663 4664/* bool_var != 0 becomes bool_var. */ 4665(simplify 4666 (ne @0 integer_zerop) 4667 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE 4668 && types_match (type, TREE_TYPE (@0))) 4669 (non_lvalue @0))) 4670/* bool_var == 1 becomes bool_var. */ 4671(simplify 4672 (eq @0 integer_onep) 4673 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE 4674 && types_match (type, TREE_TYPE (@0))) 4675 (non_lvalue @0))) 4676/* Do not handle 4677 bool_var == 0 becomes !bool_var or 4678 bool_var != 1 becomes !bool_var 4679 here because that only is good in assignment context as long 4680 as we require a tcc_comparison in GIMPLE_CONDs where we'd 4681 replace if (x == 0) with tem = ~x; if (tem != 0) which is 4682 clearly less optimal and which we'll transform again in forwprop. */ 4683 4684/* When one argument is a constant, overflow detection can be simplified. 4685 Currently restricted to single use so as not to interfere too much with 4686 ADD_OVERFLOW detection in tree-ssa-math-opts.c. 
A + CST CMP A  ->  A CMP' CST'  */
(for cmp (lt le ge gt)
     out (gt gt le le)
 (simplify
  (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
  /* Valid only for unsigned wrapping arithmetic; restricted to a
     single use of the addition so ADD_OVERFLOW detection in
     tree-ssa-math-opts.c still sees the original form.  */
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && wi::to_wide (@1) != 0
       && single_use (@2))
   (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
    /* A + CST > A  becomes  A > (UINT_MAX - CST), etc.  */
    (out @0 { wide_int_to_tree (TREE_TYPE (@0),
                                wi::max_value (prec, UNSIGNED)
                                - wi::to_wide (@1)); })))))

/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
   However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
   expects the long form, so we restrict the transformation for now.  */
(for cmp (gt le)
 (simplify
  (cmp:c (minus@2 @0 @1) @0)
  (if (single_use (@2)
       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
   (cmp @1 @0))))

/* Testing for overflow is unnecessary if we already know the result.  */
/* A - B > A  ->  overflow flag of IFN_SUB_OVERFLOW is set.  */
(for cmp (gt le)
     out (ne eq)
 (simplify
  (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
/* A + B < A  ->  overflow flag of IFN_ADD_OVERFLOW is set.  */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))

/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
   Simplify it to __builtin_mul_overflow (A, B, <unused>).
 */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  /* integer_all_onesp is the all-ones (i.e. unsigned -1) dividend.  */
  (cmp:c (trunc_div:s integer_all_onesp @1) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
   (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
    /* The overflow flag is the imaginary part of IFN_MUL_OVERFLOW's
       complex result.  */
    (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))

/* Simplification of math builtins.  These rules must all be optimizations
   as well as IL simplifications.  If there is a possibility that the new
   form could be a pessimization, the rule should go in the canonicalization
   section that follows this one.

   Rules can generally go in this section if they satisfy one of
   the following:

   - the rule describes an identity

   - the rule replaces calls with something as simple as addition or
     multiplication

   - the rule contains unary calls only and simplifies the surrounding
     arithmetic.  (The idea here is to exclude non-unary calls in which
     one operand is constant and in which the call is known to be cheap
     when the operand has that value.)  */

(if (flag_unsafe_math_optimizations)
 /* Simplify sqrt(x) * sqrt(x) -> x.  */
 (simplify
  (mult (SQRT_ALL@1 @0) @1)
  (if (!HONOR_SNANS (type))
   @0))

 (for op (plus minus)
  /* Simplify (A / C) +- (B / C) -> (A +- B) / C.  */
  (simplify
   (op (rdiv @0 @1)
       (rdiv @2 @1))
   (rdiv (op @0 @2) @1)))

 (for cmp (lt le gt ge)
      neg_cmp (gt ge lt le)
  /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0.
*/ 4776 (simplify 4777 (cmp (mult @0 REAL_CST@1) REAL_CST@2) 4778 (with 4779 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); } 4780 (if (tem 4781 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem)) 4782 || (real_zerop (tem) && !real_zerop (@1)))) 4783 (switch 4784 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1))) 4785 (cmp @0 { tem; })) 4786 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0)) 4787 (neg_cmp @0 { tem; }))))))) 4788 4789 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */ 4790 (for root (SQRT CBRT) 4791 (simplify 4792 (mult (root:s @0) (root:s @1)) 4793 (root (mult @0 @1)))) 4794 4795 /* Simplify expN(x) * expN(y) -> expN(x+y). */ 4796 (for exps (EXP EXP2 EXP10 POW10) 4797 (simplify 4798 (mult (exps:s @0) (exps:s @1)) 4799 (exps (plus @0 @1)))) 4800 4801 /* Simplify a/root(b/c) into a*root(c/b). */ 4802 (for root (SQRT CBRT) 4803 (simplify 4804 (rdiv @0 (root:s (rdiv:s @1 @2))) 4805 (mult @0 (root (rdiv @2 @1))))) 4806 4807 /* Simplify x/expN(y) into x*expN(-y). */ 4808 (for exps (EXP EXP2 EXP10 POW10) 4809 (simplify 4810 (rdiv @0 (exps:s @1)) 4811 (mult @0 (exps (negate @1))))) 4812 4813 (for logs (LOG LOG2 LOG10 LOG10) 4814 exps (EXP EXP2 EXP10 POW10) 4815 /* logN(expN(x)) -> x. */ 4816 (simplify 4817 (logs (exps @0)) 4818 @0) 4819 /* expN(logN(x)) -> x. */ 4820 (simplify 4821 (exps (logs @0)) 4822 @0)) 4823 4824 /* Optimize logN(func()) for various exponential functions. We 4825 want to determine the value "x" and the power "exponent" in 4826 order to transform logN(x**exponent) into exponent*logN(x). */ 4827 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10) 4828 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2) 4829 (simplify 4830 (logs (exps @0)) 4831 (if (SCALAR_FLOAT_TYPE_P (type)) 4832 (with { 4833 tree x; 4834 switch (exps) 4835 { 4836 CASE_CFN_EXP: 4837 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). 
*/ 4838 x = build_real_truncate (type, dconst_e ()); 4839 break; 4840 CASE_CFN_EXP2: 4841 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */ 4842 x = build_real (type, dconst2); 4843 break; 4844 CASE_CFN_EXP10: 4845 CASE_CFN_POW10: 4846 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */ 4847 { 4848 REAL_VALUE_TYPE dconst10; 4849 real_from_integer (&dconst10, VOIDmode, 10, SIGNED); 4850 x = build_real (type, dconst10); 4851 } 4852 break; 4853 default: 4854 gcc_unreachable (); 4855 } 4856 } 4857 (mult (logs { x; }) @0))))) 4858 4859 (for logs (LOG LOG 4860 LOG2 LOG2 4861 LOG10 LOG10) 4862 exps (SQRT CBRT) 4863 (simplify 4864 (logs (exps @0)) 4865 (if (SCALAR_FLOAT_TYPE_P (type)) 4866 (with { 4867 tree x; 4868 switch (exps) 4869 { 4870 CASE_CFN_SQRT: 4871 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */ 4872 x = build_real (type, dconsthalf); 4873 break; 4874 CASE_CFN_CBRT: 4875 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */ 4876 x = build_real_truncate (type, dconst_third ()); 4877 break; 4878 default: 4879 gcc_unreachable (); 4880 } 4881 } 4882 (mult { x; } (logs @0)))))) 4883 4884 /* logN(pow(x,exponent)) -> exponent*logN(x). */ 4885 (for logs (LOG LOG2 LOG10) 4886 pows (POW) 4887 (simplify 4888 (logs (pows @0 @1)) 4889 (mult @1 (logs @0)))) 4890 4891 /* pow(C,x) -> exp(log(C)*x) if C > 0, 4892 or if C is a positive power of 2, 4893 pow(C,x) -> exp2(log2(C)*x). */ 4894#if GIMPLE 4895 (for pows (POW) 4896 exps (EXP) 4897 logs (LOG) 4898 exp2s (EXP2) 4899 log2s (LOG2) 4900 (simplify 4901 (pows REAL_CST@0 @1) 4902 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0) 4903 && real_isfinite (TREE_REAL_CST_PTR (@0)) 4904 /* As libmvec doesn't have a vectorized exp2, defer optimizing 4905 the use_exp2 case until after vectorization. 
It seems actually 4906 beneficial for all constants to postpone this until later, 4907 because exp(log(C)*x), while faster, will have worse precision 4908 and if x folds into a constant too, that is unnecessary 4909 pessimization. */ 4910 && canonicalize_math_after_vectorization_p ()) 4911 (with { 4912 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0); 4913 bool use_exp2 = false; 4914 if (targetm.libc_has_function (function_c99_misc) 4915 && value->cl == rvc_normal) 4916 { 4917 REAL_VALUE_TYPE frac_rvt = *value; 4918 SET_REAL_EXP (&frac_rvt, 1); 4919 if (real_equal (&frac_rvt, &dconst1)) 4920 use_exp2 = true; 4921 } 4922 } 4923 (if (!use_exp2) 4924 (if (optimize_pow_to_exp (@0, @1)) 4925 (exps (mult (logs @0) @1))) 4926 (exp2s (mult (log2s @0) @1))))))) 4927#endif 4928 4929 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */ 4930 (for pows (POW) 4931 exps (EXP EXP2 EXP10 POW10) 4932 logs (LOG LOG2 LOG10 LOG10) 4933 (simplify 4934 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2)) 4935 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0) 4936 && real_isfinite (TREE_REAL_CST_PTR (@0))) 4937 (exps (plus (mult (logs @0) @1) @2))))) 4938 4939 (for sqrts (SQRT) 4940 cbrts (CBRT) 4941 pows (POW) 4942 exps (EXP EXP2 EXP10 POW10) 4943 /* sqrt(expN(x)) -> expN(x*0.5). */ 4944 (simplify 4945 (sqrts (exps @0)) 4946 (exps (mult @0 { build_real (type, dconsthalf); }))) 4947 /* cbrt(expN(x)) -> expN(x/3). */ 4948 (simplify 4949 (cbrts (exps @0)) 4950 (exps (mult @0 { build_real_truncate (type, dconst_third ()); }))) 4951 /* pow(expN(x), y) -> expN(x*y). */ 4952 (simplify 4953 (pows (exps @0) @1) 4954 (exps (mult @0 @1)))) 4955 4956 /* tan(atan(x)) -> x. */ 4957 (for tans (TAN) 4958 atans (ATAN) 4959 (simplify 4960 (tans (atans @0)) 4961 @0))) 4962 4963 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). 
 */
 (for sins (SIN)
      atans (ATAN)
      sqrts (SQRT)
      copysigns (COPYSIGN)
  (simplify
   (sins (atans:s @0))
   (with
     {
      /* Threshold above which x*x + 1 rounds to x*x; past it the
         result is just +-1 with the sign of x.  */
      REAL_VALUE_TYPE r_cst;
      build_sinatan_real (&r_cst, type);
      tree t_cst = build_real (type, r_cst);
      tree t_one = build_one_cst (type);
     }
    (if (SCALAR_FLOAT_TYPE_P (type))
     (cond (lt (abs @0) { t_cst; })
      (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
      (copysigns { t_one; } @0))))))

/* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
 (for coss (COS)
      atans (ATAN)
      sqrts (SQRT)
      copysigns (COPYSIGN)
  (simplify
   (coss (atans:s @0))
   (with
     {
      REAL_VALUE_TYPE r_cst;
      build_sinatan_real (&r_cst, type);
      tree t_cst = build_real (type, r_cst);
      tree t_one = build_one_cst (type);
      tree t_zero = build_zero_cst (type);
     }
    (if (SCALAR_FLOAT_TYPE_P (type))
     (cond (lt (abs @0) { t_cst; })
      (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
      (copysigns { t_zero; } @0))))))

 (if (!flag_errno_math)
  /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
  (for sinhs (SINH)
       atanhs (ATANH)
       sqrts (SQRT)
   (simplify
    (sinhs (atanhs:s @0))
    (with { tree t_one = build_one_cst (type); }
     (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))

  /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x))  */
  (for coshs (COSH)
       atanhs (ATANH)
       sqrts (SQRT)
   (simplify
    (coshs (atanhs:s @0))
    (with { tree t_one = build_one_cst (type); }
     (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))

/* cabs(x+0i) or cabs(0+xi) -> abs(x).  */
(simplify
 (CABS (complex:C @0 real_zerop@1))
 (abs @0))

/* trunc(trunc(x)) -> trunc(x), etc.
 */
(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
 (simplify
  (fns (fns @0))
  (fns @0)))
/* f(x) -> x if x is integer valued and f does nothing for such values.  */
(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
 (simplify
  (fns integer_valued_real_p@0)
  @0))

/* hypot(x,0) and hypot(0,x) -> abs(x).  */
(simplify
 (HYPOT:c @0 real_zerop@1)
 (abs @0))

/* pow(1,x) -> 1.  */
(simplify
 (POW real_onep@0 @1)
 @0)

(simplify
 /* copysign(x,x) -> x.  */
 (COPYSIGN_ALL @0 @0)
 @0)

(simplify
 /* copysign(x,y) -> fabs(x) if y is nonnegative.  */
 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
 (abs @0))

(for scale (LDEXP SCALBN SCALBLN)
 /* ldexp(0, x) -> 0.  */
 (simplify
  (scale real_zerop@0 @1)
  @0)
 /* ldexp(x, 0) -> x.  */
 (simplify
  (scale @0 integer_zerop@1)
  @0)
 /* ldexp(x, y) -> x if x is +-Inf or NaN.  */
 (simplify
  (scale REAL_CST@0 @1)
  (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
   @0)))

/* Canonicalization of sequences of math builtins.  These rules represent
   IL simplifications but are not necessarily optimizations.

   The sincos pass is responsible for picking "optimal" implementations
   of math builtins, which may be more complicated and can sometimes go
   the other way, e.g. converting pow into a sequence of sqrts.
   We only want to do these canonicalizations before the pass has run.  */

(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
 /* Simplify tan(x) * cos(x) -> sin(x). */
 (simplify
  (mult:c (TAN:s @0) (COS:s @0))
  (SIN @0))

 /* Simplify x * pow(x,c) -> pow(x,c+1). */
 (simplify
  (mult:c @0 (POW:s @0 REAL_CST@1))
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (plus @1 { build_one_cst (type); }))))

 /* Simplify sin(x) / cos(x) -> tan(x).
 */
 (simplify
  (rdiv (SIN:s @0) (COS:s @0))
  (TAN @0))

 /* Simplify sinh(x) / cosh(x) -> tanh(x). */
 (simplify
  (rdiv (SINH:s @0) (COSH:s @0))
  (TANH @0))

 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
 (simplify
  (rdiv (COS:s @0) (SIN:s @0))
  (rdiv { build_one_cst (type); } (TAN @0)))

 /* Simplify sin(x) / tan(x) -> cos(x). */
 (simplify
  (rdiv (SIN:s @0) (TAN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (COS @0)))

 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
 (simplify
  (rdiv (TAN:s @0) (SIN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (rdiv { build_one_cst (type); } (COS @0))))

 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @0 @2))
  (POW @0 (plus @1 @2)))

 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @2 @1))
  (POW (mult @0 @2) @1))

 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
 (simplify
  (mult (POWI:s @0 @1) (POWI:s @2 @1))
  (POWI (mult @0 @2) @1))

 /* Simplify pow(x,c) / x -> pow(x,c-1). */
 (simplify
  (rdiv (POW:s @0 REAL_CST@1) @0)
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (minus @1 { build_one_cst (type); }))))

 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
 (simplify
  (rdiv @0 (POW:s @1 @2))
  (mult @0 (POW @1 (negate @2))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
  /* sqrt(sqrt(x)) -> pow(x,1/4). */
  (simplify
   (sqrts (sqrts @0))
   (pows @0 { build_real (type, dconst_quarter ()); }))
  /* sqrt(cbrt(x)) -> pow(x,1/6). */
  (simplify
   (sqrts (cbrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(sqrt(x)) -> pow(x,1/6).
 */
  (simplify
   (cbrts (sqrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative.  */
  (simplify
   (cbrts (cbrts tree_expr_nonnegative_p@0))
   (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
  /* sqrt(pow(x,y)) -> pow(|x|,y*0.5).  */
  (simplify
   (sqrts (pows @0 @1))
   (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
  /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative.  */
  (simplify
   (cbrts (pows tree_expr_nonnegative_p@0 @1))
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(sqrt(x),y) -> pow(x,y*0.5).  */
  (simplify
   (pows (sqrts @0) @1)
   (pows @0 (mult @1 { build_real (type, dconsthalf); })))
  /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative.  */
  (simplify
   (pows (cbrts tree_expr_nonnegative_p@0) @1)
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative.  */
  (simplify
   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
   (pows @0 (mult @1 @2))))

 /* cabs(x+xi) -> fabs(x)*sqrt(2).  */
 (simplify
  (CABS (complex @0 @0))
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* hypot(x,x) -> fabs(x)*sqrt(2).  */
 (simplify
  (HYPOT @0 @0)
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* cexp(x+yi) -> exp(x)*cexpi(y).  */
 (for cexps (CEXP)
      exps (EXP)
      cexpis (CEXPI)
  (simplify
   (cexps compositional_complex@0)
   (if (targetm.libc_has_function (function_c99_math_complex))
    (complex
     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
     (mult @1 (imagpart @2)))))))

(if (canonicalize_math_p ())
 /* floor(x) -> trunc(x) if x is nonnegative.
 */
 (for floors (FLOOR_ALL)
      truncs (TRUNC_ALL)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (truncs @0))))

/* Predicate: operand whose type's main variant is double.  */
(match double_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
(for froms (BUILT_IN_TRUNCL
            BUILT_IN_FLOORL
            BUILT_IN_CEILL
            BUILT_IN_ROUNDL
            BUILT_IN_NEARBYINTL
            BUILT_IN_RINTL)
     tos (BUILT_IN_TRUNC
          BUILT_IN_FLOOR
          BUILT_IN_CEIL
          BUILT_IN_ROUND
          BUILT_IN_NEARBYINT
          BUILT_IN_RINT)
 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (convert (tos @0)))))

/* Predicate: operand whose type's main variant is float.  */
(match float_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
            BUILT_IN_FLOORL BUILT_IN_FLOOR
            BUILT_IN_CEILL BUILT_IN_CEIL
            BUILT_IN_ROUNDL BUILT_IN_ROUND
            BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
            BUILT_IN_RINTL BUILT_IN_RINT)
     tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
          BUILT_IN_FLOORF BUILT_IN_FLOORF
          BUILT_IN_CEILF BUILT_IN_CEILF
          BUILT_IN_ROUNDF BUILT_IN_ROUNDF
          BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
          BUILT_IN_RINTF BUILT_IN_RINTF)
 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ()
      && targetm.libc_has_function (function_c99_misc))
  (simplify
   (froms (convert float_value_p@0))
   (convert (tos @0)))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL)
     tos (XFLOOR XCEIL XROUND XRINT)
 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double.
 */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL
            XFLOOR XCEIL XROUND XRINT)
     tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x) if x is nonnegative.  */
 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued.  */
 (for fns (IFLOOR LFLOOR LLFLOOR
           ICEIL LCEIL LLCEIL
           IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0)))
 (if (!flag_errno_math)
  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued.  */
  (for rints (IRINT LRINT LLRINT)
   (simplify
    (rints integer_valued_real_p@0)
    (fix_trunc @0)))))

(if (canonicalize_math_p ())
 (for ifn (IFLOOR ICEIL IROUND IRINT)
      lfn (LFLOOR LCEIL LROUND LRINT)
      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
     sizeof (int) == sizeof (long).  */
  (if (TYPE_PRECISION (integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
  /* Canonicalize llround (x) to lround (x) on LP64 targets where
     sizeof (long long) == sizeof (long).  */
  (if (TYPE_PRECISION (long_long_integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))

/* cproj(x) -> x if we're ignoring infinities.
 */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))

/* If the real part is inf and the imag part is known to be
   nonnegative, return (inf + 0i).  */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
  { build_complex_inf (type, false); }))

/* If the imag part is inf, return (inf+I*copysign(0,imag)).  */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))

/* Constant-exponent pow simplifications.  */
(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    REAL_VALUE_TYPE tmp;
   }
   (switch
    /* pow(x,0) -> 1.  */
    (if (real_equal (value, &dconst0))
     { build_real (type, dconst1); })
    /* pow(x,1) -> x.  */
    (if (real_equal (value, &dconst1))
     @0)
    /* pow(x,-1) -> 1/x.  */
    (if (real_equal (value, &dconstm1))
     (rdiv { build_real (type, dconst1); } @0))
    /* pow(x,0.5) -> sqrt(x).  */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && real_equal (value, &dconsthalf))
     (sqrts @0))
    /* pow(x,1/3) -> cbrt(x).  */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
             real_equal (value, &tmp)))
     (cbrts @0))))))

/* powi(1,x) -> 1.  */
(simplify
 (POWI real_onep@0 @1)
 @0)

(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
  /* powi(x,0) -> 1.  */
  (if (wi::to_wide (@1) == 0)
   { build_real (type, dconst1); })
  /* powi(x,1) -> x.  */
  (if (wi::to_wide (@1) == 1)
   @0)
  /* powi(x,-1) -> 1/x.  */
  (if (wi::to_wide (@1) == -1)
   (rdiv { build_real (type, dconst1); } @0))))

/* Narrowing of arithmetic and logical operations.
5388 5389 These are conceptually similar to the transformations performed for 5390 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long 5391 term we want to move all that code out of the front-ends into here. */ 5392 5393/* Convert (outertype)((innertype0)a+(innertype1)b) 5394 into ((newtype)a+(newtype)b) where newtype 5395 is the widest mode from all of these. */ 5396(for op (plus minus mult rdiv) 5397 (simplify 5398 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2))) 5399 /* If we have a narrowing conversion of an arithmetic operation where 5400 both operands are widening conversions from the same type as the outer 5401 narrowing conversion. Then convert the innermost operands to a 5402 suitable unsigned type (to avoid introducing undefined behavior), 5403 perform the operation and convert the result to the desired type. */ 5404 (if (INTEGRAL_TYPE_P (type) 5405 && op != MULT_EXPR 5406 && op != RDIV_EXPR 5407 /* We check for type compatibility between @0 and @1 below, 5408 so there's no need to check that @2/@4 are integral types. */ 5409 && INTEGRAL_TYPE_P (TREE_TYPE (@1)) 5410 && INTEGRAL_TYPE_P (TREE_TYPE (@3)) 5411 /* The precision of the type of each operand must match the 5412 precision of the mode of each operand, similarly for the 5413 result. */ 5414 && type_has_mode_precision_p (TREE_TYPE (@1)) 5415 && type_has_mode_precision_p (TREE_TYPE (@2)) 5416 && type_has_mode_precision_p (type) 5417 /* The inner conversion must be a widening conversion. */ 5418 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1)) 5419 && types_match (@1, type) 5420 && (types_match (@1, @2) 5421 /* Or the second operand is const integer or converted const 5422 integer from valueize. 
*/ 5423 || TREE_CODE (@2) == INTEGER_CST)) 5424 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) 5425 (op @1 (convert @2)) 5426 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); } 5427 (convert (op (convert:utype @1) 5428 (convert:utype @2))))) 5429 (if (FLOAT_TYPE_P (type) 5430 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)) 5431 == DECIMAL_FLOAT_TYPE_P (type)) 5432 (with { tree arg0 = strip_float_extensions (@1); 5433 tree arg1 = strip_float_extensions (@2); 5434 tree itype = TREE_TYPE (@0); 5435 tree ty1 = TREE_TYPE (arg0); 5436 tree ty2 = TREE_TYPE (arg1); 5437 enum tree_code code = TREE_CODE (itype); } 5438 (if (FLOAT_TYPE_P (ty1) 5439 && FLOAT_TYPE_P (ty2)) 5440 (with { tree newtype = type; 5441 if (TYPE_MODE (ty1) == SDmode 5442 || TYPE_MODE (ty2) == SDmode 5443 || TYPE_MODE (type) == SDmode) 5444 newtype = dfloat32_type_node; 5445 if (TYPE_MODE (ty1) == DDmode 5446 || TYPE_MODE (ty2) == DDmode 5447 || TYPE_MODE (type) == DDmode) 5448 newtype = dfloat64_type_node; 5449 if (TYPE_MODE (ty1) == TDmode 5450 || TYPE_MODE (ty2) == TDmode 5451 || TYPE_MODE (type) == TDmode) 5452 newtype = dfloat128_type_node; } 5453 (if ((newtype == dfloat32_type_node 5454 || newtype == dfloat64_type_node 5455 || newtype == dfloat128_type_node) 5456 && newtype == type 5457 && types_match (newtype, type)) 5458 (op (convert:newtype @1) (convert:newtype @2)) 5459 (with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype)) 5460 newtype = ty1; 5461 if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype)) 5462 newtype = ty2; } 5463 /* Sometimes this transformation is safe (cannot 5464 change results through affecting double rounding 5465 cases) and sometimes it is not. If NEWTYPE is 5466 wider than TYPE, e.g. 
(float)((long double)double 5467 + (long double)double) converted to 5468 (float)(double + double), the transformation is 5469 unsafe regardless of the details of the types 5470 involved; double rounding can arise if the result 5471 of NEWTYPE arithmetic is a NEWTYPE value half way 5472 between two representable TYPE values but the 5473 exact value is sufficiently different (in the 5474 right direction) for this difference to be 5475 visible in ITYPE arithmetic. If NEWTYPE is the 5476 same as TYPE, however, the transformation may be 5477 safe depending on the types involved: it is safe 5478 if the ITYPE has strictly more than twice as many 5479 mantissa bits as TYPE, can represent infinities 5480 and NaNs if the TYPE can, and has sufficient 5481 exponent range for the product or ratio of two 5482 values representable in the TYPE to be within the 5483 range of normal values of ITYPE. */ 5484 (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype) 5485 && (flag_unsafe_math_optimizations 5486 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type) 5487 && real_can_shorten_arithmetic (TYPE_MODE (itype), 5488 TYPE_MODE (type)) 5489 && !excess_precision_type (newtype))) 5490 && !types_match (itype, newtype)) 5491 (convert:type (op (convert:newtype @1) 5492 (convert:newtype @2))) 5493 )))) ) 5494 )) 5495))) 5496 5497/* This is another case of narrowing, specifically when there's an outer 5498 BIT_AND_EXPR which masks off bits outside the type of the innermost 5499 operands. Like the previous case we have to convert the operands 5500 to unsigned types to avoid introducing undefined behavior for the 5501 arithmetic operation. */ 5502(for op (minus plus) 5503 (simplify 5504 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4) 5505 (if (INTEGRAL_TYPE_P (type) 5506 /* We check for type compatibility between @0 and @1 below, 5507 so there's no need to check that @1/@3 are integral types. 
*/ 5508 && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 5509 && INTEGRAL_TYPE_P (TREE_TYPE (@2)) 5510 /* The precision of the type of each operand must match the 5511 precision of the mode of each operand, similarly for the 5512 result. */ 5513 && type_has_mode_precision_p (TREE_TYPE (@0)) 5514 && type_has_mode_precision_p (TREE_TYPE (@1)) 5515 && type_has_mode_precision_p (type) 5516 /* The inner conversion must be a widening conversion. */ 5517 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0)) 5518 && types_match (@0, @1) 5519 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0))) 5520 <= TYPE_PRECISION (TREE_TYPE (@0))) 5521 && (wi::to_wide (@4) 5522 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)), 5523 true, TYPE_PRECISION (type))) == 0) 5524 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 5525 (with { tree ntype = TREE_TYPE (@0); } 5526 (convert (bit_and (op @0 @1) (convert:ntype @4)))) 5527 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); } 5528 (convert (bit_and (op (convert:utype @0) (convert:utype @1)) 5529 (convert:utype @4)))))))) 5530 5531/* Transform (@0 < @1 and @0 < @2) to use min, 5532 (@0 > @1 and @0 > @2) to use max */ 5533(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior) 5534 op (lt le gt ge lt le gt ge ) 5535 ext (min min max max max max min min ) 5536 (simplify 5537 (logic (op:cs @0 @1) (op:cs @0 @2)) 5538 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 5539 && TREE_CODE (@0) != INTEGER_CST) 5540 (op @0 (ext @1 @2))))) 5541 5542(simplify 5543 /* signbit(x) -> 0 if x is nonnegative. */ 5544 (SIGNBIT tree_expr_nonnegative_p@0) 5545 { integer_zero_node; }) 5546 5547(simplify 5548 /* signbit(x) -> x<0 if x doesn't have signed zeros. */ 5549 (SIGNBIT @0) 5550 (if (!HONOR_SIGNED_ZEROS (@0)) 5551 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); })))) 5552 5553/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. 
*/ 5554(for cmp (eq ne) 5555 (for op (plus minus) 5556 rop (minus plus) 5557 (simplify 5558 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2) 5559 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2) 5560 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) 5561 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0)) 5562 && !TYPE_SATURATING (TREE_TYPE (@0))) 5563 (with { tree res = int_const_binop (rop, @2, @1); } 5564 (if (TREE_OVERFLOW (res) 5565 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 5566 { constant_boolean_node (cmp == NE_EXPR, type); } 5567 (if (single_use (@3)) 5568 (cmp @0 { TREE_OVERFLOW (res) 5569 ? drop_tree_overflow (res) : res; })))))))) 5570(for cmp (lt le gt ge) 5571 (for op (plus minus) 5572 rop (minus plus) 5573 (simplify 5574 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2) 5575 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2) 5576 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 5577 (with { tree res = int_const_binop (rop, @2, @1); } 5578 (if (TREE_OVERFLOW (res)) 5579 { 5580 fold_overflow_warning (("assuming signed overflow does not occur " 5581 "when simplifying conditional to constant"), 5582 WARN_STRICT_OVERFLOW_CONDITIONAL); 5583 bool less = cmp == LE_EXPR || cmp == LT_EXPR; 5584 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */ 5585 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0, 5586 TYPE_SIGN (TREE_TYPE (@1))) 5587 != (op == MINUS_EXPR); 5588 constant_boolean_node (less == ovf_high, type); 5589 } 5590 (if (single_use (@3)) 5591 (with 5592 { 5593 fold_overflow_warning (("assuming signed overflow does not occur " 5594 "when changing X +- C1 cmp C2 to " 5595 "X cmp C2 -+ C1"), 5596 WARN_STRICT_OVERFLOW_COMPARISON); 5597 } 5598 (cmp @0 { res; }))))))))) 5599 5600/* Canonicalizations of BIT_FIELD_REFs. 
*/ 5601 5602(simplify 5603 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4) 5604 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); })) 5605 5606(simplify 5607 (BIT_FIELD_REF (view_convert @0) @1 @2) 5608 (BIT_FIELD_REF @0 @1 @2)) 5609 5610(simplify 5611 (BIT_FIELD_REF @0 @1 integer_zerop) 5612 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0)))) 5613 (view_convert @0))) 5614 5615(simplify 5616 (BIT_FIELD_REF @0 @1 @2) 5617 (switch 5618 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE 5619 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0))))) 5620 (switch 5621 (if (integer_zerop (@2)) 5622 (view_convert (realpart @0))) 5623 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0))))) 5624 (view_convert (imagpart @0))))) 5625 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 5626 && INTEGRAL_TYPE_P (type) 5627 /* On GIMPLE this should only apply to register arguments. */ 5628 && (! GIMPLE || is_gimple_reg (@0)) 5629 /* A bit-field-ref that referenced the full argument can be stripped. */ 5630 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0 5631 && integer_zerop (@2)) 5632 /* Low-parts can be reduced to integral conversions. 5633 ??? The following doesn't work for PDP endian. */ 5634 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN 5635 /* Don't even think about BITS_BIG_ENDIAN. */ 5636 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0 5637 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0 5638 && compare_tree_int (@2, (BYTES_BIG_ENDIAN 5639 ? (TYPE_PRECISION (TREE_TYPE (@0)) 5640 - TYPE_PRECISION (type)) 5641 : 0)) == 0))) 5642 (convert @0)))) 5643 5644/* Simplify vector extracts. */ 5645 5646(simplify 5647 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2) 5648 (if (VECTOR_TYPE_P (TREE_TYPE (@0)) 5649 && (types_match (type, TREE_TYPE (TREE_TYPE (@0))) 5650 || (VECTOR_TYPE_P (type) 5651 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0)))))) 5652 (with 5653 { 5654 tree ctor = (TREE_CODE (@0) == SSA_NAME 5655 ? 
gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0); 5656 tree eltype = TREE_TYPE (TREE_TYPE (ctor)); 5657 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype)); 5658 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1); 5659 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2); 5660 } 5661 (if (n != 0 5662 && (idx % width) == 0 5663 && (n % width) == 0 5664 && known_le ((idx + n) / width, 5665 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))) 5666 (with 5667 { 5668 idx = idx / width; 5669 n = n / width; 5670 /* Constructor elements can be subvectors. */ 5671 poly_uint64 k = 1; 5672 if (CONSTRUCTOR_NELTS (ctor) != 0) 5673 { 5674 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value); 5675 if (TREE_CODE (cons_elem) == VECTOR_TYPE) 5676 k = TYPE_VECTOR_SUBPARTS (cons_elem); 5677 } 5678 unsigned HOST_WIDE_INT elt, count, const_k; 5679 } 5680 (switch 5681 /* We keep an exact subset of the constructor elements. */ 5682 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count)) 5683 (if (CONSTRUCTOR_NELTS (ctor) == 0) 5684 { build_constructor (type, NULL); } 5685 (if (count == 1) 5686 (if (elt < CONSTRUCTOR_NELTS (ctor)) 5687 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; }) 5688 { build_zero_cst (type); }) 5689 /* We don't want to emit new CTORs unless the old one goes away. 5690 ??? Eventually allow this if the CTOR ends up constant or 5691 uniform. */ 5692 (if (single_use (@0)) 5693 { 5694 vec<constructor_elt, va_gc> *vals; 5695 vec_alloc (vals, count); 5696 for (unsigned i = 0; 5697 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i) 5698 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE, 5699 CONSTRUCTOR_ELT (ctor, elt + i)->value); 5700 build_constructor (type, vals); 5701 })))) 5702 /* The bitfield references a single constructor element. 
*/ 5703 (if (k.is_constant (&const_k) 5704 && idx + n <= (idx / const_k + 1) * const_k) 5705 (switch 5706 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k) 5707 { build_zero_cst (type); }) 5708 (if (n == const_k) 5709 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; })) 5710 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; } 5711 @1 { bitsize_int ((idx % const_k) * width); }))))))))) 5712 5713/* Simplify a bit extraction from a bit insertion for the cases with 5714 the inserted element fully covering the extraction or the insertion 5715 not touching the extraction. */ 5716(simplify 5717 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos) 5718 (with 5719 { 5720 unsigned HOST_WIDE_INT isize; 5721 if (INTEGRAL_TYPE_P (TREE_TYPE (@1))) 5722 isize = TYPE_PRECISION (TREE_TYPE (@1)); 5723 else 5724 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1))); 5725 } 5726 (switch 5727 (if ((!INTEGRAL_TYPE_P (TREE_TYPE (@1)) 5728 || type_has_mode_precision_p (TREE_TYPE (@1))) 5729 && wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos)) 5730 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize), 5731 wi::to_wide (@ipos) + isize)) 5732 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype, 5733 wi::to_wide (@rpos) 5734 - wi::to_wide (@ipos)); })) 5735 (if (wi::geu_p (wi::to_wide (@ipos), 5736 wi::to_wide (@rpos) + wi::to_wide (@rsize)) 5737 || wi::geu_p (wi::to_wide (@rpos), 5738 wi::to_wide (@ipos) + isize)) 5739 (BIT_FIELD_REF @0 @rsize @rpos))))) 5740 5741(if (canonicalize_math_after_vectorization_p ()) 5742 (for fmas (FMA) 5743 (simplify 5744 (fmas:c (negate @0) @1 @2) 5745 (IFN_FNMA @0 @1 @2)) 5746 (simplify 5747 (fmas @0 @1 (negate @2)) 5748 (IFN_FMS @0 @1 @2)) 5749 (simplify 5750 (fmas:c (negate @0) @1 (negate @2)) 5751 (IFN_FNMS @0 @1 @2)) 5752 (simplify 5753 (negate (fmas@3 @0 @1 @2)) 5754 (if (single_use (@3)) 5755 (IFN_FNMS @0 @1 @2)))) 5756 5757 (simplify 5758 (IFN_FMS:c (negate @0) @1 @2) 5759 (IFN_FNMS @0 @1 @2)) 5760 (simplify 5761 
(IFN_FMS @0 @1 (negate @2)) 5762 (IFN_FMA @0 @1 @2)) 5763 (simplify 5764 (IFN_FMS:c (negate @0) @1 (negate @2)) 5765 (IFN_FNMA @0 @1 @2)) 5766 (simplify 5767 (negate (IFN_FMS@3 @0 @1 @2)) 5768 (if (single_use (@3)) 5769 (IFN_FNMA @0 @1 @2))) 5770 5771 (simplify 5772 (IFN_FNMA:c (negate @0) @1 @2) 5773 (IFN_FMA @0 @1 @2)) 5774 (simplify 5775 (IFN_FNMA @0 @1 (negate @2)) 5776 (IFN_FNMS @0 @1 @2)) 5777 (simplify 5778 (IFN_FNMA:c (negate @0) @1 (negate @2)) 5779 (IFN_FMS @0 @1 @2)) 5780 (simplify 5781 (negate (IFN_FNMA@3 @0 @1 @2)) 5782 (if (single_use (@3)) 5783 (IFN_FMS @0 @1 @2))) 5784 5785 (simplify 5786 (IFN_FNMS:c (negate @0) @1 @2) 5787 (IFN_FMS @0 @1 @2)) 5788 (simplify 5789 (IFN_FNMS @0 @1 (negate @2)) 5790 (IFN_FNMA @0 @1 @2)) 5791 (simplify 5792 (IFN_FNMS:c (negate @0) @1 (negate @2)) 5793 (IFN_FMA @0 @1 @2)) 5794 (simplify 5795 (negate (IFN_FNMS@3 @0 @1 @2)) 5796 (if (single_use (@3)) 5797 (IFN_FMA @0 @1 @2)))) 5798 5799/* POPCOUNT simplifications. */ 5800(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL 5801 BUILT_IN_POPCOUNTIMAX) 5802 /* popcount(X&1) is nop_expr(X&1). */ 5803 (simplify 5804 (popcount @0) 5805 (if (tree_nonzero_bits (@0) == 1) 5806 (convert @0))) 5807 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */ 5808 (simplify 5809 (plus (popcount:s @0) (popcount:s @1)) 5810 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0) 5811 (popcount (bit_ior @0 @1)))) 5812 /* popcount(X) == 0 is X == 0, and related (in)equalities. 
*/ 5813 (for cmp (le eq ne gt) 5814 rep (eq eq ne ne) 5815 (simplify 5816 (cmp (popcount @0) integer_zerop) 5817 (rep @0 { build_zero_cst (TREE_TYPE (@0)); })))) 5818 5819#if GIMPLE 5820/* 64- and 32-bits branchless implementations of popcount are detected: 5821 5822 int popcount64c (uint64_t x) 5823 { 5824 x -= (x >> 1) & 0x5555555555555555ULL; 5825 x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL); 5826 x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 5827 return (x * 0x0101010101010101ULL) >> 56; 5828 } 5829 5830 int popcount32c (uint32_t x) 5831 { 5832 x -= (x >> 1) & 0x55555555; 5833 x = (x & 0x33333333) + ((x >> 2) & 0x33333333); 5834 x = (x + (x >> 4)) & 0x0f0f0f0f; 5835 return (x * 0x01010101) >> 24; 5836 } */ 5837(simplify 5838 (rshift 5839 (mult 5840 (bit_and 5841 (plus:c 5842 (rshift @8 INTEGER_CST@5) 5843 (plus:c@8 5844 (bit_and @6 INTEGER_CST@7) 5845 (bit_and 5846 (rshift 5847 (minus@6 @0 5848 (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11)) 5849 INTEGER_CST@10) 5850 INTEGER_CST@9))) 5851 INTEGER_CST@3) 5852 INTEGER_CST@2) 5853 INTEGER_CST@1) 5854 /* Check constants and optab. 
*/ 5855 (with { unsigned prec = TYPE_PRECISION (type); 5856 int shift = (64 - prec) & 63; 5857 unsigned HOST_WIDE_INT c1 5858 = HOST_WIDE_INT_UC (0x0101010101010101) >> shift; 5859 unsigned HOST_WIDE_INT c2 5860 = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift; 5861 unsigned HOST_WIDE_INT c3 5862 = HOST_WIDE_INT_UC (0x3333333333333333) >> shift; 5863 unsigned HOST_WIDE_INT c4 5864 = HOST_WIDE_INT_UC (0x5555555555555555) >> shift; 5865 } 5866 (if (prec >= 16 5867 && prec <= 64 5868 && pow2p_hwi (prec) 5869 && TYPE_UNSIGNED (type) 5870 && integer_onep (@4) 5871 && wi::to_widest (@10) == 2 5872 && wi::to_widest (@5) == 4 5873 && wi::to_widest (@1) == prec - 8 5874 && tree_to_uhwi (@2) == c1 5875 && tree_to_uhwi (@3) == c2 5876 && tree_to_uhwi (@9) == c3 5877 && tree_to_uhwi (@7) == c3 5878 && tree_to_uhwi (@11) == c4 5879 && direct_internal_fn_supported_p (IFN_POPCOUNT, type, 5880 OPTIMIZE_FOR_BOTH)) 5881 (convert (IFN_POPCOUNT:type @0))))) 5882#endif 5883 5884/* Simplify: 5885 5886 a = a1 op a2 5887 r = c ? a : b; 5888 5889 to: 5890 5891 r = c ? a1 op a2 : b; 5892 5893 if the target can do it in one go. This makes the operation conditional 5894 on c, so could drop potentially-trapping arithmetic, but that's a valid 5895 simplification if the result of the operation isn't needed. 5896 5897 Avoid speculatively generating a stand-alone vector comparison 5898 on targets that might not support them. Any target implementing 5899 conditional internal functions must support the same comparisons 5900 inside and outside a VEC_COND_EXPR. */ 5901 5902#if GIMPLE 5903(for uncond_op (UNCOND_BINARY) 5904 cond_op (COND_BINARY) 5905 (simplify 5906 (vec_cond @0 (view_convert? 
(uncond_op@4 @1 @2)) @3) 5907 (with { tree op_type = TREE_TYPE (@4); } 5908 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type) 5909 && element_precision (type) == element_precision (op_type)) 5910 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3)))))) 5911 (simplify 5912 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3))) 5913 (with { tree op_type = TREE_TYPE (@4); } 5914 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type) 5915 && element_precision (type) == element_precision (op_type)) 5916 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1))))))) 5917 5918/* Same for ternary operations. */ 5919(for uncond_op (UNCOND_TERNARY) 5920 cond_op (COND_TERNARY) 5921 (simplify 5922 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4) 5923 (with { tree op_type = TREE_TYPE (@5); } 5924 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type) 5925 && element_precision (type) == element_precision (op_type)) 5926 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4)))))) 5927 (simplify 5928 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4))) 5929 (with { tree op_type = TREE_TYPE (@5); } 5930 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type) 5931 && element_precision (type) == element_precision (op_type)) 5932 (view_convert (cond_op (bit_not @0) @2 @3 @4 5933 (view_convert:op_type @1))))))) 5934#endif 5935 5936/* Detect cases in which a VEC_COND_EXPR effectively replaces the 5937 "else" value of an IFN_COND_*. */ 5938(for cond_op (COND_BINARY) 5939 (simplify 5940 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4) 5941 (with { tree op_type = TREE_TYPE (@3); } 5942 (if (element_precision (type) == element_precision (op_type)) 5943 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4)))))) 5944 (simplify 5945 (vec_cond @0 @1 (view_convert? 
(cond_op @2 @3 @4 @5))) 5946 (with { tree op_type = TREE_TYPE (@5); } 5947 (if (inverse_conditions_p (@0, @2) 5948 && element_precision (type) == element_precision (op_type)) 5949 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1))))))) 5950 5951/* Same for ternary operations. */ 5952(for cond_op (COND_TERNARY) 5953 (simplify 5954 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5) 5955 (with { tree op_type = TREE_TYPE (@4); } 5956 (if (element_precision (type) == element_precision (op_type)) 5957 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5)))))) 5958 (simplify 5959 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6))) 5960 (with { tree op_type = TREE_TYPE (@6); } 5961 (if (inverse_conditions_p (@0, @2) 5962 && element_precision (type) == element_precision (op_type)) 5963 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1))))))) 5964 5965/* For pointers @0 and @2 and nonnegative constant offset @1, look for 5966 expressions like: 5967 5968 A: (@0 + @1 < @2) | (@2 + @1 < @0) 5969 B: (@0 + @1 <= @2) | (@2 + @1 <= @0) 5970 5971 If pointers are known not to wrap, B checks whether @1 bytes starting 5972 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1 5973 bytes. A is more efficiently tested as: 5974 5975 A: (sizetype) (@0 + @1 - @2) > @1 * 2 5976 5977 The equivalent expression for B is given by replacing @1 with @1 - 1: 5978 5979 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2 5980 5981 @0 and @2 can be swapped in both expressions without changing the result. 5982 5983 The folds rely on sizetype's being unsigned (which is always true) 5984 and on its being the same width as the pointer (which we have to check). 5985 5986 The fold replaces two pointer_plus expressions, two comparisons and 5987 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in 5988 the best case it's a saving of two operations. 
The A fold retains one 5989 of the original pointer_pluses, so is a win even if both pointer_pluses 5990 are used elsewhere. The B fold is a wash if both pointer_pluses are 5991 used elsewhere, since all we end up doing is replacing a comparison with 5992 a pointer_plus. We do still apply the fold under those circumstances 5993 though, in case applying it to other conditions eventually makes one of the 5994 pointer_pluses dead. */ 5995(for ior (truth_orif truth_or bit_ior) 5996 (for cmp (le lt) 5997 (simplify 5998 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2) 5999 (cmp:cs (pointer_plus@4 @2 @1) @0)) 6000 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 6001 && TYPE_OVERFLOW_WRAPS (sizetype) 6002 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype)) 6003 /* Calculate the rhs constant. */ 6004 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0); 6005 offset_int rhs = off * 2; } 6006 /* Always fails for negative values. */ 6007 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype)) 6008 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p 6009 pick a canonical order. This increases the chances of using the 6010 same pointer_plus in multiple checks. */ 6011 (with { bool swap_p = tree_swap_operands_p (@0, @2); 6012 tree rhs_tree = wide_int_to_tree (sizetype, rhs); } 6013 (if (cmp == LT_EXPR) 6014 (gt (convert:sizetype 6015 (pointer_diff:ssizetype { swap_p ? @4 : @3; } 6016 { swap_p ? @0 : @2; })) 6017 { rhs_tree; }) 6018 (gt (convert:sizetype 6019 (pointer_diff:ssizetype 6020 (pointer_plus { swap_p ? @2 : @0; } 6021 { wide_int_to_tree (sizetype, off); }) 6022 { swap_p ? @0 : @2; })) 6023 { rhs_tree; }))))))))) 6024 6025/* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero 6026 element of @1. */ 6027(for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR) 6028 (simplify (reduc (view_convert? 
(bit_and @0 VECTOR_CST@1))) 6029 (with { int i = single_nonzero_element (@1); } 6030 (if (i >= 0) 6031 (with { tree elt = vector_cst_elt (@1, i); 6032 tree elt_type = TREE_TYPE (elt); 6033 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type)); 6034 tree size = bitsize_int (elt_bits); 6035 tree pos = bitsize_int (elt_bits * i); } 6036 (view_convert 6037 (bit_and:elt_type 6038 (BIT_FIELD_REF:elt_type @0 { size; } { pos; }) 6039 { elt; }))))))) 6040 6041(simplify 6042 (vec_perm @0 @1 VECTOR_CST@2) 6043 (with 6044 { 6045 tree op0 = @0, op1 = @1, op2 = @2; 6046 6047 /* Build a vector of integers from the tree mask. */ 6048 vec_perm_builder builder; 6049 if (!tree_to_vec_perm_builder (&builder, op2)) 6050 return NULL_TREE; 6051 6052 /* Create a vec_perm_indices for the integer vector. */ 6053 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type); 6054 bool single_arg = (op0 == op1); 6055 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts); 6056 } 6057 (if (sel.series_p (0, 1, 0, 1)) 6058 { op0; } 6059 (if (sel.series_p (0, 1, nelts, 1)) 6060 { op1; } 6061 (with 6062 { 6063 if (!single_arg) 6064 { 6065 if (sel.all_from_input_p (0)) 6066 op1 = op0; 6067 else if (sel.all_from_input_p (1)) 6068 { 6069 op0 = op1; 6070 sel.rotate_inputs (1); 6071 } 6072 else if (known_ge (poly_uint64 (sel[0]), nelts)) 6073 { 6074 std::swap (op0, op1); 6075 sel.rotate_inputs (1); 6076 } 6077 } 6078 gassign *def; 6079 tree cop0 = op0, cop1 = op1; 6080 if (TREE_CODE (op0) == SSA_NAME 6081 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0))) 6082 && gimple_assign_rhs_code (def) == CONSTRUCTOR) 6083 cop0 = gimple_assign_rhs1 (def); 6084 if (TREE_CODE (op1) == SSA_NAME 6085 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1))) 6086 && gimple_assign_rhs_code (def) == CONSTRUCTOR) 6087 cop1 = gimple_assign_rhs1 (def); 6088 6089 tree t; 6090 } 6091 (if ((TREE_CODE (cop0) == VECTOR_CST 6092 || TREE_CODE (cop0) == CONSTRUCTOR) 6093 && (TREE_CODE (cop1) == VECTOR_CST 6094 || TREE_CODE 
(cop1) == CONSTRUCTOR) 6095 && (t = fold_vec_perm (type, cop0, cop1, sel))) 6096 { t; } 6097 (with 6098 { 6099 bool changed = (op0 == op1 && !single_arg); 6100 tree ins = NULL_TREE; 6101 unsigned at = 0; 6102 6103 /* See if the permutation is performing a single element 6104 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR 6105 in that case. But only if the vector mode is supported, 6106 otherwise this is invalid GIMPLE. */ 6107 if (TYPE_MODE (type) != BLKmode 6108 && (TREE_CODE (cop0) == VECTOR_CST 6109 || TREE_CODE (cop0) == CONSTRUCTOR 6110 || TREE_CODE (cop1) == VECTOR_CST 6111 || TREE_CODE (cop1) == CONSTRUCTOR)) 6112 { 6113 bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1); 6114 if (insert_first_p) 6115 { 6116 /* After canonicalizing the first elt to come from the 6117 first vector we only can insert the first elt from 6118 the first vector. */ 6119 at = 0; 6120 if ((ins = fold_read_from_vector (cop0, sel[0]))) 6121 op0 = op1; 6122 } 6123 /* The above can fail for two-element vectors which always 6124 appear to insert the first element, so try inserting 6125 into the second lane as well. For more than two 6126 elements that's wasted time. */ 6127 if (!insert_first_p || (!ins && maybe_eq (nelts, 2u))) 6128 { 6129 unsigned int encoded_nelts = sel.encoding ().encoded_nelts (); 6130 for (at = 0; at < encoded_nelts; ++at) 6131 if (maybe_ne (sel[at], at)) 6132 break; 6133 if (at < encoded_nelts 6134 && (known_eq (at + 1, nelts) 6135 || sel.series_p (at + 1, 1, at + 1, 1))) 6136 { 6137 if (known_lt (poly_uint64 (sel[at]), nelts)) 6138 ins = fold_read_from_vector (cop0, sel[at]); 6139 else 6140 ins = fold_read_from_vector (cop1, sel[at] - nelts); 6141 } 6142 } 6143 } 6144 6145 /* Generate a canonical form of the selector. */ 6146 if (!ins && sel.encoding () != builder) 6147 { 6148 /* Some targets are deficient and fail to expand a single 6149 argument permutation while still allowing an equivalent 6150 2-argument version. 
*/ 6151 tree oldop2 = op2; 6152 if (sel.ninputs () == 2 6153 || can_vec_perm_const_p (TYPE_MODE (type), sel, false)) 6154 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel); 6155 else 6156 { 6157 vec_perm_indices sel2 (builder, 2, nelts); 6158 if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false)) 6159 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2); 6160 else 6161 /* Not directly supported with either encoding, 6162 so use the preferred form. */ 6163 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel); 6164 } 6165 if (!operand_equal_p (op2, oldop2, 0)) 6166 changed = true; 6167 } 6168 } 6169 (if (ins) 6170 (bit_insert { op0; } { ins; } 6171 { bitsize_int (at * tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))); }) 6172 (if (changed) 6173 (vec_perm { op0; } { op1; } { op2; })))))))))) 6174 6175/* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element. */ 6176 6177(match vec_same_elem_p 6178 @0 6179 (if (uniform_vector_p (@0)))) 6180 6181(match vec_same_elem_p 6182 (vec_duplicate @0)) 6183 6184(simplify 6185 (vec_perm vec_same_elem_p@0 @0 @1) 6186 @0) 6187 6188/* Match count trailing zeroes for simplify_count_trailing_zeroes in fwprop. 6189 The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic 6190 constant which when multiplied by a power of 2 contains a unique value 6191 in the top 5 or 6 bits. This is then indexed into a table which maps it 6192 to the number of trailing zeroes. */ 6193(match (ctz_table_index @1 @2 @3) 6194 (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3)) 6195