1/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding. 2 This file is consumed by genmatch which produces gimple-match.c 3 and generic-match.c from it. 4 5 Copyright (C) 2014-2019 Free Software Foundation, Inc. 6 Contributed by Richard Biener <rguenther@suse.de> 7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com> 8 9This file is part of GCC. 10 11GCC is free software; you can redistribute it and/or modify it under 12the terms of the GNU General Public License as published by the Free 13Software Foundation; either version 3, or (at your option) any later 14version. 15 16GCC is distributed in the hope that it will be useful, but WITHOUT ANY 17WARRANTY; without even the implied warranty of MERCHANTABILITY or 18FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 19for more details. 20 21You should have received a copy of the GNU General Public License 22along with GCC; see the file COPYING3. If not see 23<http://www.gnu.org/licenses/>. */ 24 25 26/* Generic tree predicates we inherit. */ 27(define_predicates 28 integer_onep integer_zerop integer_all_onesp integer_minus_onep 29 integer_each_onep integer_truep integer_nonzerop 30 real_zerop real_onep real_minus_onep 31 zerop 32 initializer_each_zero_or_onep 33 CONSTANT_CLASS_P 34 tree_expr_nonnegative_p 35 tree_expr_nonzero_p 36 integer_valued_real_p 37 integer_pow2p 38 uniform_integer_cst_p 39 HONOR_NANS) 40 41/* Operator lists. 
*/ 42(define_operator_list tcc_comparison 43 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt) 44(define_operator_list inverted_tcc_comparison 45 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq) 46(define_operator_list inverted_tcc_comparison_with_nans 47 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq) 48(define_operator_list swapped_tcc_comparison 49 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt) 50(define_operator_list simple_comparison lt le eq ne ge gt) 51(define_operator_list swapped_simple_comparison gt ge eq ne le lt) 52 53#include "cfn-operators.pd" 54 55/* Define operand lists for math rounding functions {,i,l,ll}FN, 56 where the versions prefixed with "i" return an int, those prefixed with 57 "l" return a long and those prefixed with "ll" return a long long. 58 59 Also define operand lists: 60 61 X<FN>F for all float functions, in the order i, l, ll 62 X<FN> for all double functions, in the same order 63 X<FN>L for all long double functions, in the same order. */ 64#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \ 65 (define_operator_list X##FN##F BUILT_IN_I##FN##F \ 66 BUILT_IN_L##FN##F \ 67 BUILT_IN_LL##FN##F) \ 68 (define_operator_list X##FN BUILT_IN_I##FN \ 69 BUILT_IN_L##FN \ 70 BUILT_IN_LL##FN) \ 71 (define_operator_list X##FN##L BUILT_IN_I##FN##L \ 72 BUILT_IN_L##FN##L \ 73 BUILT_IN_LL##FN##L) 74 75DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) 76DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL) 77DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND) 78DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) 79 80/* Binary operations and their associated IFN_COND_* function. */ 81(define_operator_list UNCOND_BINARY 82 plus minus 83 mult trunc_div trunc_mod rdiv 84 min max 85 bit_and bit_ior bit_xor) 86(define_operator_list COND_BINARY 87 IFN_COND_ADD IFN_COND_SUB 88 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV 89 IFN_COND_MIN IFN_COND_MAX 90 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR) 91 92/* Same for ternary operations. 
*/ 93(define_operator_list UNCOND_TERNARY 94 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS) 95(define_operator_list COND_TERNARY 96 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS) 97 98/* As opposed to convert?, this still creates a single pattern, so 99 it is not a suitable replacement for convert? in all cases. */ 100(match (nop_convert @0) 101 (convert @0) 102 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))))) 103(match (nop_convert @0) 104 (view_convert @0) 105 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0)) 106 && known_eq (TYPE_VECTOR_SUBPARTS (type), 107 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))) 108 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0)))))) 109/* This one has to be last, or it shadows the others. */ 110(match (nop_convert @0) 111 @0) 112 113/* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x> 114 ABSU_EXPR returns unsigned absolute value of the operand and the operand 115 of the ABSU_EXPR will have the corresponding signed type. */ 116(simplify (abs (convert @0)) 117 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 118 && !TYPE_UNSIGNED (TREE_TYPE (@0)) 119 && element_precision (type) > element_precision (TREE_TYPE (@0))) 120 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); } 121 (convert (absu:utype @0))))) 122 123 124/* Simplifications of operations with one constant operand and 125 simplifications to constants or single values. */ 126 127(for op (plus pointer_plus minus bit_ior bit_xor) 128 (simplify 129 (op @0 integer_zerop) 130 (non_lvalue @0))) 131 132/* 0 +p index -> (type)index */ 133(simplify 134 (pointer_plus integer_zerop @1) 135 (non_lvalue (convert @1))) 136 137/* ptr - 0 -> (type)ptr */ 138(simplify 139 (pointer_diff @0 integer_zerop) 140 (convert @0)) 141 142/* See if ARG1 is zero and X + ARG1 reduces to X. 143 Likewise if the operands are reversed. 
*/ 144(simplify 145 (plus:c @0 real_zerop@1) 146 (if (fold_real_zero_addition_p (type, @1, 0)) 147 (non_lvalue @0))) 148 149/* See if ARG1 is zero and X - ARG1 reduces to X. */ 150(simplify 151 (minus @0 real_zerop@1) 152 (if (fold_real_zero_addition_p (type, @1, 1)) 153 (non_lvalue @0))) 154 155/* Simplify x - x. 156 This is unsafe for certain floats even in non-IEEE formats. 157 In IEEE, it is unsafe because it does wrong for NaNs. 158 Also note that operand_equal_p is always false if an operand 159 is volatile. */ 160(simplify 161 (minus @0 @0) 162 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type)) 163 { build_zero_cst (type); })) 164(simplify 165 (pointer_diff @@0 @0) 166 { build_zero_cst (type); }) 167 168(simplify 169 (mult @0 integer_zerop@1) 170 @1) 171 172/* Maybe fold x * 0 to 0. The expressions aren't the same 173 when x is NaN, since x * 0 is also NaN. Nor are they the 174 same in modes with signed zeros, since multiplying a 175 negative value by 0 gives -0, not +0. */ 176(simplify 177 (mult @0 real_zerop@1) 178 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)) 179 @1)) 180 181/* In IEEE floating point, x*1 is not equivalent to x for snans. 182 Likewise for complex arithmetic with signed zeros. */ 183(simplify 184 (mult @0 real_onep) 185 (if (!HONOR_SNANS (type) 186 && (!HONOR_SIGNED_ZEROS (type) 187 || !COMPLEX_FLOAT_TYPE_P (type))) 188 (non_lvalue @0))) 189 190/* Transform x * -1.0 into -x. */ 191(simplify 192 (mult @0 real_minus_onep) 193 (if (!HONOR_SNANS (type) 194 && (!HONOR_SIGNED_ZEROS (type) 195 || !COMPLEX_FLOAT_TYPE_P (type))) 196 (negate @0))) 197 198/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...}, 199 unless the target has native support for the former but not the latter. */ 200(simplify 201 (mult @0 VECTOR_CST@1) 202 (if (initializer_each_zero_or_onep (@1) 203 && !HONOR_SNANS (type) 204 && !HONOR_SIGNED_ZEROS (type)) 205 (with { tree itype = FLOAT_TYPE_P (type) ? 
unsigned_type_for (type) : type; } 206 (if (itype 207 && (!VECTOR_MODE_P (TYPE_MODE (type)) 208 || (VECTOR_MODE_P (TYPE_MODE (itype)) 209 && optab_handler (and_optab, 210 TYPE_MODE (itype)) != CODE_FOR_nothing))) 211 (view_convert (bit_and:itype (view_convert @0) 212 (ne @1 { build_zero_cst (type); }))))))) 213 214(for cmp (gt ge lt le) 215 outp (convert convert negate negate) 216 outn (negate negate convert convert) 217 /* Transform X * (X > 0.0 ? 1.0 : -1.0) into abs(X). */ 218 /* Transform X * (X >= 0.0 ? 1.0 : -1.0) into abs(X). */ 219 /* Transform X * (X < 0.0 ? 1.0 : -1.0) into -abs(X). */ 220 /* Transform X * (X <= 0.0 ? 1.0 : -1.0) into -abs(X). */ 221 (simplify 222 (mult:c @0 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)) 223 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)) 224 (outp (abs @0)))) 225 /* Transform X * (X > 0.0 ? -1.0 : 1.0) into -abs(X). */ 226 /* Transform X * (X >= 0.0 ? -1.0 : 1.0) into -abs(X). */ 227 /* Transform X * (X < 0.0 ? -1.0 : 1.0) into abs(X). */ 228 /* Transform X * (X <= 0.0 ? -1.0 : 1.0) into abs(X). */ 229 (simplify 230 (mult:c @0 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)) 231 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)) 232 (outn (abs @0))))) 233 234/* Transform X * copysign (1.0, X) into abs(X). */ 235(simplify 236 (mult:c @0 (COPYSIGN_ALL real_onep @0)) 237 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)) 238 (abs @0))) 239 240/* Transform X * copysign (1.0, -X) into -abs(X). */ 241(simplify 242 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0))) 243 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)) 244 (negate (abs @0)))) 245 246/* Transform copysign (CST, X) into copysign (ABS(CST), X). */ 247(simplify 248 (COPYSIGN_ALL REAL_CST@0 @1) 249 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0))) 250 (COPYSIGN_ALL (negate @0) @1))) 251 252/* X * 1, X / 1 -> X. 
*/ 253(for op (mult trunc_div ceil_div floor_div round_div exact_div) 254 (simplify 255 (op @0 integer_onep) 256 (non_lvalue @0))) 257 258/* (A / (1 << B)) -> (A >> B). 259 Only for unsigned A. For signed A, this would not preserve rounding 260 toward zero. 261 For example: (-1 / ( 1 << B)) != -1 >> B. */ 262(simplify 263 (trunc_div @0 (lshift integer_onep@1 @2)) 264 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0)) 265 && (!VECTOR_TYPE_P (type) 266 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector) 267 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))) 268 (rshift @0 @2))) 269 270/* Preserve explicit divisions by 0: the C++ front-end wants to detect 271 undefined behavior in constexpr evaluation, and assuming that the division 272 traps enables better optimizations than these anyway. */ 273(for div (trunc_div ceil_div floor_div round_div exact_div) 274 /* 0 / X is always zero. */ 275 (simplify 276 (div integer_zerop@0 @1) 277 /* But not for 0 / 0 so that we can get the proper warnings and errors. */ 278 (if (!integer_zerop (@1)) 279 @0)) 280 /* X / -1 is -X. */ 281 (simplify 282 (div @0 integer_minus_onep@1) 283 (if (!TYPE_UNSIGNED (type)) 284 (negate @0))) 285 /* X / X is one. */ 286 (simplify 287 (div @0 @0) 288 /* But not for 0 / 0 so that we can get the proper warnings and errors. 289 And not for _Fract types where we can't build 1. */ 290 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type))) 291 { build_one_cst (type); })) 292 /* X / abs (X) is X < 0 ? -1 : 1. */ 293 (simplify 294 (div:C @0 (abs @0)) 295 (if (INTEGRAL_TYPE_P (type) 296 && TYPE_OVERFLOW_UNDEFINED (type)) 297 (cond (lt @0 { build_zero_cst (type); }) 298 { build_minus_one_cst (type); } { build_one_cst (type); }))) 299 /* X / -X is -1. 
*/ 300 (simplify 301 (div:C @0 (negate @0)) 302 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) 303 && TYPE_OVERFLOW_UNDEFINED (type)) 304 { build_minus_one_cst (type); }))) 305 306/* For unsigned integral types, FLOOR_DIV_EXPR is the same as 307 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */ 308(simplify 309 (floor_div @0 @1) 310 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) 311 && TYPE_UNSIGNED (type)) 312 (trunc_div @0 @1))) 313 314/* Combine two successive divisions. Note that combining ceil_div 315 and floor_div is trickier and combining round_div even more so. */ 316(for div (trunc_div exact_div) 317 (simplify 318 (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2) 319 (with { 320 wi::overflow_type overflow; 321 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2), 322 TYPE_SIGN (type), &overflow); 323 } 324 (if (div == EXACT_DIV_EXPR 325 || optimize_successive_divisions_p (@2, @3)) 326 (if (!overflow) 327 (div @0 { wide_int_to_tree (type, mul); }) 328 (if (TYPE_UNSIGNED (type) 329 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED)) 330 { build_zero_cst (type); })))))) 331 332/* Combine successive multiplications. Similar to above, but handling 333 overflow is different. */ 334(simplify 335 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2) 336 (with { 337 wi::overflow_type overflow; 338 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2), 339 TYPE_SIGN (type), &overflow); 340 } 341 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN, 342 otherwise undefined overflow implies that @0 must be zero. */ 343 (if (!overflow || TYPE_OVERFLOW_WRAPS (type)) 344 (mult @0 { wide_int_to_tree (type, mul); })))) 345 346/* Optimize A / A to 1.0 if we don't care about 347 NaNs or Infinities. */ 348(simplify 349 (rdiv @0 @0) 350 (if (FLOAT_TYPE_P (type) 351 && ! HONOR_NANS (type) 352 && ! 
HONOR_INFINITIES (type)) 353 { build_one_cst (type); })) 354 355/* Optimize -A / A to -1.0 if we don't care about 356 NaNs or Infinities. */ 357(simplify 358 (rdiv:C @0 (negate @0)) 359 (if (FLOAT_TYPE_P (type) 360 && ! HONOR_NANS (type) 361 && ! HONOR_INFINITIES (type)) 362 { build_minus_one_cst (type); })) 363 364/* PR71078: x / abs(x) -> copysign (1.0, x) */ 365(simplify 366 (rdiv:C (convert? @0) (convert? (abs @0))) 367 (if (SCALAR_FLOAT_TYPE_P (type) 368 && ! HONOR_NANS (type) 369 && ! HONOR_INFINITIES (type)) 370 (switch 371 (if (types_match (type, float_type_node)) 372 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0))) 373 (if (types_match (type, double_type_node)) 374 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0))) 375 (if (types_match (type, long_double_type_node)) 376 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0)))))) 377 378/* In IEEE floating point, x/1 is not equivalent to x for snans. */ 379(simplify 380 (rdiv @0 real_onep) 381 (if (!HONOR_SNANS (type)) 382 (non_lvalue @0))) 383 384/* In IEEE floating point, x/-1 is not equivalent to -x for snans. */ 385(simplify 386 (rdiv @0 real_minus_onep) 387 (if (!HONOR_SNANS (type)) 388 (negate @0))) 389 390(if (flag_reciprocal_math) 391 /* Convert (A/B)/C to A/(B*C). */ 392 (simplify 393 (rdiv (rdiv:s @0 @1) @2) 394 (rdiv @0 (mult @1 @2))) 395 396 /* Canonicalize x / (C1 * y) to (x * C2) / y. */ 397 (simplify 398 (rdiv @0 (mult:s @1 REAL_CST@2)) 399 (with 400 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); } 401 (if (tem) 402 (rdiv (mult @0 { tem; } ) @1)))) 403 404 /* Convert A/(B/C) to (A/B)*C */ 405 (simplify 406 (rdiv @0 (rdiv:s @1 @2)) 407 (mult (rdiv @0 @1) @2))) 408 409/* Simplify x / (- y) to -x / y. */ 410(simplify 411 (rdiv @0 (negate @1)) 412 (rdiv (negate @0) @1)) 413 414(if (flag_unsafe_math_optimizations) 415 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan. 416 Since C / x may underflow to zero, do this only for unsafe math. 
*/ 417 (for op (lt le gt ge) 418 neg_op (gt ge lt le) 419 (simplify 420 (op (rdiv REAL_CST@0 @1) real_zerop@2) 421 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1)) 422 (switch 423 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0))) 424 (op @1 @2)) 425 /* For C < 0, use the inverted operator. */ 426 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0)) 427 (neg_op @1 @2))))))) 428 429/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */ 430(for div (trunc_div ceil_div floor_div round_div exact_div) 431 (simplify 432 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2) 433 (if (integer_pow2p (@2) 434 && tree_int_cst_sgn (@2) > 0 435 && tree_nop_conversion_p (type, TREE_TYPE (@0)) 436 && wi::to_wide (@2) + wi::to_wide (@1) == 0) 437 (rshift (convert @0) 438 { build_int_cst (integer_type_node, 439 wi::exact_log2 (wi::to_wide (@2))); })))) 440 441/* If ARG1 is a constant, we can convert this to a multiply by the 442 reciprocal. This does not have the same rounding properties, 443 so only do this if -freciprocal-math. We can actually 444 always safely do it if ARG1 is a power of two, but it's hard to 445 tell if it is or not in a portable manner. */ 446(for cst (REAL_CST COMPLEX_CST VECTOR_CST) 447 (simplify 448 (rdiv @0 cst@1) 449 (if (optimize) 450 (if (flag_reciprocal_math 451 && !real_zerop (@1)) 452 (with 453 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); } 454 (if (tem) 455 (mult @0 { tem; } ))) 456 (if (cst != COMPLEX_CST) 457 (with { tree inverse = exact_inverse (type, @1); } 458 (if (inverse) 459 (mult @0 { inverse; } )))))))) 460 461(for mod (ceil_mod floor_mod round_mod trunc_mod) 462 /* 0 % X is always zero. */ 463 (simplify 464 (mod integer_zerop@0 @1) 465 /* But not for 0 % 0 so that we can get the proper warnings and errors. */ 466 (if (!integer_zerop (@1)) 467 @0)) 468 /* X % 1 is always zero. */ 469 (simplify 470 (mod @0 integer_onep) 471 { build_zero_cst (type); }) 472 /* X % -1 is zero. 
*/ 473 (simplify 474 (mod @0 integer_minus_onep@1) 475 (if (!TYPE_UNSIGNED (type)) 476 { build_zero_cst (type); })) 477 /* X % X is zero. */ 478 (simplify 479 (mod @0 @0) 480 /* But not for 0 % 0 so that we can get the proper warnings and errors. */ 481 (if (!integer_zerop (@0)) 482 { build_zero_cst (type); })) 483 /* (X % Y) % Y is just X % Y. */ 484 (simplify 485 (mod (mod@2 @0 @1) @1) 486 @2) 487 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */ 488 (simplify 489 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2) 490 (if (ANY_INTEGRAL_TYPE_P (type) 491 && TYPE_OVERFLOW_UNDEFINED (type) 492 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2), 493 TYPE_SIGN (type))) 494 { build_zero_cst (type); })) 495 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned 496 modulo and comparison, since it is simpler and equivalent. */ 497 (for cmp (eq ne) 498 (simplify 499 (cmp (mod @0 integer_pow2p@2) integer_zerop@1) 500 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))) 501 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); } 502 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1))))))) 503 504/* X % -C is the same as X % C. */ 505(simplify 506 (trunc_mod @0 INTEGER_CST@1) 507 (if (TYPE_SIGN (type) == SIGNED 508 && !TREE_OVERFLOW (@1) 509 && wi::neg_p (wi::to_wide (@1)) 510 && !TYPE_OVERFLOW_TRAPS (type) 511 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */ 512 && !sign_bit_p (@1, @1)) 513 (trunc_mod @0 (negate @1)))) 514 515/* X % -Y is the same as X % Y. */ 516(simplify 517 (trunc_mod @0 (convert? (negate @1))) 518 (if (INTEGRAL_TYPE_P (type) 519 && !TYPE_UNSIGNED (type) 520 && !TYPE_OVERFLOW_TRAPS (type) 521 && tree_nop_conversion_p (type, TREE_TYPE (@1)) 522 /* Avoid this transformation if X might be INT_MIN or 523 Y might be -1, because we would then change valid 524 INT_MIN % -(-1) into invalid INT_MIN % -1. 
*/ 525 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type))) 526 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION 527 (TREE_TYPE (@1)))))) 528 (trunc_mod @0 (convert @1)))) 529 530/* X - (X / Y) * Y is the same as X % Y. */ 531(simplify 532 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1))) 533 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)) 534 (convert (trunc_mod @0 @1)))) 535 536/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR, 537 i.e. "X % C" into "X & (C - 1)", if X and C are positive. 538 Also optimize A % (C << N) where C is a power of 2, 539 to A & ((C << N) - 1). */ 540(match (power_of_two_cand @1) 541 INTEGER_CST@1) 542(match (power_of_two_cand @1) 543 (lshift INTEGER_CST@1 @2)) 544(for mod (trunc_mod floor_mod) 545 (simplify 546 (mod @0 (convert? (power_of_two_cand@1 @2))) 547 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0)) 548 /* Allow any integral conversions of the divisor, except 549 conversion from narrower signed to wider unsigned type 550 where if @1 would be negative power of two, the divisor 551 would not be a power of two. */ 552 && INTEGRAL_TYPE_P (type) 553 && INTEGRAL_TYPE_P (TREE_TYPE (@1)) 554 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1)) 555 || TYPE_UNSIGNED (TREE_TYPE (@1)) 556 || !TYPE_UNSIGNED (type)) 557 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0) 558 (with { tree utype = TREE_TYPE (@1); 559 if (!TYPE_OVERFLOW_WRAPS (utype)) 560 utype = unsigned_type_for (utype); } 561 (bit_and @0 (convert (minus (convert:utype @1) 562 { build_one_cst (utype); }))))))) 563 564/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */ 565(simplify 566 (trunc_div (mult @0 integer_pow2p@1) @1) 567 (if (TYPE_UNSIGNED (TREE_TYPE (@0))) 568 (bit_and @0 { wide_int_to_tree 569 (type, wi::mask (TYPE_PRECISION (type) 570 - wi::exact_log2 (wi::to_wide (@1)), 571 false, TYPE_PRECISION (type))); }))) 572 573/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. 
*/ 574(simplify 575 (mult (trunc_div @0 integer_pow2p@1) @1) 576 (if (TYPE_UNSIGNED (TREE_TYPE (@0))) 577 (bit_and @0 (negate @1)))) 578 579/* Simplify (t * 2) / 2) -> t. */ 580(for div (trunc_div ceil_div floor_div round_div exact_div) 581 (simplify 582 (div (mult:c @0 @1) @1) 583 (if (ANY_INTEGRAL_TYPE_P (type) 584 && TYPE_OVERFLOW_UNDEFINED (type)) 585 @0))) 586 587(for op (negate abs) 588 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */ 589 (for coss (COS COSH) 590 (simplify 591 (coss (op @0)) 592 (coss @0))) 593 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */ 594 (for pows (POW) 595 (simplify 596 (pows (op @0) REAL_CST@1) 597 (with { HOST_WIDE_INT n; } 598 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0) 599 (pows @0 @1))))) 600 /* Likewise for powi. */ 601 (for pows (POWI) 602 (simplify 603 (pows (op @0) INTEGER_CST@1) 604 (if ((wi::to_wide (@1) & 1) == 0) 605 (pows @0 @1)))) 606 /* Strip negate and abs from both operands of hypot. */ 607 (for hypots (HYPOT) 608 (simplify 609 (hypots (op @0) @1) 610 (hypots @0 @1)) 611 (simplify 612 (hypots @0 (op @1)) 613 (hypots @0 @1))) 614 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */ 615 (for copysigns (COPYSIGN_ALL) 616 (simplify 617 (copysigns (op @0) @1) 618 (copysigns @0 @1)))) 619 620/* abs(x)*abs(x) -> x*x. Should be valid for all types. */ 621(simplify 622 (mult (abs@1 @0) @1) 623 (mult @0 @0)) 624 625/* Convert absu(x)*absu(x) -> x*x. */ 626(simplify 627 (mult (absu@1 @0) @1) 628 (mult (convert@2 @0) @2)) 629 630/* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */ 631(for coss (COS COSH) 632 copysigns (COPYSIGN) 633 (simplify 634 (coss (copysigns @0 @1)) 635 (coss @0))) 636 637/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. 
*/ 638(for pows (POW) 639 copysigns (COPYSIGN) 640 (simplify 641 (pows (copysigns @0 @2) REAL_CST@1) 642 (with { HOST_WIDE_INT n; } 643 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0) 644 (pows @0 @1))))) 645/* Likewise for powi. */ 646(for pows (POWI) 647 copysigns (COPYSIGN) 648 (simplify 649 (pows (copysigns @0 @2) INTEGER_CST@1) 650 (if ((wi::to_wide (@1) & 1) == 0) 651 (pows @0 @1)))) 652 653(for hypots (HYPOT) 654 copysigns (COPYSIGN) 655 /* hypot(copysign(x, y), z) -> hypot(x, z). */ 656 (simplify 657 (hypots (copysigns @0 @1) @2) 658 (hypots @0 @2)) 659 /* hypot(x, copysign(y, z)) -> hypot(x, y). */ 660 (simplify 661 (hypots @0 (copysigns @1 @2)) 662 (hypots @0 @1))) 663 664/* copysign(x, CST) -> [-]abs (x). */ 665(for copysigns (COPYSIGN_ALL) 666 (simplify 667 (copysigns @0 REAL_CST@1) 668 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1))) 669 (negate (abs @0)) 670 (abs @0)))) 671 672/* copysign(copysign(x, y), z) -> copysign(x, z). */ 673(for copysigns (COPYSIGN_ALL) 674 (simplify 675 (copysigns (copysigns @0 @1) @2) 676 (copysigns @0 @2))) 677 678/* copysign(x,y)*copysign(x,y) -> x*x. */ 679(for copysigns (COPYSIGN_ALL) 680 (simplify 681 (mult (copysigns@2 @0 @1) @2) 682 (mult @0 @0))) 683 684/* ccos(-x) -> ccos(x). Similarly for ccosh. */ 685(for ccoss (CCOS CCOSH) 686 (simplify 687 (ccoss (negate @0)) 688 (ccoss @0))) 689 690/* cabs(-x) and cos(conj(x)) -> cabs(x). */ 691(for ops (conj negate) 692 (for cabss (CABS) 693 (simplify 694 (cabss (ops @0)) 695 (cabss @0)))) 696 697/* Fold (a * (1 << b)) into (a << b) */ 698(simplify 699 (mult:c @0 (convert? (lshift integer_onep@1 @2))) 700 (if (! FLOAT_TYPE_P (type) 701 && tree_nop_conversion_p (type, TREE_TYPE (@1))) 702 (lshift @0 @2))) 703 704/* Fold (1 << (C - x)) where C = precision(type) - 1 705 into ((1 << C) >> x). 
*/ 706(simplify 707 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3)) 708 (if (INTEGRAL_TYPE_P (type) 709 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1) 710 && single_use (@1)) 711 (if (TYPE_UNSIGNED (type)) 712 (rshift (lshift @0 @2) @3) 713 (with 714 { tree utype = unsigned_type_for (type); } 715 (convert (rshift (lshift (convert:utype @0) @2) @3)))))) 716 717/* Fold (C1/X)*C2 into (C1*C2)/X. */ 718(simplify 719 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2) 720 (if (flag_associative_math 721 && single_use (@3)) 722 (with 723 { tree tem = const_binop (MULT_EXPR, type, @0, @2); } 724 (if (tem) 725 (rdiv { tem; } @1))))) 726 727/* Simplify ~X & X as zero. */ 728(simplify 729 (bit_and:c (convert? @0) (convert? (bit_not @0))) 730 { build_zero_cst (type); }) 731 732/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */ 733(simplify 734 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep)) 735 (if (TYPE_UNSIGNED (type)) 736 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1))))) 737 738(for bitop (bit_and bit_ior) 739 cmp (eq ne) 740 /* PR35691: Transform 741 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0. 742 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */ 743 (simplify 744 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop)) 745 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 746 && INTEGRAL_TYPE_P (TREE_TYPE (@1)) 747 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))) 748 (cmp (bit_ior @0 (convert @1)) @2))) 749 /* Transform: 750 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1. 751 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */ 752 (simplify 753 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp)) 754 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 755 && INTEGRAL_TYPE_P (TREE_TYPE (@1)) 756 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))) 757 (cmp (bit_and @0 (convert @1)) @2)))) 758 759/* Fold (A & ~B) - (A & B) into (A ^ B) - B. 
*/ 760(simplify 761 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1)) 762 (minus (bit_xor @0 @1) @1)) 763(simplify 764 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1)) 765 (if (~wi::to_wide (@2) == wi::to_wide (@1)) 766 (minus (bit_xor @0 @1) @1))) 767 768/* Fold (A & B) - (A & ~B) into B - (A ^ B). */ 769(simplify 770 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1))) 771 (minus @1 (bit_xor @0 @1))) 772 773/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */ 774(for op (bit_ior bit_xor plus) 775 (simplify 776 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1)) 777 (bit_xor @0 @1)) 778 (simplify 779 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1)) 780 (if (~wi::to_wide (@2) == wi::to_wide (@1)) 781 (bit_xor @0 @1)))) 782 783/* PR53979: Transform ((a ^ b) | a) -> (a | b) */ 784(simplify 785 (bit_ior:c (bit_xor:c @0 @1) @0) 786 (bit_ior @0 @1)) 787 788/* (a & ~b) | (a ^ b) --> a ^ b */ 789(simplify 790 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1)) 791 @2) 792 793/* (a & ~b) ^ ~a --> ~(a & b) */ 794(simplify 795 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0)) 796 (bit_not (bit_and @0 @1))) 797 798/* (a | b) & ~(a ^ b) --> a & b */ 799(simplify 800 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1))) 801 (bit_and @0 @1)) 802 803/* a | ~(a ^ b) --> a | ~b */ 804(simplify 805 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1))) 806 (bit_ior @0 (bit_not @1))) 807 808/* (a | b) | (a &^ b) --> a | b */ 809(for op (bit_and bit_xor) 810 (simplify 811 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1)) 812 @2)) 813 814/* (a & b) | ~(a ^ b) --> ~(a ^ b) */ 815(simplify 816 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1))) 817 @2) 818 819/* ~(~a & b) --> a | ~b */ 820(simplify 821 (bit_not (bit_and:cs (bit_not @0) @1)) 822 (bit_ior @0 (bit_not @1))) 823 824/* ~(~a | b) --> a & ~b */ 825(simplify 826 (bit_not (bit_ior:cs (bit_not @0) @1)) 827 (bit_and @0 (bit_not @1))) 828 829/* Simplify (~X & Y) to X ^ Y if we 
know that (X & ~Y) is 0. */ 830#if GIMPLE 831(simplify 832 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1) 833 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 834 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0) 835 (bit_xor @0 @1))) 836#endif 837 838/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, 839 ((A & N) + B) & M -> (A + B) & M 840 Similarly if (N & M) == 0, 841 ((A | N) + B) & M -> (A + B) & M 842 and for - instead of + (or unary - instead of +) 843 and/or ^ instead of |. 844 If B is constant and (B & M) == 0, fold into A & M. */ 845(for op (plus minus) 846 (for bitop (bit_and bit_ior bit_xor) 847 (simplify 848 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2) 849 (with 850 { tree pmop[2]; 851 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop, 852 @3, @4, @1, ERROR_MARK, NULL_TREE, 853 NULL_TREE, pmop); } 854 (if (utype) 855 (convert (bit_and (op (convert:utype { pmop[0]; }) 856 (convert:utype { pmop[1]; })) 857 (convert:utype @2)))))) 858 (simplify 859 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2) 860 (with 861 { tree pmop[2]; 862 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK, 863 NULL_TREE, NULL_TREE, @1, bitop, @3, 864 @4, pmop); } 865 (if (utype) 866 (convert (bit_and (op (convert:utype { pmop[0]; }) 867 (convert:utype { pmop[1]; })) 868 (convert:utype @2))))))) 869 (simplify 870 (bit_and (op:s @0 @1) INTEGER_CST@2) 871 (with 872 { tree pmop[2]; 873 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK, 874 NULL_TREE, NULL_TREE, @1, ERROR_MARK, 875 NULL_TREE, NULL_TREE, pmop); } 876 (if (utype) 877 (convert (bit_and (op (convert:utype { pmop[0]; }) 878 (convert:utype { pmop[1]; })) 879 (convert:utype @2))))))) /* Likewise for unary negation: (-(A bitop C2)) & C1.  */ 880(for bitop (bit_and bit_ior bit_xor) 881 (simplify 882 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1) 883 (with 884 { tree pmop[2]; 885 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0, 886 bitop,
@2, @3, NULL_TREE, ERROR_MARK, 887 NULL_TREE, NULL_TREE, pmop); } 888 (if (utype) 889 (convert (bit_and (negate (convert:utype { pmop[0]; })) 890 (convert:utype @1))))))) 891 892/* X % Y is smaller than Y. */ 893(for cmp (lt ge) 894 (simplify 895 (cmp (trunc_mod @0 @1) @1) 896 (if (TYPE_UNSIGNED (TREE_TYPE (@0))) 897 { constant_boolean_node (cmp == LT_EXPR, type); }))) 898(for cmp (gt le) 899 (simplify 900 (cmp @1 (trunc_mod @0 @1)) 901 (if (TYPE_UNSIGNED (TREE_TYPE (@0))) 902 { constant_boolean_node (cmp == GT_EXPR, type); }))) 903 904/* x | ~0 -> ~0 */ 905(simplify 906 (bit_ior @0 integer_all_onesp@1) 907 @1) 908 909/* x | 0 -> x */ 910(simplify 911 (bit_ior @0 integer_zerop) 912 @0) 913 914/* x & 0 -> 0 */ 915(simplify 916 (bit_and @0 integer_zerop@1) 917 @1) 918 919/* ~x | x -> -1 */ 920/* ~x ^ x -> -1 */ 921/* ~x + x -> -1 */ 922(for op (bit_ior bit_xor plus) 923 (simplify 924 (op:c (convert? @0) (convert? (bit_not @0))) 925 (convert { build_all_ones_cst (TREE_TYPE (@0)); }))) 926 927/* x ^ x -> 0 */ 928(simplify 929 (bit_xor @0 @0) 930 { build_zero_cst (type); }) 931 932/* Canonicalize X ^ ~0 to ~X. */ 933(simplify 934 (bit_xor @0 integer_all_onesp@1) 935 (bit_not @0)) 936 937/* x & ~0 -> x */ 938(simplify 939 (bit_and @0 integer_all_onesp) 940 (non_lvalue @0)) 941 942/* x & x -> x, x | x -> x */ 943(for bitop (bit_and bit_ior) 944 (simplify 945 (bitop @0 @0) 946 (non_lvalue @0))) 947 948/* x & C -> x if we know that x & ~C == 0.
*/ /* Restricted to GIMPLE because get_nonzero_bits works on SSA names.  */ 949#if GIMPLE 950(simplify 951 (bit_and SSA_NAME@0 INTEGER_CST@1) 952 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 953 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0) 954 @0)) 955#endif 956 957/* x + (x & 1) -> (x + 1) & ~1 */ 958(simplify 959 (plus:c @0 (bit_and:s @0 integer_onep@1)) 960 (bit_and (plus @0 @1) (bit_not @1))) 961 962/* x & ~(x & y) -> x & ~y */ 963/* x | ~(x | y) -> x | ~y */ 964(for bitop (bit_and bit_ior) 965 (simplify 966 (bitop:c @0 (bit_not (bitop:cs @0 @1))) 967 (bitop @0 (bit_not @1)))) 968 969/* (~x & y) | ~(x | y) -> ~x */ 970(simplify 971 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1))) 972 @2) 973 974/* (x | y) ^ (x | ~y) -> ~x */ 975(simplify 976 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1))) 977 (bit_not @0)) 978 979/* (x & y) | ~(x | y) -> ~(x ^ y) */ 980(simplify 981 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1))) 982 (bit_not (bit_xor @0 @1))) 983 984/* (~x | y) ^ (x ^ y) -> x | ~y */ 985(simplify 986 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1)) 987 (bit_ior @0 (bit_not @1))) 988 989/* (x ^ y) | ~(x | y) -> ~(x & y) */ 990(simplify 991 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1))) 992 (bit_not (bit_and @0 @1))) 993 994/* (x | y) & ~x -> y & ~x */ 995/* (x & y) | ~x -> y | ~x */ 996(for bitop (bit_and bit_ior) 997 rbitop (bit_ior bit_and) 998 (simplify 999 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0)) 1000 (bitop @1 @2))) 1001 1002/* (x & y) ^ (x | y) -> x ^ y */ 1003(simplify 1004 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1)) 1005 (bit_xor @0 @1)) 1006 1007/* (x ^ y) ^ (x | y) -> x & y */ 1008(simplify 1009 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1)) 1010 (bit_and @0 @1)) 1011 1012/* (x & y) + (x ^ y) -> x | y */ 1013/* (x & y) | (x ^ y) -> x | y */ 1014/* (x & y) ^ (x ^ y) -> x | y */ 1015(for op (plus bit_ior bit_xor) 1016 (simplify 1017 (op:c (bit_and @0 @1) (bit_xor @0 @1)) 1018 (bit_ior @0 @1))) 1019 1020/* (x & y) + (x | y) -> x + y */
1021(simplify 1022 (plus:c (bit_and @0 @1) (bit_ior @0 @1)) 1023 (plus @0 @1)) 1024 1025/* (x + y) - (x | y) -> x & y */ 1026(simplify 1027 (minus (plus @0 @1) (bit_ior @0 @1)) 1028 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) 1029 && !TYPE_SATURATING (type)) 1030 (bit_and @0 @1))) 1031 1032/* (x + y) - (x & y) -> x | y */ 1033(simplify 1034 (minus (plus @0 @1) (bit_and @0 @1)) 1035 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type) 1036 && !TYPE_SATURATING (type)) 1037 (bit_ior @0 @1))) 1038 1039/* (x | y) - (x ^ y) -> x & y */ 1040(simplify 1041 (minus (bit_ior @0 @1) (bit_xor @0 @1)) 1042 (bit_and @0 @1)) 1043 1044/* (x | y) - (x & y) -> x ^ y */ 1045(simplify 1046 (minus (bit_ior @0 @1) (bit_and @0 @1)) 1047 (bit_xor @0 @1)) 1048 1049/* (x | y) & ~(x & y) -> x ^ y */ 1050(simplify 1051 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1))) 1052 (bit_xor @0 @1)) 1053 1054/* (x | y) & (~x ^ y) -> x & y */ 1055(simplify 1056 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0))) 1057 (bit_and @0 @1)) 1058 1059/* (~x | y) & (x | ~y) -> ~(x ^ y) */ 1060(simplify 1061 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1))) 1062 (bit_not (bit_xor @0 @1))) 1063 1064/* (~x | y) ^ (x | ~y) -> x ^ y */ 1065(simplify 1066 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1))) 1067 (bit_xor @0 @1)) 1068 1069/* ~x & ~y -> ~(x | y) 1070 ~x | ~y -> ~(x & y) */ 1071(for op (bit_and bit_ior) 1072 rop (bit_ior bit_and) 1073 (simplify 1074 (op (convert1? (bit_not @0)) (convert2?
(bit_not @1))) 1075 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1076 && element_precision (type) <= element_precision (TREE_TYPE (@1))) 1077 (bit_not (rop (convert @0) (convert @1)))))) 1078 1079/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing 1080 with a constant, and the two constants have no bits in common, 1081 we should treat this as a BIT_IOR_EXPR since this may produce more 1082 simplifications. */ 1083(for op (bit_xor plus) 1084 (simplify 1085 (op (convert1? (bit_and@4 @0 INTEGER_CST@1)) 1086 (convert2? (bit_and@5 @2 INTEGER_CST@3))) 1087 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1088 && tree_nop_conversion_p (type, TREE_TYPE (@2)) 1089 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0) 1090 (bit_ior (convert @4) (convert @5))))) 1091 1092/* (X | Y) ^ X -> Y & ~X.  */ 1093(simplify 1094 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0)) 1095 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1096 (convert (bit_and @1 (bit_not @0))))) 1097 1098/* Convert ~X ^ ~Y to X ^ Y. */ 1099(simplify 1100 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1))) 1101 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1102 && element_precision (type) <= element_precision (TREE_TYPE (@1))) 1103 (bit_xor (convert @0) (convert @1)))) 1104 1105/* Convert ~X ^ C to X ^ ~C. */ 1106(simplify 1107 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1) 1108 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1109 (bit_xor (convert @0) (bit_not @1)))) 1110 1111/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */ 1112(for opo (bit_and bit_xor) 1113 opi (bit_xor bit_and) 1114 (simplify 1115 (opo:c (opi:cs @0 @1) @1) 1116 (bit_and (bit_not @0) @1))) 1117 1118/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both 1119 operands are another bit-wise operation with a common input. If so, 1120 distribute the bit operations to save an operation and possibly two if 1121 constants are involved.
For example, convert 1122 (A | B) & (A | C) into A | (B & C) 1123 Further simplification will occur if B and C are constants. */ 1124(for op (bit_and bit_ior bit_xor) 1125 rop (bit_ior bit_and bit_and) 1126 (simplify 1127 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2))) 1128 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1129 && tree_nop_conversion_p (type, TREE_TYPE (@2))) 1130 (rop (convert @0) (op (convert @1) (convert @2)))))) 1131 1132/* Some simple reassociation for bit operations, also handled in reassoc. */ 1133/* (X & Y) & Y -> X & Y 1134 (X | Y) | Y -> X | Y */ 1135(for op (bit_and bit_ior) 1136 (simplify 1137 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1)) 1138 @2)) 1139/* (X ^ Y) ^ Y -> X */ 1140(simplify 1141 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1)) 1142 (convert @0)) 1143/* (X & Y) & (X & Z) -> (X & Y) & Z 1144 (X | Y) | (X | Z) -> (X | Y) | Z */ 1145(for op (bit_and bit_ior) 1146 (simplify 1147 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2))) 1148 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1149 && tree_nop_conversion_p (type, TREE_TYPE (@2))) 1150 (if (single_use (@5) && single_use (@6)) 1151 (op @3 (convert @2)) 1152 (if (single_use (@3) && single_use (@4)) 1153 (op (convert @1) @5)))))) 1154/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */ 1155(simplify 1156 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2))) 1157 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1158 && tree_nop_conversion_p (type, TREE_TYPE (@2))) 1159 (bit_xor (convert @1) (convert @2)))) 1160 1161/* Convert abs (abs (X)) into abs (X). 1162 also absu (absu (X)) into absu (X). */ 1163(simplify 1164 (abs (abs@1 @0)) 1165 @1) /* absu ((T) absu (X)) -> absu (X) when the conversion is a nop.  */ 1166 1167(simplify 1168 (absu (convert@2 (absu@1 @0))) 1169 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1))) 1170 @1)) 1171 1172/* Convert abs[u] (-X) -> abs[u] (X).
*/ 1173(simplify 1174 (abs (negate @0)) 1175 (abs @0)) 1176 1177(simplify 1178 (absu (negate @0)) 1179 (absu @0)) 1180 1181/* Convert abs[u] (X) where X is nonnegative -> (X). */ 1182(simplify 1183 (abs tree_expr_nonnegative_p@0) 1184 @0) 1185 1186(simplify 1187 (absu tree_expr_nonnegative_p@0) 1188 (convert @0)) 1189 1190/* A few cases of fold-const.c negate_expr_p predicate. */ 1191(match negate_expr_p 1192 INTEGER_CST 1193 (if ((INTEGRAL_TYPE_P (type) 1194 && TYPE_UNSIGNED (type)) 1195 || (!TYPE_OVERFLOW_SANITIZED (type) 1196 && may_negate_without_overflow_p (t))))) /* FIXED_CST is always negatable.  */ 1197(match negate_expr_p 1198 FIXED_CST) 1199(match negate_expr_p 1200 (negate @0) 1201 (if (!TYPE_OVERFLOW_SANITIZED (type)))) 1202(match negate_expr_p 1203 REAL_CST 1204 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t))))) 1205/* VECTOR_CST handling of non-wrapping types would recurse in unsupported 1206 ways. */ 1207(match negate_expr_p 1208 VECTOR_CST 1209 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type)))) 1210(match negate_expr_p 1211 (minus @0 @1) 1212 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)) 1213 || (FLOAT_TYPE_P (type) 1214 && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 1215 && !HONOR_SIGNED_ZEROS (type))))) 1216 1217/* (-A) * (-B) -> A * B */ 1218(simplify 1219 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1)) 1220 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1221 && tree_nop_conversion_p (type, TREE_TYPE (@1))) 1222 (mult (convert @0) (convert (negate @1))))) 1223 1224/* -(A + B) -> (-B) - A. */ 1225(simplify 1226 (negate (plus:c @0 negate_expr_p@1)) 1227 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type)) 1228 && !HONOR_SIGNED_ZEROS (element_mode (type))) 1229 (minus (negate @1) @0))) 1230 1231/* -(A - B) -> B - A.
*/ 1232(simplify 1233 (negate (minus @0 @1)) 1234 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type)) 1235 || (FLOAT_TYPE_P (type) 1236 && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 1237 && !HONOR_SIGNED_ZEROS (type))) 1238 (minus @1 @0))) /* Likewise for pointer differences, when overflow is undefined.  */ 1239(simplify 1240 (negate (pointer_diff @0 @1)) 1241 (if (TYPE_OVERFLOW_UNDEFINED (type)) 1242 (pointer_diff @1 @0))) 1243 1244/* A - B -> A + (-B) if B is easily negatable. */ 1245(simplify 1246 (minus @0 negate_expr_p@1) 1247 (if (!FIXED_POINT_TYPE_P (type)) 1248 (plus @0 (negate @1)))) 1249 1250/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST)) 1251 when profitable. 1252 For bitwise binary operations apply operand conversions to the 1253 binary operation result instead of to the operands. This allows 1254 to combine successive conversions and bitwise binary operations. 1255 We combine the above two cases by using a conditional convert. */ 1256(for bitop (bit_and bit_ior bit_xor) 1257 (simplify 1258 (bitop (convert @0) (convert? @1)) 1259 (if (((TREE_CODE (@1) == INTEGER_CST 1260 && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1261 && int_fits_type_p (@1, TREE_TYPE (@0))) 1262 || types_match (@0, @1)) 1263 /* ??? This transform conflicts with fold-const.c doing 1264 Convert (T)(x & c) into (T)x & (T)c, if c is an integer 1265 constants (if x has signed type, the sign bit cannot be set 1266 in c). This folds extension into the BIT_AND_EXPR. 1267 Restrict it to GIMPLE to avoid endless recursions. */ 1268 && (bitop != BIT_AND_EXPR || GIMPLE) 1269 && (/* That's a good idea if the conversion widens the operand, thus 1270 after hoisting the conversion the operation will be narrower. */ 1271 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type) 1272 /* It's also a good idea if the conversion is to a non-integer 1273 mode. */ 1274 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT 1275 /* Or if the precision of TO is not the same as the precision 1276 of its mode.
*/ 1277 || !type_has_mode_precision_p (type))) 1278 (convert (bitop @0 (convert @1)))))) 1279 1280(for bitop (bit_and bit_ior) 1281 rbitop (bit_ior bit_and) 1282 /* (x | y) & x -> x */ 1283 /* (x & y) | x -> x */ 1284 (simplify 1285 (bitop:c (rbitop:c @0 @1) @0) 1286 @0) 1287 /* (~x | y) & x -> x & y */ 1288 /* (~x & y) | x -> x | y */ 1289 (simplify 1290 (bitop:c (rbitop:c (bit_not @0) @1) @0) 1291 (bitop @0 @1))) 1292 1293/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */ 1294(simplify 1295 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 1296 (bit_ior (bit_and @0 @2) (bit_and @1 @2))) 1297 1298/* Combine successive equal operations with constants. */ 1299(for bitop (bit_and bit_ior bit_xor) 1300 (simplify 1301 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 1302 (if (!CONSTANT_CLASS_P (@0)) 1303 /* This is the canonical form regardless of whether (bitop @1 @2) can be 1304 folded to a constant. */ 1305 (bitop @0 (bitop @1 @2)) 1306 /* In this case we have three constants and (bitop @0 @1) doesn't fold 1307 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if 1308 the values involved are such that the operation can't be decided at 1309 compile time. Try folding one of @0 or @1 with @2 to see whether 1310 that combination can be decided at compile time. 1311 1312 Keep the existing form if both folds fail, to avoid endless 1313 oscillation. */ 1314 (with { tree cst1 = const_binop (bitop, type, @0, @2); } 1315 (if (cst1) 1316 (bitop @1 { cst1; }) 1317 (with { tree cst2 = const_binop (bitop, type, @1, @2); } 1318 (if (cst2) 1319 (bitop @0 { cst2; })))))))) 1320 1321/* Try simple folding for X op !X, and X op X with the help 1322 of the truth_valued_p and logical_inverted_value predicates.
*/ 1323(match truth_valued_p 1324 @0 1325 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))) 1326(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor) 1327 (match truth_valued_p 1328 (op @0 @1))) 1329(match truth_valued_p 1330 (truth_not @0)) 1331 1332(match (logical_inverted_value @0) 1333 (truth_not @0)) 1334(match (logical_inverted_value @0) 1335 (bit_not truth_valued_p@0)) 1336(match (logical_inverted_value @0) 1337 (eq @0 integer_zerop)) 1338(match (logical_inverted_value @0) 1339 (ne truth_valued_p@0 integer_truep)) 1340(match (logical_inverted_value @0) 1341 (bit_xor truth_valued_p@0 integer_truep)) 1342 1343/* X & !X -> 0. */ 1344(simplify 1345 (bit_and:c @0 (logical_inverted_value @0)) 1346 { build_zero_cst (type); }) 1347/* X | !X and X ^ !X -> 1, if X is truth-valued. */ 1348(for op (bit_ior bit_xor) 1349 (simplify 1350 (op:c truth_valued_p@0 (logical_inverted_value @0)) 1351 { constant_boolean_node (true, type); })) 1352/* X ==/!= !X is false/true. */ 1353(for op (eq ne) 1354 (simplify 1355 (op:c truth_valued_p@0 (logical_inverted_value @0)) 1356 { constant_boolean_node (op == NE_EXPR ? true : false, type); })) 1357 1358/* ~~x -> x */ 1359(simplify 1360 (bit_not (bit_not @0)) 1361 @0) 1362 1363/* Convert ~ (-A) to A - 1. */ 1364(simplify 1365 (bit_not (convert? (negate @0))) 1366 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1367 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1368 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); })))) 1369 1370/* Convert - (~A) to A + 1. */ 1371(simplify 1372 (negate (nop_convert (bit_not @0))) 1373 (plus (view_convert @0) { build_each_one_cst (type); })) 1374 1375/* Convert ~ (A - 1) or ~ (A + -1) to -A. */ 1376(simplify 1377 (bit_not (convert? (minus @0 integer_each_onep))) 1378 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1379 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1380 (convert (negate @0)))) 1381(simplify 1382 (bit_not (convert?
(plus @0 integer_all_onesp))) 1383 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1384 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1385 (convert (negate @0)))) 1386 1387/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */ 1388(simplify 1389 (bit_not (convert? (bit_xor @0 INTEGER_CST@1))) 1390 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1391 (convert (bit_xor @0 (bit_not @1))))) 1392(simplify 1393 (bit_not (convert? (bit_xor:c (bit_not @0) @1))) 1394 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1395 (convert (bit_xor @0 @1)))) 1396 1397/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */ 1398(simplify 1399 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1) 1400 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1401 (bit_not (bit_xor (view_convert @0) @1)))) 1402 1403/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */ 1404(simplify 1405 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2)) 1406 (bit_xor (bit_and (bit_xor @0 @1) @2) @0)) 1407 1408/* Fold A - (A & B) into ~B & A. */ 1409(simplify 1410 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1))) 1411 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1412 && tree_nop_conversion_p (type, TREE_TYPE (@1))) 1413 (convert (bit_and (bit_not @1) @0)))) 1414 1415/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */ 1416(for cmp (gt lt ge le) 1417(simplify 1418 (mult (convert (cmp @0 @1)) @2) 1419 (if (GIMPLE || !TREE_SIDE_EFFECTS (@2)) 1420 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))) 1421 1422/* For integral types with undefined overflow and C != 0 fold 1423 x * C EQ/NE y * C into x EQ/NE y. */ 1424(for cmp (eq ne) 1425 (simplify 1426 (cmp (mult:c @0 @1) (mult:c @2 @1)) 1427 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1428 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1429 && tree_expr_nonzero_p (@1)) 1430 (cmp @0 @2)))) 1431 1432/* For integral types with wrapping overflow and C odd fold 1433 x * C EQ/NE y * C into x EQ/NE y.
*/ 1434(for cmp (eq ne) 1435 (simplify 1436 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1)) 1437 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1438 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) 1439 && (TREE_INT_CST_LOW (@1) & 1) != 0) 1440 (cmp @0 @2)))) 1441 1442/* For integral types with undefined overflow and C != 0 fold 1443 x * C RELOP y * C into: 1444 1445 x RELOP y for nonnegative C 1446 y RELOP x for negative C */ 1447(for cmp (lt gt le ge) 1448 (simplify 1449 (cmp (mult:c @0 @1) (mult:c @2 @1)) 1450 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1451 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1452 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1)) 1453 (cmp @0 @2) 1454 (if (TREE_CODE (@1) == INTEGER_CST 1455 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1)))) 1456 (cmp @2 @0)))))) 1457 1458/* (X - 1U) <= INT_MAX-1U into (int) X > 0. 
 The le/gt variants map to (int) X > 0 and (int) X <= 0 respectively.  */ 1459(for cmp (le gt) 1460 icmp (gt le) 1461 (simplify 1462 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2) 1463 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1464 && TYPE_UNSIGNED (TREE_TYPE (@0)) 1465 && TYPE_PRECISION (TREE_TYPE (@0)) > 1 1466 && (wi::to_wide (@2) 1467 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1)) 1468 (with { tree stype = signed_type_for (TREE_TYPE (@0)); } 1469 (icmp (convert:stype @0) { build_int_cst (stype, 0); }))))) 1470 1471/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */ 1472(for cmp (simple_comparison) 1473 (simplify 1474 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2)) 1475 (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))) 1476 (cmp @0 @1)))) 1477 1478/* X / C1 op C2 into a simple range test.
*/ 1479(for cmp (simple_comparison) 1480 (simplify 1481 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2) 1482 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1483 && integer_nonzerop (@1) 1484 && !TREE_OVERFLOW (@1) 1485 && !TREE_OVERFLOW (@2)) 1486 (with { tree lo, hi; bool neg_overflow; 1487 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi, 1488 &neg_overflow); } 1489 (switch 1490 (if (code == LT_EXPR || code == GE_EXPR) 1491 (if (TREE_OVERFLOW (lo)) 1492 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); } 1493 (if (code == LT_EXPR) 1494 (lt @0 { lo; }) 1495 (ge @0 { lo; })))) 1496 (if (code == LE_EXPR || code == GT_EXPR) 1497 (if (TREE_OVERFLOW (hi)) 1498 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); } 1499 (if (code == LE_EXPR) 1500 (le @0 { hi; }) 1501 (gt @0 { hi; })))) 1502 (if (!lo && !hi) 1503 { build_int_cst (type, code == NE_EXPR); }) 1504 (if (code == EQ_EXPR && !hi) 1505 (ge @0 { lo; })) 1506 (if (code == EQ_EXPR && !lo) 1507 (le @0 { hi; })) 1508 (if (code == NE_EXPR && !hi) 1509 (lt @0 { lo; })) 1510 (if (code == NE_EXPR && !lo) 1511 (gt @0 { hi; })) 1512 (if (GENERIC) 1513 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR, 1514 lo, hi); }) /* Finally try an unsigned range test (X - lo) <=/> (hi - lo) in ETYPE.  */ 1515 (with 1516 { 1517 tree etype = range_check_type (TREE_TYPE (@0)); 1518 if (etype) 1519 { 1520 hi = fold_convert (etype, hi); 1521 lo = fold_convert (etype, lo); 1522 hi = const_binop (MINUS_EXPR, etype, hi, lo); 1523 } 1524 } 1525 (if (etype && hi && !TREE_OVERFLOW (hi)) 1526 (if (code == EQ_EXPR) 1527 (le (minus (convert:etype @0) { lo; }) { hi; }) 1528 (gt (minus (convert:etype @0) { lo; }) { hi; }))))))))) 1529 1530/* X + Z < Y + Z is the same as X < Y when there is no overflow. */ 1531(for op (lt le ge gt) 1532 (simplify 1533 (op (plus:c @0 @2) (plus:c @1 @2)) 1534 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1535 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1536 (op @0 @1)))) 1537/* For equality and subtraction, this is also true with wrapping overflow.
*/ 1538(for op (eq ne minus) 1539 (simplify 1540 (op (plus:c @0 @2) (plus:c @1 @2)) 1541 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1542 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1543 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1544 (op @0 @1)))) 1545 1546/* X - Z < Y - Z is the same as X < Y when there is no overflow. */ 1547(for op (lt le ge gt) 1548 (simplify 1549 (op (minus @0 @2) (minus @1 @2)) 1550 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1551 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1552 (op @0 @1)))) 1553/* For equality and subtraction, this is also true with wrapping overflow. */ 1554(for op (eq ne minus) 1555 (simplify 1556 (op (minus @0 @2) (minus @1 @2)) 1557 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1558 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1559 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1560 (op @0 @1)))) 1561/* And for pointers... */ 1562(for op (simple_comparison) 1563 (simplify 1564 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) 1565 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1566 (op @0 @1)))) /* And the same cancellation for subtraction of two pointer_diffs.  */ 1567(simplify 1568 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) 1569 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) 1570 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1571 (pointer_diff @0 @1))) 1572 1573/* Z - X < Z - Y is the same as Y < X when there is no overflow. */ 1574(for op (lt le ge gt) 1575 (simplify 1576 (op (minus @2 @0) (minus @2 @1)) 1577 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1578 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1579 (op @1 @0)))) 1580/* For equality and subtraction, this is also true with wrapping overflow. */ 1581(for op (eq ne minus) 1582 (simplify 1583 (op (minus @2 @0) (minus @2 @1)) 1584 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1585 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1586 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1587 (op @1 @0)))) 1588/* And for pointers...
*/ 1589(for op (simple_comparison) 1590 (simplify 1591 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) 1592 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1593 (op @1 @0)))) 1594(simplify 1595 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) 1596 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) 1597 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1598 (pointer_diff @1 @0))) 1599 1600/* X + Y < Y is the same as X < 0 when there is no overflow. */ 1601(for op (lt le gt ge) 1602 (simplify 1603 (op:c (plus:c@2 @0 @1) @1) 1604 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1605 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1606 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) 1607 && (CONSTANT_CLASS_P (@0) || single_use (@2))) 1608 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))) 1609/* For equality, this is also true with wrapping overflow. */ 1610(for op (eq ne) 1611 (simplify 1612 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1)) 1613 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1614 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1615 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 1616 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3))) 1617 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2)) 1618 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) 1619 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))) 1620 (simplify 1621 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0)) 1622 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) 1623 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)) 1624 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3)))) 1625 (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) 1626 1627/* X - Y < X is the same as Y > 0 when there is no overflow. 1628 For equality, this is also true with wrapping overflow.
*/ 1629(for op (simple_comparison) 1630 (simplify 1631 (op:c @0 (minus@2 @0 @1)) 1632 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1633 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1634 || ((op == EQ_EXPR || op == NE_EXPR) 1635 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1636 && (CONSTANT_CLASS_P (@1) || single_use (@2))) 1637 (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) 1638 1639/* Transform: 1640 (X / Y) == 0 -> X < Y if X, Y are unsigned. 1641 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */ 1642(for cmp (eq ne) 1643 ocmp (lt ge) 1644 (simplify 1645 (cmp (trunc_div @0 @1) integer_zerop) 1646 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) 1647 /* Complex ==/!= is allowed, but not </>=. */ 1648 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE 1649 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0)))) 1650 (ocmp @0 @1)))) 1651 1652/* X == C - X can never be true if C is odd. */ 1653(for cmp (eq ne) 1654 (simplify 1655 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0)))) 1656 (if (TREE_INT_CST_LOW (@1) & 1) 1657 { constant_boolean_node (cmp == NE_EXPR, type); }))) 1658 1659/* Arguments on which one can call get_nonzero_bits to get the bits 1660 possibly set. */ 1661(match with_possible_nonzero_bits 1662 INTEGER_CST@0) 1663(match with_possible_nonzero_bits 1664 SSA_NAME@0 1665 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0))))) 1666/* Slightly extended version, do not make it recursive to keep it cheap. */ 1667(match (with_possible_nonzero_bits2 @0) 1668 with_possible_nonzero_bits@0) 1669(match (with_possible_nonzero_bits2 @0) 1670 (bit_and:c with_possible_nonzero_bits@0 @2)) 1671 1672/* Same for bits that are known to be set, but we do not have 1673 an equivalent to get_nonzero_bits yet. */ 1674(match (with_certain_nonzero_bits2 @0) 1675 INTEGER_CST@0) 1676(match (with_certain_nonzero_bits2 @0) 1677 (bit_ior @1 INTEGER_CST@0)) 1678 1679/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0.
*/ 1680(for cmp (eq ne) 1681 (simplify 1682 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1)) 1683 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0) 1684 { constant_boolean_node (cmp == NE_EXPR, type); }))) 1685 1686/* ((X inner_op C0) outer_op C1) 1687 With X being a tree where value_range has reasoned certain bits to always be 1688 zero throughout its computed value range, 1689 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op 1690 where zero_mask has 1's for all bits that are sure to be 0 in 1691 and 0's otherwise. 1692 if (inner_op == '^') C0 &= ~C1; 1693 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1) 1694 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1) 1695*/ 1696(for inner_op (bit_ior bit_xor) 1697 outer_op (bit_xor bit_ior) 1698(simplify 1699 (outer_op 1700 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1) 1701 (with 1702 { 1703 bool fail = false; 1704 wide_int zero_mask_not; 1705 wide_int C0; 1706 wide_int cst_emit; 1707 /* Nonzero-bit information is only tracked on SSA names.  */ 1708 if (TREE_CODE (@2) == SSA_NAME) 1709 zero_mask_not = get_nonzero_bits (@2); 1710 else 1711 fail = true; 1712 1713 if (inner_op == BIT_XOR_EXPR) 1714 { 1715 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1)); 1716 cst_emit = C0 | wi::to_wide (@1); 1717 } 1718 else 1719 { 1720 C0 = wi::to_wide (@0); 1721 cst_emit = C0 ^ wi::to_wide (@1); 1722 } 1723 } 1724 (if (!fail && (C0 & zero_mask_not) == 0) 1725 (outer_op @2 { wide_int_to_tree (type, cst_emit); }) 1726 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0) 1727 (inner_op @2 { wide_int_to_tree (type, cst_emit); })))))) 1728 1729/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).
*/ 1730(simplify 1731 (pointer_plus (pointer_plus:s @0 @1) @3) 1732 (pointer_plus @0 (plus @1 @3))) 1733 1734/* Pattern match 1735 tem1 = (long) ptr1; 1736 tem2 = (long) ptr2; 1737 tem3 = tem2 - tem1; 1738 tem4 = (unsigned long) tem3; 1739 tem5 = ptr1 + tem4; 1740 and produce 1741 tem5 = ptr2; */ 1742(simplify 1743 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0)))) 1744 /* Conditionally look through a sign-changing conversion. */ 1745 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3)) 1746 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1))) 1747 || (GENERIC && type == TREE_TYPE (@1)))) 1748 @1)) /* Likewise when the inner difference is already a POINTER_DIFF_EXPR.  */ 1749(simplify 1750 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0))) 1751 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3))) 1752 (convert @1))) 1753 1754/* Pattern match 1755 tem = (sizetype) ptr; 1756 tem = tem & algn; 1757 tem = -tem; 1758 ... = ptr p+ tem; 1759 and produce the simpler and easier to analyze with respect to alignment 1760 ... = ptr & ~algn; */ 1761(simplify 1762 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1))) 1763 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); } 1764 (bit_and @0 { algn; }))) 1765 1766/* Try folding difference of addresses.
*/
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
/* Likewise for POINTER_DIFF_EXPR; convert? and convert1? permit an
   independent optional conversion on each operand.  */
(simplify
 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
      && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
  (with { poly_int64 diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert? @0) INTEGER_CST@1)
 (if (POINTER_TYPE_P (TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with
   {
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     get_pointer_alignment_1 (@0, &align, &bitpos);
   }
   (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
    { wide_int_to_tree (type, (wi::to_wide (@1)
			       & (bitpos / BITS_PER_UNIT))); }))))


/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c @0 (convert? (negate @1)))
  /* Apply STRIP_NOPS on the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
      /* If only one of the two types has wrapping overflow, do the
	 arithmetic in that type so no new undefined overflow is
	 introduced.  */
      tree t1 = type;
      if (INTEGRAL_TYPE_P (type)
	  && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
	t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (minus (convert:t1 @0) (convert:t1 @1))))))
 /* A - (-B) -> A + B */
 (simplify
  (minus @0 (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (with
    {
      tree t1 = type;
      if (INTEGRAL_TYPE_P (type)
	  && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
	t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
    }
    (convert (plus (convert:t1 @0) (convert:t1 @1))))))
 /* -(T)(-A) -> (T)A
    Sign-extension is ok except for INT_MIN, which thankfully cannot
    happen without overflow.  */
 (simplify
  (negate (convert (negate @1)))
  (if (INTEGRAL_TYPE_P (type)
       && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
	   || (!TYPE_UNSIGNED (TREE_TYPE (@1))
	       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
       && !TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (convert @1)))
 (simplify
  (negate (convert negate_expr_p@1))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ((DECIMAL_FLOAT_TYPE_P (type)
	    == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
	    && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
	   || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
   (convert (negate @1))))
 (simplify
  (negate (nop_convert (negate @1)))
  (if (!TYPE_OVERFLOW_SANITIZED (type)
       && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
   (view_convert @1)))

 /* We can't reassociate floating-point unless -fassociative-math
    or fixed-point plus or minus because of saturation to +-Inf.
*/
 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
      && !FIXED_POINT_TYPE_P (type))

  /* Match patterns that allow contracting a plus-minus pair
     irrespective of overflow issues.  */
  /* (A +- B) - A       ->  +- B */
  /* (A +- B) -+ B      ->  A */
  /* A - (A +- B)       -> -+ B */
  /* A +- (B -+ A)      ->  +- B */
  /* (A + B) - A -> B */
  (simplify
   (minus (plus:c @0 @1) @0)
   @1)
  /* (A - B) - A -> -B */
  (simplify
   (minus (minus @0 @1) @0)
   (negate @1))
  /* (A - B) + B -> A */
  (simplify
   (plus:c (minus @0 @1) @1)
   @0)
  /* A - (A + B) -> -B */
  (simplify
   (minus @0 (plus:c @0 @1))
   (negate @1))
  /* A - (A - B) -> B */
  (simplify
   (minus @0 (minus @0 @1))
   @1)
  /* (A +- B) + (C - A) -> C +- B */
  /* (A + B) - (A - C) -> B + C */
  /* More cases are handled with comparisons.  */
  (simplify
   (plus:c (plus:c @0 @1) (minus @2 @0))
   (plus @2 @1))
  (simplify
   (plus:c (minus @0 @1) (minus @2 @0))
   (minus @2 @1))
  /* (A - B) + (C - A) -> C - B, for pointer_diff, relying on undefined
     overflow of the difference.  */
  (simplify
   (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
   (if (TYPE_OVERFLOW_UNDEFINED (type)
	&& !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
    (pointer_diff @2 @1)))
  (simplify
   (minus (plus:c @0 @1) (minus @0 @2))
   (plus @1 @2))

  /* (A +- CST1) +- CST2 -> A + CST3
     Use view_convert because it is safe for vectors and equivalent for
     scalars.  */
  (for outer_op (plus minus)
   (for inner_op (plus minus)
	neg_inner_op (minus plus)
    (simplify
     (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
	       CONSTANT_CLASS_P@2)
     /* If one of the types wraps, use that one.  */
     (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
      /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
	 forever if something doesn't simplify into a constant.  */
      (if (!CONSTANT_CLASS_P (@0))
       (if (outer_op == PLUS_EXPR)
	(plus (view_convert @0) (inner_op @2 (view_convert @1)))
	(minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
      (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
       (if (outer_op == PLUS_EXPR)
	(view_convert (plus @0 (inner_op (view_convert @2) @1)))
	(view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
       /* If the constant operation overflows we cannot do the transform
	  directly as we would introduce undefined overflow, for example
	  with (a - 1) + INT_MIN.  */
       (if (types_match (type, @0))
	(with { tree cst = const_binop (outer_op == inner_op
					? PLUS_EXPR : MINUS_EXPR,
					type, @1, @2); }
	 (if (cst && !TREE_OVERFLOW (cst))
	  (inner_op @0 { cst; } )
	  /* X+INT_MAX+1 is X-INT_MIN.  */
	  (if (INTEGRAL_TYPE_P (type) && cst
	       && wi::to_wide (cst) == wi::min_value (type))
	   (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
	   /* Last resort, use some unsigned type.  */
	   (with { tree utype = unsigned_type_for (type); }
	    (if (utype)
	     (view_convert (inner_op
			    (view_convert:utype @0)
			    (view_convert:utype
			     { drop_tree_overflow (cst); }))))))))))))))

  /* (CST1 - A) +- CST2 -> CST3 - A  */
  (for outer_op (plus minus)
   (simplify
    (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
    (with { tree cst = const_binop (outer_op, type, @1, @2); }
     (if (cst && !TREE_OVERFLOW (cst))
      (minus { cst; } @0)))))

  /* CST1 - (CST2 - A) -> CST3 + A  */
  (simplify
   (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
   (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
    (if (cst && !TREE_OVERFLOW (cst))
     (plus { cst; } @0))))

  /* ~A + A -> -1 */
  (simplify
   (plus:c (bit_not @0) @0)
   (if (!TYPE_OVERFLOW_TRAPS (type))
    { build_all_ones_cst (type); }))

  /* ~A + 1 -> -A */
  (simplify
   (plus (convert? (bit_not @0)) integer_each_onep)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (negate (convert @0))))

  /* -A - 1 -> ~A */
  (simplify
   (minus (convert? (negate @0)) integer_each_onep)
   (if (!TYPE_OVERFLOW_TRAPS (type)
	&& tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (bit_not (convert @0))))

  /* -1 - A -> ~A */
  (simplify
   (minus integer_all_onesp @0)
   (bit_not @0))

  /* (T)(P + A) - (T)P -> (T) A */
  (simplify
   (minus (convert (plus:c @@0 @1))
    (convert? @0))
   (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
	/* For integer types, if A has a smaller type
	   than T the result depends on the possible
	   overflow in P + A.
	   E.g. T=size_t, A=(unsigned)429497295, P>0.
	   However, if an overflow in P + A would cause
	   undefined behavior, we can assume that there
	   is no overflow.
*/
	|| (INTEGRAL_TYPE_P (TREE_TYPE (@1))
	    && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
    (convert @1)))
  (simplify
   (minus (convert (pointer_plus @@0 @1))
    (convert @0))
   (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
	/* For pointer types, if the conversion of A to the
	   final type requires a sign- or zero-extension,
	   then we have to punt - it is not defined which
	   one is correct.  */
	|| (POINTER_TYPE_P (TREE_TYPE (@0))
	    && TREE_CODE (@1) == INTEGER_CST
	    && tree_int_cst_sign_bit (@1) == 0))
    (convert @1)))
  (simplify
   (pointer_diff (pointer_plus @@0 @1) @0)
   /* The second argument of pointer_plus must be interpreted as signed, and
      thus sign-extended if necessary.  */
   (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
    /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
       second arg is unsigned even when we need to consider it as signed,
       we don't want to diagnose overflow here.  */
    (convert (view_convert:stype @1))))

  /* (T)P - (T)(P + A) -> -(T) A */
  (simplify
   (minus (convert? @0)
    (convert (plus:c @@0 @1)))
   (if (INTEGRAL_TYPE_P (type)
	&& TYPE_OVERFLOW_UNDEFINED (type)
	&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (negate (convert:utype @1))))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
	 /* For integer types, if A has a smaller type
	    than T the result depends on the possible
	    overflow in P + A.
	    E.g. T=size_t, A=(unsigned)429497295, P>0.
	    However, if an overflow in P + A would cause
	    undefined behavior, we can assume that there
	    is no overflow.  */
	 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
	     && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
     (negate (convert @1)))))
  (simplify
   (minus (convert @0)
    (convert (pointer_plus @@0 @1)))
   (if (INTEGRAL_TYPE_P (type)
	&& TYPE_OVERFLOW_UNDEFINED (type)
	&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (negate (convert:utype @1))))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
	 /* For pointer types, if the conversion of A to the
	    final type requires a sign- or zero-extension,
	    then we have to punt - it is not defined which
	    one is correct.  */
	 || (POINTER_TYPE_P (TREE_TYPE (@0))
	     && TREE_CODE (@1) == INTEGER_CST
	     && tree_int_cst_sign_bit (@1) == 0))
     (negate (convert @1)))))
  (simplify
   (pointer_diff @0 (pointer_plus @@0 @1))
   /* The second argument of pointer_plus must be interpreted as signed, and
      thus sign-extended if necessary.  */
   (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
    /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
       second arg is unsigned even when we need to consider it as signed,
       we don't want to diagnose overflow here.  */
    (negate (convert (view_convert:stype @1)))))

  /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
  (simplify
   (minus (convert (plus:c @@0 @1))
    (convert (plus:c @0 @2)))
   (if (INTEGRAL_TYPE_P (type)
	&& TYPE_OVERFLOW_UNDEFINED (type)
	&& element_precision (type) <= element_precision (TREE_TYPE (@1))
	&& element_precision (type) <= element_precision (TREE_TYPE (@2)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (minus (convert:utype @1) (convert:utype @2))))
    (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
	  == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
	 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
	     /* For integer types, if A has a smaller type
		than T the result depends on the possible
		overflow in P + A.
		E.g. T=size_t, A=(unsigned)429497295, P>0.
		However, if an overflow in P + A would cause
		undefined behavior, we can assume that there
		is no overflow.  */
	     || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
		 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
		 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
		 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
     (minus (convert @1) (convert @2)))))
  (simplify
   (minus (convert (pointer_plus @@0 @1))
    (convert (pointer_plus @0 @2)))
   (if (INTEGRAL_TYPE_P (type)
	&& TYPE_OVERFLOW_UNDEFINED (type)
	&& element_precision (type) <= element_precision (TREE_TYPE (@1)))
    (with { tree utype = unsigned_type_for (type); }
     (convert (minus (convert:utype @1) (convert:utype @2))))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
	 /* For pointer types, if the conversion of A to the
	    final type requires a sign- or zero-extension,
	    then we have to punt - it is not defined which
	    one is correct.  */
	 || (POINTER_TYPE_P (TREE_TYPE (@0))
	     && TREE_CODE (@1) == INTEGER_CST
	     && tree_int_cst_sign_bit (@1) == 0
	     && TREE_CODE (@2) == INTEGER_CST
	     && tree_int_cst_sign_bit (@2) == 0))
     (minus (convert @1) (convert @2)))))
  (simplify
   (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
   /* The second argument of pointer_plus must be interpreted as signed, and
      thus sign-extended if necessary.  */
   (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
    /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
       second arg is unsigned even when we need to consider it as signed,
       we don't want to diagnose overflow here.  */
    (minus (convert (view_convert:stype @1))
	   (convert (view_convert:stype @2)))))))

/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
    Modeled after fold_plusminus_mult_expr.  */
(if (!TYPE_SATURATING (type)
     && (!FLOAT_TYPE_P (type) || flag_associative_math))
 (for plusminus (plus minus)
  (simplify
   /* ":c" matches the mult commutatively, ":s" restricts it to
      single uses (relaxed to "at least one" by the single_use test
      below).  */
   (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
   (if ((!ANY_INTEGRAL_TYPE_P (type)
	 || TYPE_OVERFLOW_WRAPS (type)
	 || (INTEGRAL_TYPE_P (type)
	     && tree_expr_nonzero_p (@0)
	     && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
	/* If @1 +- @2 is constant require a hard single-use on either
	   original operand (but not on both).  */
	&& (single_use (@3) || single_use (@4)))
    (mult (plusminus @1 @2) @0)))
  /* We cannot generate constant 1 for fract.
*/
  (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
   (simplify
    (plusminus @0 (mult:c@3 @0 @2))
    (if ((!ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_WRAPS (type)
	  || (INTEGRAL_TYPE_P (type)
	      && tree_expr_nonzero_p (@0)
	      && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
	 && single_use (@3))
     (mult (plusminus { build_one_cst (type); } @2) @0)))
   (simplify
    (plusminus (mult:c@3 @0 @2) @0)
    (if ((!ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_WRAPS (type)
	  || (INTEGRAL_TYPE_P (type)
	      && tree_expr_nonzero_p (@0)
	      && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
	 && single_use (@3))
     (mult (plusminus @2 { build_one_cst (type); }) @0))))))

/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax().  */

/* minmax (X, X) -> X.  */
(for minmax (min max FMIN_ALL FMAX_ALL)
 (simplify
  (minmax @0 @0)
  @0))
/* min(max(x,y),y) -> y.  */
(simplify
 (min:c (max:c @0 @1) @1)
 @1)
/* max(min(x,y),y) -> y.  */
(simplify
 (max:c (min:c @0 @1) @1)
 @1)
/* max(a,-a) -> abs(a).  */
(simplify
 (max:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_UNDEFINED (type)))
  (abs @0)))
/* min(a,-a) -> -abs(a).  */
(simplify
 (min:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
	  || TYPE_OVERFLOW_UNDEFINED (type)))
  (negate (abs @0))))
(simplify
 (min @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @0)))
(simplify
 (max @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @0)))

/* max (a, a + CST) -> a + CST where CST is positive.  */
/* max (a, a + CST) -> a where CST is negative.  */
(simplify
 (max:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @2
    @0)))

/* min (a, a + CST) -> a where CST is positive.  */
/* min (a, a + CST) -> a + CST where CST is negative.  */
(simplify
 (min:c @0 (plus@2 @0 INTEGER_CST@1))
  (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_int_cst_sgn (@1) > 0)
    @0
    @2)))

/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
   and the outer convert demotes the expression back to x's type.  */
(for minmax (min max)
 (simplify
  (convert (minmax@0 (convert @1) INTEGER_CST@2))
  (if (INTEGRAL_TYPE_P (type)
       && types_match (@1, type) && int_fits_type_p (@2, type)
       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
       && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
   (minmax @1 (convert @2)))))

(for minmax (FMIN_ALL FMAX_ALL)
 /* If either argument is NaN, return the other one.  Avoid the
    transformation if we get (and honor) a signalling NaN.
*/
 (simplify
  (minmax:c @0 REAL_CST@1)
  (if (real_isnan (TREE_REAL_CST_PTR (@1))
       && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
   @0)))
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR.  C99 requires these
   functions to return the numeric arg if the other one is NaN.
   MIN and MAX don't honor that, so only transform if -ffinite-math-only
   is set.  C99 doesn't require -0.0 to be handled, so we don't have to
   worry about it either.  */
(if (flag_finite_math_only)
 (simplify
  (FMIN_ALL @0 @1)
  (min @0 @1))
 (simplify
  (FMAX_ALL @0 @1)
  (max @0 @1)))
/* min (-A, -B) -> -max (A, B)  */
(for minmax (min max FMIN_ALL FMAX_ALL)
     maxmin (max min FMAX_ALL FMIN_ALL)
 (simplify
  (minmax (negate:s@2 @0) (negate:s@3 @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (negate (maxmin @0 @1)))))
/* MIN (~X, ~Y) -> ~MAX (X, Y)
   MAX (~X, ~Y) -> ~MIN (X, Y)  */
(for minmax (min max)
 maxmin (max min)
 (simplify
  (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
  (bit_not (maxmin @0 @1))))

/* MIN (X, Y) == X -> X <= Y  */
(for minmax (min min max max )
 cmp    (eq  ne  eq  ne )
 out    (le  gt  ge  lt )
 (simplify
  (cmp:c (minmax:c @0 @1) @0)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (out @0 @1))))
/* MIN (X, 5) == 0 -> X == 0
   MIN (X, 5) == 7 -> false  */
(for cmp (eq ne)
 (simplify
  (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
		 TYPE_SIGN (TREE_TYPE (@0))))
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
		  TYPE_SIGN (TREE_TYPE (@0))))
    (cmp @0 @2)))))
/* Likewise for MAX: MAX (X, 5) == 3 -> false,
   MAX (X, 5) == 7 -> X == 7.  */
(for cmp (eq ne)
 (simplify
  (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
		 TYPE_SIGN (TREE_TYPE (@0))))
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
		  TYPE_SIGN (TREE_TYPE (@0))))
    (cmp @0 @2)))))
/* MIN (X, C1) < C2 -> X < C2 || C1 < C2  */
(for minmax (min     min     max     max     min     min     max     max    )
 cmp    (lt      le      gt      ge      gt      ge      lt      le     )
 comb   (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
 (simplify
  (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
  (comb (cmp @0 @2) (cmp @1 @2))))

/* Simplifications of shift and rotates.  */

(for rotate (lrotate rrotate)
 (simplify
  (rotate integer_all_onesp@0 @1)
  @0))

/* Optimize -1 >> x for arithmetic right shifts.  */
(simplify
 (rshift integer_all_onesp@0 @1)
 (if (!TYPE_UNSIGNED (type)
      && tree_expr_nonnegative_p (@1))
  @0))

/* Optimize (x >> c) << c into x & (-1<<c).  */
(simplify
 (lshift (rshift @0 INTEGER_CST@1) @1)
  (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
   (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))

/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
   types.  */
(simplify
 (rshift (lshift @0 INTEGER_CST@1) @1)
  (if (TYPE_UNSIGNED (type)
       && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
   (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))

(for shiftrotate (lrotate rrotate lshift rshift)
 (simplify
  (shiftrotate @0 integer_zerop)
  (non_lvalue @0))
 (simplify
  (shiftrotate integer_zerop@0 @1)
  @0)
 /* Prefer vector1 << scalar to vector1 << vector2
    if vector2 is uniform.  */
 (for vec (VECTOR_CST CONSTRUCTOR)
  (simplify
   (shiftrotate @0 vec@1)
   (with { tree tem = uniform_vector_p (@1); }
    (if (tem)
     (shiftrotate @0 { tem; }))))))

/* Simplify X << Y where Y's low width bits are 0 to X, as only valid
   Y is 0.  Similarly for X >> Y.
*/
#if GIMPLE
(for shift (lshift rshift)
 (simplify
  (shift @0 SSA_NAME@1)
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
    (with {
      /* Only the low ceil_log2(prec) bits of the shift count select a
	 valid bit position; if they are all known zero the count is 0.  */
      int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
      int prec = TYPE_PRECISION (TREE_TYPE (@1));
     }
     (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
      @0)))))
#endif

/* Rewrite an LROTATE_EXPR by a constant into an
   RROTATE_EXPR by a new constant.  */
(simplify
 (lrotate @0 INTEGER_CST@1)
 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
			    build_int_cst (TREE_TYPE (@1),
					   element_precision (type)), @1); }))

/* Turn (a OP c1) OP c2 into a OP (c1+c2).  */
(for op (lrotate rrotate rshift lshift)
 (simplify
  (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
  /* Both original counts must be in range [0, prec); the combined
     count is fixed up below when it is not.  */
  (with { unsigned int prec = element_precision (type); }
   (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
	&& wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
	&& wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
	&& wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
    (with { unsigned int low = (tree_to_uhwi (@1)
				+ tree_to_uhwi (@2)); }
     /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
	being well defined.  */
     (if (low >= prec)
      (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
       (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
       (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
	{ build_zero_cst (type); }
	(op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
      (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))


/* ((1 << A) & 1) != 0 -> A == 0
   ((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
     icmp (eq ne)
 (simplify
  (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
  (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))

/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
   (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
   if CST2 != 0.  */
(for cmp (ne eq)
 (simplify
  (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
  (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
   (if (cand < 0
	|| (!integer_zerop (@2)
	    && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
    { constant_boolean_node (cmp == NE_EXPR, type); }
    (if (!integer_zerop (@2)
	 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
     (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))

/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
        (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
   if the new mask might be further optimized.
*/
(for shift (lshift rshift)
 (simplify
  (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
	   INTEGER_CST@2)
   (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
	&& TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
	&& tree_fits_uhwi_p (@1)
	&& tree_to_uhwi (@1) > 0
	&& tree_to_uhwi (@1) < TYPE_PRECISION (type))
    (with
     {
       unsigned int shiftc = tree_to_uhwi (@1);
       unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
       /* zerobits accumulates the bits the shift is known to leave zero.  */
       unsigned HOST_WIDE_INT newmask, zerobits = 0;
       tree shift_type = TREE_TYPE (@3);
       unsigned int prec;

       if (shift == LSHIFT_EXPR)
	 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
       else if (shift == RSHIFT_EXPR
		&& type_has_mode_precision_p (shift_type))
	 {
	   prec = TYPE_PRECISION (TREE_TYPE (@3));
	   /* NOTE(review): arg00 is set but not referenced below —
	      presumably left over from the fold-const.c original;
	      confirm before removing.  */
	   tree arg00 = @0;
	   /* See if more bits can be proven as zero because of
	      zero extension.  */
	   if (@3 != @0
	       && TYPE_UNSIGNED (TREE_TYPE (@0)))
	     {
	       tree inner_type = TREE_TYPE (@0);
	       if (type_has_mode_precision_p (inner_type)
		   && TYPE_PRECISION (inner_type) < prec)
		 {
		   prec = TYPE_PRECISION (inner_type);
		   /* See if we can shorten the right shift.  */
		   if (shiftc < prec)
		     shift_type = inner_type;
		   /* Otherwise X >> C1 is all zeros, so we'll optimize
		      it into (X, 0) later on by making sure zerobits
		      is all ones.  */
		 }
	     }
	   zerobits = HOST_WIDE_INT_M1U;
	   if (shiftc < prec)
	     {
	       zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
	       zerobits <<= prec - shiftc;
	     }
	   /* For arithmetic shift if sign bit could be set, zerobits
	      can contain actually sign bits, so no transformation is
	      possible, unless MASK masks them all away.  In that
	      case the shift needs to be converted into logical shift.  */
	   if (!TYPE_UNSIGNED (TREE_TYPE (@3))
	       && prec == TYPE_PRECISION (TREE_TYPE (@3)))
	     {
	       if ((mask & zerobits) == 0)
		 shift_type = unsigned_type_for (TREE_TYPE (@3));
	       else
		 zerobits = 0;
	     }
	 }
     }
     /* ((X << 16) & 0xff00) is (X, 0).  */
     (if ((mask & zerobits) == mask)
      { build_int_cst (type, 0); }
      (with { newmask = mask | zerobits; }
       (if (newmask != mask && (newmask & (newmask + 1)) == 0)
	(with
	 {
	   /* Only do the transformation if NEWMASK is some integer
	      mode's mask.  */
	   for (prec = BITS_PER_UNIT;
	        prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
	     if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
	       break;
	 }
	 (if (prec < HOST_BITS_PER_WIDE_INT
	      || newmask == HOST_WIDE_INT_M1U)
	  (with
	   { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
	   (if (!tree_int_cst_equal (newmaskt, @2))
	    (if (shift_type != TREE_TYPE (@3))
	     (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
	     (bit_and @4 { newmaskt; })))))))))))))

/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
   (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1).  */
(for shift (lshift rshift)
 (for bit_op (bit_and bit_xor bit_ior)
  (simplify
   (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
     (bit_op (shift (convert @0) @1) { mask; }))))))

/* ~(~X >> Y) -> X >> Y (for arithmetic shift).
*/
(simplify
 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
      && (element_precision (TREE_TYPE (@0))
	  <= element_precision (TREE_TYPE (@1))
	  || !TYPE_UNSIGNED (TREE_TYPE (@1))))
  (with
   { tree shift_type = TREE_TYPE (@0); }
   (convert (rshift (convert:shift_type @1) @2)))))

/* ~(~X >>r Y) -> X >>r Y
   ~(~X <<r Y) -> X <<r Y */
(for rotate (lrotate rrotate)
 (simplify
  (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
  (if ((element_precision (TREE_TYPE (@0))
	<= element_precision (TREE_TYPE (@1))
	|| !TYPE_UNSIGNED (TREE_TYPE (@1)))
       && (element_precision (type) <= element_precision (TREE_TYPE (@0))
	   || !TYPE_UNSIGNED (TREE_TYPE (@0))))
   (with
    { tree rotate_type = TREE_TYPE (@0); }
    (convert (rotate (convert:rotate_type @1) @2))))))

/* Simplifications of conversions.  */

/* Basic strip-useless-type-conversions / strip_nops.  */
(for cvt (convert view_convert float fix_trunc)
 (simplify
  (cvt @0)
  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
       || (GENERIC && type == TREE_TYPE (@0)))
   @0)))

/* Contract view-conversions.  */
(simplify
  (view_convert (view_convert @0))
  (view_convert @0))

/* For integral conversions with the same precision or pointer
   conversions use a NOP_EXPR instead.  */
(simplify
  (view_convert @0)
  (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
   (convert @0)))

/* Strip inner integral conversions that do not change precision or size, or
   zero-extend while keeping the same size (for bool-to-char).  */
(simplify
  (view_convert (convert@0 @1))
  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
       && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
       && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
	   || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
	       && TYPE_UNSIGNED (TREE_TYPE (@1)))))
   (view_convert @1)))

/* Simplify a view-converted empty constructor.  */
(simplify
  (view_convert CONSTRUCTOR@0)
  (if (TREE_CODE (@0) != SSA_NAME
       && CONSTRUCTOR_NELTS (@0) == 0)
   { build_zero_cst (type); }))

/* Re-association barriers around constants and other re-association
   barriers can be removed.  */
/* "paren" is PAREN_EXPR, the explicit association barrier.  */
(simplify
 (paren CONSTANT_CLASS_P@0)
 @0)
(simplify
 (paren (paren@1 @0))
 @1)

/* Handle cases of two conversions in a row.  */
(for ocvt (convert float fix_trunc)
 (for icvt (convert float)
  (simplify
   (ocvt (icvt@1 @0))
   (with
    {
      tree inside_type = TREE_TYPE (@0);
      tree inter_type = TREE_TYPE (@1);
      int inside_int = INTEGRAL_TYPE_P (inside_type);
      int inside_ptr = POINTER_TYPE_P (inside_type);
      int inside_float = FLOAT_TYPE_P (inside_type);
      int inside_vec = VECTOR_TYPE_P (inside_type);
      unsigned int inside_prec = TYPE_PRECISION (inside_type);
      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
      int inter_int = INTEGRAL_TYPE_P (inter_type);
      int inter_ptr = POINTER_TYPE_P (inter_type);
      int inter_float = FLOAT_TYPE_P (inter_type);
      int inter_vec = VECTOR_TYPE_P (inter_type);
      unsigned int inter_prec = TYPE_PRECISION (inter_type);
      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
      int final_int = INTEGRAL_TYPE_P (type);
      int final_ptr = POINTER_TYPE_P (type);
      int final_float = FLOAT_TYPE_P (type);
      int final_vec = VECTOR_TYPE_P
(type);
      unsigned int final_prec = TYPE_PRECISION (type);
      int final_unsignedp = TYPE_UNSIGNED (type);
    }
   (switch
    /* In addition to the cases of two conversions in a row
       handled below, if we are converting something to its own
       type via an object of identical or wider precision, neither
       conversion is needed.  */
    (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
	  || (GENERIC
	      && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
	 && (((inter_int || inter_ptr) && final_int)
	     || (inter_float && final_float))
	 && inter_prec >= final_prec)
     (ocvt @0))

    /* Likewise, if the intermediate and initial types are either both
       float or both integer, we don't need the middle conversion if the
       former is wider than the latter and doesn't change the signedness
       (for integers).  Avoid this if the final type is a pointer since
       then we sometimes need the middle conversion.  */
    (if (((inter_int && inside_int) || (inter_float && inside_float))
	 && (final_int || final_float)
	 && inter_prec >= inside_prec
	 && (inter_float || inter_unsignedp == inside_unsignedp))
     (ocvt @0))

    /* If we have a sign-extension of a zero-extended value, we can
       replace that by a single zero-extension.  Likewise if the
       final conversion does not change precision we can drop the
       intermediate conversion.  */
    (if (inside_int && inter_int && final_int
	 && ((inside_prec < inter_prec && inter_prec < final_prec
	      && inside_unsignedp && !inter_unsignedp)
	     || final_prec == inter_prec))
     (ocvt @0))

    /* Two conversions in a row are not needed unless:
	- some conversion is floating-point (overstrict for now), or
	- some conversion is a vector (overstrict for now), or
	- the intermediate type is narrower than both initial and
	  final, or
	- the intermediate type and innermost type differ in signedness,
	  and the outermost type is wider than the intermediate, or
	- the initial type is a pointer type and the precisions of the
	  intermediate and final types differ, or
	- the final type is a pointer type and the precisions of the
	  initial and intermediate types differ.  */
    (if (! inside_float && ! inter_float && ! final_float
	 && ! inside_vec && ! inter_vec && ! final_vec
	 && (inter_prec >= inside_prec || inter_prec >= final_prec)
	 && ! (inside_int && inter_int
	       && inter_unsignedp != inside_unsignedp
	       && inter_prec < final_prec)
	 && ((inter_unsignedp && inter_prec > inside_prec)
	     == (final_unsignedp && final_prec > inter_prec))
	 && ! (inside_ptr && inter_prec != final_prec)
	 && ! (final_ptr && inside_prec != inter_prec))
     (ocvt @0))

    /* A truncation to an unsigned type (a zero-extension) should be
       canonicalized as bitwise and of a mask.  */
    (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion.  */
	 && final_int && inter_int && inside_int
	 && final_prec == inside_prec
	 && final_prec > inter_prec
	 && inter_unsignedp)
     (convert (bit_and @0 { wide_int_to_tree
	                      (inside_type,
			       wi::mask (inter_prec, false,
					 TYPE_PRECISION (inside_type))); })))

    /* If we are converting an integer to a floating-point that can
       represent it exactly and back to an integer, we can skip the
       floating-point conversion.  */
    (if (GIMPLE /* PR66211 */
	 && inside_int && inter_float && final_int &&
	 (unsigned) significand_size (TYPE_MODE (inter_type))
	 >= inside_prec - !inside_unsignedp)
     (convert @0)))))))

/* If we have a narrowing conversion to an integral type that is fed by a
   BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
   masks off bits outside the final type (and nothing else).  */
(simplify
  (convert (bit_and @0 INTEGER_CST@1))
  (if (INTEGRAL_TYPE_P (type)
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
       && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
						    TYPE_PRECISION (type)), 0))
   (convert @0)))


/* (X /[ex] A) * A -> X.  */
/* '@@1' lets the two occurrences of A match even when their types
   differ (e.g. in signedness) — genmatch value-matching capture;
   convert1?/convert2? are independent optional conversions.  */
(simplify
  (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
  (convert @0))

/* ((X /[ex] A) +- B) * A  -->  X +- A * B.  */
(for op (plus minus)
 (simplify
  (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
  (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
       && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
   (with
     {
       wi::overflow_type overflow;
       wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			       TYPE_SIGN (type), &overflow);
     }
     (if (types_match (type, TREE_TYPE (@2))
	 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
      (op @0 { wide_int_to_tree (type, mul); })
      (with { tree utype = unsigned_type_for (type); }
       (convert (op (convert:utype @0)
		    (mult (convert:utype @1) (convert:utype @2))))))))))

/* Canonicalization of binary operations.  */

/* Convert X + -C into X - C.
*/
(simplify
 (plus @0 REAL_CST@1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
  (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
   /* Only fold if negating the constant did not overflow, or when
      trapping math is disabled.  */
   (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
    (minus @0 { tem; })))))

/* Convert x+x into x*2.  */
(simplify
 (plus @0 @0)
 (if (SCALAR_FLOAT_TYPE_P (type))
  (mult @0 { build_real (type, dconst2); })
  (if (INTEGRAL_TYPE_P (type))
   (mult @0 { build_int_cst (type, 2); }))))

/* 0 - X  ->  -X.  */
(simplify
 (minus integer_zerop @1)
 (negate @1))
(simplify
 (pointer_diff integer_zerop @1)
 (negate (convert @1)))

/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0).  So check whether
   ARG0 is zero and X + ARG0 reduces to X, since that would mean
   (-ARG1 + ARG0) reduces to -ARG1.  */
(simplify
 (minus real_zerop@0 @1)
 (if (fold_real_zero_addition_p (type, @0, 0))
  (negate @1)))

/* Transform x * -1 into -x.  */
(simplify
 (mult @0 integer_minus_onep)
 (negate @0))

/* Reassociate (X * CST) * Y to (X * Y) * CST.  This does not introduce
   signed overflow for CST != 0 && CST != -1.  */
(simplify
 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
 (if (TREE_CODE (@2) != INTEGER_CST
      && single_use (@3)
      && !integer_zerop (@1) && !integer_minus_onep (@1))
  (mult (mult @0 @2) @1)))

/* True if we can easily extract the real and imaginary parts of a complex
   number.  */
(match compositional_complex
 (convert? (complex @0 @1)))

/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations.  */
(simplify
 (complex (realpart @0) (imagpart @0))
 @0)
(simplify
 (realpart (complex @0 @1))
 @0)
(simplify
 (imagpart (complex @0 @1))
 @1)

/* Sometimes we only care about half of a complex expression.  */
(simplify
 (realpart (convert?:s (conj:s @0)))
 (convert (realpart @0)))
(simplify
 (imagpart (convert?:s (conj:s @0)))
 (convert (negate (imagpart @0))))
(for part (realpart imagpart)
 (for op (plus minus)
  (simplify
   (part (convert?:s@2 (op:s @0 @1)))
   (convert (op (part @0) (part @1))))))
(simplify
 (realpart (convert?:s (CEXPI:s @0)))
 (convert (COS @0)))
(simplify
 (imagpart (convert?:s (CEXPI:s @0)))
 (convert (SIN @0)))

/* conj(conj(x)) -> x  */
(simplify
 (conj (convert? (conj @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
  (convert @0)))

/* conj({x,y}) -> {x,-y}  */
(simplify
 (conj (convert?:s (complex:s @0 @1)))
 (with { tree itype = TREE_TYPE (type); }
  (complex (convert:itype @0) (negate (convert:itype @1)))))

/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c.  */
(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
 (simplify
  (bswap (bswap @0))
  @0)
 (simplify
  (bswap (bit_not (bswap @0)))
  (bit_not @0))
 /* bswap distributes over bitwise operations, so pull one bswap
    through and cancel the other.  */
 (for bitop (bit_xor bit_ior bit_and)
  (simplify
   (bswap (bitop:c (bswap @0) @1))
   (bitop @0 (bswap @1)))))


/* Combine COND_EXPRs and VEC_COND_EXPRs.  */

/* Simplify constant conditions.
   Only optimize constant conditions when the selected branch
   has the same type as the COND_EXPR.  This avoids optimizing
   away "c ? x : throw", where the throw has a void type.
   Note that we cannot throw away the fold-const.c variant nor
   this one as we depend on doing this transform before possibly
   A ? B : B -> B triggers and the fold-const.c one can optimize
   0 ? A : B to B even if A has side-effects.  Something
   genmatch cannot handle.
*/
(simplify
 (cond INTEGER_CST@0 @1 @2)
 (if (integer_zerop (@0))
  (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
   @2)
  (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
   @1)))
(simplify
 (vec_cond VECTOR_CST@0 @1 @2)
 (if (integer_all_onesp (@0))
  @1
  (if (integer_zerop (@0))
   @2)))

/* Simplification moved from fold_cond_expr_with_comparison.  It may also
   be extended.  */
/* This pattern implements two kinds of simplification:

   Case 1)
   (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
     1) Conversions are type widening from smaller type.
     2) Const c1 equals to c2 after canonicalizing comparison.
     3) Comparison has tree code LT, LE, GT or GE.
   This specific pattern is needed when (cmp (convert x) c) may not
   be simplified by comparison patterns because of multiple uses of
   x.  It also makes sense here because simplifying across multiple
   referred var is always beneficial for complicated cases.

   Case 2)
   (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2).  */
(for cmp (lt le gt ge eq)
 (simplify
  (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
  (with
   {
     tree from_type = TREE_TYPE (@1);
     tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
     enum tree_code code = ERROR_MARK;

     /* The conversions must be widening (or no-op) and must not change
	how the constants compare against the inner value.  */
     if (INTEGRAL_TYPE_P (from_type)
	 && int_fits_type_p (@2, from_type)
	 && (types_match (c1_type, from_type)
	     || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
		 && (TYPE_UNSIGNED (from_type)
		     || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
	 && (types_match (c2_type, from_type)
	     || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
		 && (TYPE_UNSIGNED (from_type)
		     || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
       {
	 if (cmp != EQ_EXPR)
	   {
	     if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
	       {
		 /* X <= Y - 1 equals to X < Y.  */
		 if (cmp == LE_EXPR)
		   code = LT_EXPR;
		 /* X > Y - 1 equals to X >= Y.  */
		 if (cmp == GT_EXPR)
		   code = GE_EXPR;
	       }
	     if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
	       {
		 /* X < Y + 1 equals to X <= Y.  */
		 if (cmp == LT_EXPR)
		   code = LE_EXPR;
		 /* X >= Y + 1 equals to X > Y.  */
		 if (cmp == GE_EXPR)
		   code = GT_EXPR;
	       }
	     if (code != ERROR_MARK
		 || wi::to_widest (@2) == wi::to_widest (@3))
	       {
		 if (cmp == LT_EXPR || cmp == LE_EXPR)
		   code = MIN_EXPR;
		 if (cmp == GT_EXPR || cmp == GE_EXPR)
		   code = MAX_EXPR;
	       }
	   }
	 /* Can do A == C1 ? A : C2  ->  A == C1 ? C1 : C2?  */
	 else if (int_fits_type_p (@3, from_type))
	   code = EQ_EXPR;
       }
   }
   (if (code == MAX_EXPR)
    (convert (max @1 (convert @2)))
    (if (code == MIN_EXPR)
     (convert (min @1 (convert @2)))
     (if (code == EQ_EXPR)
      (convert (cond (eq @1 (convert @3))
		     (convert:from_type @3) (convert:from_type @2)))))))))

/* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:

   1) OP is PLUS or MINUS.
   2) CMP is LT, LE, GT or GE.
   3) C3 == (C1 op C2), and computation doesn't have undefined behavior.

   This pattern also handles special cases like:

   A) Operand x is an unsigned to signed type conversion and c1 is
      integer zero.  In this case,
	(signed type)x  < 0  <=>  x  > MAX_VAL(signed type)
	(signed type)x >= 0  <=>  x <= MAX_VAL(signed type)
   B) Const c1 may not equal to (C3 op' C2).  In this case we also
      check equality for (c1+1) and (c1-1) by adjusting comparison
      code.

   TODO: Though signed type is handled by this pattern, it cannot be
   simplified at the moment because C standard requires additional
   type promotion.  In order to match&simplify it here, the IR needs
   to be cleaned up by other optimizers, i.e, VRP.  */
(for op (plus minus)
 (for cmp (lt le gt ge)
  (simplify
   (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
   (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
    (if (types_match (from_type, to_type)
	 /* Check if it is special case A).  */
	 || (TYPE_UNSIGNED (from_type)
	     && !TYPE_UNSIGNED (to_type)
	     && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
	     && integer_zerop (@1)
	     && (cmp == LT_EXPR || cmp == GE_EXPR)))
     (with
      {
	wi::overflow_type overflow = wi::OVF_NONE;
	enum tree_code code, cmp_code = cmp;
	wide_int real_c1;
	wide_int c1 = wi::to_wide (@1);
	wide_int c2 = wi::to_wide (@2);
	wide_int c3 = wi::to_wide (@3);
	signop sgn = TYPE_SIGN (from_type);

	/* Handle special case A), given x of unsigned type:
	    ((signed type)x  < 0) <=> (x  > MAX_VAL(signed type))
	    ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type))  */
	if (!types_match (from_type, to_type))
	  {
	    if (cmp_code == LT_EXPR)
	      cmp_code = GT_EXPR;
	    if (cmp_code == GE_EXPR)
	      cmp_code = LE_EXPR;
	    c1 = wi::max_value (to_type);
	  }
	/* To simplify this pattern, we require c3 = (c1 op c2).  Here we
	   compute (c3 op' c2) and check if it equals to c1 with op' being
	   the inverted operator of op.  Make sure overflow doesn't happen
	   if it is undefined.  */
	if (op == PLUS_EXPR)
	  real_c1 = wi::sub (c3, c2, sgn, &overflow);
	else
	  real_c1 = wi::add (c3, c2, sgn, &overflow);

	code = cmp_code;
	if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
	  {
	    /* Check if c1 equals to real_c1.  Boundary condition is handled
	       by adjusting comparison operation if necessary.  */
	    if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
		&& !overflow)
	      {
		/* X <= Y - 1 equals to X < Y.  */
		if (cmp_code == LE_EXPR)
		  code = LT_EXPR;
		/* X > Y - 1 equals to X >= Y.  */
		if (cmp_code == GT_EXPR)
		  code = GE_EXPR;
	      }
	    if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
		&& !overflow)
	      {
		/* X < Y + 1 equals to X <= Y.  */
		if (cmp_code == LT_EXPR)
		  code = LE_EXPR;
		/* X >= Y + 1 equals to X > Y.  */
		if (cmp_code == GE_EXPR)
		  code = GT_EXPR;
	      }
	    if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
	      {
		if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
		  code = MIN_EXPR;
		if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
		  code = MAX_EXPR;
	      }
	  }
      }
      (if (code == MAX_EXPR)
       (op (max @X { wide_int_to_tree (from_type, real_c1); })
	   { wide_int_to_tree (from_type, c2); })
       (if (code == MIN_EXPR)
	(op (min @X { wide_int_to_tree (from_type, real_c1); })
	    { wide_int_to_tree (from_type, c2); })))))))))

(for cnd (cond vec_cond)
 /* A ? B : (A ? X : C) -> A ? B : C.  */
 (simplify
  (cnd @0 (cnd @0 @1 @2) @3)
  (cnd @0 @1 @3))
 (simplify
  (cnd @0 @1 (cnd @0 @2 @3))
  (cnd @0 @1 @3))
 /* A ? B : (!A ? C : X) -> A ? B : C.  */
 /* ???  This matches embedded conditions open-coded because genmatch
    would generate matching code for conditions in separate stmts only.
    The following is still important to merge then and else arm cases
    from if-conversion.  */
 (simplify
  (cnd @0 @1 (cnd @2 @3 @4))
  (if (inverse_conditions_p (@0, @2))
   (cnd @0 @1 @3)))
 (simplify
  (cnd @0 (cnd @1 @2 @3) @4)
  (if (inverse_conditions_p (@0, @1))
   (cnd @0 @3 @4)))

 /* A ? B : B -> B.  */
 (simplify
  (cnd @0 @1 @1)
  @1)

 /* !A ? B : C -> A ? C : B.  */
 (simplify
  (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
  (cnd @0 @2 @1)))

/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
   return all -1 or all 0 results.  */
/* ??? We could instead convert all instances of the vec_cond to negate,
   but that isn't necessarily a win on its own.  */
(simplify
 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
 (if (VECTOR_TYPE_P (type)
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
      && (TYPE_MODE (TREE_TYPE (type))
	  == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
  (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))

/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0).  */
(simplify
 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
 (if (VECTOR_TYPE_P (type)
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
      && (TYPE_MODE (TREE_TYPE (type))
	  == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
  (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))


/* Simplifications of comparisons.  */

/* See if we can reduce the magnitude of a constant involved in a
   comparison by changing the comparison code.  This is a canonicalization
   formerly done by maybe_canonicalize_comparison_1.  */
(for cmp (le gt)
 acmp (lt ge)
 (simplify
  (cmp @0 uniform_integer_cst_p@1)
  (with { tree cst = uniform_integer_cst_p (@1); }
   (if (tree_int_cst_sgn (cst) == -1)
    (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
				  wide_int_to_tree (TREE_TYPE (cst),
						    wi::to_wide (cst)
						    + 1)); })))))
(for cmp (ge lt)
 acmp (gt le)
 (simplify
  (cmp @0 uniform_integer_cst_p@1)
  (with { tree cst = uniform_integer_cst_p (@1); }
   (if (tree_int_cst_sgn (cst) == 1)
    (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
				  wide_int_to_tree (TREE_TYPE (cst),
						    wi::to_wide (cst) - 1)); })))))

/* We can simplify a logical negation of a comparison to the
   inverted comparison.  As we cannot compute an expression
   operator using invert_tree_comparison we have to simulate
   that with expression code iteration.
*/ 3154(for cmp (tcc_comparison) 3155 icmp (inverted_tcc_comparison) 3156 ncmp (inverted_tcc_comparison_with_nans) 3157 /* Ideally we'd like to combine the following two patterns 3158 and handle some more cases by using 3159 (logical_inverted_value (cmp @0 @1)) 3160 here but for that genmatch would need to "inline" that. 3161 For now implement what forward_propagate_comparison did. */ 3162 (simplify 3163 (bit_not (cmp @0 @1)) 3164 (if (VECTOR_TYPE_P (type) 3165 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)) 3166 /* Comparison inversion may be impossible for trapping math, 3167 invert_tree_comparison will tell us. But we can't use 3168 a computed operator in the replacement tree thus we have 3169 to play the trick below. */ 3170 (with { enum tree_code ic = invert_tree_comparison 3171 (cmp, HONOR_NANS (@0)); } 3172 (if (ic == icmp) 3173 (icmp @0 @1) 3174 (if (ic == ncmp) 3175 (ncmp @0 @1)))))) 3176 (simplify 3177 (bit_xor (cmp @0 @1) integer_truep) 3178 (with { enum tree_code ic = invert_tree_comparison 3179 (cmp, HONOR_NANS (@0)); } 3180 (if (ic == icmp) 3181 (icmp @0 @1) 3182 (if (ic == ncmp) 3183 (ncmp @0 @1)))))) 3184 3185/* Transform comparisons of the form X - Y CMP 0 to X CMP Y. 3186 ??? The transformation is valid for the other operators if overflow 3187 is undefined for the type, but performing it here badly interacts 3188 with the transformation in fold_cond_expr_with_comparison which 3189 attempts to synthetize ABS_EXPR. */ 3190(for cmp (eq ne) 3191 (for sub (minus pointer_diff) 3192 (simplify 3193 (cmp (sub@2 @0 @1) integer_zerop) 3194 (if (single_use (@2)) 3195 (cmp @0 @1))))) 3196 3197/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the 3198 signed arithmetic case. That form is created by the compiler 3199 often enough for folding it to be of value. One example is in 3200 computing loop trip counts after Operator Strength Reduction. 
*/
(for cmp (simple_comparison)
 scmp (swapped_simple_comparison)
 (simplify
  (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
  /* Handle unfolded multiplication by zero.  */
  (if (integer_zerop (@1))
   (cmp @1 @2)
   (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
	&& single_use (@3))
    /* If @1 is negative we swap the sense of the comparison.  */
    (if (tree_int_cst_sgn (@1) < 0)
     (scmp @0 @2)
     (cmp @0 @2))))))

/* Simplify comparison of something with itself.  For IEEE
   floating-point, we can only do some of these simplifications.  */
(for cmp (eq ge le)
 (simplify
  (cmp @0 @0)
  (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
       || ! HONOR_NANS (@0))
   { constant_boolean_node (true, type); }
   /* With NaNs, >= and <= reduce to an equality test (false for NaN).  */
   (if (cmp != EQ_EXPR)
    (eq @0 @0)))))
(for cmp (ne gt lt)
 (simplify
  (cmp @0 @0)
  (if (cmp != NE_EXPR
       || ! FLOAT_TYPE_P (TREE_TYPE (@0))
       || ! HONOR_NANS (@0))
   { constant_boolean_node (false, type); })))
(for cmp (unle unge uneq)
 (simplify
  (cmp @0 @0)
  { constant_boolean_node (true, type); }))
(for cmp (unlt ungt)
 (simplify
  (cmp @0 @0)
  (unordered @0 @0)))
(simplify
 (ltgt @0 @0)
 (if (!flag_trapping_math)
  { constant_boolean_node (false, type); }))

/* Fold ~X op ~Y as Y op X.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (bit_not@2 @0) (bit_not@3 @1))
  (if (single_use (@2) && single_use (@3))
   (cmp @1 @0))))

/* Fold ~X op C as X op' ~C, where op' is the swapped comparison.  */
(for cmp (simple_comparison)
 scmp (swapped_simple_comparison)
 (simplify
  (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
  (if (single_use (@2)
       && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
   (scmp @0 (bit_not @1)))))

(for cmp (simple_comparison)
 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2.  */
 (simplify
  (cmp (convert@2 @0) (convert? @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
   (with
    {
      tree type1 = TREE_TYPE (@1);
      /* A constant operand can be demoted to the narrowest type that
	 represents it exactly.  */
      if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
	{
	  REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
	      && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
	    type1 = float_type_node;
	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
	      && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
	    type1 = double_type_node;
	}
      tree newtype
	= (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
	   ? TREE_TYPE (@0) : type1);
    }
    (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
     (cmp (convert:newtype @0) (convert:newtype @1))))))

 (simplify
  (cmp @0 REAL_CST@1)
  /* IEEE doesn't distinguish +0 and -0 in comparisons.  */
  (switch
   /* a CMP (-0) -> a CMP 0  */
   (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
    (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
   /* x != NaN is always true, other ops are always false.  */
   (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
	&& ! HONOR_SNANS (@1))
    { constant_boolean_node (cmp == NE_EXPR, type); })
   /* Fold comparisons against infinity.  */
   (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
	&& MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
    (with
     {
       REAL_VALUE_TYPE max;
       enum tree_code code = cmp;
       bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
       if (neg)
	 code = swap_tree_comparison (code);
     }
     (switch
      /* x > +Inf is always false, if we ignore NaNs or exceptions.  */
      (if (code == GT_EXPR
	   && !(HONOR_NANS (@0) && flag_trapping_math))
       { constant_boolean_node (false, type); })
      (if (code == LE_EXPR)
       /* x <= +Inf is always true, if we don't care about NaNs.  */
       (if (! HONOR_NANS (@0))
	{ constant_boolean_node (true, type); }
	/* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
	   an "invalid" exception.  */
	(if (!flag_trapping_math)
	 (eq @0 @0))))
      /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
	 for == this introduces an exception for x a NaN.  */
      (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
	   || code == GE_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (neg)
	 (lt @0 { build_real (TREE_TYPE (@0), max); })
	 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
      /* x < +Inf is always equal to x <= DBL_MAX.  */
      (if (code == LT_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (neg)
	 (ge @0 { build_real (TREE_TYPE (@0), max); })
	 (le @0 { build_real (TREE_TYPE (@0), max); }))))
      /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
	 an exception for x a NaN so use an unordered comparison.  */
      (if (code == NE_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (! HONOR_NANS (@0))
	 (if (neg)
	  (ge @0 { build_real (TREE_TYPE (@0), max); })
	  (le @0 { build_real (TREE_TYPE (@0), max); }))
	 (if (neg)
	  (unge @0 { build_real (TREE_TYPE (@0), max); })
	  (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))

 /* If this is a comparison of a real constant with a PLUS_EXPR
    or a MINUS_EXPR of a real constant, we can convert it into a
    comparison with a revised real constant as long as no overflow
    occurs when unsafe_math_optimizations are enabled.
*/
 (if (flag_unsafe_math_optimizations)
  (for op (plus minus)
   (simplify
    (cmp (op @0 REAL_CST@1) REAL_CST@2)
    (with
     {
       /* Move the constant to the other side with the inverse operation.  */
       tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
			       TREE_TYPE (@1), @2, @1);
     }
     (if (tem && !TREE_OVERFLOW (tem))
      (cmp @0 { tem; }))))))

 /* Likewise, we can simplify a comparison of a real constant with
    a MINUS_EXPR whose first operand is also a real constant, i.e.
    (c1 - x) < c2 becomes x > c1-c2.  Reordering is allowed on
    floating-point types only if -fassociative-math is set.  */
 (if (flag_associative_math)
  (simplify
   (cmp (minus REAL_CST@0 @1) REAL_CST@2)
   (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
    (if (tem && !TREE_OVERFLOW (tem))
     (cmp { tem; } @1)))))

 /* Fold comparisons against built-in math functions.  */
 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
  (for sq (SQRT)
   (simplify
    (cmp (sq @0) REAL_CST@1)
    (switch
     (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
      (switch
       /* sqrt(x) < y is always false, if y is negative.  */
       (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
	{ constant_boolean_node (false, type); })
       /* sqrt(x) > y is always true, if y is negative and we
	  don't care about NaNs, i.e. negative values of x.  */
       (if (cmp == NE_EXPR || !HONOR_NANS (@0))
	{ constant_boolean_node (true, type); })
       /* sqrt(x) > y is the same as x >= 0, if y is negative.  */
       (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
     (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
      (switch
       /* sqrt(x) < 0 is always false.  */
       (if (cmp == LT_EXPR)
	{ constant_boolean_node (false, type); })
       /* sqrt(x) >= 0 is always true if we don't care about NaNs.  */
       (if (cmp == GE_EXPR && !HONOR_NANS (@0))
	{ constant_boolean_node (true, type); })
       /* sqrt(x) <= 0 -> x == 0.  */
       (if (cmp == LE_EXPR)
	(eq @0 @1))
       /* Otherwise sqrt(x) cmp 0 -> x cmp 0.  Here cmp can be >=, >,
	  == or !=.  In the last case:

	    (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)

	  if x is negative or NaN.  Due to -funsafe-math-optimizations,
	  the results for other x follow from natural arithmetic.  */
       (cmp @0 @1)))
     (if ((cmp == LT_EXPR
	   || cmp == LE_EXPR
	   || cmp == GT_EXPR
	   || cmp == GE_EXPR)
	  && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
	  /* Give up for -frounding-math.  */
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
      (with
       {
	 REAL_VALUE_TYPE c2;
	 enum tree_code ncmp = cmp;
	 const real_format *fmt
	   = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
	 real_arithmetic (&c2, MULT_EXPR,
			  &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
	 real_convert (&c2, fmt, &c2);
	 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
	    then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR.  */
	 if (!REAL_VALUE_ISINF (c2))
	   {
	     tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
					build_real (TREE_TYPE (@0), c2));
	     if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
	       ncmp = ERROR_MARK;
	     else if ((cmp == LT_EXPR || cmp == GE_EXPR)
		      && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
	       ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
	     else if ((cmp == LE_EXPR || cmp == GT_EXPR)
		      && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
	       ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
	     else
	       {
		 /* With rounding to even, sqrt of up to 3 different values
		    gives the same normal result, so in some cases c2 needs
		    to be adjusted.  */
		 REAL_VALUE_TYPE c2alt, tow;
		 if (cmp == LT_EXPR || cmp == GE_EXPR)
		   tow = dconst0;
		 else
		   real_inf (&tow);
		 real_nextafter (&c2alt, fmt, &c2, &tow);
		 real_convert (&c2alt, fmt, &c2alt);
		 if (REAL_VALUE_ISINF (c2alt))
		   ncmp = ERROR_MARK;
		 else
		   {
		     c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
					   build_real (TREE_TYPE (@0), c2alt));
		     if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
		       ncmp = ERROR_MARK;
		     else if (real_equal (&TREE_REAL_CST (c3),
					  &TREE_REAL_CST (@1)))
		       c2 = c2alt;
		   }
	       }
	   }
       }
       (if (cmp == GT_EXPR || cmp == GE_EXPR)
	(if (REAL_VALUE_ISINF (c2))
	 /* sqrt(x) > y is x == +Inf, when y is very large.  */
	 (if (HONOR_INFINITIES (@0))
	  (eq @0 { build_real (TREE_TYPE (@0), c2); })
	  { constant_boolean_node (false, type); })
	 /* sqrt(x) > c is the same as x > c*c.  */
	 (if (ncmp != ERROR_MARK)
	  (if (ncmp == GE_EXPR)
	   (ge @0 { build_real (TREE_TYPE (@0), c2); })
	   (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
	/* else if (cmp == LT_EXPR || cmp == LE_EXPR)  */
	(if (REAL_VALUE_ISINF (c2))
	 (switch
	  /* sqrt(x) < y is always true, when y is a very large
	     value and we don't care about NaNs or Infinities.  */
	  (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
	   { constant_boolean_node (true, type); })
	  /* sqrt(x) < y is x != +Inf when y is very large and we
	     don't care about NaNs.  */
	  (if (! HONOR_NANS (@0))
	   (ne @0 { build_real (TREE_TYPE (@0), c2); }))
	  /* sqrt(x) < y is x >= 0 when y is very large and we
	     don't care about Infinities.  */
	  (if (! HONOR_INFINITIES (@0))
	   (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
	  /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large.  */
	  (if (GENERIC)
	   (truth_andif
	    (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	    (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
	 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs.  */
	 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
	  (if (ncmp == LT_EXPR)
	   (lt @0 { build_real (TREE_TYPE (@0), c2); })
	   (le @0 { build_real (TREE_TYPE (@0), c2); }))
	  /* sqrt(x) < c is the same as x >= 0 && x < c*c.  */
	  (if (ncmp != ERROR_MARK && GENERIC)
	   (if (ncmp == LT_EXPR)
	    (truth_andif
	     (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	     (lt @0 { build_real (TREE_TYPE (@0), c2); }))
	    (truth_andif
	     (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	     (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
   /* Transform sqrt(x) cmp sqrt(y) -> x cmp y.  */
   (simplify
    (cmp (sq @0) (sq @1))
      (if (! HONOR_NANS (@0))
	(cmp @0 @1))))))

/* Optimize various special cases of (FTYPE) N CMP (FTYPE) M.  */
(for cmp  (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
     icmp (lt le eq ne ge gt unordered ordered lt   le   gt   ge   eq   ne)
 (simplify
  (cmp (float@0 @1) (float @2))
   (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
	&& !
DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))) 3530 (with 3531 { 3532 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)))); 3533 tree type1 = TREE_TYPE (@1); 3534 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED; 3535 tree type2 = TREE_TYPE (@2); 3536 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED; 3537 } 3538 (if (fmt.can_represent_integral_type_p (type1) 3539 && fmt.can_represent_integral_type_p (type2)) 3540 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR) 3541 { constant_boolean_node (cmp == ORDERED_EXPR, type); } 3542 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2) 3543 && type1_signed_p >= type2_signed_p) 3544 (icmp @1 (convert @2)) 3545 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2) 3546 && type1_signed_p <= type2_signed_p) 3547 (icmp (convert:type2 @1) @2) 3548 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2) 3549 && type1_signed_p == type2_signed_p) 3550 (icmp @1 @2)))))))))) 3551 3552/* Optimize various special cases of (FTYPE) N CMP CST. */ 3553(for cmp (lt le eq ne ge gt) 3554 icmp (le le eq ne ge ge) 3555 (simplify 3556 (cmp (float @0) REAL_CST@1) 3557 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1)) 3558 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))) 3559 (with 3560 { 3561 tree itype = TREE_TYPE (@0); 3562 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1)))); 3563 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1); 3564 /* Be careful to preserve any potential exceptions due to 3565 NaNs. qNaNs are ok in == or != context. 3566 TODO: relax under -fno-trapping-math or 3567 -fno-signaling-nans. */ 3568 bool exception_p 3569 = real_isnan (cst) && (cst->signalling 3570 || (cmp != EQ_EXPR && cmp != NE_EXPR)); 3571 } 3572 /* TODO: allow non-fitting itype and SNaNs when 3573 -fno-trapping-math. */ 3574 (if (fmt.can_represent_integral_type_p (itype) && ! 
exception_p) 3575 (with 3576 { 3577 signop isign = TYPE_SIGN (itype); 3578 REAL_VALUE_TYPE imin, imax; 3579 real_from_integer (&imin, fmt, wi::min_value (itype), isign); 3580 real_from_integer (&imax, fmt, wi::max_value (itype), isign); 3581 3582 REAL_VALUE_TYPE icst; 3583 if (cmp == GT_EXPR || cmp == GE_EXPR) 3584 real_ceil (&icst, fmt, cst); 3585 else if (cmp == LT_EXPR || cmp == LE_EXPR) 3586 real_floor (&icst, fmt, cst); 3587 else 3588 real_trunc (&icst, fmt, cst); 3589 3590 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst); 3591 3592 bool overflow_p = false; 3593 wide_int icst_val 3594 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype)); 3595 } 3596 (switch 3597 /* Optimize cases when CST is outside of ITYPE's range. */ 3598 (if (real_compare (LT_EXPR, cst, &imin)) 3599 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR, 3600 type); }) 3601 (if (real_compare (GT_EXPR, cst, &imax)) 3602 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR, 3603 type); }) 3604 /* Remove cast if CST is an integer representable by ITYPE. */ 3605 (if (cst_int_p) 3606 (cmp @0 { gcc_assert (!overflow_p); 3607 wide_int_to_tree (itype, icst_val); }) 3608 ) 3609 /* When CST is fractional, optimize 3610 (FTYPE) N == CST -> 0 3611 (FTYPE) N != CST -> 1. */ 3612 (if (cmp == EQ_EXPR || cmp == NE_EXPR) 3613 { constant_boolean_node (cmp == NE_EXPR, type); }) 3614 /* Otherwise replace with sensible integer constant. */ 3615 (with 3616 { 3617 gcc_checking_assert (!overflow_p); 3618 } 3619 (icmp @0 { wide_int_to_tree (itype, icst_val); }))))))))) 3620 3621/* Fold A /[ex] B CMP C to A CMP B * C. 
 */
(for cmp (eq ne)
 (simplify
  (cmp (exact_div @0 @1) INTEGER_CST@2)
  (if (!integer_zerop (@1))
   (if (wi::to_wide (@2) == 0)
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST)
     (with
      {
	wi::overflow_type ovf;
	wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
				 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
      }
      /* If B * C overflows, no A can make the exact division yield C.  */
      (if (ovf)
       { constant_boolean_node (cmp == NE_EXPR, type); }
       (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); })))))))
/* Likewise for ordered comparisons, requiring a positive divisor so the
   comparison direction is preserved.  */
(for cmp (lt le gt ge)
 (simplify
  (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
   (with
    {
      wi::overflow_type ovf;
      wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
			       TYPE_SIGN (TREE_TYPE (@1)), &ovf);
    }
    /* On overflow the result is fixed by the sign of C and the
       direction of the comparison.  */
    (if (ovf)
     { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
					TYPE_SIGN (TREE_TYPE (@2)))
			      != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
     (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))

/* Unordered tests if either argument is a NaN.  */
(simplify
 (bit_ior (unordered @0 @0) (unordered @1 @1))
 (if (types_match (@0, @1))
  (unordered @0 @1)))
(simplify
 (bit_and (ordered @0 @0) (ordered @1 @1))
 (if (types_match (@0, @1))
  (ordered @0 @1)))
/* x UNORD x is true exactly when x is a NaN, which x UNORD y already
   covers, so the two-operand test @2 subsumes the single-operand one.  */
(simplify
 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
 @2)
(simplify
 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
 @2)

/* Simple range test simplifications.  */
/* A < B || A >= B -> true.  */
(for test1 (lt le le le ne ge)
     test2 (ge gt ge ne eq ne)
 (simplify
  (bit_ior:c (test1 @0 @1) (test2 @0 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
   { constant_boolean_node (true, type); })))
/* A < B && A >= B -> false.
 */
(for test1 (lt lt lt le ne eq)
     test2 (ge gt eq gt eq gt)
 (simplify
  (bit_and:c (test1 @0 @1) (test2 @0 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
   { constant_boolean_node (false, type); })))

/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
   A & (2**N - 1) >  2**K - 1 -> A & (2**N - 2**K) != 0

   Note that comparisons
     A & (2**N - 1) <  2**K   -> A & (2**N - 2**K) == 0
     A & (2**N - 1) >= 2**K   -> A & (2**N - 2**K) != 0
   will be canonicalized to above so there's no need to
   consider them here.
 */

(for cmp (le gt)
     eqcmp (eq ne)
 (simplify
  (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (with
    {
      tree ty = TREE_TYPE (@0);
      unsigned prec = TYPE_PRECISION (ty);
      wide_int mask = wi::to_wide (@2, prec);
      wide_int rhs = wi::to_wide (@3, prec);
      signop sgn = TYPE_SIGN (ty);
    }
    /* x & (x + 1) == 0 holds exactly for x of the form 2**M - 1, so this
       checks mask = 2**N - 1 and rhs = 2**K - 1 with rhs <= mask.  */
    (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
	 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
     (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
	    { build_zero_cst (ty); }))))))

/* -A CMP -B -> B CMP A.  */
(for cmp (tcc_comparison)
     scmp (swapped_tcc_comparison)
 (simplify
  (cmp (negate @0) (negate @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (scmp @0 @1)))
 /* -A CMP CST -> A CMP' -CST', folding the negated constant.  */
 (simplify
  (cmp (negate @0) CONSTANT_CLASS_P@1)
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
    (if (tem && !TREE_OVERFLOW (tem))
     (scmp @0 { tem; }))))))

/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0.
 */
(for op (eq ne)
 (simplify
  (op (abs @0) zerop@1)
  (op @0 @1)))

/* From fold_sign_changed_comparison and fold_widened_comparison.
   FIXME: the lack of symmetry is disturbing.  */
(for cmp (simple_comparison)
 (simplify
  /* @00/@10 capture the operands before conversion, @0/@1 after.  */
  (cmp (convert@0 @00) (convert?@1 @10))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       /* Disable this optimization if we're casting a function pointer
	  type on targets that require function pointer canonicalization.  */
       && !(targetm.have_canonicalize_funcptr_for_compare ()
	    && ((POINTER_TYPE_P (TREE_TYPE (@00))
		 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
		|| (POINTER_TYPE_P (TREE_TYPE (@10))
		    && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
       && single_use (@0))
   (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
	&& (TREE_CODE (@10) == INTEGER_CST
	    || @1 != @10)
	&& (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
	    || cmp == NE_EXPR
	    || cmp == EQ_EXPR)
	&& !POINTER_TYPE_P (TREE_TYPE (@00)))
    /* ???  The special-casing of INTEGER_CST conversion was in the original
       code and here to avoid a spurious overflow flag on the resulting
       constant which fold_convert produces.  */
    (if (TREE_CODE (@1) == INTEGER_CST)
     (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
				TREE_OVERFLOW (@1)); })
     (cmp @00 (convert @1)))

    (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
     /* If possible, express the comparison in the shorter mode.  */
     (if ((cmp == EQ_EXPR || cmp == NE_EXPR
	   || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
	   || (!TYPE_UNSIGNED (TREE_TYPE (@0))
	       && TYPE_UNSIGNED (TREE_TYPE (@00))))
	  && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
	      || ((TYPE_PRECISION (TREE_TYPE (@00))
		   >= TYPE_PRECISION (TREE_TYPE (@10)))
		  && (TYPE_UNSIGNED (TREE_TYPE (@00))
		      == TYPE_UNSIGNED (TREE_TYPE (@10))))
	      || (TREE_CODE (@10) == INTEGER_CST
		  && INTEGRAL_TYPE_P (TREE_TYPE (@00))
		  && int_fits_type_p (@10, TREE_TYPE (@00)))))
      (cmp @00 (convert @10))
      (if (TREE_CODE (@10) == INTEGER_CST
	   && INTEGRAL_TYPE_P (TREE_TYPE (@00))
	   && !int_fits_type_p (@10, TREE_TYPE (@00)))
       (with
	{
	  /* The constant is outside the narrow type: decide the result
	     from whether it lies above its max or below its min.  */
	  tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
	  tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
	  bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
	  bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
	}
	(if (above || below)
	 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
	  { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
	  (if (cmp == LT_EXPR || cmp == LE_EXPR)
	   { constant_boolean_node (above ? true : false, type); }
	   (if (cmp == GT_EXPR || cmp == GE_EXPR)
	    { constant_boolean_node (above ? false : true, type); })))))))))))

(for cmp (eq ne)
 /* A local variable can never be pointed to by
    the default SSA name of an incoming parameter.
    SSA names are canonicalized to 2nd place.  */
 (simplify
  (cmp addr@0 SSA_NAME@1)
  (if (SSA_NAME_IS_DEFAULT_DEF (@1)
       && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
   (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
    (if (TREE_CODE (base) == VAR_DECL
	 && auto_var_in_fn_p (base, current_function_decl))
     (if (cmp == NE_EXPR)
      { constant_boolean_node (true, type); }
      { constant_boolean_node (false, type); }))))))

/* Equality compare simplifications from fold_binary  */
(for cmp (eq ne)

 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
    Similarly for NE_EXPR.  */
 (simplify
  (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); }))

 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y.  */
 (simplify
  (cmp (bit_xor @0 @1) integer_zerop)
  (cmp @0 @1))

 /* (X ^ Y) == Y becomes X == 0.
    Likewise (X ^ Y) == X becomes Y == 0.  */
 (simplify
  (cmp:c (bit_xor:c @0 @1) @0)
  (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))

 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2).  */
 (simplify
  (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
   (cmp @0 (bit_xor @1 (convert @2)))))

 /* The address of a decl known non-NULL compares unequal to zero.  */
 (simplify
  (cmp (convert? addr@0) integer_zerop)
  (if (tree_single_nonzero_warnv_p (@0, NULL))
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* If we have (A & C) == C where C is a power of 2, convert this into
   (A & C) != 0.  Similarly for NE_EXPR.  */
(for cmp (eq ne)
     icmp (ne eq)
 (simplify
  (cmp (bit_and@2 @0 integer_pow2p@1) @1)
  (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))

/* If we have (A & C) != 0 ?
D : 0 where C and D are powers of 2,
   convert this into a shift followed by ANDing with D.  */
(simplify
 (cond
  (ne (bit_and @0 integer_pow2p@1) integer_zerop)
  INTEGER_CST@2 integer_zerop)
 (if (integer_pow2p (@2))
  (with {
     /* Shift distance between the tested bit and the produced bit.  */
     int shift = (wi::exact_log2 (wi::to_wide (@2))
		  - wi::exact_log2 (wi::to_wide (@1)));
   }
   (if (shift > 0)
    (bit_and
     (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
    (bit_and
     (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
     @2)))))

/* If we have (A & C) != 0 where C is the sign bit of A, convert
   this into A < 0.  Similarly for (A & C) == 0 into A >= 0.  */
(for cmp (eq ne)
     ncmp (ge lt)
 (simplify
  (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && type_has_mode_precision_p (TREE_TYPE (@0))
       && element_precision (@2) >= element_precision (@0)
       && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
    (ncmp (convert:stype @0) { build_zero_cst (stype); })))))

/* If we have A < 0 ? C : 0 where C is a power of 2, convert
   this into a right shift or sign extension followed by ANDing with C.  */
(simplify
 (cond
  (lt @0 integer_zerop)
  INTEGER_CST@1 integer_zerop)
 (if (integer_pow2p (@1)
      && !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (with {
    int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
   }
   (if (shift >= 0)
    (bit_and
     (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
     @1)
    /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
       sign extension followed by AND with C will achieve the effect.  */
    (bit_and (convert @0) @1)))))

/* When the addresses are not directly of decls compare base and offset.
   This implements some remaining parts of fold_comparison address
   comparisons but still no complete part of it.  Still it is good
   enough to make fold_stmt not regress when not dispatching to fold_binary.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (convert1?@2 addr@0) (convert2? addr@1))
  (with
   {
     poly_int64 off0, off1;
     tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
     tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
     /* Look through an outermost MEM_REF, accumulating its offset.  */
     if (base0 && TREE_CODE (base0) == MEM_REF)
       {
	 off0 += mem_ref_offset (base0).force_shwi ();
	 base0 = TREE_OPERAND (base0, 0);
       }
     if (base1 && TREE_CODE (base1) == MEM_REF)
       {
	 off1 += mem_ref_offset (base1).force_shwi ();
	 base1 = TREE_OPERAND (base1, 0);
       }
   }
   (if (base0 && base1)
    (with
     {
       /* Tri-state: 1 = bases known equal, 0 = known unequal, 2 = unknown.  */
       int equal = 2;
       /* Punt in GENERIC on variables with value expressions;
	  the value expressions might point to fields/elements
	  of other vars etc.  */
       if (GENERIC
	   && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
	       || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
	 ;
       else if (decl_in_symtab_p (base0)
		&& decl_in_symtab_p (base1))
	 equal = symtab_node::get_create (base0)
		   ->equal_address_to (symtab_node::get_create (base1));
       else if ((DECL_P (base0)
		 || TREE_CODE (base0) == SSA_NAME
		 || TREE_CODE (base0) == STRING_CST)
		&& (DECL_P (base1)
		    || TREE_CODE (base1) == SSA_NAME
		    || TREE_CODE (base1) == STRING_CST))
	 equal = (base0 == base1);
       if (equal == 0)
	 {
	   if (!DECL_P (base0) || !DECL_P (base1))
	     equal = 2;
	   else if (cmp != EQ_EXPR && cmp != NE_EXPR)
	     equal = 2;
	   /* If this is a pointer comparison, ignore for now even
	      valid equalities where one pointer is the offset zero
	      of one object and the other to one past end of another one.  */
	   else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2)))
	     ;
	   /* Assume that automatic variables can't be adjacent to global
	      variables.  */
	   else if (is_global_var (base0) != is_global_var (base1))
	     ;
	   else
	     {
	       tree sz0 = DECL_SIZE_UNIT (base0);
	       tree sz1 = DECL_SIZE_UNIT (base1);
	       /* If sizes are unknown, e.g. VLA or not representable,
		  punt.  */
	       if (!tree_fits_poly_int64_p (sz0)
		   || !tree_fits_poly_int64_p (sz1))
		 equal = 2;
	       else
		 {
		   poly_int64 size0 = tree_to_poly_int64 (sz0);
		   poly_int64 size1 = tree_to_poly_int64 (sz1);
		   /* If one offset is pointing (or could be) to the beginning
		      of one object and the other is pointing to one past the
		      last byte of the other object, punt.  */
		   if (maybe_eq (off0, 0) && maybe_eq (off1, size1))
		     equal = 2;
		   else if (maybe_eq (off1, 0) && maybe_eq (off0, size0))
		     equal = 2;
		   /* If both offsets are the same, there are some cases
		      we know that are ok.  Either if we know they aren't
		      zero, or if we know both sizes are non-zero.  */
		   if (equal == 2
		       && known_eq (off0, off1)
		       && (known_ne (off0, 0)
			   || (known_ne (size0, 0) && known_ne (size1, 0))))
		     equal = 0;
		 }
	     }
	 }
     }
     (if (equal == 1
	  && (cmp == EQ_EXPR || cmp == NE_EXPR
	      /* If the offsets are equal we can ignore overflow.  */
	      || known_eq (off0, off1)
	      || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
	      /* Or if we compare using pointers to decls or strings.  */
	      || (POINTER_TYPE_P (TREE_TYPE (@2))
		  && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
      (switch
       (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
	{ constant_boolean_node (known_eq (off0, off1), type); })
       (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
	{ constant_boolean_node (known_ne (off0, off1), type); })
       (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
	{ constant_boolean_node (known_lt (off0, off1), type); })
       (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
	{ constant_boolean_node (known_le (off0, off1), type); })
       (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
	{ constant_boolean_node (known_ge (off0, off1), type); })
       (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
	{ constant_boolean_node (known_gt (off0, off1), type); }))
      (if (equal == 0)
       (switch
	(if (cmp == EQ_EXPR)
	 { constant_boolean_node (false, type); })
	(if (cmp == NE_EXPR)
	 { constant_boolean_node (true, type); }))))))))

/* Simplify pointer equality compares using PTA.  */
(for neeq (ne eq)
 (simplify
  (neeq @0 @1)
  (if (POINTER_TYPE_P (TREE_TYPE (@0))
       && ptrs_compare_unequal (@0, @1))
   { constant_boolean_node (neeq != EQ_EXPR, type); })))

/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
   and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
   Disable the transform if either operand is pointer to function.
   This broke pr22051-2.c for arm where function pointer
   canonicalization is not wanted.
 */

(for cmp (ne eq)
 (simplify
  (cmp (convert @0) INTEGER_CST@1)
  (if (((POINTER_TYPE_P (TREE_TYPE (@0))
	 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
	 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
	|| (INTEGRAL_TYPE_P (TREE_TYPE (@0))
	    && POINTER_TYPE_P (TREE_TYPE (@1))
	    && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp @0 (convert @1)))))

/* Non-equality compare simplifications from fold_binary  */
(for cmp (lt gt le ge)
 /* Comparisons with the highest or lowest possible integer of
    the specified precision will have known values.  */
 (simplify
  (cmp (convert?@2 @0) uniform_integer_cst_p@1)
  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
	|| POINTER_TYPE_P (TREE_TYPE (@1))
	|| VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
       && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
   (with
    {
      tree cst = uniform_integer_cst_p (@1);
      tree arg1_type = TREE_TYPE (cst);
      unsigned int prec = TYPE_PRECISION (arg1_type);
      wide_int max = wi::max_value (arg1_type);
      wide_int signed_max = wi::max_value (prec, SIGNED);
      wide_int min = wi::min_value (arg1_type);
    }
    (switch
     (if (wi::to_wide (cst) == max)
      (switch
       (if (cmp == GT_EXPR)
	{ constant_boolean_node (false, type); })
       (if (cmp == GE_EXPR)
	(eq @2 @1))
       (if (cmp == LE_EXPR)
	{ constant_boolean_node (true, type); })
       (if (cmp == LT_EXPR)
	(ne @2 @1))))
     (if (wi::to_wide (cst) == min)
      (switch
       (if (cmp == LT_EXPR)
	{ constant_boolean_node (false, type); })
       (if (cmp == LE_EXPR)
	(eq @2 @1))
       (if (cmp == GE_EXPR)
	{ constant_boolean_node (true, type); })
       (if (cmp == GT_EXPR)
	(ne @2 @1))))
     /* x > max-1 and x <= max-1 become equality tests against max.  */
     (if (wi::to_wide (cst) == max - 1)
      (switch
       (if (cmp == GT_EXPR)
	(eq @2 { build_uniform_cst (TREE_TYPE (@1),
				    wide_int_to_tree (TREE_TYPE (cst),
						      wi::to_wide (cst)
						      + 1)); }))
       (if (cmp == LE_EXPR)
	(ne @2 { build_uniform_cst (TREE_TYPE (@1),
				    wide_int_to_tree (TREE_TYPE (cst),
						      wi::to_wide (cst)
						      + 1)); }))))
     (if (wi::to_wide (cst) == min + 1)
      (switch
       (if (cmp == GE_EXPR)
	(ne @2 { build_uniform_cst (TREE_TYPE (@1),
				    wide_int_to_tree (TREE_TYPE (cst),
						      wi::to_wide (cst)
						      - 1)); }))
       (if (cmp == LT_EXPR)
	(eq @2 { build_uniform_cst (TREE_TYPE (@1),
				    wide_int_to_tree (TREE_TYPE (cst),
						      wi::to_wide (cst)
						      - 1)); }))))
     (if (wi::to_wide (cst) == signed_max
	  && TYPE_UNSIGNED (arg1_type)
	  /* We will flip the signedness of the comparison operator
	     associated with the mode of @1, so the sign bit is
	     specified by this mode.  Check that @1 is the signed
	     max associated with this sign bit.  */
	  && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
	  /* signed_type does not work on pointer types.  */
	  && INTEGRAL_TYPE_P (arg1_type))
      /* The following case also applies to X < signed_max+1
	 and X >= signed_max+1 because of previous transformations.  */
      (if (cmp == LE_EXPR || cmp == GT_EXPR)
       (with { tree st = signed_type_for (TREE_TYPE (@1)); }
	(switch
	 (if (cst == @1 && cmp == LE_EXPR)
	  (ge (convert:st @0) { build_zero_cst (st); }))
	 (if (cst == @1 && cmp == GT_EXPR)
	  (lt (convert:st @0) { build_zero_cst (st); }))
	 (if (cmp == LE_EXPR)
	  (ge (view_convert:st @0) { build_zero_cst (st); }))
	 (if (cmp == GT_EXPR)
	  (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))

(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
 /* If the second operand is NaN, the result is constant.  */
 (simplify
  (cmp @0 REAL_CST@1)
  (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
       && (cmp != LTGT_EXPR || ! flag_trapping_math))
   { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
			    ? false : true, type); })))

/* bool_var != 0 becomes bool_var.  */
(simplify
 (ne @0 integer_zerop)
 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
      && types_match (type, TREE_TYPE (@0)))
  (non_lvalue @0)))
/* bool_var == 1 becomes bool_var.  */
(simplify
 (eq @0 integer_onep)
 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
      && types_match (type, TREE_TYPE (@0)))
  (non_lvalue @0)))
/* Do not handle
   bool_var == 0 becomes !bool_var or
   bool_var != 1 becomes !bool_var
   here because that only is good in assignment context as long
   as we require a tcc_comparison in GIMPLE_CONDs where we'd
   replace if (x == 0) with tem = ~x; if (tem != 0) which is
   clearly less optimal and which we'll transform again in forwprop.  */

/* When one argument is a constant, overflow detection can be simplified.
   Currently restricted to single use so as not to interfere too much with
   ADD_OVERFLOW detection in tree-ssa-math-opts.c.
   A + CST CMP A  ->  A CMP' CST' */
(for cmp (lt le ge gt)
     out (gt gt le le)
 (simplify
  (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && wi::to_wide (@1) != 0
       && single_use (@2))
   /* CST' is the unsigned maximum minus CST, so the comparison tests
      whether the addition wrapped.  */
   (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
    (out @0 { wide_int_to_tree (TREE_TYPE (@0),
				wi::max_value (prec, UNSIGNED)
				- wi::to_wide (@1)); })))))

/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
   However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
   expects the long form, so we restrict the transformation for now.
 */
/* A - B > A  ->  B > A (and A - B <= A  ->  B <= A) for wrapping
   unsigned types, since the subtraction wraps exactly when B > A.  */
(for cmp (gt le)
 (simplify
  (cmp:c (minus@2 @0 @1) @0)
  (if (single_use (@2)
       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
   (cmp @1 @0))))

/* Testing for overflow is unnecessary if we already know the result.  */
/* A - B > A  */
(for cmp (gt le)
     out (ne eq)
 (simplify
  (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   /* The imagpart of the IFN_*_OVERFLOW result is the overflow flag.  */
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
/* A + B < A  */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  /* :c on IFN_ADD_OVERFLOW lets either addend match @0.  */
  (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))

/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
   Simplify it to __builtin_mul_overflow (A, B, <unused>).  */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  (cmp:c (trunc_div:s integer_all_onesp @1) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
   (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
    (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))

/* Simplification of math builtins.  These rules must all be optimizations
   as well as IL simplifications.  If there is a possibility that the new
   form could be a pessimization, the rule should go in the canonicalization
   section that follows this one.

   Rules can generally go in this section if they satisfy one of
   the following:

   - the rule describes an identity

   - the rule replaces calls with something as simple as addition or
     multiplication

   - the rule contains unary calls only and simplifies the surrounding
     arithmetic.
(The idea here is to exclude non-unary calls in which
     one operand is constant and in which the call is known to be cheap
     when the operand has that value.)  */

(if (flag_unsafe_math_optimizations)
 /* Simplify sqrt(x) * sqrt(x) -> x.  */
 (simplify
  (mult (SQRT_ALL@1 @0) @1)
  (if (!HONOR_SNANS (type))
   @0))

 (for op (plus minus)
  /* Simplify (A / C) +- (B / C) -> (A +- B) / C.  */
  (simplify
   (op (rdiv @0 @1)
       (rdiv @2 @1))
   (rdiv (op @0 @2) @1)))

 (for cmp (lt le gt ge)
      neg_cmp (gt ge lt le)
  /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0.  */
  (simplify
   (cmp (mult @0 REAL_CST@1) REAL_CST@2)
   (with
    { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
    (if (tem
	 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
	      || (real_zerop (tem) && !real_zerop (@1))))
     /* A negative multiplier flips the comparison direction.  */
     (switch
      (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
       (cmp @0 { tem; }))
      (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
       (neg_cmp @0 { tem; })))))))

 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y).  */
 (for root (SQRT CBRT)
  (simplify
   (mult (root:s @0) (root:s @1))
   (root (mult @0 @1))))

 /* Simplify expN(x) * expN(y) -> expN(x+y).  */
 (for exps (EXP EXP2 EXP10 POW10)
  (simplify
   (mult (exps:s @0) (exps:s @1))
   (exps (plus @0 @1))))

 /* Simplify a/root(b/c) into a*root(c/b).  */
 (for root (SQRT CBRT)
  (simplify
   (rdiv @0 (root:s (rdiv:s @1 @2)))
   (mult @0 (root (rdiv @2 @1)))))

 /* Simplify x/expN(y) into x*expN(-y).  */
 (for exps (EXP EXP2 EXP10 POW10)
  (simplify
   (rdiv @0 (exps:s @1))
   (mult @0 (exps (negate @1)))))

 /* LOG10 appears twice so that it pairs with both EXP10 and POW10.  */
 (for logs (LOG LOG2 LOG10 LOG10)
      exps (EXP EXP2 EXP10 POW10)
  /* logN(expN(x)) -> x.  */
  (simplify
   (logs (exps @0))
   @0)
  /* expN(logN(x)) -> x.  */
  (simplify
   (exps (logs @0))
   @0))

 /* Optimize logN(func()) for various exponential functions.  We
    want to determine the value "x" and the power "exponent" in
    order to transform logN(x**exponent) into exponent*logN(x).  */
 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
      exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
  (simplify
   (logs (exps @0))
   (if (SCALAR_FLOAT_TYPE_P (type))
    (with {
      tree x;
      switch (exps)
	{
	CASE_CFN_EXP:
	  /* Prepare to do logN(exp(exponent)) -> exponent*logN(e).  */
	  x = build_real_truncate (type, dconst_e ());
	  break;
	CASE_CFN_EXP2:
	  /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2).  */
	  x = build_real (type, dconst2);
	  break;
	CASE_CFN_EXP10:
	CASE_CFN_POW10:
	  /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10).  */
	  {
	    REAL_VALUE_TYPE dconst10;
	    real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
	    x = build_real (type, dconst10);
	  }
	  break;
	default:
	  gcc_unreachable ();
	}
      }
     (mult (logs { x; }) @0)))))

 (for logs (LOG LOG
	    LOG2 LOG2
	    LOG10 LOG10)
      exps (SQRT CBRT)
  (simplify
   (logs (exps @0))
   (if (SCALAR_FLOAT_TYPE_P (type))
    (with {
      tree x;
      switch (exps)
	{
	CASE_CFN_SQRT:
	  /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x).  */
	  x = build_real (type, dconsthalf);
	  break;
	CASE_CFN_CBRT:
	  /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x).  */
	  x = build_real_truncate (type, dconst_third ());
	  break;
	default:
	  gcc_unreachable ();
	}
      }
     (mult { x; } (logs @0))))))

 /* logN(pow(x,exponent)) -> exponent*logN(x).  */
 (for logs (LOG LOG2 LOG10)
      pows (POW)
  (simplify
   (logs (pows @0 @1))
   (mult @1 (logs @0))))

 /* pow(C,x) -> exp(log(C)*x) if C > 0,
    or if C is a positive power of 2,
    pow(C,x) -> exp2(log2(C)*x).  */
#if GIMPLE
 (for pows (POW)
      exps (EXP)
      logs (LOG)
      exp2s (EXP2)
      log2s (LOG2)
  (simplify
   (pows REAL_CST@0 @1)
   (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
	&& real_isfinite (TREE_REAL_CST_PTR (@0))
	/* As libmvec doesn't have a vectorized exp2, defer optimizing
	   the use_exp2 case until after vectorization.  It seems actually
	   beneficial for all constants to postpone this until later,
	   because exp(log(C)*x), while faster, will have worse precision
	   and if x folds into a constant too, that is unnecessary
	   pessimization.  */
	&& canonicalize_math_after_vectorization_p ())
    (with {
       const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
       bool use_exp2 = false;
       /* C is a power of two exactly when forcing its exponent to 1
	  yields the value 1.0.  */
       if (targetm.libc_has_function (function_c99_misc)
	   && value->cl == rvc_normal)
	 {
	   REAL_VALUE_TYPE frac_rvt = *value;
	   SET_REAL_EXP (&frac_rvt, 1);
	   if (real_equal (&frac_rvt, &dconst1))
	     use_exp2 = true;
	 }
     }
     (if (!use_exp2)
      (if (optimize_pow_to_exp (@0, @1))
       (exps (mult (logs @0) @1)))
      (exp2s (mult (log2s @0) @1)))))))
#endif

 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0.  */
 (for pows (POW)
      exps (EXP EXP2 EXP10 POW10)
      logs (LOG LOG2 LOG10 LOG10)
  (simplify
   (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
   (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
	&& real_isfinite (TREE_REAL_CST_PTR (@0)))
    (exps (plus (mult (logs @0) @1) @2)))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
      exps (EXP EXP2 EXP10 POW10)
  /* sqrt(expN(x)) -> expN(x*0.5).  */
  (simplify
   (sqrts (exps @0))
   (exps (mult @0 { build_real (type, dconsthalf); })))
  /* cbrt(expN(x)) -> expN(x/3).  */
  (simplify
   (cbrts (exps @0))
   (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
  /* pow(expN(x), y) -> expN(x*y).  */
  (simplify
   (pows (exps @0) @1)
   (exps (mult @0 @1))))

 /* tan(atan(x)) -> x.  */
 (for tans (TAN)
      atans (ATAN)
  (simplify
   (tans (atans @0))
   @0)))

 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
 (for sins (SIN)
      atans (ATAN)
      sqrts (SQRT)
      copysigns (COPYSIGN)
  (simplify
   (sins (atans:s @0))
   (with
     {
      REAL_VALUE_TYPE r_cst;
      build_sinatan_real (&r_cst, type);
      tree t_cst = build_real (type, r_cst);
      tree t_one = build_one_cst (type);
     }
    /* Above the cutoff constant the result saturates to +-1.  */
    (if (SCALAR_FLOAT_TYPE_P (type))
     (cond (lt (abs @0) { t_cst; })
      (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
      (copysigns { t_one; } @0))))))

/* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
 (for coss (COS)
      atans (ATAN)
      sqrts (SQRT)
      copysigns (COPYSIGN)
  (simplify
   (coss (atans:s @0))
   (with
     {
      REAL_VALUE_TYPE r_cst;
      build_sinatan_real (&r_cst, type);
      tree t_cst = build_real (type, r_cst);
      tree t_one = build_one_cst (type);
      tree t_zero = build_zero_cst (type);
     }
    (if (SCALAR_FLOAT_TYPE_P (type))
     (cond (lt (abs @0) { t_cst; })
      (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
      (copysigns { t_zero; } @0))))))

 (if (!flag_errno_math)
  /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
  (for sinhs (SINH)
       atanhs (ATANH)
       sqrts (SQRT)
   (simplify
    (sinhs (atanhs:s @0))
    (with { tree t_one = build_one_cst (type); }
     (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))

  /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
  (for coshs (COSH)
       atanhs (ATANH)
       sqrts (SQRT)
   (simplify
    (coshs (atanhs:s @0))
    (with { tree t_one = build_one_cst (type); }
     (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))

/* cabs(x+0i) or cabs(0+xi) -> abs(x).
   */
(simplify
 (CABS (complex:C @0 real_zerop@1))
 (abs @0))

/* trunc(trunc(x)) -> trunc(x), etc.  */
(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
 (simplify
  (fns (fns @0))
  (fns @0)))
/* f(x) -> x if x is integer valued and f does nothing for such values.  */
(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
 (simplify
  (fns integer_valued_real_p@0)
  @0))

/* hypot(x,0) and hypot(0,x) -> abs(x).  */
(simplify
 (HYPOT:c @0 real_zerop@1)
 (abs @0))

/* pow(1,x) -> 1.  */
(simplify
 (POW real_onep@0 @1)
 @0)

(simplify
 /* copysign(x,x) -> x.  */
 (COPYSIGN_ALL @0 @0)
 @0)

(simplify
 /* copysign(x,y) -> fabs(x) if y is nonnegative.  */
 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
 (abs @0))

(for scale (LDEXP SCALBN SCALBLN)
 /* ldexp(0, x) -> 0.  */
 (simplify
  (scale real_zerop@0 @1)
  @0)
 /* ldexp(x, 0) -> x.  */
 (simplify
  (scale @0 integer_zerop@1)
  @0)
 /* ldexp(x, y) -> x if x is +-Inf or NaN.  */
 (simplify
  (scale REAL_CST@0 @1)
  (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
   @0)))

/* Canonicalization of sequences of math builtins.  These rules represent
   IL simplifications but are not necessarily optimizations.

   The sincos pass is responsible for picking "optimal" implementations
   of math builtins, which may be more complicated and can sometimes go
   the other way, e.g. converting pow into a sequence of sqrts.
   We only want to do these canonicalizations before the pass has run.  */

(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
 /* Simplify tan(x) * cos(x) -> sin(x).  */
 (simplify
  (mult:c (TAN:s @0) (COS:s @0))
  (SIN @0))

 /* Simplify x * pow(x,c) -> pow(x,c+1).  */
 (simplify
  (mult:c @0 (POW:s @0 REAL_CST@1))
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (plus @1 { build_one_cst (type); }))))

 /* Simplify sin(x) / cos(x) -> tan(x).  */
 (simplify
  (rdiv (SIN:s @0) (COS:s @0))
  (TAN @0))

 /* Simplify cos(x) / sin(x) -> 1 / tan(x).  */
 (simplify
  (rdiv (COS:s @0) (SIN:s @0))
  (rdiv { build_one_cst (type); } (TAN @0)))

 /* Simplify sin(x) / tan(x) -> cos(x).  */
 (simplify
  (rdiv (SIN:s @0) (TAN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (COS @0)))

 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x).  */
 (simplify
  (rdiv (TAN:s @0) (SIN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (rdiv { build_one_cst (type); } (COS @0))))

 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z).  */
 (simplify
  (mult (POW:s @0 @1) (POW:s @0 @2))
  (POW @0 (plus @1 @2)))

 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y).  */
 (simplify
  (mult (POW:s @0 @1) (POW:s @2 @1))
  (POW (mult @0 @2) @1))

 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y).  */
 (simplify
  (mult (POWI:s @0 @1) (POWI:s @2 @1))
  (POWI (mult @0 @2) @1))

 /* Simplify pow(x,c) / x -> pow(x,c-1).  */
 (simplify
  (rdiv (POW:s @0 REAL_CST@1) @0)
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (minus @1 { build_one_cst (type); }))))

 /* Simplify x / pow (y,z) -> x * pow(y,-z).  */
 (simplify
  (rdiv @0 (POW:s @1 @2))
  (mult @0 (POW @1 (negate @2))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
  /* sqrt(sqrt(x)) -> pow(x,1/4).  */
  (simplify
   (sqrts (sqrts @0))
   (pows @0 { build_real (type, dconst_quarter ()); }))
  /* sqrt(cbrt(x)) -> pow(x,1/6).  */
  (simplify
   (sqrts (cbrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(sqrt(x)) -> pow(x,1/6).
     */
  (simplify
   (cbrts (sqrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative.  */
  (simplify
   (cbrts (cbrts tree_expr_nonnegative_p@0))
   (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
  /* sqrt(pow(x,y)) -> pow(|x|,y*0.5).  */
  (simplify
   (sqrts (pows @0 @1))
   (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
  /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative.  */
  (simplify
   (cbrts (pows tree_expr_nonnegative_p@0 @1))
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(sqrt(x),y) -> pow(x,y*0.5).  */
  (simplify
   (pows (sqrts @0) @1)
   (pows @0 (mult @1 { build_real (type, dconsthalf); })))
  /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative.  */
  (simplify
   (pows (cbrts tree_expr_nonnegative_p@0) @1)
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative.  */
  (simplify
   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
   (pows @0 (mult @1 @2))))

 /* cabs(x+xi) -> fabs(x)*sqrt(2).  */
 (simplify
  (CABS (complex @0 @0))
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* hypot(x,x) -> fabs(x)*sqrt(2).  */
 (simplify
  (HYPOT @0 @0)
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* cexp(x+yi) -> exp(x)*cexpi(y).  */
 (for cexps (CEXP)
      exps (EXP)
      cexpis (CEXPI)
  (simplify
   (cexps compositional_complex@0)
   (if (targetm.libc_has_function (function_c99_math_complex))
    (complex
     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
     (mult @1 (imagpart @2)))))))

(if (canonicalize_math_p ())
 /* floor(x) -> trunc(x) if x is nonnegative.  */
 (for floors (FLOOR_ALL)
      truncs (TRUNC_ALL)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (truncs @0))))

/* Matches any operand whose type's main variant is double.  */
(match double_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
(for froms (BUILT_IN_TRUNCL
            BUILT_IN_FLOORL
            BUILT_IN_CEILL
            BUILT_IN_ROUNDL
            BUILT_IN_NEARBYINTL
            BUILT_IN_RINTL)
     tos (BUILT_IN_TRUNC
          BUILT_IN_FLOOR
          BUILT_IN_CEIL
          BUILT_IN_ROUND
          BUILT_IN_NEARBYINT
          BUILT_IN_RINT)
 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (convert (tos @0)))))

/* Matches any operand whose type's main variant is float.  */
(match float_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
            BUILT_IN_FLOORL BUILT_IN_FLOOR
            BUILT_IN_CEILL BUILT_IN_CEIL
            BUILT_IN_ROUNDL BUILT_IN_ROUND
            BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
            BUILT_IN_RINTL BUILT_IN_RINT)
     tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
          BUILT_IN_FLOORF BUILT_IN_FLOORF
          BUILT_IN_CEILF BUILT_IN_CEILF
          BUILT_IN_ROUNDF BUILT_IN_ROUNDF
          BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
          BUILT_IN_RINTF BUILT_IN_RINTF)
 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ()
      && targetm.libc_has_function (function_c99_misc))
  (simplify
   (froms (convert float_value_p@0))
   (convert (tos @0)))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL)
     tos (XFLOOR XCEIL XROUND XRINT)
 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double.
    */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL
            XFLOOR XCEIL XROUND XRINT)
     tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x) if x is nonnegative.  */
 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued.  */
 (for fns (IFLOOR LFLOOR LLFLOOR
           ICEIL LCEIL LLCEIL
           IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0)))
 (if (!flag_errno_math)
  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued.  */
  (for rints (IRINT LRINT LLRINT)
   (simplify
    (rints integer_valued_real_p@0)
    (fix_trunc @0)))))

(if (canonicalize_math_p ())
 (for ifn (IFLOOR ICEIL IROUND IRINT)
      lfn (LFLOOR LCEIL LROUND LRINT)
      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
     sizeof (int) == sizeof (long).  */
  (if (TYPE_PRECISION (integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
  /* Canonicalize llround (x) to lround (x) on LP64 targets where
     sizeof (long long) == sizeof (long).  */
  (if (TYPE_PRECISION (long_long_integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))

/* cproj(x) -> x if we're ignoring infinities.  */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))

/* If the real part is inf and the imag part is known to be
   nonnegative, return (inf + 0i).  */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
  { build_complex_inf (type, false); }))

/* If the imag part is inf, return (inf+I*copysign(0,imag)).  */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))

/* Fold pow with a small constant exponent into simpler operations.  */
(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    REAL_VALUE_TYPE tmp;
   }
   (switch
    /* pow(x,0) -> 1.  */
    (if (real_equal (value, &dconst0))
     { build_real (type, dconst1); })
    /* pow(x,1) -> x.  */
    (if (real_equal (value, &dconst1))
     @0)
    /* pow(x,-1) -> 1/x.  */
    (if (real_equal (value, &dconstm1))
     (rdiv { build_real (type, dconst1); } @0))
    /* pow(x,0.5) -> sqrt(x).  */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && real_equal (value, &dconsthalf))
     (sqrts @0))
    /* pow(x,1/3) -> cbrt(x).  */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
             real_equal (value, &tmp)))
     (cbrts @0))))))

/* powi(1,x) -> 1.  */
(simplify
 (POWI real_onep@0 @1)
 @0)

(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
  /* powi(x,0) -> 1.  */
  (if (wi::to_wide (@1) == 0)
   { build_real (type, dconst1); })
  /* powi(x,1) -> x.  */
  (if (wi::to_wide (@1) == 1)
   @0)
  /* powi(x,-1) -> 1/x.  */
  (if (wi::to_wide (@1) == -1)
   (rdiv { build_real (type, dconst1); } @0))))

/* Narrowing of arithmetic and logical operations.

   These are conceptually similar to the transformations performed for
   the C/C++ front-ends by shorten_binary_op and shorten_compare.  Long
   term we want to move all that code out of the front-ends into here.  */

/* If we have a narrowing conversion of an arithmetic operation where
   both operands are widening conversions from the same type as the outer
   narrowing conversion.  Then convert the innermost operands to a suitable
   unsigned type (to avoid introducing undefined behavior), perform the
   operation and convert the result to the desired type.  */
(for op (plus minus)
  (simplify
    (convert (op:s (convert@2 @0) (convert?@3 @1)))
    (if (INTEGRAL_TYPE_P (type)
         /* We check for type compatibility between @0 and @1 below,
            so there's no need to check that @1/@3 are integral types.  */
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && INTEGRAL_TYPE_P (TREE_TYPE (@2))
         /* The precision of the type of each operand must match the
            precision of the mode of each operand, similarly for the
            result.  */
         && type_has_mode_precision_p (TREE_TYPE (@0))
         && type_has_mode_precision_p (TREE_TYPE (@1))
         && type_has_mode_precision_p (type)
         /* The inner conversion must be a widening conversion.  */
         && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
         && types_match (@0, type)
         && (types_match (@0, @1)
             /* Or the second operand is const integer or converted const
                integer from valueize.  */
             || TREE_CODE (@1) == INTEGER_CST))
      (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
        (op @0 (convert @1))
        (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
         (convert (op (convert:utype @0)
                      (convert:utype @1))))))))

/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands.  Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behavior for the
   arithmetic operation.  */
(for op (minus plus)
 (simplify
  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
          so there's no need to check that @1/@3 are integral types.  */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
          precision of the mode of each operand, similarly for the
          result.  */
       && type_has_mode_precision_p (TREE_TYPE (@0))
       && type_has_mode_precision_p (TREE_TYPE (@1))
       && type_has_mode_precision_p (type)
       /* The inner conversion must be a widening conversion.  */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, @1)
       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
           <= TYPE_PRECISION (TREE_TYPE (@0)))
       && (wi::to_wide (@4)
           & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
                       true, TYPE_PRECISION (type))) == 0)
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (with { tree ntype = TREE_TYPE (@0); }
     (convert (bit_and (op @0 @1) (convert:ntype @4))))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
                       (convert:utype @4))))))))

/* Transform (@0 < @1 and @0 < @2) to use min,
   (@0 > @1 and @0 > @2) to use max */
(for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
     op    (lt      le      gt      ge      lt      le      gt      ge     )
     ext   (min     min     max     max     max     max     min     min    )
 (simplify
  (logic (op:cs @0 @1) (op:cs @0 @2))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TREE_CODE (@0) != INTEGER_CST)
   (op @0 (ext @1 @2)))))

(simplify
 /* signbit(x) -> 0 if x is nonnegative.
    */
 (SIGNBIT tree_expr_nonnegative_p@0)
 { integer_zero_node; })

(simplify
 /* signbit(x) -> x<0 if x doesn't have signed zeros.  */
 (SIGNBIT @0)
 (if (!HONOR_SIGNED_ZEROS (@0))
  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))

/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1.  */
(for cmp (eq ne)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
        && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
        && !TYPE_SATURATING (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res)
          && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
      { constant_boolean_node (cmp == NE_EXPR, type); }
      (if (single_use (@3))
       (cmp @0 { TREE_OVERFLOW (res)
                 ? drop_tree_overflow (res) : res; }))))))))
(for cmp (lt le gt ge)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      {
        fold_overflow_warning (("assuming signed overflow does not occur "
                                "when simplifying conditional to constant"),
                               WARN_STRICT_OVERFLOW_CONDITIONAL);
        bool less = cmp == LE_EXPR || cmp == LT_EXPR;
        /* wi::ges_p (@2, 0) should be sufficient for a signed type.  */
        bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
                                  TYPE_SIGN (TREE_TYPE (@1)))
                        != (op == MINUS_EXPR);
        constant_boolean_node (less == ovf_high, type);
      }
      (if (single_use (@3))
       (with
        {
          fold_overflow_warning (("assuming signed overflow does not occur "
                                  "when changing X +- C1 cmp C2 to "
                                  "X cmp C2 -+ C1"),
                                 WARN_STRICT_OVERFLOW_COMPARISON);
        }
        (cmp @0 { res; })))))))))

/* Canonicalizations of BIT_FIELD_REFs.  */

(simplify
 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))

(simplify
 (BIT_FIELD_REF (view_convert @0) @1 @2)
 (BIT_FIELD_REF @0 @1 @2))

(simplify
 (BIT_FIELD_REF @0 @1 integer_zerop)
 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
  (view_convert @0)))

(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments.  */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that referenced the full argument can be stripped.  */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
            && integer_zerop (@2))
           /* Low-parts can be reduced to integral conversions.
              ??? The following doesn't work for PDP endian.  */
           || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
               /* Don't even think about BITS_BIG_ENDIAN.  */
               && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
               && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
               && compare_tree_int (@2, (BYTES_BIG_ENDIAN
                                         ?
 (TYPE_PRECISION (TREE_TYPE (@0))
                                            - TYPE_PRECISION (type))
                                         : 0)) == 0)))
   (convert @0))))

/* Simplify vector extracts.  */

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
          || (VECTOR_TYPE_P (type)
              && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     tree ctor = (TREE_CODE (@0) == SSA_NAME
                  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   /* Extracted size and position must be multiples of the element size
      and lie within the constructed vector.  */
   (if (n != 0
        && (idx % width) == 0
        && (n % width) == 0
        && known_le ((idx + n) / width,
                     TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
    (with
     {
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors.  */
       poly_uint64 k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
         {
           tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
           if (TREE_CODE (cons_elem) == VECTOR_TYPE)
             k = TYPE_VECTOR_SUBPARTS (cons_elem);
         }
       unsigned HOST_WIDE_INT elt, count, const_k;
     }
     (switch
      /* We keep an exact subset of the constructor elements.  */
      (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
        { build_constructor (type, NULL); }
        (if (count == 1)
         (if (elt < CONSTRUCTOR_NELTS (ctor))
          (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
          { build_zero_cst (type); })
         {
           vec<constructor_elt, va_gc> *vals;
           vec_alloc (vals, count);
           for (unsigned i = 0;
                i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
             CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
                                     CONSTRUCTOR_ELT (ctor, elt + i)->value);
           build_constructor (type, vals);
         })))
      /* The bitfield references a single constructor element.  */
      (if (k.is_constant (&const_k)
           && idx + n <= (idx / const_k + 1) * const_k)
       (switch
        (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
         { build_zero_cst (type); })
        (if (n == const_k)
         (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
        (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
                       @1 { bitsize_int ((idx % const_k) * width); }))))))))

/* Simplify a bit extraction from a bit insertion for the cases with
   the inserted element fully covering the extraction or the insertion
   not touching the extraction.
    */
(simplify
 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
 (with
  {
    /* Size of the inserted value in bits.  */
    unsigned HOST_WIDE_INT isize;
    if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
      isize = TYPE_PRECISION (TREE_TYPE (@1));
    else
      isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
  }
  (switch
   /* Extraction lies entirely within the inserted element.  */
   (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
        && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
                      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
                                                 wi::to_wide (@rpos)
                                                 - wi::to_wide (@ipos)); }))
   /* Insertion does not touch the extraction at all.  */
   (if (wi::geu_p (wi::to_wide (@ipos),
                   wi::to_wide (@rpos) + wi::to_wide (@rsize))
        || wi::geu_p (wi::to_wide (@rpos),
                      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @0 @rsize @rpos)))))

(if (canonicalize_math_after_vectorization_p ())
 (for fmas (FMA)
  (simplify
   (fmas:c (negate @0) @1 @2)
   (IFN_FNMA @0 @1 @2))
  (simplify
   (fmas @0 @1 (negate @2))
   (IFN_FMS @0 @1 @2))
  (simplify
   (fmas:c (negate @0) @1 (negate @2))
   (IFN_FNMS @0 @1 @2))
  (simplify
   (negate (fmas@3 @0 @1 @2))
   (if (single_use (@3))
    (IFN_FNMS @0 @1 @2))))

 (simplify
  (IFN_FMS:c (negate @0) @1 @2)
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FMS @0 @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FMS:c (negate @0) @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (negate (IFN_FMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FNMA @0 @1 @2)))

 (simplify
  (IFN_FNMA:c (negate @0) @1 @2)
  (IFN_FMA @0 @1 @2))
 (simplify
  (IFN_FNMA @0 @1 (negate @2))
  (IFN_FNMS @0 @1 @2))
 (simplify
  (IFN_FNMA:c (negate @0) @1 (negate @2))
  (IFN_FMS @0 @1 @2))
 (simplify
  (negate (IFN_FNMA@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMS @0 @1 @2)))

 (simplify
  (IFN_FNMS:c (negate @0) @1 @2)
  (IFN_FMS @0 @1 @2))
 (simplify
  (IFN_FNMS @0 @1 (negate @2))
  (IFN_FNMA @0 @1 @2))
 (simplify
  (IFN_FNMS:c (negate @0) @1 (negate @2))
  (IFN_FMA @0 @1 @2))
 (simplify
  (negate (IFN_FNMS@3 @0 @1 @2))
  (if (single_use (@3))
   (IFN_FMA @0 @1 @2))))

/* POPCOUNT simplifications.  */
(for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
               BUILT_IN_POPCOUNTIMAX)
 /* popcount(X&1) is nop_expr(X&1).  */
 (simplify
  (popcount @0)
  (if (tree_nonzero_bits (@0) == 1)
   (convert @0)))
 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero.  */
 (simplify
  (plus (popcount:s @0) (popcount:s @1))
  (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
   (popcount (bit_ior @0 @1))))
 /* popcount(X) == 0 is X == 0, and related (in)equalities.  */
 (for cmp (le eq ne gt)
      rep (eq eq ne ne)
  (simplify
   (cmp (popcount @0) integer_zerop)
   (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))

/* Simplify:

     a = a1 op a2
     r = c ? a : b;

   to:

     r = c ? a1 op a2 : b;

   if the target can do it in one go.  This makes the operation conditional
   on c, so could drop potentially-trapping arithmetic, but that's a valid
   simplification if the result of the operation isn't needed.

   Avoid speculatively generating a stand-alone vector comparison
   on targets that might not support them.  Any target implementing
   conditional internal functions must support the same comparisons
   inside and outside a VEC_COND_EXPR.  */

#if GIMPLE
(for uncond_op (UNCOND_BINARY)
     cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert?
 (uncond_op@4 @1 @2)) @3)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
        && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
  (with { tree op_type = TREE_TYPE (@4); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
        && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))

/* Same for ternary operations.  */
(for uncond_op (UNCOND_TERNARY)
     cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@5); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
        && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
        && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op (bit_not @0) @2 @3 @4
                  (view_convert:op_type @1)))))))
#endif

/* Detect cases in which a VEC_COND_EXPR effectively replaces the
   "else" value of an IFN_COND_*.  */
(for cond_op (COND_BINARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
  (with { tree op_type = TREE_TYPE (@3); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
  (with { tree op_type = TREE_TYPE (@5); }
   (if (inverse_conditions_p (@0, @2)
       && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))

/* Same for ternary operations.  */
(for cond_op (COND_TERNARY)
 (simplify
  (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
  (with { tree op_type = TREE_TYPE (@4); }
   (if (element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
 (simplify
  (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
  (with { tree op_type = TREE_TYPE (@6); }
   (if (inverse_conditions_p (@0, @2)
       && element_precision (type) == element_precision (op_type))
    (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))

/* For pointers @0 and @2 and nonnegative constant offset @1, look for
   expressions like:

   A: (@0 + @1 < @2) | (@2 + @1 < @0)
   B: (@0 + @1 <= @2) | (@2 + @1 <= @0)

   If pointers are known not to wrap, B checks whether @1 bytes starting
   at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
   bytes.  A is more efficiently tested as:

   A: (sizetype) (@0 + @1 - @2) > @1 * 2

   The equivalent expression for B is given by replacing @1 with @1 - 1:

   B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2

   @0 and @2 can be swapped in both expressions without changing the result.

   The folds rely on sizetype's being unsigned (which is always true)
   and on its being the same width as the pointer (which we have to check).

   The fold replaces two pointer_plus expressions, two comparisons and
   an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
   the best case it's a saving of two operations.
   The A fold retains one
   of the original pointer_pluses, so is a win even if both pointer_pluses
   are used elsewhere.  The B fold is a wash if both pointer_pluses are
   used elsewhere, since all we end up doing is replacing a comparison with
   a pointer_plus.  We do still apply the fold under those circumstances
   though, in case applying it to other conditions eventually makes one of the
   pointer_pluses dead.  */
(for ior (truth_orif truth_or bit_ior)
 (for cmp (le lt)
  (simplify
   (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
        (cmp:cs (pointer_plus@4 @2 @1) @0))
   (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
        && TYPE_OVERFLOW_WRAPS (sizetype)
        && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
    /* Calculate the rhs constant.  */
    (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
            offset_int rhs = off * 2; }
     /* Always fails for negative values.  */
     (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
      /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
         pick a canonical order.  This increases the chances of using the
         same pointer_plus in multiple checks.  */
      (with { bool swap_p = tree_swap_operands_p (@0, @2);
              tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
       (if (cmp == LT_EXPR)
        (gt (convert:sizetype
             (pointer_diff:ssizetype { swap_p ? @4 : @3; }
                                     { swap_p ? @0 : @2; }))
            { rhs_tree; })
        (gt (convert:sizetype
             (pointer_diff:ssizetype
              (pointer_plus { swap_p ? @2 : @0; }
                            { wide_int_to_tree (sizetype, off); })
              { swap_p ? @0 : @2; }))
            { rhs_tree; })))))))))

/* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
   element of @1.  */
(for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
  (with { int i = single_nonzero_element (@1); }
   (if (i >= 0)
    (with { tree elt = vector_cst_elt (@1, i);
            tree elt_type = TREE_TYPE (elt);
            unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
            tree size = bitsize_int (elt_bits);
            tree pos = bitsize_int (elt_bits * i); }
     (view_convert
      (bit_and:elt_type
       (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
       { elt; })))))))