/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2018 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  Declaring them here lets the
   patterns below use them directly as leaf predicates.  */
(define_predicates
  integer_onep integer_zerop integer_all_onesp integer_minus_onep
  integer_each_onep integer_truep integer_nonzerop
  real_zerop real_onep real_minus_onep
  zerop
  CONSTANT_CLASS_P
  tree_expr_nonnegative_p
  tree_expr_nonzero_p
  integer_valued_real_p
  integer_pow2p
  HONOR_NANS)

/* Operator lists.
*/
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
/* Position-wise logical inverse of each entry in tcc_comparison
   (NaN behavior differs; see the _with_nans variant below).  */
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
/* Position-wise comparison with its operands swapped.  */
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
				 BUILT_IN_L##FN##F \
				 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
			      BUILT_IN_L##FN \
			      BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
				 BUILT_IN_L##FN##L \
				 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)

/* As opposed to convert?, this still creates a single pattern, so
   it is not a suitable replacement for convert? in all cases.
*/
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others.  */
(match (nop_convert @0)
 @0)

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

/* op 0 -> op's first operand, for the operations where a zero right
   operand is neutral.  */
(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it does wrong for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
/* p - p -> 0.  The @@ allows the second operand to differ in
   signedness/qualifiers from the first while still matching.  */
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

/* x * 0 -> 0, unconditionally valid for integer types.  */
(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.
Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
	  || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
	  || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* outp/outn give, per comparison, the sign adjustment applied to X in
   the copysign replacement: convert for >/>=, negate for </<= (and
   vice versa for the swapped-arms variant below).  */
(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X).
*/
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X). */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X). */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) != -1 >> B.
   For vector types, only fold when the target can actually perform the
   right shift (checked via the optab queries below).  */
(simplify
 (trunc_div @0 (lshift integer_onep@1 @2))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
	  || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
	  || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.
*/
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  Signed types only: for unsigned, -1 is the maximum
    value, not a negation.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
	 { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.
*/
(for div (trunc_div exact_div)
 (simplify
  (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    bool overflow_p;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			    TYPE_SIGN (type), &overflow_p);
   }
   (if (!overflow_p)
    (div @0 { wide_int_to_tree (type, mul); })
    /* On overflow the combined divisor exceeds the type's range, so the
       quotient folds to zero — except for the signed minimum value,
       which is excluded below (presumably because C1*C2 could equal it
       exactly; TODO confirm the corner case rationale).  */
    (if (TYPE_UNSIGNED (type)
	 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
     { build_zero_cst (type); })))))

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   bool overflow_p;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
			   TYPE_SIGN (type), &overflow_p);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
  (if (SCALAR_FLOAT_TYPE_P (type)
       && ! HONOR_NANS (type)
       && !
HONOR_INFINITIES (type))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

/* The enclosing (if flag_reciprocal_math) guards all three rewrites,
   which reassociate FP division and so change rounding.  */
(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C). */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y with C2 = 1/C1.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; } ) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       /* @1 == -@2, i.e. the mask clears exactly the low log2(A) bits.  */
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
	   { build_int_cst (integer_type_node,
			    wi::exact_log2 (wi::to_wide (@2))); }))))

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.
We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
	&& !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    /* Without -freciprocal-math, only fold when the constant has an
       exact inverse; exact_inverse is not attempted for COMPLEX_CST
       (presumably unsupported there — see its definition).  */
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
			     TYPE_SIGN (type)))
   { build_zero_cst (type); })))

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.
*/
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
	 Y might be -1, because we would then change valid
	 INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
	  || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
						   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N)  where C is a power of 2,
   to A & ((C << N) - 1).  */
/* Match-only helper: a power-of-two divisor candidate is either a bare
   integer constant or a constant shifted left; @1 binds the constant.  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
	|| tree_expr_nonnegative_p (@0))
	&& tree_nop_conversion_p (type, TREE_TYPE (@3))
	&& integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
		(type, wi::mask (TYPE_PRECISION (type)
				 - wi::exact_log2 (wi::to_wide (@1)),
				 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.
*/
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  /* (negate @1) of a power of two is the mask clearing the low bits,
     e.g. -2 == ~1 in two's complement.  */
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2) -> t.  Relies on undefined overflow: if t*2
   wrapped, the original expression was already undefined.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.
*/
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cos(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
  (if (! FLOAT_TYPE_P (type)
       && tree_nop_conversion_p (type, TREE_TYPE (@1)))
   (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x).  Only when the minus feeds a single use, so the
   subtraction is not computed anyway.  */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
  (if (INTEGRAL_TYPE_P (type)
       && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
       && single_use (@1))
   (if (TYPE_UNSIGNED (type))
     (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))

/* Fold (C1/X)*C2 into (C1*C2)/X.
*/
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
  (if (flag_associative_math
       && single_use (@3))
   (with
    { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
    (if (tem)
     (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
  { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
  (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
  (if (TYPE_UNSIGNED (type))
    (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

/* The :s modifier on sub-expressions restricts the match to single-use
   operands (see the GCC internals Match-and-Simplify documentation).  */
(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
	&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
    (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
   (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
	&& INTEGRAL_TYPE_P (TREE_TYPE (@1))
	&& TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
    (cmp (bit_and @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
  (minus (bit_xor @0 @1) @1))
/* Same with B a constant: the two masks must be complements.  */
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).
*/
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
  (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 /* Constant form: valid when the two mask constants are complements.  */
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.
   GIMPLE-only: get_nonzero_bits needs an SSA name.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* X % Y is smaller than Y.
*/
/* For unsigned operands 0 <= X % Y < Y, so X%Y < Y is always true and
   X%Y >= Y always false (and the mirrored forms below likewise).  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
  (bit_xor @0 @0)
  { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
  (bit_xor @0 integer_all_onesp@1)
  (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
  (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.
*/
/* GIMPLE-only: get_nonzero_bits needs an SSA name.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y  */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* De Morgan:
   ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y)
   convert1?/convert2? allow independent (possibly absent) conversions
   on each operand.  */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))

/* (X | Y) ^ X -> Y & ~ X*/
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.
*/
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:c @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z
   The single_use checks pick whichever inner expression can be reused
   without duplicating still-live computations.  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2?
(bit_xor:c @0 @2))) 989 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 990 && tree_nop_conversion_p (type, TREE_TYPE (@2))) 991 (bit_xor (convert @1) (convert @2)))) 992 993(simplify 994 (abs (abs@1 @0)) 995 @1) 996(simplify 997 (abs (negate @0)) 998 (abs @0)) 999(simplify 1000 (abs tree_expr_nonnegative_p@0) 1001 @0) 1002 1003/* A few cases of fold-const.c negate_expr_p predicate. */ 1004(match negate_expr_p 1005 INTEGER_CST 1006 (if ((INTEGRAL_TYPE_P (type) 1007 && TYPE_UNSIGNED (type)) 1008 || (!TYPE_OVERFLOW_SANITIZED (type) 1009 && may_negate_without_overflow_p (t))))) 1010(match negate_expr_p 1011 FIXED_CST) 1012(match negate_expr_p 1013 (negate @0) 1014 (if (!TYPE_OVERFLOW_SANITIZED (type)))) 1015(match negate_expr_p 1016 REAL_CST 1017 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t))))) 1018/* VECTOR_CST handling of non-wrapping types would recurse in unsupported 1019 ways. */ 1020(match negate_expr_p 1021 VECTOR_CST 1022 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type)))) 1023(match negate_expr_p 1024 (minus @0 @1) 1025 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type)) 1026 || (FLOAT_TYPE_P (type) 1027 && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 1028 && !HONOR_SIGNED_ZEROS (type))))) 1029 1030/* (-A) * (-B) -> A * B */ 1031(simplify 1032 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1)) 1033 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1034 && tree_nop_conversion_p (type, TREE_TYPE (@1))) 1035 (mult (convert @0) (convert (negate @1))))) 1036 1037/* -(A + B) -> (-B) - A. */ 1038(simplify 1039 (negate (plus:c @0 negate_expr_p@1)) 1040 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type)) 1041 && !HONOR_SIGNED_ZEROS (element_mode (type))) 1042 (minus (negate @1) @0))) 1043 1044/* -(A - B) -> B - A. 
*/ 1045(simplify 1046 (negate (minus @0 @1)) 1047 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type)) 1048 || (FLOAT_TYPE_P (type) 1049 && !HONOR_SIGN_DEPENDENT_ROUNDING (type) 1050 && !HONOR_SIGNED_ZEROS (type))) 1051 (minus @1 @0))) 1052(simplify 1053 (negate (pointer_diff @0 @1)) 1054 (if (TYPE_OVERFLOW_UNDEFINED (type)) 1055 (pointer_diff @1 @0))) 1056 1057/* A - B -> A + (-B) if B is easily negatable. */ 1058(simplify 1059 (minus @0 negate_expr_p@1) 1060 (if (!FIXED_POINT_TYPE_P (type)) 1061 (plus @0 (negate @1)))) 1062 1063/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST)) 1064 when profitable. 1065 For bitwise binary operations apply operand conversions to the 1066 binary operation result instead of to the operands. This allows 1067 to combine successive conversions and bitwise binary operations. 1068 We combine the above two cases by using a conditional convert. */ 1069(for bitop (bit_and bit_ior bit_xor) 1070 (simplify 1071 (bitop (convert @0) (convert? @1)) 1072 (if (((TREE_CODE (@1) == INTEGER_CST 1073 && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1074 && int_fits_type_p (@1, TREE_TYPE (@0))) 1075 || types_match (@0, @1)) 1076 /* ??? This transform conflicts with fold-const.c doing 1077 Convert (T)(x & c) into (T)x & (T)c, if c is an integer 1078 constants (if x has signed type, the sign bit cannot be set 1079 in c). This folds extension into the BIT_AND_EXPR. 1080 Restrict it to GIMPLE to avoid endless recursions. */ 1081 && (bitop != BIT_AND_EXPR || GIMPLE) 1082 && (/* That's a good idea if the conversion widens the operand, thus 1083 after hoisting the conversion the operation will be narrower. */ 1084 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type) 1085 /* It's also a good idea if the conversion is to a non-integer 1086 mode. */ 1087 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT 1088 /* Or if the precision of TO is not the same as the precision 1089 of its mode. 
*/ 1090 || !type_has_mode_precision_p (type))) 1091 (convert (bitop @0 (convert @1)))))) 1092 1093(for bitop (bit_and bit_ior) 1094 rbitop (bit_ior bit_and) 1095 /* (x | y) & x -> x */ 1096 /* (x & y) | x -> x */ 1097 (simplify 1098 (bitop:c (rbitop:c @0 @1) @0) 1099 @0) 1100 /* (~x | y) & x -> x & y */ 1101 /* (~x & y) | x -> x | y */ 1102 (simplify 1103 (bitop:c (rbitop:c (bit_not @0) @1) @0) 1104 (bitop @0 @1))) 1105 1106/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */ 1107(simplify 1108 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 1109 (bit_ior (bit_and @0 @2) (bit_and @1 @2))) 1110 1111/* Combine successive equal operations with constants. */ 1112(for bitop (bit_and bit_ior bit_xor) 1113 (simplify 1114 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2) 1115 (if (!CONSTANT_CLASS_P (@0)) 1116 /* This is the canonical form regardless of whether (bitop @1 @2) can be 1117 folded to a constant. */ 1118 (bitop @0 (bitop @1 @2)) 1119 /* In this case we have three constants and (bitop @0 @1) doesn't fold 1120 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if 1121 the values involved are such that the operation can't be decided at 1122 compile time. Try folding one of @0 or @1 with @2 to see whether 1123 that combination can be decided at compile time. 1124 1125 Keep the existing form if both folds fail, to avoid endless 1126 oscillation. */ 1127 (with { tree cst1 = const_binop (bitop, type, @0, @2); } 1128 (if (cst1) 1129 (bitop @1 { cst1; }) 1130 (with { tree cst2 = const_binop (bitop, type, @1, @2); } 1131 (if (cst2) 1132 (bitop @0 { cst2; })))))))) 1133 1134/* Try simple folding for X op !X, and X op X with the help 1135 of the truth_valued_p and logical_inverted_value predicates. 
*/ 1136(match truth_valued_p 1137 @0 1138 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))) 1139(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor) 1140 (match truth_valued_p 1141 (op @0 @1))) 1142(match truth_valued_p 1143 (truth_not @0)) 1144 1145(match (logical_inverted_value @0) 1146 (truth_not @0)) 1147(match (logical_inverted_value @0) 1148 (bit_not truth_valued_p@0)) 1149(match (logical_inverted_value @0) 1150 (eq @0 integer_zerop)) 1151(match (logical_inverted_value @0) 1152 (ne truth_valued_p@0 integer_truep)) 1153(match (logical_inverted_value @0) 1154 (bit_xor truth_valued_p@0 integer_truep)) 1155 1156/* X & !X -> 0. */ 1157(simplify 1158 (bit_and:c @0 (logical_inverted_value @0)) 1159 { build_zero_cst (type); }) 1160/* X | !X and X ^ !X -> 1, , if X is truth-valued. */ 1161(for op (bit_ior bit_xor) 1162 (simplify 1163 (op:c truth_valued_p@0 (logical_inverted_value @0)) 1164 { constant_boolean_node (true, type); })) 1165/* X ==/!= !X is false/true. */ 1166(for op (eq ne) 1167 (simplify 1168 (op:c truth_valued_p@0 (logical_inverted_value @0)) 1169 { constant_boolean_node (op == NE_EXPR ? true : false, type); })) 1170 1171/* ~~x -> x */ 1172(simplify 1173 (bit_not (bit_not @0)) 1174 @0) 1175 1176/* Convert ~ (-A) to A - 1. */ 1177(simplify 1178 (bit_not (convert? (negate @0))) 1179 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1180 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1181 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); })))) 1182 1183/* Convert - (~A) to A + 1. */ 1184(simplify 1185 (negate (nop_convert (bit_not @0))) 1186 (plus (view_convert @0) { build_each_one_cst (type); })) 1187 1188/* Convert ~ (A - 1) or ~ (A + -1) to -A. */ 1189(simplify 1190 (bit_not (convert? (minus @0 integer_each_onep))) 1191 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1192 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1193 (convert (negate @0)))) 1194(simplify 1195 (bit_not (convert? 
(plus @0 integer_all_onesp))) 1196 (if (element_precision (type) <= element_precision (TREE_TYPE (@0)) 1197 || !TYPE_UNSIGNED (TREE_TYPE (@0))) 1198 (convert (negate @0)))) 1199 1200/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */ 1201(simplify 1202 (bit_not (convert? (bit_xor @0 INTEGER_CST@1))) 1203 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1204 (convert (bit_xor @0 (bit_not @1))))) 1205(simplify 1206 (bit_not (convert? (bit_xor:c (bit_not @0) @1))) 1207 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1208 (convert (bit_xor @0 @1)))) 1209 1210/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */ 1211(simplify 1212 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1) 1213 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1214 (bit_not (bit_xor (view_convert @0) @1)))) 1215 1216/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */ 1217(simplify 1218 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2)) 1219 (bit_xor (bit_and (bit_xor @0 @1) @2) @0)) 1220 1221/* Fold A - (A & B) into ~B & A. */ 1222(simplify 1223 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1))) 1224 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)) 1225 && tree_nop_conversion_p (type, TREE_TYPE (@1))) 1226 (convert (bit_and (bit_not @1) @0)))) 1227 1228/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */ 1229(for cmp (gt lt ge le) 1230(simplify 1231 (mult (convert (cmp @0 @1)) @2) 1232 (if (GIMPLE || !TREE_SIDE_EFFECTS (@2)) 1233 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))) 1234 1235/* For integral types with undefined overflow and C != 0 fold 1236 x * C EQ/NE y * C into x EQ/NE y. */ 1237(for cmp (eq ne) 1238 (simplify 1239 (cmp (mult:c @0 @1) (mult:c @2 @1)) 1240 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1241 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1242 && tree_expr_nonzero_p (@1)) 1243 (cmp @0 @2)))) 1244 1245/* For integral types with wrapping overflow and C odd fold 1246 x * C EQ/NE y * C into x EQ/NE y. 
*/ 1247(for cmp (eq ne) 1248 (simplify 1249 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1)) 1250 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1251 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)) 1252 && (TREE_INT_CST_LOW (@1) & 1) != 0) 1253 (cmp @0 @2)))) 1254 1255/* For integral types with undefined overflow and C != 0 fold 1256 x * C RELOP y * C into: 1257 1258 x RELOP y for nonnegative C 1259 y RELOP x for negative C */ 1260(for cmp (lt gt le ge) 1261 (simplify 1262 (cmp (mult:c @0 @1) (mult:c @2 @1)) 1263 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1264 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1265 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1)) 1266 (cmp @0 @2) 1267 (if (TREE_CODE (@1) == INTEGER_CST 1268 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1)))) 1269 (cmp @2 @0)))))) 1270 1271/* (X - 1U) <= INT_MAX-1U into (int) X > 0. */ 1272(for cmp (le gt) 1273 icmp (gt le) 1274 (simplify 1275 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2) 1276 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1277 && TYPE_UNSIGNED (TREE_TYPE (@0)) 1278 && TYPE_PRECISION (TREE_TYPE (@0)) > 1 1279 && (wi::to_wide (@2) 1280 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1)) 1281 (with { tree stype = signed_type_for (TREE_TYPE (@0)); } 1282 (icmp (convert:stype @0) { build_int_cst (stype, 0); }))))) 1283 1284/* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */ 1285(for cmp (simple_comparison) 1286 (simplify 1287 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2)) 1288 (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))) 1289 (cmp @0 @1)))) 1290 1291/* X / C1 op C2 into a simple range test. 
*/ 1292(for cmp (simple_comparison) 1293 (simplify 1294 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2) 1295 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1296 && integer_nonzerop (@1) 1297 && !TREE_OVERFLOW (@1) 1298 && !TREE_OVERFLOW (@2)) 1299 (with { tree lo, hi; bool neg_overflow; 1300 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi, 1301 &neg_overflow); } 1302 (switch 1303 (if (code == LT_EXPR || code == GE_EXPR) 1304 (if (TREE_OVERFLOW (lo)) 1305 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); } 1306 (if (code == LT_EXPR) 1307 (lt @0 { lo; }) 1308 (ge @0 { lo; })))) 1309 (if (code == LE_EXPR || code == GT_EXPR) 1310 (if (TREE_OVERFLOW (hi)) 1311 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); } 1312 (if (code == LE_EXPR) 1313 (le @0 { hi; }) 1314 (gt @0 { hi; })))) 1315 (if (!lo && !hi) 1316 { build_int_cst (type, code == NE_EXPR); }) 1317 (if (code == EQ_EXPR && !hi) 1318 (ge @0 { lo; })) 1319 (if (code == EQ_EXPR && !lo) 1320 (le @0 { hi; })) 1321 (if (code == NE_EXPR && !hi) 1322 (lt @0 { lo; })) 1323 (if (code == NE_EXPR && !lo) 1324 (gt @0 { hi; })) 1325 (if (GENERIC) 1326 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR, 1327 lo, hi); }) 1328 (with 1329 { 1330 tree etype = range_check_type (TREE_TYPE (@0)); 1331 if (etype) 1332 { 1333 if (! TYPE_UNSIGNED (etype)) 1334 etype = unsigned_type_for (etype); 1335 hi = fold_convert (etype, hi); 1336 lo = fold_convert (etype, lo); 1337 hi = const_binop (MINUS_EXPR, etype, hi, lo); 1338 } 1339 } 1340 (if (etype && hi && !TREE_OVERFLOW (hi)) 1341 (if (code == EQ_EXPR) 1342 (le (minus (convert:etype @0) { lo; }) { hi; }) 1343 (gt (minus (convert:etype @0) { lo; }) { hi; }))))))))) 1344 1345/* X + Z < Y + Z is the same as X < Y when there is no overflow. 
*/ 1346(for op (lt le ge gt) 1347 (simplify 1348 (op (plus:c @0 @2) (plus:c @1 @2)) 1349 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1350 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1351 (op @0 @1)))) 1352/* For equality and subtraction, this is also true with wrapping overflow. */ 1353(for op (eq ne minus) 1354 (simplify 1355 (op (plus:c @0 @2) (plus:c @1 @2)) 1356 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1357 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1358 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1359 (op @0 @1)))) 1360 1361/* X - Z < Y - Z is the same as X < Y when there is no overflow. */ 1362(for op (lt le ge gt) 1363 (simplify 1364 (op (minus @0 @2) (minus @1 @2)) 1365 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1366 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1367 (op @0 @1)))) 1368/* For equality and subtraction, this is also true with wrapping overflow. */ 1369(for op (eq ne minus) 1370 (simplify 1371 (op (minus @0 @2) (minus @1 @2)) 1372 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1373 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1374 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1375 (op @0 @1)))) 1376/* And for pointers... */ 1377(for op (simple_comparison) 1378 (simplify 1379 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) 1380 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1381 (op @0 @1)))) 1382(simplify 1383 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2)) 1384 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) 1385 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1386 (pointer_diff @0 @1))) 1387 1388/* Z - X < Z - Y is the same as Y < X when there is no overflow. */ 1389(for op (lt le ge gt) 1390 (simplify 1391 (op (minus @2 @0) (minus @2 @1)) 1392 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1393 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 1394 (op @1 @0)))) 1395/* For equality and subtraction, this is also true with wrapping overflow. 
*/ 1396(for op (eq ne minus) 1397 (simplify 1398 (op (minus @2 @0) (minus @2 @1)) 1399 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1400 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1401 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1402 (op @1 @0)))) 1403/* And for pointers... */ 1404(for op (simple_comparison) 1405 (simplify 1406 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) 1407 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1408 (op @1 @0)))) 1409(simplify 1410 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1)) 1411 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3)) 1412 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2))) 1413 (pointer_diff @1 @0))) 1414 1415/* X + Y < Y is the same as X < 0 when there is no overflow. */ 1416(for op (lt le gt ge) 1417 (simplify 1418 (op:c (plus:c@2 @0 @1) @1) 1419 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1420 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1421 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)) 1422 && (CONSTANT_CLASS_P (@0) || single_use (@2))) 1423 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))) 1424/* For equality, this is also true with wrapping overflow. */ 1425(for op (eq ne) 1426 (simplify 1427 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1)) 1428 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1429 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1430 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 1431 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3))) 1432 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2)) 1433 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1))) 1434 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))) 1435 (simplify 1436 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? 
@0)) 1437 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)) 1438 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)) 1439 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3)))) 1440 (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) 1441 1442/* X - Y < X is the same as Y > 0 when there is no overflow. 1443 For equality, this is also true with wrapping overflow. */ 1444(for op (simple_comparison) 1445 (simplify 1446 (op:c @0 (minus@2 @0 @1)) 1447 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1448 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)) 1449 || ((op == EQ_EXPR || op == NE_EXPR) 1450 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))) 1451 && (CONSTANT_CLASS_P (@1) || single_use (@2))) 1452 (op @1 { build_zero_cst (TREE_TYPE (@1)); })))) 1453 1454/* Transform: 1455 (X / Y) == 0 -> X < Y if X, Y are unsigned. 1456 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */ 1457(for cmp (eq ne) 1458 ocmp (lt ge) 1459 (simplify 1460 (cmp (trunc_div @0 @1) integer_zerop) 1461 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) 1462 /* Complex ==/!= is allowed, but not </>=. */ 1463 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE 1464 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0)))) 1465 (ocmp @0 @1)))) 1466 1467/* X == C - X can never be true if C is odd. */ 1468(for cmp (eq ne) 1469 (simplify 1470 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0)))) 1471 (if (TREE_INT_CST_LOW (@1) & 1) 1472 { constant_boolean_node (cmp == NE_EXPR, type); }))) 1473 1474/* Arguments on which one can call get_nonzero_bits to get the bits 1475 possibly set. */ 1476(match with_possible_nonzero_bits 1477 INTEGER_CST@0) 1478(match with_possible_nonzero_bits 1479 SSA_NAME@0 1480 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0))))) 1481/* Slightly extended version, do not make it recursive to keep it cheap. 
*/ 1482(match (with_possible_nonzero_bits2 @0) 1483 with_possible_nonzero_bits@0) 1484(match (with_possible_nonzero_bits2 @0) 1485 (bit_and:c with_possible_nonzero_bits@0 @2)) 1486 1487/* Same for bits that are known to be set, but we do not have 1488 an equivalent to get_nonzero_bits yet. */ 1489(match (with_certain_nonzero_bits2 @0) 1490 INTEGER_CST@0) 1491(match (with_certain_nonzero_bits2 @0) 1492 (bit_ior @1 INTEGER_CST@0)) 1493 1494/* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */ 1495(for cmp (eq ne) 1496 (simplify 1497 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1)) 1498 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0) 1499 { constant_boolean_node (cmp == NE_EXPR, type); }))) 1500 1501/* ((X inner_op C0) outer_op C1) 1502 With X being a tree where value_range has reasoned certain bits to always be 1503 zero throughout its computed value range, 1504 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op 1505 where zero_mask has 1's for all bits that are sure to be 0 in 1506 and 0's otherwise. 
1507 if (inner_op == '^') C0 &= ~C1; 1508 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1) 1509 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1) 1510*/ 1511(for inner_op (bit_ior bit_xor) 1512 outer_op (bit_xor bit_ior) 1513(simplify 1514 (outer_op 1515 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1) 1516 (with 1517 { 1518 bool fail = false; 1519 wide_int zero_mask_not; 1520 wide_int C0; 1521 wide_int cst_emit; 1522 1523 if (TREE_CODE (@2) == SSA_NAME) 1524 zero_mask_not = get_nonzero_bits (@2); 1525 else 1526 fail = true; 1527 1528 if (inner_op == BIT_XOR_EXPR) 1529 { 1530 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1)); 1531 cst_emit = C0 | wi::to_wide (@1); 1532 } 1533 else 1534 { 1535 C0 = wi::to_wide (@0); 1536 cst_emit = C0 ^ wi::to_wide (@1); 1537 } 1538 } 1539 (if (!fail && (C0 & zero_mask_not) == 0) 1540 (outer_op @2 { wide_int_to_tree (type, cst_emit); }) 1541 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0) 1542 (inner_op @2 { wide_int_to_tree (type, cst_emit); })))))) 1543 1544/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */ 1545(simplify 1546 (pointer_plus (pointer_plus:s @0 @1) @3) 1547 (pointer_plus @0 (plus @1 @3))) 1548 1549/* Pattern match 1550 tem1 = (long) ptr1; 1551 tem2 = (long) ptr2; 1552 tem3 = tem2 - tem1; 1553 tem4 = (unsigned long) tem3; 1554 tem5 = ptr1 + tem4; 1555 and produce 1556 tem5 = ptr2; */ 1557(simplify 1558 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0)))) 1559 /* Conditionally look through a sign-changing conversion. 
*/ 1560 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3)) 1561 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1))) 1562 || (GENERIC && type == TREE_TYPE (@1)))) 1563 @1)) 1564(simplify 1565 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0))) 1566 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3))) 1567 (convert @1))) 1568 1569/* Pattern match 1570 tem = (sizetype) ptr; 1571 tem = tem & algn; 1572 tem = -tem; 1573 ... = ptr p+ tem; 1574 and produce the simpler and easier to analyze with respect to alignment 1575 ... = ptr & ~algn; */ 1576(simplify 1577 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1))) 1578 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); } 1579 (bit_and @0 { algn; }))) 1580 1581/* Try folding difference of addresses. */ 1582(simplify 1583 (minus (convert ADDR_EXPR@0) (convert @1)) 1584 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1585 (with { poly_int64 diff; } 1586 (if (ptr_difference_const (@0, @1, &diff)) 1587 { build_int_cst_type (type, diff); })))) 1588(simplify 1589 (minus (convert @0) (convert ADDR_EXPR@1)) 1590 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1591 (with { poly_int64 diff; } 1592 (if (ptr_difference_const (@0, @1, &diff)) 1593 { build_int_cst_type (type, diff); })))) 1594(simplify 1595 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1)) 1596 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0)) 1597 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1))) 1598 (with { poly_int64 diff; } 1599 (if (ptr_difference_const (@0, @1, &diff)) 1600 { build_int_cst_type (type, diff); })))) 1601(simplify 1602 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1)) 1603 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0)) 1604 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1))) 1605 (with { poly_int64 diff; } 1606 (if (ptr_difference_const (@0, @1, &diff)) 1607 { build_int_cst_type (type, diff); })))) 1608 
1609/* If arg0 is derived from the address of an object or function, we may 1610 be able to fold this expression using the object or function's 1611 alignment. */ 1612(simplify 1613 (bit_and (convert? @0) INTEGER_CST@1) 1614 (if (POINTER_TYPE_P (TREE_TYPE (@0)) 1615 && tree_nop_conversion_p (type, TREE_TYPE (@0))) 1616 (with 1617 { 1618 unsigned int align; 1619 unsigned HOST_WIDE_INT bitpos; 1620 get_pointer_alignment_1 (@0, &align, &bitpos); 1621 } 1622 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT)) 1623 { wide_int_to_tree (type, (wi::to_wide (@1) 1624 & (bitpos / BITS_PER_UNIT))); })))) 1625 1626 1627/* We can't reassociate at all for saturating types. */ 1628(if (!TYPE_SATURATING (type)) 1629 1630 /* Contract negates. */ 1631 /* A + (-B) -> A - B */ 1632 (simplify 1633 (plus:c @0 (convert? (negate @1))) 1634 /* Apply STRIP_NOPS on the negate. */ 1635 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1636 && !TYPE_OVERFLOW_SANITIZED (type)) 1637 (with 1638 { 1639 tree t1 = type; 1640 if (INTEGRAL_TYPE_P (type) 1641 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) 1642 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1); 1643 } 1644 (convert (minus (convert:t1 @0) (convert:t1 @1)))))) 1645 /* A - (-B) -> A + B */ 1646 (simplify 1647 (minus @0 (convert? (negate @1))) 1648 (if (tree_nop_conversion_p (type, TREE_TYPE (@1)) 1649 && !TYPE_OVERFLOW_SANITIZED (type)) 1650 (with 1651 { 1652 tree t1 = type; 1653 if (INTEGRAL_TYPE_P (type) 1654 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1))) 1655 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1); 1656 } 1657 (convert (plus (convert:t1 @0) (convert:t1 @1)))))) 1658 /* -(T)(-A) -> (T)A 1659 Sign-extension is ok except for INT_MIN, which thankfully cannot 1660 happen without overflow. 
*/ 1661 (simplify 1662 (negate (convert (negate @1))) 1663 (if (INTEGRAL_TYPE_P (type) 1664 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1)) 1665 || (!TYPE_UNSIGNED (TREE_TYPE (@1)) 1666 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) 1667 && !TYPE_OVERFLOW_SANITIZED (type) 1668 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) 1669 (convert @1))) 1670 (simplify 1671 (negate (convert negate_expr_p@1)) 1672 (if (SCALAR_FLOAT_TYPE_P (type) 1673 && ((DECIMAL_FLOAT_TYPE_P (type) 1674 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)) 1675 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1))) 1676 || !HONOR_SIGN_DEPENDENT_ROUNDING (type))) 1677 (convert (negate @1)))) 1678 (simplify 1679 (negate (nop_convert (negate @1))) 1680 (if (!TYPE_OVERFLOW_SANITIZED (type) 1681 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1))) 1682 (view_convert @1))) 1683 1684 /* We can't reassociate floating-point unless -fassociative-math 1685 or fixed-point plus or minus because of saturation to +-Inf. */ 1686 (if ((!FLOAT_TYPE_P (type) || flag_associative_math) 1687 && !FIXED_POINT_TYPE_P (type)) 1688 1689 /* Match patterns that allow contracting a plus-minus pair 1690 irrespective of overflow issues. */ 1691 /* (A +- B) - A -> +- B */ 1692 /* (A +- B) -+ B -> A */ 1693 /* A - (A +- B) -> -+ B */ 1694 /* A +- (B -+ A) -> +- B */ 1695 (simplify 1696 (minus (plus:c @0 @1) @0) 1697 @1) 1698 (simplify 1699 (minus (minus @0 @1) @0) 1700 (negate @1)) 1701 (simplify 1702 (plus:c (minus @0 @1) @1) 1703 @0) 1704 (simplify 1705 (minus @0 (plus:c @0 @1)) 1706 (negate @1)) 1707 (simplify 1708 (minus @0 (minus @0 @1)) 1709 @1) 1710 /* (A +- B) + (C - A) -> C +- B */ 1711 /* (A + B) - (A - C) -> B + C */ 1712 /* More cases are handled with comparisons. 
*/ 1713 (simplify 1714 (plus:c (plus:c @0 @1) (minus @2 @0)) 1715 (plus @2 @1)) 1716 (simplify 1717 (plus:c (minus @0 @1) (minus @2 @0)) 1718 (minus @2 @1)) 1719 (simplify 1720 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0)) 1721 (if (TYPE_OVERFLOW_UNDEFINED (type) 1722 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))) 1723 (pointer_diff @2 @1))) 1724 (simplify 1725 (minus (plus:c @0 @1) (minus @0 @2)) 1726 (plus @1 @2)) 1727 1728 /* (A +- CST1) +- CST2 -> A + CST3 1729 Use view_convert because it is safe for vectors and equivalent for 1730 scalars. */ 1731 (for outer_op (plus minus) 1732 (for inner_op (plus minus) 1733 neg_inner_op (minus plus) 1734 (simplify 1735 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1)) 1736 CONSTANT_CLASS_P@2) 1737 /* If one of the types wraps, use that one. */ 1738 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type)) 1739 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse 1740 forever if something doesn't simplify into a constant. */ 1741 (if (!CONSTANT_CLASS_P (@0)) 1742 (if (outer_op == PLUS_EXPR) 1743 (plus (view_convert @0) (inner_op @2 (view_convert @1))) 1744 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))) 1745 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 1746 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))) 1747 (if (outer_op == PLUS_EXPR) 1748 (view_convert (plus @0 (inner_op (view_convert @2) @1))) 1749 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1)))) 1750 /* If the constant operation overflows we cannot do the transform 1751 directly as we would introduce undefined overflow, for example 1752 with (a - 1) + INT_MIN. */ 1753 (if (types_match (type, @0)) 1754 (with { tree cst = const_binop (outer_op == inner_op 1755 ? PLUS_EXPR : MINUS_EXPR, 1756 type, @1, @2); } 1757 (if (cst && !TREE_OVERFLOW (cst)) 1758 (inner_op @0 { cst; } ) 1759 /* X+INT_MAX+1 is X-INT_MIN. 
*/ 1760 (if (INTEGRAL_TYPE_P (type) && cst 1761 && wi::to_wide (cst) == wi::min_value (type)) 1762 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); }) 1763 /* Last resort, use some unsigned type. */ 1764 (with { tree utype = unsigned_type_for (type); } 1765 (if (utype) 1766 (view_convert (inner_op 1767 (view_convert:utype @0) 1768 (view_convert:utype 1769 { drop_tree_overflow (cst); })))))))))))))) 1770 1771 /* (CST1 - A) +- CST2 -> CST3 - A */ 1772 (for outer_op (plus minus) 1773 (simplify 1774 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2) 1775 (with { tree cst = const_binop (outer_op, type, @1, @2); } 1776 (if (cst && !TREE_OVERFLOW (cst)) 1777 (minus { cst; } @0))))) 1778 1779 /* CST1 - (CST2 - A) -> CST3 + A */ 1780 (simplify 1781 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0)) 1782 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); } 1783 (if (cst && !TREE_OVERFLOW (cst)) 1784 (plus { cst; } @0)))) 1785 1786 /* ~A + A -> -1 */ 1787 (simplify 1788 (plus:c (bit_not @0) @0) 1789 (if (!TYPE_OVERFLOW_TRAPS (type)) 1790 { build_all_ones_cst (type); })) 1791 1792 /* ~A + 1 -> -A */ 1793 (simplify 1794 (plus (convert? (bit_not @0)) integer_each_onep) 1795 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))) 1796 (negate (convert @0)))) 1797 1798 /* -A - 1 -> ~A */ 1799 (simplify 1800 (minus (convert? (negate @0)) integer_each_onep) 1801 (if (!TYPE_OVERFLOW_TRAPS (type) 1802 && tree_nop_conversion_p (type, TREE_TYPE (@0))) 1803 (bit_not (convert @0)))) 1804 1805 /* -1 - A -> ~A */ 1806 (simplify 1807 (minus integer_all_onesp @0) 1808 (bit_not @0)) 1809 1810 /* (T)(P + A) - (T)P -> (T) A */ 1811 (simplify 1812 (minus (convert (plus:c @@0 @1)) 1813 (convert? @0)) 1814 (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 1815 /* For integer types, if A has a smaller type 1816 than T the result depends on the possible 1817 overflow in P + A. 1818 E.g. T=size_t, A=(unsigned)429497295, P>0. 
1819 However, if an overflow in P + A would cause 1820 undefined behavior, we can assume that there 1821 is no overflow. */ 1822 || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1823 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) 1824 (convert @1))) 1825 (simplify 1826 (minus (convert (pointer_plus @@0 @1)) 1827 (convert @0)) 1828 (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 1829 /* For pointer types, if the conversion of A to the 1830 final type requires a sign- or zero-extension, 1831 then we have to punt - it is not defined which 1832 one is correct. */ 1833 || (POINTER_TYPE_P (TREE_TYPE (@0)) 1834 && TREE_CODE (@1) == INTEGER_CST 1835 && tree_int_cst_sign_bit (@1) == 0)) 1836 (convert @1))) 1837 (simplify 1838 (pointer_diff (pointer_plus @@0 @1) @0) 1839 /* The second argument of pointer_plus must be interpreted as signed, and 1840 thus sign-extended if necessary. */ 1841 (with { tree stype = signed_type_for (TREE_TYPE (@1)); } 1842 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR 1843 second arg is unsigned even when we need to consider it as signed, 1844 we don't want to diagnose overflow here. */ 1845 (convert (view_convert:stype @1)))) 1846 1847 /* (T)P - (T)(P + A) -> -(T) A */ 1848 (simplify 1849 (minus (convert? @0) 1850 (convert (plus:c @@0 @1))) 1851 (if (INTEGRAL_TYPE_P (type) 1852 && TYPE_OVERFLOW_UNDEFINED (type) 1853 && element_precision (type) <= element_precision (TREE_TYPE (@1))) 1854 (with { tree utype = unsigned_type_for (type); } 1855 (convert (negate (convert:utype @1)))) 1856 (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 1857 /* For integer types, if A has a smaller type 1858 than T the result depends on the possible 1859 overflow in P + A. 1860 E.g. T=size_t, A=(unsigned)429497295, P>0. 1861 However, if an overflow in P + A would cause 1862 undefined behavior, we can assume that there 1863 is no overflow. 
*/ 1864 || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1865 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)))) 1866 (negate (convert @1))))) 1867 (simplify 1868 (minus (convert @0) 1869 (convert (pointer_plus @@0 @1))) 1870 (if (INTEGRAL_TYPE_P (type) 1871 && TYPE_OVERFLOW_UNDEFINED (type) 1872 && element_precision (type) <= element_precision (TREE_TYPE (@1))) 1873 (with { tree utype = unsigned_type_for (type); } 1874 (convert (negate (convert:utype @1)))) 1875 (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 1876 /* For pointer types, if the conversion of A to the 1877 final type requires a sign- or zero-extension, 1878 then we have to punt - it is not defined which 1879 one is correct. */ 1880 || (POINTER_TYPE_P (TREE_TYPE (@0)) 1881 && TREE_CODE (@1) == INTEGER_CST 1882 && tree_int_cst_sign_bit (@1) == 0)) 1883 (negate (convert @1))))) 1884 (simplify 1885 (pointer_diff @0 (pointer_plus @@0 @1)) 1886 /* The second argument of pointer_plus must be interpreted as signed, and 1887 thus sign-extended if necessary. */ 1888 (with { tree stype = signed_type_for (TREE_TYPE (@1)); } 1889 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR 1890 second arg is unsigned even when we need to consider it as signed, 1891 we don't want to diagnose overflow here. 
*/ 1892 (negate (convert (view_convert:stype @1))))) 1893 1894 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */ 1895 (simplify 1896 (minus (convert (plus:c @@0 @1)) 1897 (convert (plus:c @0 @2))) 1898 (if (INTEGRAL_TYPE_P (type) 1899 && TYPE_OVERFLOW_UNDEFINED (type) 1900 && element_precision (type) <= element_precision (TREE_TYPE (@1)) 1901 && element_precision (type) <= element_precision (TREE_TYPE (@2))) 1902 (with { tree utype = unsigned_type_for (type); } 1903 (convert (minus (convert:utype @1) (convert:utype @2)))) 1904 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1))) 1905 == (element_precision (type) <= element_precision (TREE_TYPE (@2)))) 1906 && (element_precision (type) <= element_precision (TREE_TYPE (@1)) 1907 /* For integer types, if A has a smaller type 1908 than T the result depends on the possible 1909 overflow in P + A. 1910 E.g. T=size_t, A=(unsigned)429497295, P>0. 1911 However, if an overflow in P + A would cause 1912 undefined behavior, we can assume that there 1913 is no overflow. */ 1914 || (INTEGRAL_TYPE_P (TREE_TYPE (@1)) 1915 && INTEGRAL_TYPE_P (TREE_TYPE (@2)) 1916 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1)) 1917 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2))))) 1918 (minus (convert @1) (convert @2))))) 1919 (simplify 1920 (minus (convert (pointer_plus @@0 @1)) 1921 (convert (pointer_plus @0 @2))) 1922 (if (INTEGRAL_TYPE_P (type) 1923 && TYPE_OVERFLOW_UNDEFINED (type) 1924 && element_precision (type) <= element_precision (TREE_TYPE (@1))) 1925 (with { tree utype = unsigned_type_for (type); } 1926 (convert (minus (convert:utype @1) (convert:utype @2)))) 1927 (if (element_precision (type) <= element_precision (TREE_TYPE (@1)) 1928 /* For pointer types, if the conversion of A to the 1929 final type requires a sign- or zero-extension, 1930 then we have to punt - it is not defined which 1931 one is correct. 
*/ 1932 || (POINTER_TYPE_P (TREE_TYPE (@0)) 1933 && TREE_CODE (@1) == INTEGER_CST 1934 && tree_int_cst_sign_bit (@1) == 0 1935 && TREE_CODE (@2) == INTEGER_CST 1936 && tree_int_cst_sign_bit (@2) == 0)) 1937 (minus (convert @1) (convert @2))))) 1938 (simplify 1939 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2)) 1940 /* The second argument of pointer_plus must be interpreted as signed, and 1941 thus sign-extended if necessary. */ 1942 (with { tree stype = signed_type_for (TREE_TYPE (@1)); } 1943 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR 1944 second arg is unsigned even when we need to consider it as signed, 1945 we don't want to diagnose overflow here. */ 1946 (minus (convert (view_convert:stype @1)) 1947 (convert (view_convert:stype @2))))))) 1948 1949/* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1). 1950 Modeled after fold_plusminus_mult_expr. */ 1951(if (!TYPE_SATURATING (type) 1952 && (!FLOAT_TYPE_P (type) || flag_associative_math)) 1953 (for plusminus (plus minus) 1954 (simplify 1955 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2)) 1956 (if ((!ANY_INTEGRAL_TYPE_P (type) 1957 || TYPE_OVERFLOW_WRAPS (type) 1958 || (INTEGRAL_TYPE_P (type) 1959 && tree_expr_nonzero_p (@0) 1960 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type))))) 1961 /* If @1 +- @2 is constant require a hard single-use on either 1962 original operand (but not on both). */ 1963 && (single_use (@3) || single_use (@4))) 1964 (mult (plusminus @1 @2) @0))) 1965 /* We cannot generate constant 1 for fract. 
*/ 1966 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type))) 1967 (simplify 1968 (plusminus @0 (mult:c@3 @0 @2)) 1969 (if ((!ANY_INTEGRAL_TYPE_P (type) 1970 || TYPE_OVERFLOW_WRAPS (type) 1971 || (INTEGRAL_TYPE_P (type) 1972 && tree_expr_nonzero_p (@0) 1973 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type))))) 1974 && single_use (@3)) 1975 (mult (plusminus { build_one_cst (type); } @2) @0))) 1976 (simplify 1977 (plusminus (mult:c@3 @0 @2) @0) 1978 (if ((!ANY_INTEGRAL_TYPE_P (type) 1979 || TYPE_OVERFLOW_WRAPS (type) 1980 || (INTEGRAL_TYPE_P (type) 1981 && tree_expr_nonzero_p (@0) 1982 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type))))) 1983 && single_use (@3)) 1984 (mult (plusminus @2 { build_one_cst (type); }) @0)))))) 1985 1986/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */ 1987 1988(for minmax (min max FMIN_ALL FMAX_ALL) 1989 (simplify 1990 (minmax @0 @0) 1991 @0)) 1992/* min(max(x,y),y) -> y. */ 1993(simplify 1994 (min:c (max:c @0 @1) @1) 1995 @1) 1996/* max(min(x,y),y) -> y. */ 1997(simplify 1998 (max:c (min:c @0 @1) @1) 1999 @1) 2000/* max(a,-a) -> abs(a). */ 2001(simplify 2002 (max:c @0 (negate @0)) 2003 (if (TREE_CODE (type) != COMPLEX_TYPE 2004 && (! ANY_INTEGRAL_TYPE_P (type) 2005 || TYPE_OVERFLOW_UNDEFINED (type))) 2006 (abs @0))) 2007/* min(a,-a) -> -abs(a). */ 2008(simplify 2009 (min:c @0 (negate @0)) 2010 (if (TREE_CODE (type) != COMPLEX_TYPE 2011 && (! 
ANY_INTEGRAL_TYPE_P (type) 2012 || TYPE_OVERFLOW_UNDEFINED (type))) 2013 (negate (abs @0)))) 2014(simplify 2015 (min @0 @1) 2016 (switch 2017 (if (INTEGRAL_TYPE_P (type) 2018 && TYPE_MIN_VALUE (type) 2019 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) 2020 @1) 2021 (if (INTEGRAL_TYPE_P (type) 2022 && TYPE_MAX_VALUE (type) 2023 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) 2024 @0))) 2025(simplify 2026 (max @0 @1) 2027 (switch 2028 (if (INTEGRAL_TYPE_P (type) 2029 && TYPE_MAX_VALUE (type) 2030 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) 2031 @1) 2032 (if (INTEGRAL_TYPE_P (type) 2033 && TYPE_MIN_VALUE (type) 2034 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) 2035 @0))) 2036 2037/* max (a, a + CST) -> a + CST where CST is positive. */ 2038/* max (a, a + CST) -> a where CST is negative. */ 2039(simplify 2040 (max:c @0 (plus@2 @0 INTEGER_CST@1)) 2041 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 2042 (if (tree_int_cst_sgn (@1) > 0) 2043 @2 2044 @0))) 2045 2046/* min (a, a + CST) -> a where CST is positive. */ 2047/* min (a, a + CST) -> a + CST where CST is negative. */ 2048(simplify 2049 (min:c @0 (plus@2 @0 INTEGER_CST@1)) 2050 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))) 2051 (if (tree_int_cst_sgn (@1) > 0) 2052 @0 2053 @2))) 2054 2055/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted 2056 and the outer convert demotes the expression back to x's type. */ 2057(for minmax (min max) 2058 (simplify 2059 (convert (minmax@0 (convert @1) INTEGER_CST@2)) 2060 (if (INTEGRAL_TYPE_P (type) 2061 && types_match (@1, type) && int_fits_type_p (@2, type) 2062 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type) 2063 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type)) 2064 (minmax @1 (convert @2))))) 2065 2066(for minmax (FMIN_ALL FMAX_ALL) 2067 /* If either argument is NaN, return the other one. Avoid the 2068 transformation if we get (and honor) a signalling NaN. 
*/ 2069 (simplify 2070 (minmax:c @0 REAL_CST@1) 2071 (if (real_isnan (TREE_REAL_CST_PTR (@1)) 2072 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling)) 2073 @0))) 2074/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these 2075 functions to return the numeric arg if the other one is NaN. 2076 MIN and MAX don't honor that, so only transform if -ffinite-math-only 2077 is set. C99 doesn't require -0.0 to be handled, so we don't have to 2078 worry about it either. */ 2079(if (flag_finite_math_only) 2080 (simplify 2081 (FMIN_ALL @0 @1) 2082 (min @0 @1)) 2083 (simplify 2084 (FMAX_ALL @0 @1) 2085 (max @0 @1))) 2086/* min (-A, -B) -> -max (A, B) */ 2087(for minmax (min max FMIN_ALL FMAX_ALL) 2088 maxmin (max min FMAX_ALL FMIN_ALL) 2089 (simplify 2090 (minmax (negate:s@2 @0) (negate:s@3 @1)) 2091 (if (FLOAT_TYPE_P (TREE_TYPE (@0)) 2092 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)) 2093 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))) 2094 (negate (maxmin @0 @1))))) 2095/* MIN (~X, ~Y) -> ~MAX (X, Y) 2096 MAX (~X, ~Y) -> ~MIN (X, Y) */ 2097(for minmax (min max) 2098 maxmin (max min) 2099 (simplify 2100 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1)) 2101 (bit_not (maxmin @0 @1)))) 2102 2103/* MIN (X, Y) == X -> X <= Y */ 2104(for minmax (min min max max) 2105 cmp (eq ne eq ne ) 2106 out (le gt ge lt ) 2107 (simplify 2108 (cmp:c (minmax:c @0 @1) @0) 2109 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))) 2110 (out @0 @1)))) 2111/* MIN (X, 5) == 0 -> X == 0 2112 MIN (X, 5) == 7 -> false */ 2113(for cmp (eq ne) 2114 (simplify 2115 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2) 2116 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2), 2117 TYPE_SIGN (TREE_TYPE (@0)))) 2118 { constant_boolean_node (cmp == NE_EXPR, type); } 2119 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2), 2120 TYPE_SIGN (TREE_TYPE (@0)))) 2121 (cmp @0 @2))))) 2122(for cmp (eq ne) 2123 (simplify 2124 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2) 2125 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2), 2126 TYPE_SIGN (TREE_TYPE 
(@0)))) 2127 { constant_boolean_node (cmp == NE_EXPR, type); } 2128 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2), 2129 TYPE_SIGN (TREE_TYPE (@0)))) 2130 (cmp @0 @2))))) 2131/* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */ 2132(for minmax (min min max max min min max max ) 2133 cmp (lt le gt ge gt ge lt le ) 2134 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and) 2135 (simplify 2136 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2) 2137 (comb (cmp @0 @2) (cmp @1 @2)))) 2138 2139/* Simplifications of shift and rotates. */ 2140 2141(for rotate (lrotate rrotate) 2142 (simplify 2143 (rotate integer_all_onesp@0 @1) 2144 @0)) 2145 2146/* Optimize -1 >> x for arithmetic right shifts. */ 2147(simplify 2148 (rshift integer_all_onesp@0 @1) 2149 (if (!TYPE_UNSIGNED (type) 2150 && tree_expr_nonnegative_p (@1)) 2151 @0)) 2152 2153/* Optimize (x >> c) << c into x & (-1<<c). */ 2154(simplify 2155 (lshift (rshift @0 INTEGER_CST@1) @1) 2156 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))) 2157 (bit_and @0 (lshift { build_minus_one_cst (type); } @1)))) 2158 2159/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned 2160 types. */ 2161(simplify 2162 (rshift (lshift @0 INTEGER_CST@1) @1) 2163 (if (TYPE_UNSIGNED (type) 2164 && (wi::ltu_p (wi::to_wide (@1), element_precision (type)))) 2165 (bit_and @0 (rshift { build_minus_one_cst (type); } @1)))) 2166 2167(for shiftrotate (lrotate rrotate lshift rshift) 2168 (simplify 2169 (shiftrotate @0 integer_zerop) 2170 (non_lvalue @0)) 2171 (simplify 2172 (shiftrotate integer_zerop@0 @1) 2173 @0) 2174 /* Prefer vector1 << scalar to vector1 << vector2 2175 if vector2 is uniform. */ 2176 (for vec (VECTOR_CST CONSTRUCTOR) 2177 (simplify 2178 (shiftrotate @0 vec@1) 2179 (with { tree tem = uniform_vector_p (@1); } 2180 (if (tem) 2181 (shiftrotate @0 { tem; })))))) 2182 2183/* Simplify X << Y where Y's low width bits are 0 to X, as only valid 2184 Y is 0. Similarly for X >> Y. 
*/ 2185#if GIMPLE 2186(for shift (lshift rshift) 2187 (simplify 2188 (shift @0 SSA_NAME@1) 2189 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))) 2190 (with { 2191 int width = ceil_log2 (element_precision (TREE_TYPE (@0))); 2192 int prec = TYPE_PRECISION (TREE_TYPE (@1)); 2193 } 2194 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0) 2195 @0))))) 2196#endif 2197 2198/* Rewrite an LROTATE_EXPR by a constant into an 2199 RROTATE_EXPR by a new constant. */ 2200(simplify 2201 (lrotate @0 INTEGER_CST@1) 2202 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1), 2203 build_int_cst (TREE_TYPE (@1), 2204 element_precision (type)), @1); })) 2205 2206/* Turn (a OP c1) OP c2 into a OP (c1+c2). */ 2207(for op (lrotate rrotate rshift lshift) 2208 (simplify 2209 (op (op @0 INTEGER_CST@1) INTEGER_CST@2) 2210 (with { unsigned int prec = element_precision (type); } 2211 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))) 2212 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1))) 2213 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))) 2214 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2)))) 2215 (with { unsigned int low = (tree_to_uhwi (@1) 2216 + tree_to_uhwi (@2)); } 2217 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2 2218 being well defined. 
*/ 2219 (if (low >= prec) 2220 (if (op == LROTATE_EXPR || op == RROTATE_EXPR) 2221 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); }) 2222 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR) 2223 { build_zero_cst (type); } 2224 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); }))) 2225 (op @0 { build_int_cst (TREE_TYPE (@1), low); }))))))) 2226 2227 2228/* ((1 << A) & 1) != 0 -> A == 0 2229 ((1 << A) & 1) == 0 -> A != 0 */ 2230(for cmp (ne eq) 2231 icmp (eq ne) 2232 (simplify 2233 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop) 2234 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); }))) 2235 2236/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1) 2237 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1) 2238 if CST2 != 0. */ 2239(for cmp (ne eq) 2240 (simplify 2241 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2) 2242 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); } 2243 (if (cand < 0 2244 || (!integer_zerop (@2) 2245 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2))) 2246 { constant_boolean_node (cmp == NE_EXPR, type); } 2247 (if (!integer_zerop (@2) 2248 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2)) 2249 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); })))))) 2250 2251/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1)) 2252 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1)) 2253 if the new mask might be further optimized. 
*/
(for shift (lshift rshift)
 (simplify
  (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
	   INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
       && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
       && tree_fits_uhwi_p (@1)
       && tree_to_uhwi (@1) > 0
       && tree_to_uhwi (@1) < TYPE_PRECISION (type))
   (with
    {
      unsigned int shiftc = tree_to_uhwi (@1);
      unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
      unsigned HOST_WIDE_INT newmask, zerobits = 0;
      tree shift_type = TREE_TYPE (@3);
      unsigned int prec;

      /* Compute ZEROBITS, the set of bits the shift is known to have
	 cleared in its result.  */
      if (shift == LSHIFT_EXPR)
	zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
      else if (shift == RSHIFT_EXPR
	       && type_has_mode_precision_p (shift_type))
	{
	  prec = TYPE_PRECISION (TREE_TYPE (@3));
	  /* See if more bits can be proven as zero because of
	     zero extension.  */
	  if (@3 != @0
	      && TYPE_UNSIGNED (TREE_TYPE (@0)))
	    {
	      tree inner_type = TREE_TYPE (@0);
	      if (type_has_mode_precision_p (inner_type)
		  && TYPE_PRECISION (inner_type) < prec)
		{
		  prec = TYPE_PRECISION (inner_type);
		  /* See if we can shorten the right shift.  */
		  if (shiftc < prec)
		    shift_type = inner_type;
		  /* Otherwise X >> C1 is all zeros, so we'll optimize
		     it into (X, 0) later on by making sure zerobits
		     is all ones.  */
		}
	    }
	  zerobits = HOST_WIDE_INT_M1U;
	  if (shiftc < prec)
	    {
	      zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
	      zerobits <<= prec - shiftc;
	    }
	  /* For arithmetic shift if sign bit could be set, zerobits
	     can contain actually sign bits, so no transformation is
	     possible, unless MASK masks them all away.  In that
	     case the shift needs to be converted into logical shift.
*/
	  if (!TYPE_UNSIGNED (TREE_TYPE (@3))
	      && prec == TYPE_PRECISION (TREE_TYPE (@3)))
	    {
	      if ((mask & zerobits) == 0)
		shift_type = unsigned_type_for (TREE_TYPE (@3));
	      else
		zerobits = 0;
	    }
	}
    }
    /* ((X << 16) & 0xff00) is (X, 0).  */
    (if ((mask & zerobits) == mask)
     { build_int_cst (type, 0); }
     (with { newmask = mask | zerobits; }
      (if (newmask != mask && (newmask & (newmask + 1)) == 0)
       (with
	{
	  /* Only do the transformation if NEWMASK is some integer
	     mode's mask.  */
	  for (prec = BITS_PER_UNIT;
	       prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
	    if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
	      break;
	}
	(if (prec < HOST_BITS_PER_WIDE_INT
	     || newmask == HOST_WIDE_INT_M1U)
	 (with
	  { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
	  (if (!tree_int_cst_equal (newmaskt, @2))
	   (if (shift_type != TREE_TYPE (@3))
	    (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
	    (bit_and @4 { newmaskt; })))))))))))))

/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
   (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1).  */
(for shift (lshift rshift)
 (for bit_op (bit_and bit_xor bit_ior)
  (simplify
   (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
     (bit_op (shift (convert @0) @1) { mask; }))))))

/* ~(~X >> Y) -> X >> Y (for arithmetic shift).
*/
(simplify
 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
      && (element_precision (TREE_TYPE (@0))
	  <= element_precision (TREE_TYPE (@1))
	  || !TYPE_UNSIGNED (TREE_TYPE (@1))))
  (with
   { tree shift_type = TREE_TYPE (@0); }
   (convert (rshift (convert:shift_type @1) @2)))))

/* ~(~X >>r Y) -> X >>r Y
   ~(~X <<r Y) -> X <<r Y */
(for rotate (lrotate rrotate)
 (simplify
  (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
  (if ((element_precision (TREE_TYPE (@0))
	<= element_precision (TREE_TYPE (@1))
	|| !TYPE_UNSIGNED (TREE_TYPE (@1)))
       && (element_precision (type) <= element_precision (TREE_TYPE (@0))
	   || !TYPE_UNSIGNED (TREE_TYPE (@0))))
   (with
    { tree rotate_type = TREE_TYPE (@0); }
    (convert (rotate (convert:rotate_type @1) @2))))))

/* Simplifications of conversions.  */

/* Basic strip-useless-type-conversions / strip_nops.  */
(for cvt (convert view_convert float fix_trunc)
 (simplify
  (cvt @0)
  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
       || (GENERIC && type == TREE_TYPE (@0)))
   @0)))

/* Contract view-conversions.  */
(simplify
  (view_convert (view_convert @0))
  (view_convert @0))

/* For integral conversions with the same precision or pointer
   conversions use a NOP_EXPR instead.  */
(simplify
  (view_convert @0)
  (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
   (convert @0)))

/* Strip inner integral conversions that do not change precision or size, or
   zero-extend while keeping the same size (for bool-to-char).
*/
(simplify
  (view_convert (convert@0 @1))
  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
       && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
       && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
       && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
	   || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
	       && TYPE_UNSIGNED (TREE_TYPE (@1)))))
   (view_convert @1)))

/* Simplify a view-converted empty constructor.  */
(simplify
  (view_convert CONSTRUCTOR@0)
  (if (TREE_CODE (@0) != SSA_NAME
       && CONSTRUCTOR_NELTS (@0) == 0)
   { build_zero_cst (type); }))

/* Re-association barriers around constants and other re-association
   barriers can be removed.  */
(simplify
 (paren CONSTANT_CLASS_P@0)
 @0)
(simplify
 (paren (paren@1 @0))
 @1)

/* Handle cases of two conversions in a row.  */
(for ocvt (convert float fix_trunc)
 (for icvt (convert float)
  (simplify
   (ocvt (icvt@1 @0))
   (with
    {
      tree inside_type = TREE_TYPE (@0);
      tree inter_type = TREE_TYPE (@1);
      int inside_int = INTEGRAL_TYPE_P (inside_type);
      int inside_ptr = POINTER_TYPE_P (inside_type);
      int inside_float = FLOAT_TYPE_P (inside_type);
      int inside_vec = VECTOR_TYPE_P (inside_type);
      unsigned int inside_prec = TYPE_PRECISION (inside_type);
      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
      int inter_int = INTEGRAL_TYPE_P (inter_type);
      int inter_ptr = POINTER_TYPE_P (inter_type);
      int inter_float = FLOAT_TYPE_P (inter_type);
      int inter_vec = VECTOR_TYPE_P (inter_type);
      unsigned int inter_prec = TYPE_PRECISION (inter_type);
      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
      int final_int = INTEGRAL_TYPE_P (type);
      int final_ptr = POINTER_TYPE_P (type);
      int final_float = FLOAT_TYPE_P (type);
      int final_vec = VECTOR_TYPE_P (type);
      unsigned int final_prec = TYPE_PRECISION (type);
      int final_unsignedp = TYPE_UNSIGNED (type);
    }
   (switch
    /* In addition to the cases of two conversions in a row
       handled below, if we are converting something to its own
       type via an object of identical or wider precision, neither
       conversion is needed.  */
    (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
	  || (GENERIC
	      && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
	 && (((inter_int || inter_ptr) && final_int)
	     || (inter_float && final_float))
	 && inter_prec >= final_prec)
     (ocvt @0))

    /* Likewise, if the intermediate and initial types are either both
       float or both integer, we don't need the middle conversion if the
       former is wider than the latter and doesn't change the signedness
       (for integers).  Avoid this if the final type is a pointer since
       then we sometimes need the middle conversion.  */
    (if (((inter_int && inside_int) || (inter_float && inside_float))
	 && (final_int || final_float)
	 && inter_prec >= inside_prec
	 && (inter_float || inter_unsignedp == inside_unsignedp))
     (ocvt @0))

    /* If we have a sign-extension of a zero-extended value, we can
       replace that by a single zero-extension.  Likewise if the
       final conversion does not change precision we can drop the
       intermediate conversion.
*/
    (if (inside_int && inter_int && final_int
	 && ((inside_prec < inter_prec && inter_prec < final_prec
	      && inside_unsignedp && !inter_unsignedp)
	     || final_prec == inter_prec))
     (ocvt @0))

    /* Two conversions in a row are not needed unless:
	- some conversion is floating-point (overstrict for now), or
	- some conversion is a vector (overstrict for now), or
	- the intermediate type is narrower than both initial and
	  final, or
	- the intermediate type and innermost type differ in signedness,
	  and the outermost type is wider than the intermediate, or
	- the initial type is a pointer type and the precisions of the
	  intermediate and final types differ, or
	- the final type is a pointer type and the precisions of the
	  initial and intermediate types differ.  */
    (if (! inside_float && ! inter_float && ! final_float
	 && ! inside_vec && ! inter_vec && ! final_vec
	 && (inter_prec >= inside_prec || inter_prec >= final_prec)
	 && ! (inside_int && inter_int
	       && inter_unsignedp != inside_unsignedp
	       && inter_prec < final_prec)
	 && ((inter_unsignedp && inter_prec > inside_prec)
	     == (final_unsignedp && final_prec > inter_prec))
	 && ! (inside_ptr && inter_prec != final_prec)
	 && ! (final_ptr && inside_prec != inter_prec))
     (ocvt @0))

    /* A truncation to an unsigned type (a zero-extension) should be
       canonicalized as bitwise and of a mask.  */
    (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion.  */
	 && final_int && inter_int && inside_int
	 && final_prec == inside_prec
	 && final_prec > inter_prec
	 && inter_unsignedp)
     (convert (bit_and @0 { wide_int_to_tree
			      (inside_type,
			       wi::mask (inter_prec, false,
					 TYPE_PRECISION (inside_type))); })))

    /* If we are converting an integer to a floating-point that can
       represent it exactly and back to an integer, we can skip the
       floating-point conversion.
*/ 2526 (if (GIMPLE /* PR66211 */ 2527 && inside_int && inter_float && final_int && 2528 (unsigned) significand_size (TYPE_MODE (inter_type)) 2529 >= inside_prec - !inside_unsignedp) 2530 (convert @0))))))) 2531 2532/* If we have a narrowing conversion to an integral type that is fed by a 2533 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely 2534 masks off bits outside the final type (and nothing else). */ 2535(simplify 2536 (convert (bit_and @0 INTEGER_CST@1)) 2537 (if (INTEGRAL_TYPE_P (type) 2538 && INTEGRAL_TYPE_P (TREE_TYPE (@0)) 2539 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0)) 2540 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1), 2541 TYPE_PRECISION (type)), 0)) 2542 (convert @0))) 2543 2544 2545/* (X /[ex] A) * A -> X. */ 2546(simplify 2547 (mult (convert1? (exact_div @0 @@1)) (convert2? @1)) 2548 (convert @0)) 2549 2550/* Canonicalization of binary operations. */ 2551 2552/* Convert X + -C into X - C. */ 2553(simplify 2554 (plus @0 REAL_CST@1) 2555 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1))) 2556 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); } 2557 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math) 2558 (minus @0 { tem; }))))) 2559 2560/* Convert x+x into x*2. */ 2561(simplify 2562 (plus @0 @0) 2563 (if (SCALAR_FLOAT_TYPE_P (type)) 2564 (mult @0 { build_real (type, dconst2); }) 2565 (if (INTEGRAL_TYPE_P (type)) 2566 (mult @0 { build_int_cst (type, 2); })))) 2567 2568/* 0 - X -> -X. */ 2569(simplify 2570 (minus integer_zerop @1) 2571 (negate @1)) 2572(simplify 2573 (pointer_diff integer_zerop @1) 2574 (negate (convert @1))) 2575 2576/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether 2577 ARG0 is zero and X + ARG0 reduces to X, since that would mean 2578 (-ARG1 + ARG0) reduces to -ARG1. */ 2579(simplify 2580 (minus real_zerop@0 @1) 2581 (if (fold_real_zero_addition_p (type, @0, 0)) 2582 (negate @1))) 2583 2584/* Transform x * -1 into -x. 
*/ 2585(simplify 2586 (mult @0 integer_minus_onep) 2587 (negate @0)) 2588 2589/* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce 2590 signed overflow for CST != 0 && CST != -1. */ 2591(simplify 2592 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2) 2593 (if (TREE_CODE (@2) != INTEGER_CST 2594 && single_use (@3) 2595 && !integer_zerop (@1) && !integer_minus_onep (@1)) 2596 (mult (mult @0 @2) @1))) 2597 2598/* True if we can easily extract the real and imaginary parts of a complex 2599 number. */ 2600(match compositional_complex 2601 (convert? (complex @0 @1))) 2602 2603/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */ 2604(simplify 2605 (complex (realpart @0) (imagpart @0)) 2606 @0) 2607(simplify 2608 (realpart (complex @0 @1)) 2609 @0) 2610(simplify 2611 (imagpart (complex @0 @1)) 2612 @1) 2613 2614/* Sometimes we only care about half of a complex expression. */ 2615(simplify 2616 (realpart (convert?:s (conj:s @0))) 2617 (convert (realpart @0))) 2618(simplify 2619 (imagpart (convert?:s (conj:s @0))) 2620 (convert (negate (imagpart @0)))) 2621(for part (realpart imagpart) 2622 (for op (plus minus) 2623 (simplify 2624 (part (convert?:s@2 (op:s @0 @1))) 2625 (convert (op (part @0) (part @1)))))) 2626(simplify 2627 (realpart (convert?:s (CEXPI:s @0))) 2628 (convert (COS @0))) 2629(simplify 2630 (imagpart (convert?:s (CEXPI:s @0))) 2631 (convert (SIN @0))) 2632 2633/* conj(conj(x)) -> x */ 2634(simplify 2635 (conj (convert? (conj @0))) 2636 (if (tree_nop_conversion_p (TREE_TYPE (@0), type)) 2637 (convert @0))) 2638 2639/* conj({x,y}) -> {x,-y} */ 2640(simplify 2641 (conj (convert?:s (complex:s @0 @1))) 2642 (with { tree itype = TREE_TYPE (type); } 2643 (complex (convert:itype @0) (negate (convert:itype @1))))) 2644 2645/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. 
*/ 2646(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64) 2647 (simplify 2648 (bswap (bswap @0)) 2649 @0) 2650 (simplify 2651 (bswap (bit_not (bswap @0))) 2652 (bit_not @0)) 2653 (for bitop (bit_xor bit_ior bit_and) 2654 (simplify 2655 (bswap (bitop:c (bswap @0) @1)) 2656 (bitop @0 (bswap @1))))) 2657 2658 2659/* Combine COND_EXPRs and VEC_COND_EXPRs. */ 2660 2661/* Simplify constant conditions. 2662 Only optimize constant conditions when the selected branch 2663 has the same type as the COND_EXPR. This avoids optimizing 2664 away "c ? x : throw", where the throw has a void type. 2665 Note that we cannot throw away the fold-const.c variant nor 2666 this one as we depend on doing this transform before possibly 2667 A ? B : B -> B triggers and the fold-const.c one can optimize 2668 0 ? A : B to B even if A has side-effects. Something 2669 genmatch cannot handle. */ 2670(simplify 2671 (cond INTEGER_CST@0 @1 @2) 2672 (if (integer_zerop (@0)) 2673 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type)) 2674 @2) 2675 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type)) 2676 @1))) 2677(simplify 2678 (vec_cond VECTOR_CST@0 @1 @2) 2679 (if (integer_all_onesp (@0)) 2680 @1 2681 (if (integer_zerop (@0)) 2682 @2))) 2683 2684/* Simplification moved from fold_cond_expr_with_comparison. It may also 2685 be extended. */ 2686/* This pattern implements two kinds simplification: 2687 2688 Case 1) 2689 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if: 2690 1) Conversions are type widening from smaller type. 2691 2) Const c1 equals to c2 after canonicalizing comparison. 2692 3) Comparison has tree code LT, LE, GT or GE. 2693 This specific pattern is needed when (cmp (convert x) c) may not 2694 be simplified by comparison patterns because of multiple uses of 2695 x. It also makes sense here because simplifying across multiple 2696 referred var is always benefitial for complicated cases. 2697 2698 Case 2) 2699 (cond (eq (convert1? x) c1) (convert2? 
x) c2) -> (cond (eq x c1) c1 c2). */ 2700(for cmp (lt le gt ge eq) 2701 (simplify 2702 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2) 2703 (with 2704 { 2705 tree from_type = TREE_TYPE (@1); 2706 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2); 2707 enum tree_code code = ERROR_MARK; 2708 2709 if (INTEGRAL_TYPE_P (from_type) 2710 && int_fits_type_p (@2, from_type) 2711 && (types_match (c1_type, from_type) 2712 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type) 2713 && (TYPE_UNSIGNED (from_type) 2714 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type)))) 2715 && (types_match (c2_type, from_type) 2716 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type) 2717 && (TYPE_UNSIGNED (from_type) 2718 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type))))) 2719 { 2720 if (cmp != EQ_EXPR) 2721 { 2722 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1)) 2723 { 2724 /* X <= Y - 1 equals to X < Y. */ 2725 if (cmp == LE_EXPR) 2726 code = LT_EXPR; 2727 /* X > Y - 1 equals to X >= Y. */ 2728 if (cmp == GT_EXPR) 2729 code = GE_EXPR; 2730 } 2731 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1)) 2732 { 2733 /* X < Y + 1 equals to X <= Y. */ 2734 if (cmp == LT_EXPR) 2735 code = LE_EXPR; 2736 /* X >= Y + 1 equals to X > Y. */ 2737 if (cmp == GE_EXPR) 2738 code = GT_EXPR; 2739 } 2740 if (code != ERROR_MARK 2741 || wi::to_widest (@2) == wi::to_widest (@3)) 2742 { 2743 if (cmp == LT_EXPR || cmp == LE_EXPR) 2744 code = MIN_EXPR; 2745 if (cmp == GT_EXPR || cmp == GE_EXPR) 2746 code = MAX_EXPR; 2747 } 2748 } 2749 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */ 2750 else if (int_fits_type_p (@3, from_type)) 2751 code = EQ_EXPR; 2752 } 2753 } 2754 (if (code == MAX_EXPR) 2755 (convert (max @1 (convert @2))) 2756 (if (code == MIN_EXPR) 2757 (convert (min @1 (convert @2))) 2758 (if (code == EQ_EXPR) 2759 (convert (cond (eq @1 (convert @3)) 2760 (convert:from_type @3) (convert:from_type @2))))))))) 2761 2762/* (cond (cmp (convert? 
   x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:

   1) OP is PLUS or MINUS.
   2) CMP is LT, LE, GT or GE.
   3) C3 == (C1 op C2), and computation doesn't have undefined behavior.

   This pattern also handles special cases like:

   A) Operand x is an unsigned to signed type conversion and c1 is
      integer zero.  In this case,
	(signed type)x < 0 <=> x > MAX_VAL(signed type)
	(signed type)x >= 0 <=> x <= MAX_VAL(signed type)
   B) Const c1 may not equal to (C3 op' C2).  In this case we also
      check equality for (c1+1) and (c1-1) by adjusting comparison
      code.

   TODO: Though signed type is handled by this pattern, it cannot be
   simplified at the moment because C standard requires additional
   type promotion.  In order to match&simplify it here, the IR needs
   to be cleaned up by other optimizers, i.e, VRP.  */
(for op (plus minus)
 (for cmp (lt le gt ge)
  (simplify
   (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
   (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
    (if (types_match (from_type, to_type)
	 /* Check if it is special case A).  */
	 || (TYPE_UNSIGNED (from_type)
	     && !TYPE_UNSIGNED (to_type)
	     && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
	     && integer_zerop (@1)
	     && (cmp == LT_EXPR || cmp == GE_EXPR)))
     (with
      {
	bool overflow = false;
	enum tree_code code, cmp_code = cmp;
	wide_int real_c1;
	wide_int c1 = wi::to_wide (@1);
	wide_int c2 = wi::to_wide (@2);
	wide_int c3 = wi::to_wide (@3);
	signop sgn = TYPE_SIGN (from_type);

	/* Handle special case A), given x of unsigned type:
	    ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
	    ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type))  */
	if (!types_match (from_type, to_type))
	  {
	    if (cmp_code == LT_EXPR)
	      cmp_code = GT_EXPR;
	    if (cmp_code == GE_EXPR)
	      cmp_code = LE_EXPR;
	    c1 = wi::max_value (to_type);
	  }
	/* To simplify this pattern, we require c3 = (c1 op c2).  Here we
	   compute (c3 op' c2) and check if it equals to c1 with op' being
	   the inverted operator of op.  Make sure overflow doesn't happen
	   if it is undefined.  */
	if (op == PLUS_EXPR)
	  real_c1 = wi::sub (c3, c2, sgn, &overflow);
	else
	  real_c1 = wi::add (c3, c2, sgn, &overflow);

	code = cmp_code;
	if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
	  {
	    /* Check if c1 equals to real_c1.  Boundary condition is handled
	       by adjusting comparison operation if necessary.  */
	    if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
		&& !overflow)
	      {
		/* X <= Y - 1 equals to X < Y.  */
		if (cmp_code == LE_EXPR)
		  code = LT_EXPR;
		/* X > Y - 1 equals to X >= Y.  */
		if (cmp_code == GT_EXPR)
		  code = GE_EXPR;
	      }
	    if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
		&& !overflow)
	      {
		/* X < Y + 1 equals to X <= Y.  */
		if (cmp_code == LT_EXPR)
		  code = LE_EXPR;
		/* X >= Y + 1 equals to X > Y.  */
		if (cmp_code == GE_EXPR)
		  code = GT_EXPR;
	      }
	    if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
	      {
		if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
		  code = MIN_EXPR;
		if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
		  code = MAX_EXPR;
	      }
	  }
      }
      (if (code == MAX_EXPR)
       (op (max @X { wide_int_to_tree (from_type, real_c1); })
	   { wide_int_to_tree (from_type, c2); })
       (if (code == MIN_EXPR)
	(op (min @X { wide_int_to_tree (from_type, real_c1); })
	    { wide_int_to_tree (from_type, c2); })))))))))

(for cnd (cond vec_cond)
 /* A ? B : (A ? X : C) -> A ? B : C.  */
 (simplify
  (cnd @0 (cnd @0 @1 @2) @3)
  (cnd @0 @1 @3))
 (simplify
  (cnd @0 @1 (cnd @0 @2 @3))
  (cnd @0 @1 @3))
 /* A ? B : (!A ? C : X) -> A ? B : C.  */
 /* ??? This matches embedded conditions open-coded because genmatch
    would generate matching code for conditions in separate stmts only.
    The following is still important to merge then and else arm cases
    from if-conversion.  */
 (simplify
  (cnd @0 @1 (cnd @2 @3 @4))
  (if (COMPARISON_CLASS_P (@0)
       && COMPARISON_CLASS_P (@2)
       && invert_tree_comparison
	   (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
       && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
       && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
   (cnd @0 @1 @3)))
 (simplify
  (cnd @0 (cnd @1 @2 @3) @4)
  (if (COMPARISON_CLASS_P (@0)
       && COMPARISON_CLASS_P (@1)
       && invert_tree_comparison
	   (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
       && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
       && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
   (cnd @0 @3 @4)))

 /* A ? B : B -> B.  */
 (simplify
  (cnd @0 @1 @1)
  @1)

 /* !A ? B : C -> A ? C : B.
 */
 (simplify
  (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
  (cnd @0 @2 @1)))

/* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
   return all -1 or all 0 results.  */
/* ??? We could instead convert all instances of the vec_cond to negate,
   but that isn't necessarily a win on its own.  */
(simplify
 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
 (if (VECTOR_TYPE_P (type)
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
      && (TYPE_MODE (TREE_TYPE (type))
	  == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
  (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))

/* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0).  */
(simplify
 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
 (if (VECTOR_TYPE_P (type)
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
		   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
      && (TYPE_MODE (TREE_TYPE (type))
	  == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
  (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))


/* Simplifications of comparisons.  */

/* See if we can reduce the magnitude of a constant involved in a
   comparison by changing the comparison code.  This is a canonicalization
   formerly done by maybe_canonicalize_comparison_1.  */
(for cmp (le gt)
     acmp (lt ge)
 (simplify
  (cmp @0 INTEGER_CST@1)
  (if (tree_int_cst_sgn (@1) == -1)
   (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
(for cmp (ge lt)
     acmp (gt le)
 (simplify
  (cmp @0 INTEGER_CST@1)
  (if (tree_int_cst_sgn (@1) == 1)
   (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))


/* We can simplify a logical negation of a comparison to the
   inverted comparison.  As we cannot compute an expression
   operator using invert_tree_comparison we have to simulate
   that with expression code iteration.  */
(for cmp (tcc_comparison)
     icmp (inverted_tcc_comparison)
     ncmp (inverted_tcc_comparison_with_nans)
 /* Ideally we'd like to combine the following two patterns
    and handle some more cases by using
      (logical_inverted_value (cmp @0 @1))
    here but for that genmatch would need to "inline" that.
    For now implement what forward_propagate_comparison did.  */
 (simplify
  (bit_not (cmp @0 @1))
  (if (VECTOR_TYPE_P (type)
       || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
   /* Comparison inversion may be impossible for trapping math,
      invert_tree_comparison will tell us.  But we can't use
      a computed operator in the replacement tree thus we have
      to play the trick below.  */
   (with { enum tree_code ic = invert_tree_comparison
	     (cmp, HONOR_NANS (@0)); }
    (if (ic == icmp)
     (icmp @0 @1)
     (if (ic == ncmp)
      (ncmp @0 @1))))))
 (simplify
  (bit_xor (cmp @0 @1) integer_truep)
  (with { enum tree_code ic = invert_tree_comparison
	    (cmp, HONOR_NANS (@0)); }
   (if (ic == icmp)
    (icmp @0 @1)
    (if (ic == ncmp)
     (ncmp @0 @1))))))

/* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
   ??? The transformation is valid for the other operators if overflow
   is undefined for the type, but performing it here badly interacts
   with the transformation in fold_cond_expr_with_comparison which
   attempts to synthesize ABS_EXPR.  */
(for cmp (eq ne)
 (for sub (minus pointer_diff)
  (simplify
   (cmp (sub@2 @0 @1) integer_zerop)
   (if (single_use (@2))
    (cmp @0 @1)))))

/* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
   signed arithmetic case.  That form is created by the compiler
   often enough for folding it to be of value.  One example is in
   computing loop trip counts after Operator Strength Reduction.  */
(for cmp (simple_comparison)
     scmp (swapped_simple_comparison)
 (simplify
  (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
  /* Handle unfolded multiplication by zero.  */
  (if (integer_zerop (@1))
   (cmp @1 @2)
   (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
	&& single_use (@3))
    /* If @1 is negative we swap the sense of the comparison.  */
    (if (tree_int_cst_sgn (@1) < 0)
     (scmp @0 @2)
     (cmp @0 @2))))))

/* Simplify comparison of something with itself.  For IEEE
   floating-point, we can only do some of these simplifications.  */
(for cmp (eq ge le)
 (simplify
  (cmp @0 @0)
  (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
       || ! HONOR_NANS (@0))
   { constant_boolean_node (true, type); }
   (if (cmp != EQ_EXPR)
    (eq @0 @0)))))
(for cmp (ne gt lt)
 (simplify
  (cmp @0 @0)
  (if (cmp != NE_EXPR
       || ! FLOAT_TYPE_P (TREE_TYPE (@0))
       || ! HONOR_NANS (@0))
   { constant_boolean_node (false, type); })))
(for cmp (unle unge uneq)
 (simplify
  (cmp @0 @0)
  { constant_boolean_node (true, type); }))
(for cmp (unlt ungt)
 (simplify
  (cmp @0 @0)
  (unordered @0 @0)))
(simplify
 (ltgt @0 @0)
 (if (!flag_trapping_math)
  { constant_boolean_node (false, type); }))

/* Fold ~X op ~Y as Y op X.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (bit_not@2 @0) (bit_not@3 @1))
  (if (single_use (@2) && single_use (@3))
   (cmp @1 @0))))

/* Fold ~X op C as X op' ~C, where op' is the swapped comparison.
 */
(for cmp (simple_comparison)
     scmp (swapped_simple_comparison)
 (simplify
  (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
  (if (single_use (@2)
       && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
   (scmp @0 (bit_not @1)))))

(for cmp (simple_comparison)
 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2.  */
 (simplify
  (cmp (convert@2 @0) (convert? @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
       && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
	   == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
   (with
    {
      tree type1 = TREE_TYPE (@1);
      /* A real constant can be demoted to the narrowest type that
	 represents it exactly.  */
      if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
	{
	  REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
	      && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
	    type1 = float_type_node;
	  if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
	      && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
	    type1 = double_type_node;
	}
      tree newtype
	= (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
	   ? TREE_TYPE (@0) : type1);
    }
    (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
     (cmp (convert:newtype @0) (convert:newtype @1))))))

 (simplify
  (cmp @0 REAL_CST@1)
  /* IEEE doesn't distinguish +0 and -0 in comparisons.  */
  (switch
   /* a CMP (-0) -> a CMP 0  */
   (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
    (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
   /* x != NaN is always true, other ops are always false.  */
   (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
	&& ! HONOR_SNANS (@1))
    { constant_boolean_node (cmp == NE_EXPR, type); })
   /* Fold comparisons against infinity.  */
   (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
	&& MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
    (with
     {
       REAL_VALUE_TYPE max;
       enum tree_code code = cmp;
       bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
       /* Canonicalize x CMP -Inf to (-x) swapped-CMP +Inf so the cases
	  below only have to reason about +Inf.  */
       if (neg)
	 code = swap_tree_comparison (code);
     }
     (switch
      /* x > +Inf is always false, if we ignore NaNs or exceptions.  */
      (if (code == GT_EXPR
	   && !(HONOR_NANS (@0) && flag_trapping_math))
       { constant_boolean_node (false, type); })
      (if (code == LE_EXPR)
       /* x <= +Inf is always true, if we don't care about NaNs.  */
       (if (! HONOR_NANS (@0))
	{ constant_boolean_node (true, type); }
	/* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
	   an "invalid" exception.  */
	(if (!flag_trapping_math)
	 (eq @0 @0))))
      /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
	 for == this introduces an exception for x a NaN.  */
      (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
	   || code == GE_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (neg)
	 (lt @0 { build_real (TREE_TYPE (@0), max); })
	 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
      /* x < +Inf is always equal to x <= DBL_MAX.  */
      (if (code == LT_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (neg)
	 (ge @0 { build_real (TREE_TYPE (@0), max); })
	 (le @0 { build_real (TREE_TYPE (@0), max); }))))
      /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
	 an exception for x a NaN so use an unordered comparison.  */
      (if (code == NE_EXPR)
       (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
	(if (! HONOR_NANS (@0))
	 (if (neg)
	  (ge @0 { build_real (TREE_TYPE (@0), max); })
	  (le @0 { build_real (TREE_TYPE (@0), max); }))
	 (if (neg)
	  (unge @0 { build_real (TREE_TYPE (@0), max); })
	  (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))

  /* If this is a comparison of a real constant with a PLUS_EXPR
     or a MINUS_EXPR of a real constant, we can convert it into a
     comparison with a revised real constant as long as no overflow
     occurs when unsafe_math_optimizations are enabled.  */
  (if (flag_unsafe_math_optimizations)
   (for op (plus minus)
    (simplify
     (cmp (op @0 REAL_CST@1) REAL_CST@2)
     (with
      {
	tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
				TREE_TYPE (@1), @2, @1);
      }
      (if (tem && !TREE_OVERFLOW (tem))
       (cmp @0 { tem; }))))))

  /* Likewise, we can simplify a comparison of a real constant with
     a MINUS_EXPR whose first operand is also a real constant, i.e.
     (c1 - x) < c2 becomes x > c1-c2.  Reordering is allowed on
     floating-point types only if -fassociative-math is set.  */
  (if (flag_associative_math)
   (simplify
    (cmp (minus REAL_CST@0 @1) REAL_CST@2)
    (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
     (if (tem && !TREE_OVERFLOW (tem))
      (cmp { tem; } @1)))))

  /* Fold comparisons against built-in math functions.  */
  (if (flag_unsafe_math_optimizations
       && ! flag_errno_math)
   (for sq (SQRT)
    (simplify
     (cmp (sq @0) REAL_CST@1)
     (switch
      (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
       (switch
	/* sqrt(x) < y is always false, if y is negative.  */
	(if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
	 { constant_boolean_node (false, type); })
	/* sqrt(x) > y is always true, if y is negative and we
	   don't care about NaNs, i.e. negative values of x.
 */
	(if (cmp == NE_EXPR || !HONOR_NANS (@0))
	 { constant_boolean_node (true, type); })
	/* sqrt(x) > y is the same as x >= 0, if y is negative.  */
	(ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
      (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
       (switch
	/* sqrt(x) < 0 is always false.  */
	(if (cmp == LT_EXPR)
	 { constant_boolean_node (false, type); })
	/* sqrt(x) >= 0 is always true if we don't care about NaNs.  */
	(if (cmp == GE_EXPR && !HONOR_NANS (@0))
	 { constant_boolean_node (true, type); })
	/* sqrt(x) <= 0 -> x == 0.  */
	(if (cmp == LE_EXPR)
	 (eq @0 @1))
	/* Otherwise sqrt(x) cmp 0 -> x cmp 0.  Here cmp can be >=, >,
	   == or !=.  In the last case:

	     (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)

	   if x is negative or NaN.  Due to -funsafe-math-optimizations,
	   the results for other x follow from natural arithmetic.  */
	(cmp @0 @1)))
      (if (cmp == GT_EXPR || cmp == GE_EXPR)
       (with
	{
	  /* c2 = c*c, rounded to the mode of x.  */
	  REAL_VALUE_TYPE c2;
	  real_arithmetic (&c2, MULT_EXPR,
			   &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
	  real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
	}
	(if (REAL_VALUE_ISINF (c2))
	 /* sqrt(x) > y is x == +Inf, when y is very large.  */
	 (if (HONOR_INFINITIES (@0))
	  (eq @0 { build_real (TREE_TYPE (@0), c2); })
	  { constant_boolean_node (false, type); })
	 /* sqrt(x) > c is the same as x > c*c.  */
	 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
      (if (cmp == LT_EXPR || cmp == LE_EXPR)
       (with
	{
	  /* c2 = c*c, rounded to the mode of x.  */
	  REAL_VALUE_TYPE c2;
	  real_arithmetic (&c2, MULT_EXPR,
			   &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
	  real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
	}
	(if (REAL_VALUE_ISINF (c2))
	 (switch
	  /* sqrt(x) < y is always true, when y is a very large
	     value and we don't care about NaNs or Infinities.  */
	  (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
	   { constant_boolean_node (true, type); })
	  /* sqrt(x) < y is x != +Inf when y is very large and we
	     don't care about NaNs.  */
	  (if (! HONOR_NANS (@0))
	   (ne @0 { build_real (TREE_TYPE (@0), c2); }))
	  /* sqrt(x) < y is x >= 0 when y is very large and we
	     don't care about Infinities.  */
	  (if (! HONOR_INFINITIES (@0))
	   (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
	  /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large.  */
	  (if (GENERIC)
	   (truth_andif
	    (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	    (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
	 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs.  */
	 (if (! HONOR_NANS (@0))
	  (cmp @0 { build_real (TREE_TYPE (@0), c2); })
	  /* sqrt(x) < c is the same as x >= 0 && x < c*c.  */
	  (if (GENERIC)
	   (truth_andif
	    (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
	    (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
    /* Transform sqrt(x) cmp sqrt(y) -> x cmp y.  */
    (simplify
     (cmp (sq @0) (sq @1))
       (if (! HONOR_NANS (@0))
	 (cmp @0 @1))))))

/* Optimize various special cases of (FTYPE) N CMP CST.  */
/* NOTE(review): ICMP appears to be the comparison used against the
   constant after it has been rounded toward the integer value with
   real_ceil/real_floor below (LT->LE, GT->GE) — confirm against the
   real_ceil/real_floor choices before relying on this.  */
(for cmp  (lt le eq ne ge gt)
     icmp (le le eq ne ge ge)
 (simplify
  (cmp (float @0) REAL_CST@1)
   (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
	&& ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
    (with
     {
       tree itype = TREE_TYPE (@0);
       signop isign = TYPE_SIGN (itype);
       format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
       const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
       /* Be careful to preserve any potential exceptions due to
	  NaNs.  qNaNs are ok in == or != context.
	  TODO: relax under -fno-trapping-math or
	  -fno-signaling-nans.  */
       bool exception_p
	 = real_isnan (cst) && (cst->signalling
				|| (cmp != EQ_EXPR && cmp != NE_EXPR));
       /* INT?_MIN is power-of-two so it takes
	  only one mantissa bit.  */
       bool signed_p = isign == SIGNED;
       bool itype_fits_ftype_p
	 = TYPE_PRECISION (itype) - signed_p <= significand_size (fmt);
     }
     /* TODO: allow non-fitting itype and SNaNs when
	-fno-trapping-math.  */
     (if (itype_fits_ftype_p && ! exception_p)
      (with
       {
	 REAL_VALUE_TYPE imin, imax;
	 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
	 real_from_integer (&imax, fmt, wi::max_value (itype), isign);

	 REAL_VALUE_TYPE icst;
	 if (cmp == GT_EXPR || cmp == GE_EXPR)
	   real_ceil (&icst, fmt, cst);
	 else if (cmp == LT_EXPR || cmp == LE_EXPR)
	   real_floor (&icst, fmt, cst);
	 else
	   real_trunc (&icst, fmt, cst);

	 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);

	 bool overflow_p = false;
	 wide_int icst_val
	   = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
       }
       (switch
	/* Optimize cases when CST is outside of ITYPE's range.  */
	(if (real_compare (LT_EXPR, cst, &imin))
	 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
				  type); })
	(if (real_compare (GT_EXPR, cst, &imax))
	 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
				  type); })
	/* Remove cast if CST is an integer representable by ITYPE.  */
	(if (cst_int_p)
	 (cmp @0 { gcc_assert (!overflow_p);
		   wide_int_to_tree (itype, icst_val); })
	)
	/* When CST is fractional, optimize
	    (FTYPE) N == CST -> 0
	    (FTYPE) N != CST -> 1.  */
	(if (cmp == EQ_EXPR || cmp == NE_EXPR)
	 { constant_boolean_node (cmp == NE_EXPR, type); })
	/* Otherwise replace with sensible integer constant.  */
	(with
	 {
	   gcc_checking_assert (!overflow_p);
	 }
	 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))

/* Fold A /[ex] B CMP C to A CMP B * C.
 */
(for cmp (eq ne)
 (simplify
  (cmp (exact_div @0 @1) INTEGER_CST@2)
  /* Leave a division by zero alone.  */
  (if (!integer_zerop (@1))
   (if (wi::to_wide (@2) == 0)
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST)
     (with
      {
	bool ovf;
	wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
				 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
      }
      (if (ovf)
       /* B * C overflowed, so A (which equals B * (A /[ex] B) exactly)
	  can never equal it.  */
       { constant_boolean_node (cmp == NE_EXPR, type); }
       (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
(for cmp (lt le gt ge)
 (simplify
  (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
   (with
    {
      bool ovf;
      wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
			       TYPE_SIGN (TREE_TYPE (@1)), &ovf);
    }
    (if (ovf)
     { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
					TYPE_SIGN (TREE_TYPE (@2)))
			      != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
     (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))

/* Unordered tests if either argument is a NaN.  */
(simplify
 (bit_ior (unordered @0 @0) (unordered @1 @1))
 (if (types_match (@0, @1))
  (unordered @0 @1)))
(simplify
 (bit_and (ordered @0 @0) (ordered @1 @1))
 (if (types_match (@0, @1))
  (ordered @0 @1)))
(simplify
 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
 @2)
(simplify
 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
 @2)

/* Simple range test simplifications.  */
/* A < B || A >= B -> true.  */
(for test1 (lt le le le ne ge)
     test2 (ge gt ge ne eq ne)
 (simplify
  (bit_ior:c (test1 @0 @1) (test2 @0 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
   { constant_boolean_node (true, type); })))
/* A < B && A >= B -> false.  */
(for test1 (lt lt lt le ne eq)
     test2 (ge gt eq gt eq gt)
 (simplify
  (bit_and:c (test1 @0 @1) (test2 @0 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
   { constant_boolean_node (false, type); })))

/* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
   A & (2**N - 1) >  2**K - 1 -> A & (2**N - 2**K) != 0

   Note that comparisons
     A & (2**N - 1) <  2**K   -> A & (2**N - 2**K) == 0
     A & (2**N - 1) >= 2**K   -> A & (2**N - 2**K) != 0
   will be canonicalized to above so there's no need to
   consider them here.
 */

(for cmp (le gt)
     eqcmp (eq ne)
 (simplify
  (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (with
    {
      tree ty = TREE_TYPE (@0);
      unsigned prec = TYPE_PRECISION (ty);
      wide_int mask = wi::to_wide (@2, prec);
      wide_int rhs = wi::to_wide (@3, prec);
      signop sgn = TYPE_SIGN (ty);
    }
    /* mask & (mask + 1) == 0 checks mask is 2**N - 1; likewise for rhs.  */
    (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
	 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
     (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
	    { build_zero_cst (ty); }))))))

/* -A CMP -B -> B CMP A.  */
(for cmp (tcc_comparison)
     scmp (swapped_tcc_comparison)
 (simplify
  (cmp (negate @0) (negate @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (scmp @0 @1)))
 (simplify
  (cmp (negate @0) CONSTANT_CLASS_P@1)
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
	   && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
    (if (tem && !TREE_OVERFLOW (tem))
     (scmp @0 { tem; }))))))

/* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0.
 */
(for op (eq ne)
 (simplify
  (op (abs @0) zerop@1)
  (op @0 @1)))

/* From fold_sign_changed_comparison and fold_widened_comparison.
   FIXME: the lack of symmetry is disturbing.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (convert@0 @00) (convert?@1 @10))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       /* Disable this optimization if we're casting a function pointer
	  type on targets that require function pointer canonicalization.  */
       && !(targetm.have_canonicalize_funcptr_for_compare ()
	    && POINTER_TYPE_P (TREE_TYPE (@00))
	    && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
       && single_use (@0))
   /* Sign-changed comparison: same precision, possibly different sign.  */
   (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
	&& (TREE_CODE (@10) == INTEGER_CST
	    || @1 != @10)
	&& (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
	    || cmp == NE_EXPR
	    || cmp == EQ_EXPR)
	&& !POINTER_TYPE_P (TREE_TYPE (@00)))
    /* ???  The special-casing of INTEGER_CST conversion was in the original
       code and here to avoid a spurious overflow flag on the resulting
       constant which fold_convert produces.  */
    (if (TREE_CODE (@1) == INTEGER_CST)
     (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
				TREE_OVERFLOW (@1)); })
     (cmp @00 (convert @1)))

    /* Widened comparison: the conversion widened the operand.  */
    (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
     /* If possible, express the comparison in the shorter mode.  */
     (if ((cmp == EQ_EXPR || cmp == NE_EXPR
	   || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
	   || (!TYPE_UNSIGNED (TREE_TYPE (@0))
	       && TYPE_UNSIGNED (TREE_TYPE (@00))))
	  && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
	      || ((TYPE_PRECISION (TREE_TYPE (@00))
		   >= TYPE_PRECISION (TREE_TYPE (@10)))
		  && (TYPE_UNSIGNED (TREE_TYPE (@00))
		      == TYPE_UNSIGNED (TREE_TYPE (@10))))
	      || (TREE_CODE (@10) == INTEGER_CST
		  && INTEGRAL_TYPE_P (TREE_TYPE (@00))
		  && int_fits_type_p (@10, TREE_TYPE (@00)))))
      (cmp @00 (convert @10))
      (if (TREE_CODE (@10) == INTEGER_CST
	   && INTEGRAL_TYPE_P (TREE_TYPE (@00))
	   && !int_fits_type_p (@10, TREE_TYPE (@00)))
       /* The constant is outside the range of the narrow type, so the
	  comparison folds to a constant when that can be decided.  */
       (with
	{
	  tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
	  tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
	  bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
	  bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
	}
	(if (above || below)
	 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
	  { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
	  (if (cmp == LT_EXPR || cmp == LE_EXPR)
	   { constant_boolean_node (above ? true : false, type); }
	   (if (cmp == GT_EXPR || cmp == GE_EXPR)
	    { constant_boolean_node (above ? false : true, type); })))))))))))

(for cmp (eq ne)
 /* A local variable can never be pointed to by
    the default SSA name of an incoming parameter.
    SSA names are canonicalized to 2nd place.  */
 (simplify
  (cmp addr@0 SSA_NAME@1)
  (if (SSA_NAME_IS_DEFAULT_DEF (@1)
       && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
   (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
    (if (TREE_CODE (base) == VAR_DECL
	 && auto_var_in_fn_p (base, current_function_decl))
     (if (cmp == NE_EXPR)
      { constant_boolean_node (true, type); }
      { constant_boolean_node (false, type); }))))))

/* Equality compare simplifications from fold_binary  */
(for cmp (eq ne)

 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
    Similarly for NE_EXPR.  */
 (simplify
  (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
       && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
   { constant_boolean_node (cmp == NE_EXPR, type); }))

 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y.  */
 (simplify
  (cmp (bit_xor @0 @1) integer_zerop)
  (cmp @0 @1))

 /* (X ^ Y) == Y becomes X == 0.
    Likewise (X ^ Y) == X becomes Y == 0.  */
 (simplify
  (cmp:c (bit_xor:c @0 @1) @0)
  (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))

 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2).  */
 (simplify
  (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
   (cmp @0 (bit_xor @1 (convert @2)))))

 (simplify
  (cmp (convert? addr@0) integer_zerop)
  (if (tree_single_nonzero_warnv_p (@0, NULL))
   { constant_boolean_node (cmp == NE_EXPR, type); })))

/* If we have (A & C) == C where C is a power of 2, convert this into
   (A & C) != 0.  Similarly for NE_EXPR.  */
(for cmp (eq ne)
     icmp (ne eq)
 (simplify
  (cmp (bit_and@2 @0 integer_pow2p@1) @1)
  (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))

/* If we have (A & C) != 0 ?
D : 0 where C and D are powers of 2, 3582 convert this into a shift followed by ANDing with D. */ 3583(simplify 3584 (cond 3585 (ne (bit_and @0 integer_pow2p@1) integer_zerop) 3586 INTEGER_CST@2 integer_zerop) 3587 (if (integer_pow2p (@2)) 3588 (with { 3589 int shift = (wi::exact_log2 (wi::to_wide (@2)) 3590 - wi::exact_log2 (wi::to_wide (@1))); 3591 } 3592 (if (shift > 0) 3593 (bit_and 3594 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2) 3595 (bit_and 3596 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) 3597 @2))))) 3598 3599/* If we have (A & C) != 0 where C is the sign bit of A, convert 3600 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */ 3601(for cmp (eq ne) 3602 ncmp (ge lt) 3603 (simplify 3604 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop) 3605 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) 3606 && type_has_mode_precision_p (TREE_TYPE (@0)) 3607 && element_precision (@2) >= element_precision (@0) 3608 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0))) 3609 (with { tree stype = signed_type_for (TREE_TYPE (@0)); } 3610 (ncmp (convert:stype @0) { build_zero_cst (stype); }))))) 3611 3612/* If we have A < 0 ? C : 0 where C is a power of 2, convert 3613 this into a right shift or sign extension followed by ANDing with C. */ 3614(simplify 3615 (cond 3616 (lt @0 integer_zerop) 3617 INTEGER_CST@1 integer_zerop) 3618 (if (integer_pow2p (@1) 3619 && !TYPE_UNSIGNED (TREE_TYPE (@0))) 3620 (with { 3621 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1; 3622 } 3623 (if (shift >= 0) 3624 (bit_and 3625 (convert (rshift @0 { build_int_cst (integer_type_node, shift); })) 3626 @1) 3627 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure 3628 sign extension followed by AND with C will achieve the effect. */ 3629 (bit_and (convert @0) @1))))) 3630 3631/* When the addresses are not directly of decls compare base and offset. 
   This implements some remaining parts of fold_comparison address
   comparisons but still no complete part of it.  Still it is good
   enough to make fold_stmt not regress when not dispatching to fold_binary.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (convert1?@2 addr@0) (convert2? addr@1))
  (with
   {
     poly_int64 off0, off1;
     tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
     tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
     /* Look through a MEM_REF base, folding its constant offset into
        the accumulated byte offset.  */
     if (base0 && TREE_CODE (base0) == MEM_REF)
       {
         off0 += mem_ref_offset (base0).force_shwi ();
         base0 = TREE_OPERAND (base0, 0);
       }
     if (base1 && TREE_CODE (base1) == MEM_REF)
       {
         off1 += mem_ref_offset (base1).force_shwi ();
         base1 = TREE_OPERAND (base1, 0);
       }
   }
   (if (base0 && base1)
    (with
     {
       /* equal: 1 = bases have the same address, 0 = known different,
          any other value = unknown.  */
       int equal = 2;
       /* Punt in GENERIC on variables with value expressions;
          the value expressions might point to fields/elements
          of other vars etc.  */
       if (GENERIC
           && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
               || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
         ;
       else if (decl_in_symtab_p (base0)
                && decl_in_symtab_p (base1))
         equal = symtab_node::get_create (base0)
                   ->equal_address_to (symtab_node::get_create (base1));
       else if ((DECL_P (base0)
                 || TREE_CODE (base0) == SSA_NAME
                 || TREE_CODE (base0) == STRING_CST)
                && (DECL_P (base1)
                    || TREE_CODE (base1) == SSA_NAME
                    || TREE_CODE (base1) == STRING_CST))
         equal = (base0 == base1);
     }
     (if (equal == 1
          && (cmp == EQ_EXPR || cmp == NE_EXPR
              /* If the offsets are equal we can ignore overflow.  */
              || known_eq (off0, off1)
              || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
              /* Or if we compare using pointers to decls or strings.  */
              || (POINTER_TYPE_P (TREE_TYPE (@2))
                  && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
      /* Same base: the comparison is decided by the offsets, when their
         relation is known at compile time (poly_int offsets may be
         indeterminate, hence the paired known_* guards).  */
      (switch
       (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
        { constant_boolean_node (known_eq (off0, off1), type); })
       (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
        { constant_boolean_node (known_ne (off0, off1), type); })
       (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
        { constant_boolean_node (known_lt (off0, off1), type); })
       (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
        { constant_boolean_node (known_le (off0, off1), type); })
       (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
        { constant_boolean_node (known_ge (off0, off1), type); })
       (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
        { constant_boolean_node (known_gt (off0, off1), type); }))
      (if (equal == 0
           && DECL_P (base0) && DECL_P (base1)
           /* If we compare this as integers require equal offset.  */
           && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
               || known_eq (off0, off1)))
       (switch
        (if (cmp == EQ_EXPR)
         { constant_boolean_node (false, type); })
        (if (cmp == NE_EXPR)
         { constant_boolean_node (true, type); }))))))))

/* Simplify pointer equality compares using PTA.  */
(for neeq (ne eq)
 (simplify
  (neeq @0 @1)
  (if (POINTER_TYPE_P (TREE_TYPE (@0))
       && ptrs_compare_unequal (@0, @1))
   { constant_boolean_node (neeq != EQ_EXPR, type); })))

/* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
   and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
   Disable the transform if either operand is pointer to function.
   This broke pr22051-2.c for arm where function pointer
   canonicalization is not wanted.
 */

(for cmp (ne eq)
 (simplify
  (cmp (convert @0) INTEGER_CST@1)
  (if (((POINTER_TYPE_P (TREE_TYPE (@0))
         && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
         && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
        || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
            && POINTER_TYPE_P (TREE_TYPE (@1))
            && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp @0 (convert @1)))))

/* Non-equality compare simplifications from fold_binary  */
(for cmp (lt gt le ge)
 /* Comparisons with the highest or lowest possible integer of
    the specified precision will have known values.  */
 (simplify
  (cmp (convert?@2 @0) INTEGER_CST@1)
  (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
       && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
   (with
    {
      tree arg1_type = TREE_TYPE (@1);
      unsigned int prec = TYPE_PRECISION (arg1_type);
      wide_int max = wi::max_value (arg1_type);
      wide_int signed_max = wi::max_value (prec, SIGNED);
      wide_int min = wi::min_value (arg1_type);
    }
    (switch
     /* X > MAX is false, X >= MAX iff X == MAX, etc.  */
     (if (wi::to_wide (@1) == max)
      (switch
       (if (cmp == GT_EXPR)
        { constant_boolean_node (false, type); })
       (if (cmp == GE_EXPR)
        (eq @2 @1))
       (if (cmp == LE_EXPR)
        { constant_boolean_node (true, type); })
       (if (cmp == LT_EXPR)
        (ne @2 @1))))
     (if (wi::to_wide (@1) == min)
      (switch
       (if (cmp == LT_EXPR)
        { constant_boolean_node (false, type); })
       (if (cmp == LE_EXPR)
        (eq @2 @1))
       (if (cmp == GE_EXPR)
        { constant_boolean_node (true, type); })
       (if (cmp == GT_EXPR)
        (ne @2 @1))))
     (if (wi::to_wide (@1) == max - 1)
      (switch
       (if (cmp == GT_EXPR)
        (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
       (if (cmp == LE_EXPR)
        (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
     (if (wi::to_wide (@1) == min + 1)
      (switch
       (if (cmp == GE_EXPR)
        (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
       (if (cmp == LT_EXPR)
        (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
     (if (wi::to_wide (@1) == signed_max
          && TYPE_UNSIGNED (arg1_type)
          /* We will flip the signedness of the comparison operator
             associated with the mode of @1, so the sign bit is
             specified by this mode.  Check that @1 is the signed
             max associated with this sign bit.  */
          && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
          /* signed_type does not work on pointer types.  */
          && INTEGRAL_TYPE_P (arg1_type))
      /* The following case also applies to X < signed_max+1
         and X >= signed_max+1 because of previous transformations.  */
      (if (cmp == LE_EXPR || cmp == GT_EXPR)
       (with { tree st = signed_type_for (arg1_type); }
        (if (cmp == LE_EXPR)
         (ge (convert:st @0) { build_zero_cst (st); })
         (lt (convert:st @0) { build_zero_cst (st); })))))))))

(for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
 /* If the second operand is NaN, the result is constant: the
    unordered-or-* codes and UNORDERED are true, ORDERED and LTGT
    are false.  LTGT is kept when trapping math matters since it
    may raise an exception on a NaN.  */
 (simplify
  (cmp @0 REAL_CST@1)
  (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
       && (cmp != LTGT_EXPR || ! flag_trapping_math))
   { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
                            ? false : true, type); })))

/* bool_var != 0 becomes bool_var.  */
(simplify
 (ne @0 integer_zerop)
 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
      && types_match (type, TREE_TYPE (@0)))
  (non_lvalue @0)))
/* bool_var == 1 becomes bool_var.
 */
(simplify
 (eq @0 integer_onep)
 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
      && types_match (type, TREE_TYPE (@0)))
  (non_lvalue @0)))
/* Do not handle
   bool_var == 0 becomes !bool_var or
   bool_var != 1 becomes !bool_var
   here because that only is good in assignment context as long
   as we require a tcc_comparison in GIMPLE_CONDs where we'd
   replace if (x == 0) with tem = ~x; if (tem != 0) which is
   clearly less optimal and which we'll transform again in forwprop.  */

/* When one argument is a constant, overflow detection can be simplified.
   Currently restricted to single use so as not to interfere too much with
   ADD_OVERFLOW detection in tree-ssa-math-opts.c.
   A + CST CMP A  ->  A CMP' CST' */
(for cmp (lt le ge gt)
     out (gt gt le le)
 (simplify
  (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && wi::to_wide (@1) != 0
       && single_use (@2))
   /* The replacement compares A against UINT_MAX - CST, the largest
      value that does not wrap when CST is added.  */
   (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
    (out @0 { wide_int_to_tree (TREE_TYPE (@0),
                                wi::max_value (prec, UNSIGNED)
                                - wi::to_wide (@1)); })))))

/* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
   However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
   expects the long form, so we restrict the transformation for now.  */
(for cmp (gt le)
 (simplify
  (cmp:c (minus@2 @0 @1) @0)
  (if (single_use (@2)
       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
   (cmp @1 @0))))

/* Testing for overflow is unnecessary if we already know the result.
 */
/* A - B > A  */
(for cmp (gt le)
     out (ne eq)
 (simplify
  (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   /* The imaginary part of an IFN_*_OVERFLOW result carries the
      overflow flag, so test that directly.  */
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
/* A + B < A  */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0))
       && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
   (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))

/* For unsigned operands, -1 / B < A checks whether A * B would overflow.
   Simplify it to __builtin_mul_overflow (A, B, <unused>).  */
(for cmp (lt ge)
     out (ne eq)
 (simplify
  (cmp:c (trunc_div:s integer_all_onesp @1) @0)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
   (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
    (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))

/* Simplification of math builtins.  These rules must all be optimizations
   as well as IL simplifications.  If there is a possibility that the new
   form could be a pessimization, the rule should go in the canonicalization
   section that follows this one.

   Rules can generally go in this section if they satisfy one of
   the following:

   - the rule describes an identity

   - the rule replaces calls with something as simple as addition or
     multiplication

   - the rule contains unary calls only and simplifies the surrounding
     arithmetic.  (The idea here is to exclude non-unary calls in which
     one operand is constant and in which the call is known to be cheap
     when the operand has that value.)  */

(if (flag_unsafe_math_optimizations)
 /* Simplify sqrt(x) * sqrt(x) -> x.
 */
 (simplify
  (mult (SQRT_ALL@1 @0) @1)
  (if (!HONOR_SNANS (type))
   @0))

 (for op (plus minus)
  /* Simplify (A / C) +- (B / C) -> (A +- B) / C.  */
  (simplify
   (op (rdiv @0 @1)
       (rdiv @2 @1))
   (rdiv (op @0 @2) @1)))

 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y).  */
 (for root (SQRT CBRT)
  (simplify
   (mult (root:s @0) (root:s @1))
   (root (mult @0 @1))))

 /* Simplify expN(x) * expN(y) -> expN(x+y). */
 (for exps (EXP EXP2 EXP10 POW10)
  (simplify
   (mult (exps:s @0) (exps:s @1))
   (exps (plus @0 @1))))

 /* Simplify a/root(b/c) into a*root(c/b).  */
 (for root (SQRT CBRT)
  (simplify
   (rdiv @0 (root:s (rdiv:s @1 @2)))
   (mult @0 (root (rdiv @2 @1)))))

 /* Simplify x/expN(y) into x*expN(-y).  */
 (for exps (EXP EXP2 EXP10 POW10)
  (simplify
   (rdiv @0 (exps:s @1))
   (mult @0 (exps (negate @1)))))

 /* LOG10 appears twice so that it pairs with both EXP10 and POW10.  */
 (for logs (LOG LOG2 LOG10 LOG10)
      exps (EXP EXP2 EXP10 POW10)
  /* logN(expN(x)) -> x.  */
  (simplify
   (logs (exps @0))
   @0)
  /* expN(logN(x)) -> x.  */
  (simplify
   (exps (logs @0))
   @0))

 /* Optimize logN(func()) for various exponential functions.  We
    want to determine the value "x" and the power "exponent" in
    order to transform logN(x**exponent) into exponent*logN(x).
    Each logN is paired here with every expM for M != N; the M == N
    identities are handled by the loop above.  */
 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
      exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
  (simplify
   (logs (exps @0))
   (if (SCALAR_FLOAT_TYPE_P (type))
    (with {
      tree x;
      switch (exps)
        {
        CASE_CFN_EXP:
          /* Prepare to do logN(exp(exponent)) -> exponent*logN(e).  */
          x = build_real_truncate (type, dconst_e ());
          break;
        CASE_CFN_EXP2:
          /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2).
 */
          x = build_real (type, dconst2);
          break;
        CASE_CFN_EXP10:
        CASE_CFN_POW10:
          /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10).  */
          {
            REAL_VALUE_TYPE dconst10;
            real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
            x = build_real (type, dconst10);
          }
          break;
        default:
          gcc_unreachable ();
        }
      }
    /* x holds the matched exponential's base as a REAL_CST.  */
    (mult (logs { x; }) @0)))))

 (for logs (LOG LOG
            LOG2 LOG2
            LOG10 LOG10)
      exps (SQRT CBRT)
  (simplify
   (logs (exps @0))
   (if (SCALAR_FLOAT_TYPE_P (type))
    (with {
      tree x;
      switch (exps)
        {
        CASE_CFN_SQRT:
          /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x).  */
          x = build_real (type, dconsthalf);
          break;
        CASE_CFN_CBRT:
          /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x).  */
          x = build_real_truncate (type, dconst_third ());
          break;
        default:
          gcc_unreachable ();
        }
      }
    (mult { x; } (logs @0))))))

 /* logN(pow(x,exponent)) -> exponent*logN(x).  */
 (for logs (LOG LOG2 LOG10)
      pows (POW)
  (simplify
   (logs (pows @0 @1))
   (mult @1 (logs @0))))

 /* pow(C,x) -> exp(log(C)*x) if C > 0,
    or if C is a positive power of 2,
    pow(C,x) -> exp2(log2(C)*x).  */
#if GIMPLE
 (for pows (POW)
      exps (EXP)
      logs (LOG)
      exp2s (EXP2)
      log2s (LOG2)
  (simplify
   (pows REAL_CST@0 @1)
   (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
        && real_isfinite (TREE_REAL_CST_PTR (@0))
        /* As libmvec doesn't have a vectorized exp2, defer optimizing
           the use_exp2 case until after vectorization.  It seems actually
           beneficial for all constants to postpone this until later,
           because exp(log(C)*x), while faster, will have worse precision
           and if x folds into a constant too, that is unnecessary
           pessimization.
 */
        && canonicalize_math_after_vectorization_p ())
    (with {
       const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
       bool use_exp2 = false;
       /* C is a positive power of 2 iff forcing its exponent to 1
          yields exactly 1.0 (i.e. its fraction is 0.5).  */
       if (targetm.libc_has_function (function_c99_misc)
           && value->cl == rvc_normal)
         {
           REAL_VALUE_TYPE frac_rvt = *value;
           SET_REAL_EXP (&frac_rvt, 1);
           if (real_equal (&frac_rvt, &dconst1))
             use_exp2 = true;
         }
     }
     (if (!use_exp2)
      (if (optimize_pow_to_exp (@0, @1))
       (exps (mult (logs @0) @1)))
      (exp2s (mult (log2s @0) @1)))))))
#endif

 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0.  */
 (for pows (POW)
      exps (EXP EXP2 EXP10 POW10)
      logs (LOG LOG2 LOG10 LOG10)
  (simplify
   (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
   (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
        && real_isfinite (TREE_REAL_CST_PTR (@0)))
    (exps (plus (mult (logs @0) @1) @2)))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
      exps (EXP EXP2 EXP10 POW10)
  /* sqrt(expN(x)) -> expN(x*0.5).  */
  (simplify
   (sqrts (exps @0))
   (exps (mult @0 { build_real (type, dconsthalf); })))
  /* cbrt(expN(x)) -> expN(x/3).  */
  (simplify
   (cbrts (exps @0))
   (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
  /* pow(expN(x), y) -> expN(x*y).  */
  (simplify
   (pows (exps @0) @1)
   (exps (mult @0 @1))))

 /* tan(atan(x)) -> x.  */
 (for tans (TAN)
      atans (ATAN)
  (simplify
   (tans (atans @0))
   @0)))

/* cabs(x+0i) or cabs(0+xi) -> abs(x).  */
(simplify
 (CABS (complex:C @0 real_zerop@1))
 (abs @0))

/* trunc(trunc(x)) -> trunc(x), etc.  */
(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
 (simplify
  (fns (fns @0))
  (fns @0)))
/* f(x) -> x if x is integer valued and f does nothing for such values.
 */
(for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
 (simplify
  (fns integer_valued_real_p@0)
  @0))

/* hypot(x,0) and hypot(0,x) -> abs(x).  */
(simplify
 (HYPOT:c @0 real_zerop@1)
 (abs @0))

/* pow(1,x) -> 1.  */
(simplify
 (POW real_onep@0 @1)
 @0)

(simplify
 /* copysign(x,x) -> x.  */
 (COPYSIGN_ALL @0 @0)
 @0)

(simplify
 /* copysign(x,y) -> fabs(x) if y is nonnegative.  */
 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
 (abs @0))

/* scale covers ldexp, scalbn and scalbln.  */
(for scale (LDEXP SCALBN SCALBLN)
 /* ldexp(0, x) -> 0.  */
 (simplify
  (scale real_zerop@0 @1)
  @0)
 /* ldexp(x, 0) -> x.  */
 (simplify
  (scale @0 integer_zerop@1)
  @0)
 /* ldexp(x, y) -> x if x is +-Inf or NaN.  */
 (simplify
  (scale REAL_CST@0 @1)
  (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
   @0)))

/* Canonicalization of sequences of math builtins.  These rules represent
   IL simplifications but are not necessarily optimizations.

   The sincos pass is responsible for picking "optimal" implementations
   of math builtins, which may be more complicated and can sometimes go
   the other way, e.g. converting pow into a sequence of sqrts.
   We only want to do these canonicalizations before the pass has run.  */

(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
 /* Simplify tan(x) * cos(x) -> sin(x). */
 (simplify
  (mult:c (TAN:s @0) (COS:s @0))
  (SIN @0))

 /* Simplify x * pow(x,c) -> pow(x,c+1). */
 (simplify
  (mult:c @0 (POW:s @0 REAL_CST@1))
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (plus @1 { build_one_cst (type); }))))

 /* Simplify sin(x) / cos(x) -> tan(x). */
 (simplify
  (rdiv (SIN:s @0) (COS:s @0))
  (TAN @0))

 /* Simplify cos(x) / sin(x) -> 1 / tan(x).
 */
 (simplify
  (rdiv (COS:s @0) (SIN:s @0))
  (rdiv { build_one_cst (type); } (TAN @0)))

 /* Simplify sin(x) / tan(x) -> cos(x). */
 (simplify
  (rdiv (SIN:s @0) (TAN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (COS @0)))

 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
 (simplify
  (rdiv (TAN:s @0) (SIN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (rdiv { build_one_cst (type); } (COS @0))))

 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @0 @2))
   (POW @0 (plus @1 @2)))

 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @2 @1))
   (POW (mult @0 @2) @1))

 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
 (simplify
  (mult (POWI:s @0 @1) (POWI:s @2 @1))
   (POWI (mult @0 @2) @1))

 /* Simplify pow(x,c) / x -> pow(x,c-1). */
 (simplify
  (rdiv (POW:s @0 REAL_CST@1) @0)
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (minus @1 { build_one_cst (type); }))))

 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
 (simplify
  (rdiv @0 (POW:s @1 @2))
  (mult @0 (POW @1 (negate @2))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
  /* sqrt(sqrt(x)) -> pow(x,1/4). */
  (simplify
   (sqrts (sqrts @0))
   (pows @0 { build_real (type, dconst_quarter ()); }))
  /* sqrt(cbrt(x)) -> pow(x,1/6). */
  (simplify
   (sqrts (cbrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(sqrt(x)) -> pow(x,1/6). */
  (simplify
   (cbrts (sqrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
  (simplify
   (cbrts (cbrts tree_expr_nonnegative_p@0))
   (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
  /* sqrt(pow(x,y)) -> pow(|x|,y*0.5).
 */
  (simplify
   (sqrts (pows @0 @1))
   (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
  /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
  (simplify
   (cbrts (pows tree_expr_nonnegative_p@0 @1))
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(sqrt(x),y) -> pow(x,y*0.5). */
  (simplify
   (pows (sqrts @0) @1)
   (pows @0 (mult @1 { build_real (type, dconsthalf); })))
  /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
  (simplify
   (pows (cbrts tree_expr_nonnegative_p@0) @1)
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
  (simplify
   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
   (pows @0 (mult @1 @2))))

 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
 (simplify
  (CABS (complex @0 @0))
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* hypot(x,x) -> fabs(x)*sqrt(2). */
 (simplify
  (HYPOT @0 @0)
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* cexp(x+yi) -> exp(x)*cexpi(y).
    @1 captures exp(x) so it can be reused for the imaginary part;
    @2 is the cexpi(y) call.  */
 (for cexps (CEXP)
      exps (EXP)
      cexpis (CEXPI)
  (simplify
   (cexps compositional_complex@0)
   (if (targetm.libc_has_function (function_c99_math_complex))
    (complex
     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
     (mult @1 (imagpart @2)))))))

(if (canonicalize_math_p ())
 /* floor(x) -> trunc(x) if x is nonnegative.
 */
 (for floors (FLOOR_ALL)
      truncs (TRUNC_ALL)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (truncs @0))))

/* Predicate: true when the operand's type is exactly double.  */
(match double_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
(for froms (BUILT_IN_TRUNCL
            BUILT_IN_FLOORL
            BUILT_IN_CEILL
            BUILT_IN_ROUNDL
            BUILT_IN_NEARBYINTL
            BUILT_IN_RINTL)
     tos (BUILT_IN_TRUNC
          BUILT_IN_FLOOR
          BUILT_IN_CEIL
          BUILT_IN_ROUND
          BUILT_IN_NEARBYINT
          BUILT_IN_RINT)
 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (convert (tos @0)))))

/* Predicate: true when the operand's type is exactly float.  */
(match float_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
            BUILT_IN_FLOORL BUILT_IN_FLOOR
            BUILT_IN_CEILL BUILT_IN_CEIL
            BUILT_IN_ROUNDL BUILT_IN_ROUND
            BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
            BUILT_IN_RINTL BUILT_IN_RINT)
     tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
          BUILT_IN_FLOORF BUILT_IN_FLOORF
          BUILT_IN_CEILF BUILT_IN_CEILF
          BUILT_IN_ROUNDF BUILT_IN_ROUNDF
          BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
          BUILT_IN_RINTF BUILT_IN_RINTF)
 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ()
      && targetm.libc_has_function (function_c99_misc))
  (simplify
   (froms (convert float_value_p@0))
   (convert (tos @0)))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL)
     tos (XFLOOR XCEIL XROUND XRINT)
 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double.
 */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL
            XFLOOR XCEIL XROUND XRINT)
     tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float.  */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x) if x is nonnegative.  */
 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued.  */
 (for fns (IFLOOR LFLOOR LLFLOOR
           ICEIL LCEIL LLCEIL
           IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0)))
 (if (!flag_errno_math)
  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued.  */
  (for rints (IRINT LRINT LLRINT)
   (simplify
    (rints integer_valued_real_p@0)
    (fix_trunc @0)))))

(if (canonicalize_math_p ())
 (for ifn (IFLOOR ICEIL IROUND IRINT)
      lfn (LFLOOR LCEIL LROUND LRINT)
      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
     sizeof (int) == sizeof (long).  The :long_integer_type_node
     suffix forces the result type of the replacement call.  */
  (if (TYPE_PRECISION (integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
  /* Canonicalize llround (x) to lround (x) on LP64 targets where
     sizeof (long long) == sizeof (long).  */
  (if (TYPE_PRECISION (long_long_integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))

/* cproj(x) -> x if we're ignoring infinities.
 */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))

/* If the real part is inf and the imag part is known to be
   nonnegative, return (inf + 0i).  */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
  { build_complex_inf (type, false); }))

/* If the imag part is inf, return (inf+I*copysign(0,imag)).  */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))

(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    /* tmp receives 1/3 rounded to the target type for the comparison
       in the cbrt case below.  */
    REAL_VALUE_TYPE tmp;
   }
   (switch
    /* pow(x,0) -> 1.  */
    (if (real_equal (value, &dconst0))
     { build_real (type, dconst1); })
    /* pow(x,1) -> x.  */
    (if (real_equal (value, &dconst1))
     @0)
    /* pow(x,-1) -> 1/x.  */
    (if (real_equal (value, &dconstm1))
     (rdiv { build_real (type, dconst1); } @0))
    /* pow(x,0.5) -> sqrt(x).  */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && real_equal (value, &dconsthalf))
     (sqrts @0))
    /* pow(x,1/3) -> cbrt(x).  */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
             real_equal (value, &tmp)))
     (cbrts @0))))))

/* powi(1,x) -> 1.  */
(simplify
 (POWI real_onep@0 @1)
 @0)

(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
  /* powi(x,0) -> 1.  */
  (if (wi::to_wide (@1) == 0)
   { build_real (type, dconst1); })
  /* powi(x,1) -> x.  */
  (if (wi::to_wide (@1) == 1)
   @0)
  /* powi(x,-1) -> 1/x.  */
  (if (wi::to_wide (@1) == -1)
   (rdiv { build_real (type, dconst1); } @0))))

/* Narrowing of arithmetic and logical operations.

   These are conceptually similar to the transformations performed for
   the C/C++ front-ends by shorten_binary_op and shorten_compare.  Long
   term we want to move all that code out of the front-ends into here.  */

/* If we have a narrowing conversion of an arithmetic operation where
   both operands are widening conversions from the same type as the outer
   narrowing conversion.  Then convert the innermost operands to a suitable
   unsigned type (to avoid introducing undefined behavior), perform the
   operation and convert the result to the desired type.  */
(for op (plus minus)
  (simplify
    (convert (op:s (convert@2 @0) (convert?@3 @1)))
    (if (INTEGRAL_TYPE_P (type)
         /* We check for type compatibility between @0 and @1 below,
            so there's no need to check that @1/@3 are integral types.  */
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && INTEGRAL_TYPE_P (TREE_TYPE (@2))
         /* The precision of the type of each operand must match the
            precision of the mode of each operand, similarly for the
            result.  */
         && type_has_mode_precision_p (TREE_TYPE (@0))
         && type_has_mode_precision_p (TREE_TYPE (@1))
         && type_has_mode_precision_p (type)
         /* The inner conversion must be a widening conversion.  */
         && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
         && types_match (@0, type)
         && (types_match (@0, @1)
             /* Or the second operand is const integer or converted const
                integer from valueize.  */
             || TREE_CODE (@1) == INTEGER_CST))
      (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
        (op @0 (convert @1))
        /* Otherwise do the arithmetic in the unsigned variant of the
           narrow type so overflow is well-defined.  */
        (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
         (convert (op (convert:utype @0)
                      (convert:utype @1))))))))

/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands.
   Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behavior for the
   arithmetic operation.  */
(for op (minus plus)
 (simplify
  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
          so there's no need to check that @1/@3 are integral types.  */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
          precision of the mode of each operand, similarly for the
          result.  */
       && type_has_mode_precision_p (TREE_TYPE (@0))
       && type_has_mode_precision_p (TREE_TYPE (@1))
       && type_has_mode_precision_p (type)
       /* The inner conversion must be a widening conversion.  */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, @1)
       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
           <= TYPE_PRECISION (TREE_TYPE (@0)))
       /* All bits of the mask above the inner type's precision must
          be clear, so masking in the narrow type loses nothing.  */
       && (wi::to_wide (@4)
           & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
                       true, TYPE_PRECISION (type))) == 0)
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (with { tree ntype = TREE_TYPE (@0); }
     (convert (bit_and (op @0 @1) (convert:ntype @4))))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
               (convert:utype @4))))))))

/* Transform (@0 < @1 and @0 < @2) to use min,
   (@0 > @1 and @0 > @2) to use max */
(for op (lt le gt ge)
     ext (min min max max)
 (simplify
  (bit_and (op:cs @0 @1) (op:cs @0 @2))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TREE_CODE (@0) != INTEGER_CST)
   (op @0 (ext @1 @2)))))

(simplify
 /* signbit(x) -> 0 if x is nonnegative.
 */
 (SIGNBIT tree_expr_nonnegative_p@0)
 { integer_zero_node; })

(simplify
 /* signbit(x) -> x<0 if x doesn't have signed zeros.  */
 (SIGNBIT @0)
 (if (!HONOR_SIGNED_ZEROS (@0))
  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))

/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1.  */
(for cmp (eq ne)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
        && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
        && !TYPE_SATURATING (TREE_TYPE (@0)))
    /* res = C2 -+ C1; TREE_OVERFLOW marks that the constant fold
       wrapped.  */
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res)
          && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
      { constant_boolean_node (cmp == NE_EXPR, type); }
      (if (single_use (@3))
       (cmp @0 { TREE_OVERFLOW (res)
                 ? drop_tree_overflow (res) : res; }))))))))
(for cmp (lt le gt ge)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      {
        fold_overflow_warning (("assuming signed overflow does not occur "
                                "when simplifying conditional to constant"),
                               WARN_STRICT_OVERFLOW_CONDITIONAL);
        bool less = cmp == LE_EXPR || cmp == LT_EXPR;
        /* wi::ges_p (@2, 0) should be sufficient for a signed type.
*/
	/* Direction of the overflow: C2 -+ C1 overflowed toward the
	   high end iff C1 is negative XOR the op is minus.  */
	bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
				  TYPE_SIGN (TREE_TYPE (@1)))
			!= (op == MINUS_EXPR);
	constant_boolean_node (less == ovf_high, type);
      }
      (if (single_use (@3))
       (with
	{
	  fold_overflow_warning (("assuming signed overflow does not occur "
				  "when changing X +- C1 cmp C2 to "
				  "X cmp C2 -+ C1"),
				 WARN_STRICT_OVERFLOW_COMPARISON);
	}
	(cmp @0 { res; })))))))))

/* Canonicalizations of BIT_FIELD_REFs.  */

(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  /* A BIT_FIELD_REF of a COMPLEX_TYPE value that covers exactly one
     component is the real part (offset zero) or the imaginary part
     (offset equal to the component size).  */
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments.  */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that referenced the full argument can be stripped.  */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
	    && integer_zerop (@2))
	   /* Low-parts can be reduced to integral conversions.
	      ??? The following doesn't work for PDP endian.  */
	   || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
	       /* Don't even think about BITS_BIG_ENDIAN.  */
	       && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
	       && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
	       && compare_tree_int (@2, (BYTES_BIG_ENDIAN
					 ? (TYPE_PRECISION (TREE_TYPE (@0))
					    - TYPE_PRECISION (type))
					 : 0)) == 0)))
   (convert @0))))

/* Simplify vector extracts.
*/

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 /* Extracting from a vector CONSTRUCTOR: the result type must be
    either the element type itself or a vector with that element
    type.  */
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
	  || (VECTOR_TYPE_P (type)
	      && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     /* On GIMPLE @0 may be an SSA name whose defining statement holds
	the actual CONSTRUCTOR.  */
     tree ctor = (TREE_CODE (@0) == SSA_NAME
		  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     /* Element size, extracted size (n) and offset (idx), in bits.  */
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   /* The extraction must cover whole elements and stay within the
      vector bounds.  */
   (if (n != 0
	&& (idx % width) == 0
	&& (n % width) == 0
	&& known_le ((idx + n) / width,
		     TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
    (with
     {
       /* Switch from bit counts to element counts.  */
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors.  */
       poly_uint64 k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
	 {
	   tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
	   if (TREE_CODE (cons_elem) == VECTOR_TYPE)
	     k = TYPE_VECTOR_SUBPARTS (cons_elem);
	 }
       unsigned HOST_WIDE_INT elt, count, const_k;
     }
     (switch
      /* We keep an exact subset of the constructor elements.  */
      (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
	{ build_constructor (type, NULL); }
	(if (count == 1)
	 /* A single constructor element; positions past the explicit
	    elements read as zero.  */
	 (if (elt < CONSTRUCTOR_NELTS (ctor))
	  (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
	  { build_zero_cst (type); })
	 /* Multiple elements: build a new, shorter constructor from
	    the referenced subrange.  */
	 {
	   vec<constructor_elt, va_gc> *vals;
	   vec_alloc (vals, count);
	   for (unsigned i = 0;
		i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
	     CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
				     CONSTRUCTOR_ELT (ctor, elt + i)->value);
	   build_constructor (type, vals);
	 })))
      /* The bitfield references a single constructor element.
*/
      (if (k.is_constant (&const_k)
	   && idx + n <= (idx / const_k + 1) * const_k)
       (switch
	/* Past the explicitly initialized elements; the value reads
	   as zero.  */
	(if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
	 { build_zero_cst (type); })
	/* The whole element is extracted.  */
	(if (n == const_k)
	 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
	/* Otherwise extract a sub-part of that single element at the
	   offset rebased within it.  */
	(BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
		       @1 { bitsize_int ((idx % const_k) * width); })))))))))

/* Simplify a bit extraction from a bit insertion for the cases with
   the inserted element fully covering the extraction or the insertion
   not touching the extraction.  */
(simplify
 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
 (with
  {
    /* Size of the inserted value @1 in bits; for integral types use
       the precision, otherwise the full type size.  */
    unsigned HOST_WIDE_INT isize;
    if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
      isize = TYPE_PRECISION (TREE_TYPE (@1));
    else
      isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
  }
  (switch
   /* The insertion fully covers the extraction: read from the
      inserted value @1 at the offset rebased to its start.  */
   (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
	&& wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
		      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
						 wi::to_wide (@rpos)
						 - wi::to_wide (@ipos)); }))
   /* The insertion and the extraction do not overlap at all: read
      from the original value @0 unchanged.  */
   (if (wi::geu_p (wi::to_wide (@ipos),
		   wi::to_wide (@rpos) + wi::to_wide (@rsize))
	|| wi::geu_p (wi::to_wide (@rpos),
		      wi::to_wide (@ipos) + isize))
    (BIT_FIELD_REF @0 @rsize @rpos)))))