1 /* Support routines for Value Range Propagation (VRP). 2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 3 Free Software Foundation, Inc. 4 Contributed by Diego Novillo <dnovillo@redhat.com>. 5 6 This file is part of GCC. 7 8 GCC is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3, or (at your option) 11 any later version. 12 13 GCC is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with GCC; see the file COPYING3. If not see 20 <http://www.gnu.org/licenses/>. */ 21 22 #include "config.h" 23 #include "system.h" 24 #include "coretypes.h" 25 #include "tm.h" 26 #include "ggc.h" 27 #include "flags.h" 28 #include "tree.h" 29 #include "basic-block.h" 30 #include "tree-flow.h" 31 #include "tree-pass.h" 32 #include "tree-dump.h" 33 #include "timevar.h" 34 #include "tree-pretty-print.h" 35 #include "gimple-pretty-print.h" 36 #include "diagnostic-core.h" 37 #include "intl.h" 38 #include "cfgloop.h" 39 #include "tree-scalar-evolution.h" 40 #include "tree-ssa-propagate.h" 41 #include "tree-chrec.h" 42 #include "gimple-fold.h" 43 #include "expr.h" 44 #include "optabs.h" 45 46 47 /* Type of value ranges. See value_range_d for a description of these 48 types. */ 49 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING }; 50 51 /* Range of values that can be associated with an SSA_NAME after VRP 52 has executed. */ 53 struct value_range_d 54 { 55 /* Lattice value represented by this range. */ 56 enum value_range_type type; 57 58 /* Minimum and maximum values represented by this range. These 59 values should be interpreted as follows: 60 61 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must 62 be NULL. 63 64 - If TYPE == VR_RANGE then MIN holds the minimum value and 65 MAX holds the maximum value of the range [MIN, MAX]. 66 67 - If TYPE == ANTI_RANGE the variable is known to NOT 68 take any values in the range [MIN, MAX]. */ 69 tree min; 70 tree max; 71 72 /* Set of SSA names whose value ranges are equivalent to this one. 73 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */ 74 bitmap equiv; 75 }; 76 77 typedef struct value_range_d value_range_t; 78 79 /* Set of SSA names found live during the RPO traversal of the function 80 for still active basic-blocks. */ 81 static sbitmap *live; 82 83 /* Return true if the SSA name NAME is live on the edge E. */ 84 85 static bool 86 live_on_edge (edge e, tree name) 87 { 88 return (live[e->dest->index] 89 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name))); 90 } 91 92 /* Local functions. */ 93 static int compare_values (tree val1, tree val2); 94 static int compare_values_warnv (tree val1, tree val2, bool *); 95 static void vrp_meet (value_range_t *, value_range_t *); 96 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, 97 tree, tree, bool, bool *, 98 bool *); 99 100 /* Location information for ASSERT_EXPRs. Each instance of this 101 structure describes an ASSERT_EXPR for an SSA name. Since a single 102 SSA name may have more than one assertion associated with it, these 103 locations are kept in a linked list attached to the corresponding 104 SSA name. 
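   For instance, if the conditionals 'if (x_2 > 10)' and 'if (x_2 < 5)'
   both appear in the function, two entries are queued for x_2, one per
   predicate, each carrying its own insertion point.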
*/ 105 struct assert_locus_d 106 { 107 /* Basic block where the assertion would be inserted. */ 108 basic_block bb; 109 110 /* Some assertions need to be inserted on an edge (e.g., assertions 111 generated by COND_EXPRs). In those cases, BB will be NULL. */ 112 edge e; 113 114 /* Pointer to the statement that generated this assertion. */ 115 gimple_stmt_iterator si; 116 117 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ 118 enum tree_code comp_code; 119 120 /* Value being compared against. */ 121 tree val; 122 123 /* Expression to compare. */ 124 tree expr; 125 126 /* Next node in the linked list. */ 127 struct assert_locus_d *next; 128 }; 129 130 typedef struct assert_locus_d *assert_locus_t; 131 132 /* If bit I is present, it means that SSA name N_i has a list of 133 assertions that should be inserted in the IL. */ 134 static bitmap need_assert_for; 135 136 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] 137 holds a list of ASSERT_LOCUS_T nodes that describe where 138 ASSERT_EXPRs for SSA name N_I should be inserted. */ 139 static assert_locus_t *asserts_for; 140 141 /* Value range array. After propagation, VR_VALUE[I] holds the range 142 of values that SSA name N_I may take. */ 143 static unsigned num_vr_values; 144 static value_range_t **vr_value; 145 static bool values_propagated; 146 147 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the 148 number of executable edges we saw the last time we visited the 149 node. */ 150 static int *vr_phi_edge_counts; 151 152 typedef struct { 153 gimple stmt; 154 tree vec; 155 } switch_update; 156 157 static VEC (edge, heap) *to_remove_edges; 158 DEF_VEC_O(switch_update); 159 DEF_VEC_ALLOC_O(switch_update, heap); 160 static VEC (switch_update, heap) *to_update_switch_stmts; 161 162 163 /* Return the maximum value for TYPE. */ 164 165 static inline tree 166 vrp_val_max (const_tree type) 167 { 168 if (!INTEGRAL_TYPE_P (type)) 169 return NULL_TREE; 170 171 return TYPE_MAX_VALUE (type); 172 } 173 174 /* Return the minimum value for TYPE. */ 175 176 static inline tree 177 vrp_val_min (const_tree type) 178 { 179 if (!INTEGRAL_TYPE_P (type)) 180 return NULL_TREE; 181 182 return TYPE_MIN_VALUE (type); 183 } 184 185 /* Return whether VAL is equal to the maximum value of its type. This 186 will be true for a positive overflow infinity. We can't do a 187 simple equality comparison with TYPE_MAX_VALUE because C typedefs 188 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == 189 to the integer constant with the same value in the type. */ 190 191 static inline bool 192 vrp_val_is_max (const_tree val) 193 { 194 tree type_max = vrp_val_max (TREE_TYPE (val)); 195 return (val == type_max 196 || (type_max != NULL_TREE 197 && operand_equal_p (val, type_max, 0))); 198 } 199 200 /* Return whether VAL is equal to the minimum value of its type. This 201 will be true for a negative overflow infinity. */ 202 203 static inline bool 204 vrp_val_is_min (const_tree val) 205 { 206 tree type_min = vrp_val_min (TREE_TYPE (val)); 207 return (val == type_min 208 || (type_min != NULL_TREE 209 && operand_equal_p (val, type_min, 0))); 210 } 211 212 213 /* Return whether TYPE should use an overflow infinity distinct from 214 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to 215 represent a signed overflow during VRP computations. An infinity 216 is distinct from a half-range, which will go from some number to 217 TYPE_{MIN,MAX}_VALUE. 
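   For a 32-bit signed int, for example, [1, INT_MAX] is an ordinary
   half-range, whereas the result of INT_MAX + 1 computed under the
   assumption that signed overflow is undefined is represented as
   +INF(OVF), i.e. TYPE_MAX_VALUE with TREE_OVERFLOW set (see
   make_overflow_infinity below).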
*/ 218 219 static inline bool 220 needs_overflow_infinity (const_tree type) 221 { 222 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); 223 } 224 225 /* Return whether TYPE can support our overflow infinity 226 representation: we use the TREE_OVERFLOW flag, which only exists 227 for constants. If TYPE doesn't support this, we don't optimize 228 cases which would require signed overflow--we drop them to 229 VARYING. */ 230 231 static inline bool 232 supports_overflow_infinity (const_tree type) 233 { 234 tree min = vrp_val_min (type), max = vrp_val_max (type); 235 #ifdef ENABLE_CHECKING 236 gcc_assert (needs_overflow_infinity (type)); 237 #endif 238 return (min != NULL_TREE 239 && CONSTANT_CLASS_P (min) 240 && max != NULL_TREE 241 && CONSTANT_CLASS_P (max)); 242 } 243 244 /* VAL is the maximum or minimum value of a type. Return a 245 corresponding overflow infinity. */ 246 247 static inline tree 248 make_overflow_infinity (tree val) 249 { 250 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); 251 val = copy_node (val); 252 TREE_OVERFLOW (val) = 1; 253 return val; 254 } 255 256 /* Return a negative overflow infinity for TYPE. */ 257 258 static inline tree 259 negative_overflow_infinity (tree type) 260 { 261 gcc_checking_assert (supports_overflow_infinity (type)); 262 return make_overflow_infinity (vrp_val_min (type)); 263 } 264 265 /* Return a positive overflow infinity for TYPE. */ 266 267 static inline tree 268 positive_overflow_infinity (tree type) 269 { 270 gcc_checking_assert (supports_overflow_infinity (type)); 271 return make_overflow_infinity (vrp_val_max (type)); 272 } 273 274 /* Return whether VAL is a negative overflow infinity. */ 275 276 static inline bool 277 is_negative_overflow_infinity (const_tree val) 278 { 279 return (needs_overflow_infinity (TREE_TYPE (val)) 280 && CONSTANT_CLASS_P (val) 281 && TREE_OVERFLOW (val) 282 && vrp_val_is_min (val)); 283 } 284 285 /* Return whether VAL is a positive overflow infinity. */ 286 287 static inline bool 288 is_positive_overflow_infinity (const_tree val) 289 { 290 return (needs_overflow_infinity (TREE_TYPE (val)) 291 && CONSTANT_CLASS_P (val) 292 && TREE_OVERFLOW (val) 293 && vrp_val_is_max (val)); 294 } 295 296 /* Return whether VAL is a positive or negative overflow infinity. */ 297 298 static inline bool 299 is_overflow_infinity (const_tree val) 300 { 301 return (needs_overflow_infinity (TREE_TYPE (val)) 302 && CONSTANT_CLASS_P (val) 303 && TREE_OVERFLOW (val) 304 && (vrp_val_is_min (val) || vrp_val_is_max (val))); 305 } 306 307 /* Return whether STMT has a constant rhs that is_overflow_infinity. */ 308 309 static inline bool 310 stmt_overflow_infinity (gimple stmt) 311 { 312 if (is_gimple_assign (stmt) 313 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == 314 GIMPLE_SINGLE_RHS) 315 return is_overflow_infinity (gimple_assign_rhs1 (stmt)); 316 return false; 317 } 318 319 /* If VAL is now an overflow infinity, return VAL. Otherwise, return 320 the same value with TREE_OVERFLOW clear. This can be used to avoid 321 confusing a regular value with an overflow value. */ 322 323 static inline tree 324 avoid_overflow_infinity (tree val) 325 { 326 if (!is_overflow_infinity (val)) 327 return val; 328 329 if (vrp_val_is_max (val)) 330 return vrp_val_max (TREE_TYPE (val)); 331 else 332 { 333 gcc_checking_assert (vrp_val_is_min (val)); 334 return vrp_val_min (TREE_TYPE (val)); 335 } 336 } 337 338 339 /* Return true if ARG is marked with the nonnull attribute in the 340 current function signature. 
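   For example, given

     void f (void *p, void *q) __attribute__ ((nonnull (2)));

   only the second argument is considered non-null, while a bare
   'nonnull' with no operand list applies to every pointer argument.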
*/

static bool
nonnull_arg_p (const_tree arg)
{
  tree t, attrs, fntype;
  unsigned HOST_WIDE_INT arg_num;

  gcc_assert (TREE_CODE (arg) == PARM_DECL
	      && POINTER_TYPE_P (TREE_TYPE (arg)));

  /* The static chain decl is always non null.  */
  if (arg == cfun->static_chain_decl)
    return true;

  fntype = TREE_TYPE (current_function_decl);
  attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));

  /* If "nonnull" wasn't specified, we know nothing about the argument.  */
  if (attrs == NULL_TREE)
    return false;

  /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
  if (TREE_VALUE (attrs) == NULL_TREE)
    return true;

  /* Get the position number for ARG in the function signature.  */
  for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
       t;
       t = DECL_CHAIN (t), arg_num++)
    {
      if (t == arg)
	break;
    }

  gcc_assert (t == arg);

  /* Now see if ARG_NUM is mentioned in the nonnull list.  */
  for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
    {
      if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
	return true;
    }

  return false;
}


/* Set value range VR to VR_VARYING.  */

static inline void
set_value_range_to_varying (value_range_t *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}


/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      if (needs_overflow_infinity (TREE_TYPE (min)))
	gcc_assert (!is_overflow_infinity (min)
		    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}


/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.
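   For example, over unsigned char the wrapping range [250, 10] is
   canonicalized to the anti-range ~[11, 249], and the anti-range
   ~[0, 10] becomes the range [11, 255].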
*/ 459 460 static void 461 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t, 462 tree min, tree max, bitmap equiv) 463 { 464 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */ 465 if ((t != VR_RANGE 466 && t != VR_ANTI_RANGE) 467 || TREE_CODE (min) != INTEGER_CST 468 || TREE_CODE (max) != INTEGER_CST) 469 { 470 set_value_range (vr, t, min, max, equiv); 471 return; 472 } 473 474 /* Wrong order for min and max, to swap them and the VR type we need 475 to adjust them. */ 476 if (tree_int_cst_lt (max, min)) 477 { 478 tree one = build_int_cst (TREE_TYPE (min), 1); 479 tree tmp = int_const_binop (PLUS_EXPR, max, one); 480 max = int_const_binop (MINUS_EXPR, min, one); 481 min = tmp; 482 483 /* There's one corner case, if we had [C+1, C] before we now have 484 that again. But this represents an empty value range, so drop 485 to varying in this case. */ 486 if (tree_int_cst_lt (max, min)) 487 { 488 set_value_range_to_varying (vr); 489 return; 490 } 491 492 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE; 493 } 494 495 /* Anti-ranges that can be represented as ranges should be so. */ 496 if (t == VR_ANTI_RANGE) 497 { 498 bool is_min = vrp_val_is_min (min); 499 bool is_max = vrp_val_is_max (max); 500 501 if (is_min && is_max) 502 { 503 /* We cannot deal with empty ranges, drop to varying. */ 504 set_value_range_to_varying (vr); 505 return; 506 } 507 else if (is_min 508 /* As a special exception preserve non-null ranges. */ 509 && !(TYPE_UNSIGNED (TREE_TYPE (min)) 510 && integer_zerop (max))) 511 { 512 tree one = build_int_cst (TREE_TYPE (max), 1); 513 min = int_const_binop (PLUS_EXPR, max, one); 514 max = vrp_val_max (TREE_TYPE (max)); 515 t = VR_RANGE; 516 } 517 else if (is_max) 518 { 519 tree one = build_int_cst (TREE_TYPE (min), 1); 520 max = int_const_binop (MINUS_EXPR, min, one); 521 min = vrp_val_min (TREE_TYPE (min)); 522 t = VR_RANGE; 523 } 524 } 525 526 set_value_range (vr, t, min, max, equiv); 527 } 528 529 /* Copy value range FROM into value range TO. */ 530 531 static inline void 532 copy_value_range (value_range_t *to, value_range_t *from) 533 { 534 set_value_range (to, from->type, from->min, from->max, from->equiv); 535 } 536 537 /* Set value range VR to a single value. This function is only called 538 with values we get from statements, and exists to clear the 539 TREE_OVERFLOW flag so that we don't think we have an overflow 540 infinity when we shouldn't. */ 541 542 static inline void 543 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv) 544 { 545 gcc_assert (is_gimple_min_invariant (val)); 546 val = avoid_overflow_infinity (val); 547 set_value_range (vr, VR_RANGE, val, val, equiv); 548 } 549 550 /* Set value range VR to a non-negative range of type TYPE. 551 OVERFLOW_INFINITY indicates whether to use an overflow infinity 552 rather than TYPE_MAX_VALUE; this should be true if we determine 553 that the range is nonnegative based on the assumption that signed 554 overflow does not occur. */ 555 556 static inline void 557 set_value_range_to_nonnegative (value_range_t *vr, tree type, 558 bool overflow_infinity) 559 { 560 tree zero; 561 562 if (overflow_infinity && !supports_overflow_infinity (type)) 563 { 564 set_value_range_to_varying (vr); 565 return; 566 } 567 568 zero = build_int_cst (type, 0); 569 set_value_range (vr, VR_RANGE, zero, 570 (overflow_infinity 571 ? positive_overflow_infinity (type) 572 : TYPE_MAX_VALUE (type)), 573 vr->equiv); 574 } 575 576 /* Set value range VR to a non-NULL range of type TYPE. 
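   That is, the anti-range ~[0, 0], excluding only the zero (NULL)
   value of TYPE.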
*/ 577 578 static inline void 579 set_value_range_to_nonnull (value_range_t *vr, tree type) 580 { 581 tree zero = build_int_cst (type, 0); 582 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); 583 } 584 585 586 /* Set value range VR to a NULL range of type TYPE. */ 587 588 static inline void 589 set_value_range_to_null (value_range_t *vr, tree type) 590 { 591 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); 592 } 593 594 595 /* Set value range VR to a range of a truthvalue of type TYPE. */ 596 597 static inline void 598 set_value_range_to_truthvalue (value_range_t *vr, tree type) 599 { 600 if (TYPE_PRECISION (type) == 1) 601 set_value_range_to_varying (vr); 602 else 603 set_value_range (vr, VR_RANGE, 604 build_int_cst (type, 0), build_int_cst (type, 1), 605 vr->equiv); 606 } 607 608 609 /* Set value range VR to VR_UNDEFINED. */ 610 611 static inline void 612 set_value_range_to_undefined (value_range_t *vr) 613 { 614 vr->type = VR_UNDEFINED; 615 vr->min = vr->max = NULL_TREE; 616 if (vr->equiv) 617 bitmap_clear (vr->equiv); 618 } 619 620 621 /* If abs (min) < abs (max), set VR to [-max, max], if 622 abs (min) >= abs (max), set VR to [-min, min]. */ 623 624 static void 625 abs_extent_range (value_range_t *vr, tree min, tree max) 626 { 627 int cmp; 628 629 gcc_assert (TREE_CODE (min) == INTEGER_CST); 630 gcc_assert (TREE_CODE (max) == INTEGER_CST); 631 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); 632 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); 633 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); 634 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); 635 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) 636 { 637 set_value_range_to_varying (vr); 638 return; 639 } 640 cmp = compare_values (min, max); 641 if (cmp == -1) 642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); 643 else if (cmp == 0 || cmp == 1) 644 { 645 max = min; 646 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); 647 } 648 else 649 { 650 set_value_range_to_varying (vr); 651 return; 652 } 653 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); 654 } 655 656 657 /* Return value range information for VAR. 658 659 If we have no values ranges recorded (ie, VRP is not running), then 660 return NULL. Otherwise create an empty range if none existed for VAR. */ 661 662 static value_range_t * 663 get_value_range (const_tree var) 664 { 665 static const struct value_range_d vr_const_varying 666 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL }; 667 value_range_t *vr; 668 tree sym; 669 unsigned ver = SSA_NAME_VERSION (var); 670 671 /* If we have no recorded ranges, then return NULL. */ 672 if (! vr_value) 673 return NULL; 674 675 /* If we query the range for a new SSA name return an unmodifiable VARYING. 676 We should get here at most from the substitute-and-fold stage which 677 will never try to change values. */ 678 if (ver >= num_vr_values) 679 return CONST_CAST (value_range_t *, &vr_const_varying); 680 681 vr = vr_value[ver]; 682 if (vr) 683 return vr; 684 685 /* After propagation finished do not allocate new value-ranges. */ 686 if (values_propagated) 687 return CONST_CAST (value_range_t *, &vr_const_varying); 688 689 /* Create a default value range. */ 690 vr_value[ver] = vr = XCNEW (value_range_t); 691 692 /* Defer allocating the equivalence set. */ 693 vr->equiv = NULL; 694 695 /* If VAR is a default definition of a parameter, the variable can 696 take any value in VAR's type. 
*/ 697 sym = SSA_NAME_VAR (var); 698 if (SSA_NAME_IS_DEFAULT_DEF (var)) 699 { 700 if (TREE_CODE (sym) == PARM_DECL) 701 { 702 /* Try to use the "nonnull" attribute to create ~[0, 0] 703 anti-ranges for pointers. Note that this is only valid with 704 default definitions of PARM_DECLs. */ 705 if (POINTER_TYPE_P (TREE_TYPE (sym)) 706 && nonnull_arg_p (sym)) 707 set_value_range_to_nonnull (vr, TREE_TYPE (sym)); 708 else 709 set_value_range_to_varying (vr); 710 } 711 else if (TREE_CODE (sym) == RESULT_DECL 712 && DECL_BY_REFERENCE (sym)) 713 set_value_range_to_nonnull (vr, TREE_TYPE (sym)); 714 } 715 716 return vr; 717 } 718 719 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ 720 721 static inline bool 722 vrp_operand_equal_p (const_tree val1, const_tree val2) 723 { 724 if (val1 == val2) 725 return true; 726 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) 727 return false; 728 if (is_overflow_infinity (val1)) 729 return is_overflow_infinity (val2); 730 return true; 731 } 732 733 /* Return true, if the bitmaps B1 and B2 are equal. */ 734 735 static inline bool 736 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) 737 { 738 return (b1 == b2 739 || ((!b1 || bitmap_empty_p (b1)) 740 && (!b2 || bitmap_empty_p (b2))) 741 || (b1 && b2 742 && bitmap_equal_p (b1, b2))); 743 } 744 745 /* Update the value range and equivalence set for variable VAR to 746 NEW_VR. Return true if NEW_VR is different from VAR's previous 747 value. 748 749 NOTE: This function assumes that NEW_VR is a temporary value range 750 object created for the sole purpose of updating VAR's range. The 751 storage used by the equivalence set from NEW_VR will be freed by 752 this function. Do not call update_value_range when NEW_VR 753 is the range object associated with another SSA name. */ 754 755 static inline bool 756 update_value_range (const_tree var, value_range_t *new_vr) 757 { 758 value_range_t *old_vr; 759 bool is_new; 760 761 /* Update the value range, if necessary. */ 762 old_vr = get_value_range (var); 763 is_new = old_vr->type != new_vr->type 764 || !vrp_operand_equal_p (old_vr->min, new_vr->min) 765 || !vrp_operand_equal_p (old_vr->max, new_vr->max) 766 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv); 767 768 if (is_new) 769 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max, 770 new_vr->equiv); 771 772 BITMAP_FREE (new_vr->equiv); 773 774 return is_new; 775 } 776 777 778 /* Add VAR and VAR's equivalence set to EQUIV. This is the central 779 point where equivalence processing can be turned on/off. */ 780 781 static void 782 add_equivalence (bitmap *equiv, const_tree var) 783 { 784 unsigned ver = SSA_NAME_VERSION (var); 785 value_range_t *vr = vr_value[ver]; 786 787 if (*equiv == NULL) 788 *equiv = BITMAP_ALLOC (NULL); 789 bitmap_set_bit (*equiv, ver); 790 if (vr && vr->equiv) 791 bitmap_ior_into (*equiv, vr->equiv); 792 } 793 794 795 /* Return true if VR is ~[0, 0]. */ 796 797 static inline bool 798 range_is_nonnull (value_range_t *vr) 799 { 800 return vr->type == VR_ANTI_RANGE 801 && integer_zerop (vr->min) 802 && integer_zerop (vr->max); 803 } 804 805 806 /* Return true if VR is [0, 0]. */ 807 808 static inline bool 809 range_is_null (value_range_t *vr) 810 { 811 return vr->type == VR_RANGE 812 && integer_zerop (vr->min) 813 && integer_zerop (vr->max); 814 } 815 816 /* Return true if max and min of VR are INTEGER_CST. It's not necessary 817 a singleton. 
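   For example, both [7, 7] and [0, 255] satisfy this predicate;
   whether the range is a singleton is checked separately by
   range_int_cst_singleton_p below.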
*/ 818 819 static inline bool 820 range_int_cst_p (value_range_t *vr) 821 { 822 return (vr->type == VR_RANGE 823 && TREE_CODE (vr->max) == INTEGER_CST 824 && TREE_CODE (vr->min) == INTEGER_CST 825 && !TREE_OVERFLOW (vr->max) 826 && !TREE_OVERFLOW (vr->min)); 827 } 828 829 /* Return true if VR is a INTEGER_CST singleton. */ 830 831 static inline bool 832 range_int_cst_singleton_p (value_range_t *vr) 833 { 834 return (range_int_cst_p (vr) 835 && tree_int_cst_equal (vr->min, vr->max)); 836 } 837 838 /* Return true if value range VR involves at least one symbol. */ 839 840 static inline bool 841 symbolic_range_p (value_range_t *vr) 842 { 843 return (!is_gimple_min_invariant (vr->min) 844 || !is_gimple_min_invariant (vr->max)); 845 } 846 847 /* Return true if value range VR uses an overflow infinity. */ 848 849 static inline bool 850 overflow_infinity_range_p (value_range_t *vr) 851 { 852 return (vr->type == VR_RANGE 853 && (is_overflow_infinity (vr->min) 854 || is_overflow_infinity (vr->max))); 855 } 856 857 /* Return false if we can not make a valid comparison based on VR; 858 this will be the case if it uses an overflow infinity and overflow 859 is not undefined (i.e., -fno-strict-overflow is in effect). 860 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR 861 uses an overflow infinity. */ 862 863 static bool 864 usable_range_p (value_range_t *vr, bool *strict_overflow_p) 865 { 866 gcc_assert (vr->type == VR_RANGE); 867 if (is_overflow_infinity (vr->min)) 868 { 869 *strict_overflow_p = true; 870 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min))) 871 return false; 872 } 873 if (is_overflow_infinity (vr->max)) 874 { 875 *strict_overflow_p = true; 876 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max))) 877 return false; 878 } 879 return true; 880 } 881 882 883 /* Return true if the result of assignment STMT is know to be non-negative. 884 If the return value is based on the assumption that signed overflow is 885 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change 886 *STRICT_OVERFLOW_P.*/ 887 888 static bool 889 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) 890 { 891 enum tree_code code = gimple_assign_rhs_code (stmt); 892 switch (get_gimple_rhs_class (code)) 893 { 894 case GIMPLE_UNARY_RHS: 895 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), 896 gimple_expr_type (stmt), 897 gimple_assign_rhs1 (stmt), 898 strict_overflow_p); 899 case GIMPLE_BINARY_RHS: 900 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt), 901 gimple_expr_type (stmt), 902 gimple_assign_rhs1 (stmt), 903 gimple_assign_rhs2 (stmt), 904 strict_overflow_p); 905 case GIMPLE_TERNARY_RHS: 906 return false; 907 case GIMPLE_SINGLE_RHS: 908 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt), 909 strict_overflow_p); 910 case GIMPLE_INVALID_RHS: 911 gcc_unreachable (); 912 default: 913 gcc_unreachable (); 914 } 915 } 916 917 /* Return true if return value of call STMT is know to be non-negative. 918 If the return value is based on the assumption that signed overflow is 919 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change 920 *STRICT_OVERFLOW_P.*/ 921 922 static bool 923 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p) 924 { 925 tree arg0 = gimple_call_num_args (stmt) > 0 ? 926 gimple_call_arg (stmt, 0) : NULL_TREE; 927 tree arg1 = gimple_call_num_args (stmt) > 1 ? 
    gimple_call_arg (stmt, 1) : NULL_TREE;

  return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
					gimple_call_fndecl (stmt),
					arg0,
					arg1,
					strict_overflow_p);
}

/* Return true if STMT is known to compute a non-negative value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
    default:
      gcc_unreachable ();
    }
}

/* Return true if the result of assignment STMT is known to be non-zero.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					 gimple_expr_type (stmt),
					 gimple_assign_rhs1 (stmt),
					 strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					  gimple_expr_type (stmt),
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
					  strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if STMT is known to compute a non-zero value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_alloca_call_p (stmt);
    default:
      gcc_unreachable ();
    }
}

/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
	  && TREE_CODE (base) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	{
	  value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
	  if (range_is_nonnull (vr))
	    return true;
	}
    }

  return false;
}

/* Returns true if EXPR is a valid value (as expected by compare_values) --
   a gimple invariant, or SSA_NAME +- CST.
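   For example, 'i_4', '42' and 'i_4 + 16' are all valid values, while
   'i_4 + j_5' is not.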
*/ 1041 1042 static bool 1043 valid_value_p (tree expr) 1044 { 1045 if (TREE_CODE (expr) == SSA_NAME) 1046 return true; 1047 1048 if (TREE_CODE (expr) == PLUS_EXPR 1049 || TREE_CODE (expr) == MINUS_EXPR) 1050 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME 1051 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST); 1052 1053 return is_gimple_min_invariant (expr); 1054 } 1055 1056 /* Return 1057 1 if VAL < VAL2 1058 0 if !(VAL < VAL2) 1059 -2 if those are incomparable. */ 1060 static inline int 1061 operand_less_p (tree val, tree val2) 1062 { 1063 /* LT is folded faster than GE and others. Inline the common case. */ 1064 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) 1065 { 1066 if (TYPE_UNSIGNED (TREE_TYPE (val))) 1067 return INT_CST_LT_UNSIGNED (val, val2); 1068 else 1069 { 1070 if (INT_CST_LT (val, val2)) 1071 return 1; 1072 } 1073 } 1074 else 1075 { 1076 tree tcmp; 1077 1078 fold_defer_overflow_warnings (); 1079 1080 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2); 1081 1082 fold_undefer_and_ignore_overflow_warnings (); 1083 1084 if (!tcmp 1085 || TREE_CODE (tcmp) != INTEGER_CST) 1086 return -2; 1087 1088 if (!integer_zerop (tcmp)) 1089 return 1; 1090 } 1091 1092 /* val >= val2, not considering overflow infinity. */ 1093 if (is_negative_overflow_infinity (val)) 1094 return is_negative_overflow_infinity (val2) ? 0 : 1; 1095 else if (is_positive_overflow_infinity (val2)) 1096 return is_positive_overflow_infinity (val) ? 0 : 1; 1097 1098 return 0; 1099 } 1100 1101 /* Compare two values VAL1 and VAL2. Return 1102 1103 -2 if VAL1 and VAL2 cannot be compared at compile-time, 1104 -1 if VAL1 < VAL2, 1105 0 if VAL1 == VAL2, 1106 +1 if VAL1 > VAL2, and 1107 +2 if VAL1 != VAL2 1108 1109 This is similar to tree_int_cst_compare but supports pointer values 1110 and values that cannot be compared at compile time. 1111 1112 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to 1113 true if the return value is only valid if we assume that signed 1114 overflow is undefined. */ 1115 1116 static int 1117 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) 1118 { 1119 if (val1 == val2) 1120 return 0; 1121 1122 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or 1123 both integers. */ 1124 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1)) 1125 == POINTER_TYPE_P (TREE_TYPE (val2))); 1126 /* Convert the two values into the same type. This is needed because 1127 sizetype causes sign extension even for unsigned types. */ 1128 val2 = fold_convert (TREE_TYPE (val1), val2); 1129 STRIP_USELESS_TYPE_CONVERSION (val2); 1130 1131 if ((TREE_CODE (val1) == SSA_NAME 1132 || TREE_CODE (val1) == PLUS_EXPR 1133 || TREE_CODE (val1) == MINUS_EXPR) 1134 && (TREE_CODE (val2) == SSA_NAME 1135 || TREE_CODE (val2) == PLUS_EXPR 1136 || TREE_CODE (val2) == MINUS_EXPR)) 1137 { 1138 tree n1, c1, n2, c2; 1139 enum tree_code code1, code2; 1140 1141 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME', 1142 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the 1143 same name, return -2. 
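         For example, assuming signed overflow is undefined, 'n_5 + 8'
         compares greater than 'n_5' (+1), while 'n_5 + 8' and 'm_9 + 8'
         use different names and yield -2.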
*/ 1144 if (TREE_CODE (val1) == SSA_NAME) 1145 { 1146 code1 = SSA_NAME; 1147 n1 = val1; 1148 c1 = NULL_TREE; 1149 } 1150 else 1151 { 1152 code1 = TREE_CODE (val1); 1153 n1 = TREE_OPERAND (val1, 0); 1154 c1 = TREE_OPERAND (val1, 1); 1155 if (tree_int_cst_sgn (c1) == -1) 1156 { 1157 if (is_negative_overflow_infinity (c1)) 1158 return -2; 1159 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1); 1160 if (!c1) 1161 return -2; 1162 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR; 1163 } 1164 } 1165 1166 if (TREE_CODE (val2) == SSA_NAME) 1167 { 1168 code2 = SSA_NAME; 1169 n2 = val2; 1170 c2 = NULL_TREE; 1171 } 1172 else 1173 { 1174 code2 = TREE_CODE (val2); 1175 n2 = TREE_OPERAND (val2, 0); 1176 c2 = TREE_OPERAND (val2, 1); 1177 if (tree_int_cst_sgn (c2) == -1) 1178 { 1179 if (is_negative_overflow_infinity (c2)) 1180 return -2; 1181 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2); 1182 if (!c2) 1183 return -2; 1184 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR; 1185 } 1186 } 1187 1188 /* Both values must use the same name. */ 1189 if (n1 != n2) 1190 return -2; 1191 1192 if (code1 == SSA_NAME 1193 && code2 == SSA_NAME) 1194 /* NAME == NAME */ 1195 return 0; 1196 1197 /* If overflow is defined we cannot simplify more. */ 1198 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1))) 1199 return -2; 1200 1201 if (strict_overflow_p != NULL 1202 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1)) 1203 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2))) 1204 *strict_overflow_p = true; 1205 1206 if (code1 == SSA_NAME) 1207 { 1208 if (code2 == PLUS_EXPR) 1209 /* NAME < NAME + CST */ 1210 return -1; 1211 else if (code2 == MINUS_EXPR) 1212 /* NAME > NAME - CST */ 1213 return 1; 1214 } 1215 else if (code1 == PLUS_EXPR) 1216 { 1217 if (code2 == SSA_NAME) 1218 /* NAME + CST > NAME */ 1219 return 1; 1220 else if (code2 == PLUS_EXPR) 1221 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */ 1222 return compare_values_warnv (c1, c2, strict_overflow_p); 1223 else if (code2 == MINUS_EXPR) 1224 /* NAME + CST1 > NAME - CST2 */ 1225 return 1; 1226 } 1227 else if (code1 == MINUS_EXPR) 1228 { 1229 if (code2 == SSA_NAME) 1230 /* NAME - CST < NAME */ 1231 return -1; 1232 else if (code2 == PLUS_EXPR) 1233 /* NAME - CST1 < NAME + CST2 */ 1234 return -1; 1235 else if (code2 == MINUS_EXPR) 1236 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that 1237 C1 and C2 are swapped in the call to compare_values. */ 1238 return compare_values_warnv (c2, c1, strict_overflow_p); 1239 } 1240 1241 gcc_unreachable (); 1242 } 1243 1244 /* We cannot compare non-constants. */ 1245 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)) 1246 return -2; 1247 1248 if (!POINTER_TYPE_P (TREE_TYPE (val1))) 1249 { 1250 /* We cannot compare overflowed values, except for overflow 1251 infinities. */ 1252 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) 1253 { 1254 if (strict_overflow_p != NULL) 1255 *strict_overflow_p = true; 1256 if (is_negative_overflow_infinity (val1)) 1257 return is_negative_overflow_infinity (val2) ? 0 : -1; 1258 else if (is_negative_overflow_infinity (val2)) 1259 return 1; 1260 else if (is_positive_overflow_infinity (val1)) 1261 return is_positive_overflow_infinity (val2) ? 0 : 1; 1262 else if (is_positive_overflow_infinity (val2)) 1263 return -1; 1264 return -2; 1265 } 1266 1267 return tree_int_cst_compare (val1, val2); 1268 } 1269 else 1270 { 1271 tree t; 1272 1273 /* First see if VAL1 and VAL2 are not the same. 
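         For pointers we can typically only prove equality or inequality;
         e.g. '&a' equals '&a' (0), while for the addresses of two distinct
         variables the most we can usually conclude is that they differ (+2).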
*/ 1274 if (val1 == val2 || operand_equal_p (val1, val2, 0)) 1275 return 0; 1276 1277 /* If VAL1 is a lower address than VAL2, return -1. */ 1278 if (operand_less_p (val1, val2) == 1) 1279 return -1; 1280 1281 /* If VAL1 is a higher address than VAL2, return +1. */ 1282 if (operand_less_p (val2, val1) == 1) 1283 return 1; 1284 1285 /* If VAL1 is different than VAL2, return +2. 1286 For integer constants we either have already returned -1 or 1 1287 or they are equivalent. We still might succeed in proving 1288 something about non-trivial operands. */ 1289 if (TREE_CODE (val1) != INTEGER_CST 1290 || TREE_CODE (val2) != INTEGER_CST) 1291 { 1292 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2); 1293 if (t && integer_onep (t)) 1294 return 2; 1295 } 1296 1297 return -2; 1298 } 1299 } 1300 1301 /* Compare values like compare_values_warnv, but treat comparisons of 1302 nonconstants which rely on undefined overflow as incomparable. */ 1303 1304 static int 1305 compare_values (tree val1, tree val2) 1306 { 1307 bool sop; 1308 int ret; 1309 1310 sop = false; 1311 ret = compare_values_warnv (val1, val2, &sop); 1312 if (sop 1313 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))) 1314 ret = -2; 1315 return ret; 1316 } 1317 1318 1319 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX, 1320 0 if VAL is not inside [MIN, MAX], 1321 -2 if we cannot tell either way. 1322 1323 Benchmark compile/20001226-1.c compilation time after changing this 1324 function. */ 1325 1326 static inline int 1327 value_inside_range (tree val, tree min, tree max) 1328 { 1329 int cmp1, cmp2; 1330 1331 cmp1 = operand_less_p (val, min); 1332 if (cmp1 == -2) 1333 return -2; 1334 if (cmp1 == 1) 1335 return 0; 1336 1337 cmp2 = operand_less_p (max, val); 1338 if (cmp2 == -2) 1339 return -2; 1340 1341 return !cmp2; 1342 } 1343 1344 1345 /* Return true if value ranges VR0 and VR1 have a non-empty 1346 intersection. 1347 1348 Benchmark compile/20001226-1.c compilation time after changing this 1349 function. 1350 */ 1351 1352 static inline bool 1353 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1) 1354 { 1355 /* The value ranges do not intersect if the maximum of the first range is 1356 less than the minimum of the second range or vice versa. 1357 When those relations are unknown, we can't do any better. */ 1358 if (operand_less_p (vr0->max, vr1->min) != 0) 1359 return false; 1360 if (operand_less_p (vr1->max, vr0->min) != 0) 1361 return false; 1362 return true; 1363 } 1364 1365 1366 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not 1367 include the value zero, -2 if we cannot tell. */ 1368 1369 static inline int 1370 range_includes_zero_p (tree min, tree max) 1371 { 1372 tree zero = build_int_cst (TREE_TYPE (min), 0); 1373 return value_inside_range (zero, min, max); 1374 } 1375 1376 /* Return true if *VR is know to only contain nonnegative values. */ 1377 1378 static inline bool 1379 value_range_nonnegative_p (value_range_t *vr) 1380 { 1381 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range 1382 which would return a useful value should be encoded as a 1383 VR_RANGE. */ 1384 if (vr->type == VR_RANGE) 1385 { 1386 int result = compare_values (vr->min, integer_zero_node); 1387 return (result == 0 || result == 1); 1388 } 1389 1390 return false; 1391 } 1392 1393 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return 1394 false otherwise or if no value range information is available. 
*/ 1395 1396 bool 1397 ssa_name_nonnegative_p (const_tree t) 1398 { 1399 value_range_t *vr = get_value_range (t); 1400 1401 if (INTEGRAL_TYPE_P (t) 1402 && TYPE_UNSIGNED (t)) 1403 return true; 1404 1405 if (!vr) 1406 return false; 1407 1408 return value_range_nonnegative_p (vr); 1409 } 1410 1411 /* If *VR has a value rante that is a single constant value return that, 1412 otherwise return NULL_TREE. */ 1413 1414 static tree 1415 value_range_constant_singleton (value_range_t *vr) 1416 { 1417 if (vr->type == VR_RANGE 1418 && operand_equal_p (vr->min, vr->max, 0) 1419 && is_gimple_min_invariant (vr->min)) 1420 return vr->min; 1421 1422 return NULL_TREE; 1423 } 1424 1425 /* If OP has a value range with a single constant value return that, 1426 otherwise return NULL_TREE. This returns OP itself if OP is a 1427 constant. */ 1428 1429 static tree 1430 op_with_constant_singleton_value_range (tree op) 1431 { 1432 if (is_gimple_min_invariant (op)) 1433 return op; 1434 1435 if (TREE_CODE (op) != SSA_NAME) 1436 return NULL_TREE; 1437 1438 return value_range_constant_singleton (get_value_range (op)); 1439 } 1440 1441 /* Return true if op is in a boolean [0, 1] value-range. */ 1442 1443 static bool 1444 op_with_boolean_value_range_p (tree op) 1445 { 1446 value_range_t *vr; 1447 1448 if (TYPE_PRECISION (TREE_TYPE (op)) == 1) 1449 return true; 1450 1451 if (integer_zerop (op) 1452 || integer_onep (op)) 1453 return true; 1454 1455 if (TREE_CODE (op) != SSA_NAME) 1456 return false; 1457 1458 vr = get_value_range (op); 1459 return (vr->type == VR_RANGE 1460 && integer_zerop (vr->min) 1461 && integer_onep (vr->max)); 1462 } 1463 1464 /* Extract value range information from an ASSERT_EXPR EXPR and store 1465 it in *VR_P. */ 1466 1467 static void 1468 extract_range_from_assert (value_range_t *vr_p, tree expr) 1469 { 1470 tree var, cond, limit, min, max, type; 1471 value_range_t *var_vr, *limit_vr; 1472 enum tree_code cond_code; 1473 1474 var = ASSERT_EXPR_VAR (expr); 1475 cond = ASSERT_EXPR_COND (expr); 1476 1477 gcc_assert (COMPARISON_CLASS_P (cond)); 1478 1479 /* Find VAR in the ASSERT_EXPR conditional. */ 1480 if (var == TREE_OPERAND (cond, 0) 1481 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR 1482 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR) 1483 { 1484 /* If the predicate is of the form VAR COMP LIMIT, then we just 1485 take LIMIT from the RHS and use the same comparison code. */ 1486 cond_code = TREE_CODE (cond); 1487 limit = TREE_OPERAND (cond, 1); 1488 cond = TREE_OPERAND (cond, 0); 1489 } 1490 else 1491 { 1492 /* If the predicate is of the form LIMIT COMP VAR, then we need 1493 to flip around the comparison code to create the proper range 1494 for VAR. */ 1495 cond_code = swap_tree_comparison (TREE_CODE (cond)); 1496 limit = TREE_OPERAND (cond, 0); 1497 cond = TREE_OPERAND (cond, 1); 1498 } 1499 1500 limit = avoid_overflow_infinity (limit); 1501 1502 type = TREE_TYPE (var); 1503 gcc_assert (limit != var); 1504 1505 /* For pointer arithmetic, we only keep track of pointer equality 1506 and inequality. */ 1507 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR) 1508 { 1509 set_value_range_to_varying (vr_p); 1510 return; 1511 } 1512 1513 /* If LIMIT is another SSA name and LIMIT has a range of its own, 1514 try to use LIMIT's range to avoid creating symbolic ranges 1515 unnecessarily. */ 1516 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL; 1517 1518 /* LIMIT's range is only interesting if it has any useful information. 
*/ 1519 if (limit_vr 1520 && (limit_vr->type == VR_UNDEFINED 1521 || limit_vr->type == VR_VARYING 1522 || symbolic_range_p (limit_vr))) 1523 limit_vr = NULL; 1524 1525 /* Initially, the new range has the same set of equivalences of 1526 VAR's range. This will be revised before returning the final 1527 value. Since assertions may be chained via mutually exclusive 1528 predicates, we will need to trim the set of equivalences before 1529 we are done. */ 1530 gcc_assert (vr_p->equiv == NULL); 1531 add_equivalence (&vr_p->equiv, var); 1532 1533 /* Extract a new range based on the asserted comparison for VAR and 1534 LIMIT's value range. Notice that if LIMIT has an anti-range, we 1535 will only use it for equality comparisons (EQ_EXPR). For any 1536 other kind of assertion, we cannot derive a range from LIMIT's 1537 anti-range that can be used to describe the new range. For 1538 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10], 1539 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is 1540 no single range for x_2 that could describe LE_EXPR, so we might 1541 as well build the range [b_4, +INF] for it. 1542 One special case we handle is extracting a range from a 1543 range test encoded as (unsigned)var + CST <= limit. */ 1544 if (TREE_CODE (cond) == NOP_EXPR 1545 || TREE_CODE (cond) == PLUS_EXPR) 1546 { 1547 if (TREE_CODE (cond) == PLUS_EXPR) 1548 { 1549 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)), 1550 TREE_OPERAND (cond, 1)); 1551 max = int_const_binop (PLUS_EXPR, limit, min); 1552 cond = TREE_OPERAND (cond, 0); 1553 } 1554 else 1555 { 1556 min = build_int_cst (TREE_TYPE (var), 0); 1557 max = limit; 1558 } 1559 1560 /* Make sure to not set TREE_OVERFLOW on the final type 1561 conversion. We are willingly interpreting large positive 1562 unsigned values as negative singed values here. */ 1563 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min), 1564 0, false); 1565 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max), 1566 0, false); 1567 1568 /* We can transform a max, min range to an anti-range or 1569 vice-versa. Use set_and_canonicalize_value_range which does 1570 this for us. */ 1571 if (cond_code == LE_EXPR) 1572 set_and_canonicalize_value_range (vr_p, VR_RANGE, 1573 min, max, vr_p->equiv); 1574 else if (cond_code == GT_EXPR) 1575 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE, 1576 min, max, vr_p->equiv); 1577 else 1578 gcc_unreachable (); 1579 } 1580 else if (cond_code == EQ_EXPR) 1581 { 1582 enum value_range_type range_type; 1583 1584 if (limit_vr) 1585 { 1586 range_type = limit_vr->type; 1587 min = limit_vr->min; 1588 max = limit_vr->max; 1589 } 1590 else 1591 { 1592 range_type = VR_RANGE; 1593 min = limit; 1594 max = limit; 1595 } 1596 1597 set_value_range (vr_p, range_type, min, max, vr_p->equiv); 1598 1599 /* When asserting the equality VAR == LIMIT and LIMIT is another 1600 SSA name, the new range will also inherit the equivalence set 1601 from LIMIT. */ 1602 if (TREE_CODE (limit) == SSA_NAME) 1603 add_equivalence (&vr_p->equiv, limit); 1604 } 1605 else if (cond_code == NE_EXPR) 1606 { 1607 /* As described above, when LIMIT's range is an anti-range and 1608 this assertion is an inequality (NE_EXPR), then we cannot 1609 derive anything from the anti-range. For instance, if 1610 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does 1611 not imply that VAR's range is [0, 0]. So, in the case of 1612 anti-ranges, we just assert the inequality using LIMIT and 1613 not its anti-range. 
1614 1615 If LIMIT_VR is a range, we can only use it to build a new 1616 anti-range if LIMIT_VR is a single-valued range. For 1617 instance, if LIMIT_VR is [0, 1], the predicate 1618 VAR != [0, 1] does not mean that VAR's range is ~[0, 1]. 1619 Rather, it means that for value 0 VAR should be ~[0, 0] 1620 and for value 1, VAR should be ~[1, 1]. We cannot 1621 represent these ranges. 1622 1623 The only situation in which we can build a valid 1624 anti-range is when LIMIT_VR is a single-valued range 1625 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case, 1626 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */ 1627 if (limit_vr 1628 && limit_vr->type == VR_RANGE 1629 && compare_values (limit_vr->min, limit_vr->max) == 0) 1630 { 1631 min = limit_vr->min; 1632 max = limit_vr->max; 1633 } 1634 else 1635 { 1636 /* In any other case, we cannot use LIMIT's range to build a 1637 valid anti-range. */ 1638 min = max = limit; 1639 } 1640 1641 /* If MIN and MAX cover the whole range for their type, then 1642 just use the original LIMIT. */ 1643 if (INTEGRAL_TYPE_P (type) 1644 && vrp_val_is_min (min) 1645 && vrp_val_is_max (max)) 1646 min = max = limit; 1647 1648 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv); 1649 } 1650 else if (cond_code == LE_EXPR || cond_code == LT_EXPR) 1651 { 1652 min = TYPE_MIN_VALUE (type); 1653 1654 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) 1655 max = limit; 1656 else 1657 { 1658 /* If LIMIT_VR is of the form [N1, N2], we need to build the 1659 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for 1660 LT_EXPR. */ 1661 max = limit_vr->max; 1662 } 1663 1664 /* If the maximum value forces us to be out of bounds, simply punt. 1665 It would be pointless to try and do anything more since this 1666 all should be optimized away above us. */ 1667 if ((cond_code == LT_EXPR 1668 && compare_values (max, min) == 0) 1669 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max))) 1670 set_value_range_to_varying (vr_p); 1671 else 1672 { 1673 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */ 1674 if (cond_code == LT_EXPR) 1675 { 1676 if (TYPE_PRECISION (TREE_TYPE (max)) == 1 1677 && !TYPE_UNSIGNED (TREE_TYPE (max))) 1678 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max, 1679 build_int_cst (TREE_TYPE (max), -1)); 1680 else 1681 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max, 1682 build_int_cst (TREE_TYPE (max), 1)); 1683 if (EXPR_P (max)) 1684 TREE_NO_WARNING (max) = 1; 1685 } 1686 1687 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1688 } 1689 } 1690 else if (cond_code == GE_EXPR || cond_code == GT_EXPR) 1691 { 1692 max = TYPE_MAX_VALUE (type); 1693 1694 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) 1695 min = limit; 1696 else 1697 { 1698 /* If LIMIT_VR is of the form [N1, N2], we need to build the 1699 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for 1700 GT_EXPR. */ 1701 min = limit_vr->min; 1702 } 1703 1704 /* If the minimum value forces us to be out of bounds, simply punt. 1705 It would be pointless to try and do anything more since this 1706 all should be optimized away above us. */ 1707 if ((cond_code == GT_EXPR 1708 && compare_values (min, max) == 0) 1709 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min))) 1710 set_value_range_to_varying (vr_p); 1711 else 1712 { 1713 /* For GT_EXPR, we create the range [MIN + 1, MAX]. 
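             For instance, the assertion 'x_4 > 5' on a signed int yields
             the range [6, INT_MAX].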
*/ 1714 if (cond_code == GT_EXPR) 1715 { 1716 if (TYPE_PRECISION (TREE_TYPE (min)) == 1 1717 && !TYPE_UNSIGNED (TREE_TYPE (min))) 1718 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min, 1719 build_int_cst (TREE_TYPE (min), -1)); 1720 else 1721 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min, 1722 build_int_cst (TREE_TYPE (min), 1)); 1723 if (EXPR_P (min)) 1724 TREE_NO_WARNING (min) = 1; 1725 } 1726 1727 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1728 } 1729 } 1730 else 1731 gcc_unreachable (); 1732 1733 /* If VAR already had a known range, it may happen that the new 1734 range we have computed and VAR's range are not compatible. For 1735 instance, 1736 1737 if (p_5 == NULL) 1738 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>; 1739 x_7 = p_6->fld; 1740 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>; 1741 1742 While the above comes from a faulty program, it will cause an ICE 1743 later because p_8 and p_6 will have incompatible ranges and at 1744 the same time will be considered equivalent. A similar situation 1745 would arise from 1746 1747 if (i_5 > 10) 1748 i_6 = ASSERT_EXPR <i_5, i_5 > 10>; 1749 if (i_5 < 5) 1750 i_7 = ASSERT_EXPR <i_6, i_6 < 5>; 1751 1752 Again i_6 and i_7 will have incompatible ranges. It would be 1753 pointless to try and do anything with i_7's range because 1754 anything dominated by 'if (i_5 < 5)' will be optimized away. 1755 Note, due to the wa in which simulation proceeds, the statement 1756 i_7 = ASSERT_EXPR <...> we would never be visited because the 1757 conditional 'if (i_5 < 5)' always evaluates to false. However, 1758 this extra check does not hurt and may protect against future 1759 changes to VRP that may get into a situation similar to the 1760 NULL pointer dereference example. 1761 1762 Note that these compatibility tests are only needed when dealing 1763 with ranges or a mix of range and anti-range. If VAR_VR and VR_P 1764 are both anti-ranges, they will always be compatible, because two 1765 anti-ranges will always have a non-empty intersection. */ 1766 1767 var_vr = get_value_range (var); 1768 1769 /* We may need to make adjustments when VR_P and VAR_VR are numeric 1770 ranges or anti-ranges. */ 1771 if (vr_p->type == VR_VARYING 1772 || vr_p->type == VR_UNDEFINED 1773 || var_vr->type == VR_VARYING 1774 || var_vr->type == VR_UNDEFINED 1775 || symbolic_range_p (vr_p) 1776 || symbolic_range_p (var_vr)) 1777 return; 1778 1779 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE) 1780 { 1781 /* If the two ranges have a non-empty intersection, we can 1782 refine the resulting range. Since the assert expression 1783 creates an equivalency and at the same time it asserts a 1784 predicate, we can take the intersection of the two ranges to 1785 get better precision. */ 1786 if (value_ranges_intersect_p (var_vr, vr_p)) 1787 { 1788 /* Use the larger of the two minimums. */ 1789 if (compare_values (vr_p->min, var_vr->min) == -1) 1790 min = var_vr->min; 1791 else 1792 min = vr_p->min; 1793 1794 /* Use the smaller of the two maximums. */ 1795 if (compare_values (vr_p->max, var_vr->max) == 1) 1796 max = var_vr->max; 1797 else 1798 max = vr_p->max; 1799 1800 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv); 1801 } 1802 else 1803 { 1804 /* The two ranges do not intersect, set the new range to 1805 VARYING, because we will not be able to do anything 1806 meaningful with it. 
*/ 1807 set_value_range_to_varying (vr_p); 1808 } 1809 } 1810 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE) 1811 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE)) 1812 { 1813 /* A range and an anti-range will cancel each other only if 1814 their ends are the same. For instance, in the example above, 1815 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible, 1816 so VR_P should be set to VR_VARYING. */ 1817 if (compare_values (var_vr->min, vr_p->min) == 0 1818 && compare_values (var_vr->max, vr_p->max) == 0) 1819 set_value_range_to_varying (vr_p); 1820 else 1821 { 1822 tree min, max, anti_min, anti_max, real_min, real_max; 1823 int cmp; 1824 1825 /* We want to compute the logical AND of the two ranges; 1826 there are three cases to consider. 1827 1828 1829 1. The VR_ANTI_RANGE range is completely within the 1830 VR_RANGE and the endpoints of the ranges are 1831 different. In that case the resulting range 1832 should be whichever range is more precise. 1833 Typically that will be the VR_RANGE. 1834 1835 2. The VR_ANTI_RANGE is completely disjoint from 1836 the VR_RANGE. In this case the resulting range 1837 should be the VR_RANGE. 1838 1839 3. There is some overlap between the VR_ANTI_RANGE 1840 and the VR_RANGE. 1841 1842 3a. If the high limit of the VR_ANTI_RANGE resides 1843 within the VR_RANGE, then the result is a new 1844 VR_RANGE starting at the high limit of the 1845 VR_ANTI_RANGE + 1 and extending to the 1846 high limit of the original VR_RANGE. 1847 1848 3b. If the low limit of the VR_ANTI_RANGE resides 1849 within the VR_RANGE, then the result is a new 1850 VR_RANGE starting at the low limit of the original 1851 VR_RANGE and extending to the low limit of the 1852 VR_ANTI_RANGE - 1. */ 1853 if (vr_p->type == VR_ANTI_RANGE) 1854 { 1855 anti_min = vr_p->min; 1856 anti_max = vr_p->max; 1857 real_min = var_vr->min; 1858 real_max = var_vr->max; 1859 } 1860 else 1861 { 1862 anti_min = var_vr->min; 1863 anti_max = var_vr->max; 1864 real_min = vr_p->min; 1865 real_max = vr_p->max; 1866 } 1867 1868 1869 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE, 1870 not including any endpoints. */ 1871 if (compare_values (anti_max, real_max) == -1 1872 && compare_values (anti_min, real_min) == 1) 1873 { 1874 /* If the range is covering the whole valid range of 1875 the type keep the anti-range. */ 1876 if (!vrp_val_is_min (real_min) 1877 || !vrp_val_is_max (real_max)) 1878 set_value_range (vr_p, VR_RANGE, real_min, 1879 real_max, vr_p->equiv); 1880 } 1881 /* Case 2, VR_ANTI_RANGE completely disjoint from 1882 VR_RANGE. */ 1883 else if (compare_values (anti_min, real_max) == 1 1884 || compare_values (anti_max, real_min) == -1) 1885 { 1886 set_value_range (vr_p, VR_RANGE, real_min, 1887 real_max, vr_p->equiv); 1888 } 1889 /* Case 3a, the anti-range extends into the low 1890 part of the real range. Thus creating a new 1891 low for the real range. 
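             For example, intersecting the range [0, 100] with the
             anti-range ~[-10, 5] gives [6, 100].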
*/ 1892 else if (((cmp = compare_values (anti_max, real_min)) == 1 1893 || cmp == 0) 1894 && compare_values (anti_max, real_max) == -1) 1895 { 1896 gcc_assert (!is_positive_overflow_infinity (anti_max)); 1897 if (needs_overflow_infinity (TREE_TYPE (anti_max)) 1898 && vrp_val_is_max (anti_max)) 1899 { 1900 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min))) 1901 { 1902 set_value_range_to_varying (vr_p); 1903 return; 1904 } 1905 min = positive_overflow_infinity (TREE_TYPE (var_vr->min)); 1906 } 1907 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min))) 1908 { 1909 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1 1910 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min))) 1911 min = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min), 1912 anti_max, 1913 build_int_cst (TREE_TYPE (var_vr->min), 1914 -1)); 1915 else 1916 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min), 1917 anti_max, 1918 build_int_cst (TREE_TYPE (var_vr->min), 1919 1)); 1920 } 1921 else 1922 min = fold_build_pointer_plus_hwi (anti_max, 1); 1923 max = real_max; 1924 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1925 } 1926 /* Case 3b, the anti-range extends into the high 1927 part of the real range. Thus creating a new 1928 higher for the real range. */ 1929 else if (compare_values (anti_min, real_min) == 1 1930 && ((cmp = compare_values (anti_min, real_max)) == -1 1931 || cmp == 0)) 1932 { 1933 gcc_assert (!is_negative_overflow_infinity (anti_min)); 1934 if (needs_overflow_infinity (TREE_TYPE (anti_min)) 1935 && vrp_val_is_min (anti_min)) 1936 { 1937 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min))) 1938 { 1939 set_value_range_to_varying (vr_p); 1940 return; 1941 } 1942 max = negative_overflow_infinity (TREE_TYPE (var_vr->min)); 1943 } 1944 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min))) 1945 { 1946 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1 1947 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min))) 1948 max = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min), 1949 anti_min, 1950 build_int_cst (TREE_TYPE (var_vr->min), 1951 -1)); 1952 else 1953 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min), 1954 anti_min, 1955 build_int_cst (TREE_TYPE (var_vr->min), 1956 1)); 1957 } 1958 else 1959 max = fold_build_pointer_plus_hwi (anti_min, -1); 1960 min = real_min; 1961 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1962 } 1963 } 1964 } 1965 } 1966 1967 1968 /* Extract range information from SSA name VAR and store it in VR. If 1969 VAR has an interesting range, use it. Otherwise, create the 1970 range [VAR, VAR] and return it. This is useful in situations where 1971 we may have conditionals testing values of VARYING names. For 1972 instance, 1973 1974 x_3 = y_5; 1975 if (x_3 > y_5) 1976 ... 1977 1978 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is 1979 always false. */ 1980 1981 static void 1982 extract_range_from_ssa_name (value_range_t *vr, tree var) 1983 { 1984 value_range_t *var_vr = get_value_range (var); 1985 1986 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING) 1987 copy_value_range (vr, var_vr); 1988 else 1989 set_value_range (vr, VR_RANGE, var, var, NULL); 1990 1991 add_equivalence (&vr->equiv, var); 1992 } 1993 1994 1995 /* Wrapper around int_const_binop. If the operation overflows and we 1996 are not using wrapping arithmetic, then adjust the result to be 1997 -INF or +INF depending on CODE, VAL1 and VAL2. This can return 1998 NULL_TREE if we need to use an overflow infinity representation but 1999 the type does not support it. 
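For instance, when signed overflow is undefined and the type supports an overflow infinity, TYPE_MAX_VALUE + 1 is adjusted to a positive overflow infinity rather than being allowed to wrap.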
*/ 2000 2001 static tree 2002 vrp_int_const_binop (enum tree_code code, tree val1, tree val2) 2003 { 2004 tree res; 2005 2006 res = int_const_binop (code, val1, val2); 2007 2008 /* If we are using unsigned arithmetic, operate symbolically 2009 on -INF and +INF as int_const_binop only handles signed overflow. */ 2010 if (TYPE_UNSIGNED (TREE_TYPE (val1))) 2011 { 2012 int checkz = compare_values (res, val1); 2013 bool overflow = false; 2014 2015 /* Ensure that res = val1 [+*] val2 >= val1 2016 or that res = val1 - val2 <= val1. */ 2017 if ((code == PLUS_EXPR 2018 && !(checkz == 1 || checkz == 0)) 2019 || (code == MINUS_EXPR 2020 && !(checkz == 0 || checkz == -1))) 2021 { 2022 overflow = true; 2023 } 2024 /* Checking for multiplication overflow is done by dividing the 2025 output of the multiplication by the first input of the 2026 multiplication. If the result of that division operation is 2027 not equal to the second input of the multiplication, then the 2028 multiplication overflowed. */ 2029 else if (code == MULT_EXPR && !integer_zerop (val1)) 2030 { 2031 tree tmp = int_const_binop (TRUNC_DIV_EXPR, 2032 res, 2033 val1); 2034 int check = compare_values (tmp, val2); 2035 2036 if (check != 0) 2037 overflow = true; 2038 } 2039 2040 if (overflow) 2041 { 2042 res = copy_node (res); 2043 TREE_OVERFLOW (res) = 1; 2044 } 2045 2046 } 2047 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1))) 2048 /* If the signed operation wraps then int_const_binop has done 2049 everything we want. */ 2050 ; 2051 else if ((TREE_OVERFLOW (res) 2052 && !TREE_OVERFLOW (val1) 2053 && !TREE_OVERFLOW (val2)) 2054 || is_overflow_infinity (val1) 2055 || is_overflow_infinity (val2)) 2056 { 2057 /* If the operation overflowed but neither VAL1 nor VAL2 are 2058 overflown, return -INF or +INF depending on the operation 2059 and the combination of signs of the operands. */ 2060 int sgn1 = tree_int_cst_sgn (val1); 2061 int sgn2 = tree_int_cst_sgn (val2); 2062 2063 if (needs_overflow_infinity (TREE_TYPE (res)) 2064 && !supports_overflow_infinity (TREE_TYPE (res))) 2065 return NULL_TREE; 2066 2067 /* We have to punt on adding infinities of different signs, 2068 since we can't tell what the sign of the result should be. 2069 Likewise for subtracting infinities of the same sign. */ 2070 if (((code == PLUS_EXPR && sgn1 != sgn2) 2071 || (code == MINUS_EXPR && sgn1 == sgn2)) 2072 && is_overflow_infinity (val1) 2073 && is_overflow_infinity (val2)) 2074 return NULL_TREE; 2075 2076 /* Don't try to handle division or shifting of infinities. */ 2077 if ((code == TRUNC_DIV_EXPR 2078 || code == FLOOR_DIV_EXPR 2079 || code == CEIL_DIV_EXPR 2080 || code == EXACT_DIV_EXPR 2081 || code == ROUND_DIV_EXPR 2082 || code == RSHIFT_EXPR) 2083 && (is_overflow_infinity (val1) 2084 || is_overflow_infinity (val2))) 2085 return NULL_TREE; 2086 2087 /* Notice that we only need to handle the restricted set of 2088 operations handled by extract_range_from_binary_expr. 2089 Among them, only multiplication, addition and subtraction 2090 can yield overflow without overflown operands because we 2091 are working with integral types only... except in the 2092 case VAL1 = -INF and VAL2 = -1 which overflows to +INF 2093 for division too. */ 2094 2095 /* For multiplication, the sign of the overflow is given 2096 by the comparison of the signs of the operands. */ 2097 if ((code == MULT_EXPR && sgn1 == sgn2) 2098 /* For addition, the operands must be of the same sign 2099 to yield an overflow. Its sign is therefore that 2100 of one of the operands, for example the first.
For 2101 infinite operands X + -INF is negative, not positive. */ 2102 || (code == PLUS_EXPR 2103 && (sgn1 >= 0 2104 ? !is_negative_overflow_infinity (val2) 2105 : is_positive_overflow_infinity (val2))) 2106 /* For subtraction, non-infinite operands must be of 2107 different signs to yield an overflow. Its sign is 2108 therefore that of the first operand or the opposite of 2109 that of the second operand. A first operand of 0 counts 2110 as positive here, for the corner case 0 - (-INF), which 2111 overflows, but must yield +INF. For infinite operands 0 2112 - INF is negative, not positive. */ 2113 || (code == MINUS_EXPR 2114 && (sgn1 >= 0 2115 ? !is_positive_overflow_infinity (val2) 2116 : is_negative_overflow_infinity (val2))) 2117 /* We only get in here with positive shift count, so the 2118 overflow direction is the same as the sign of val1. 2119 Actually rshift does not overflow at all, but we only 2120 handle the case of shifting overflowed -INF and +INF. */ 2121 || (code == RSHIFT_EXPR 2122 && sgn1 >= 0) 2123 /* For division, the only case is -INF / -1 = +INF. */ 2124 || code == TRUNC_DIV_EXPR 2125 || code == FLOOR_DIV_EXPR 2126 || code == CEIL_DIV_EXPR 2127 || code == EXACT_DIV_EXPR 2128 || code == ROUND_DIV_EXPR) 2129 return (needs_overflow_infinity (TREE_TYPE (res)) 2130 ? positive_overflow_infinity (TREE_TYPE (res)) 2131 : TYPE_MAX_VALUE (TREE_TYPE (res))); 2132 else 2133 return (needs_overflow_infinity (TREE_TYPE (res)) 2134 ? negative_overflow_infinity (TREE_TYPE (res)) 2135 : TYPE_MIN_VALUE (TREE_TYPE (res))); 2136 } 2137 2138 return res; 2139 } 2140 2141 2142 /* For range VR compute two double_int bitmasks. In *MAY_BE_NONZERO 2143 bitmask if some bit is unset, it means for all numbers in the range 2144 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO 2145 bitmask if some bit is set, it means for all numbers in the range 2146 the bit is 1, otherwise it might be 0 or 1. */ 2147 2148 static bool 2149 zero_nonzero_bits_from_vr (value_range_t *vr, 2150 double_int *may_be_nonzero, 2151 double_int *must_be_nonzero) 2152 { 2153 *may_be_nonzero = double_int_minus_one; 2154 *must_be_nonzero = double_int_zero; 2155 if (!range_int_cst_p (vr)) 2156 return false; 2157 2158 if (range_int_cst_singleton_p (vr)) 2159 { 2160 *may_be_nonzero = tree_to_double_int (vr->min); 2161 *must_be_nonzero = *may_be_nonzero; 2162 } 2163 else if (tree_int_cst_sgn (vr->min) >= 0 2164 || tree_int_cst_sgn (vr->max) < 0) 2165 { 2166 double_int dmin = tree_to_double_int (vr->min); 2167 double_int dmax = tree_to_double_int (vr->max); 2168 double_int xor_mask = double_int_xor (dmin, dmax); 2169 *may_be_nonzero = double_int_ior (dmin, dmax); 2170 *must_be_nonzero = double_int_and (dmin, dmax); 2171 if (xor_mask.high != 0) 2172 { 2173 unsigned HOST_WIDE_INT mask 2174 = ((unsigned HOST_WIDE_INT) 1 2175 << floor_log2 (xor_mask.high)) - 1; 2176 may_be_nonzero->low = ALL_ONES; 2177 may_be_nonzero->high |= mask; 2178 must_be_nonzero->low = 0; 2179 must_be_nonzero->high &= ~mask; 2180 } 2181 else if (xor_mask.low != 0) 2182 { 2183 unsigned HOST_WIDE_INT mask 2184 = ((unsigned HOST_WIDE_INT) 1 2185 << floor_log2 (xor_mask.low)) - 1; 2186 may_be_nonzero->low |= mask; 2187 must_be_nonzero->low &= ~mask; 2188 } 2189 } 2190 2191 return true; 2192 } 2193 2194 /* Helper to extract a value-range *VR for a multiplicative operation 2195 *VR0 CODE *VR1. 
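For example, MULT_EXPR on [2, 3] and [10, 20] computes the cross products 20, 40, 30 and 60 and yields the range [20, 60].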
*/ 2196 2197 static void 2198 extract_range_from_multiplicative_op_1 (value_range_t *vr, 2199 enum tree_code code, 2200 value_range_t *vr0, value_range_t *vr1) 2201 { 2202 enum value_range_type type; 2203 tree val[4]; 2204 size_t i; 2205 tree min, max; 2206 bool sop; 2207 int cmp; 2208 2209 /* Multiplications, divisions and shifts are a bit tricky to handle: 2210 depending on the mix of signs we have in the two ranges, we 2211 need to operate on different values to get the minimum and 2212 maximum values for the new range. One approach is to figure 2213 out all the variations of range combinations and do the 2214 operations. 2215 2216 However, this involves several calls to compare_values and it 2217 is pretty convoluted. It's simpler to do the 4 operations 2218 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP 2219 MAX1) and then figure the smallest and largest values to form 2220 the new range. */ 2221 gcc_assert (code == MULT_EXPR 2222 || code == TRUNC_DIV_EXPR 2223 || code == FLOOR_DIV_EXPR 2224 || code == CEIL_DIV_EXPR 2225 || code == EXACT_DIV_EXPR 2226 || code == ROUND_DIV_EXPR 2227 || code == RSHIFT_EXPR); 2228 gcc_assert ((vr0->type == VR_RANGE 2229 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE)) 2230 && vr0->type == vr1->type); 2231 2232 type = vr0->type; 2233 2234 /* Compute the 4 cross operations. */ 2235 sop = false; 2236 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min); 2237 if (val[0] == NULL_TREE) 2238 sop = true; 2239 2240 if (vr1->max == vr1->min) 2241 val[1] = NULL_TREE; 2242 else 2243 { 2244 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max); 2245 if (val[1] == NULL_TREE) 2246 sop = true; 2247 } 2248 2249 if (vr0->max == vr0->min) 2250 val[2] = NULL_TREE; 2251 else 2252 { 2253 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min); 2254 if (val[2] == NULL_TREE) 2255 sop = true; 2256 } 2257 2258 if (vr0->min == vr0->max || vr1->min == vr1->max) 2259 val[3] = NULL_TREE; 2260 else 2261 { 2262 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max); 2263 if (val[3] == NULL_TREE) 2264 sop = true; 2265 } 2266 2267 if (sop) 2268 { 2269 set_value_range_to_varying (vr); 2270 return; 2271 } 2272 2273 /* Set MIN to the minimum of VAL[i] and MAX to the maximum 2274 of VAL[i]. */ 2275 min = val[0]; 2276 max = val[0]; 2277 for (i = 1; i < 4; i++) 2278 { 2279 if (!is_gimple_min_invariant (min) 2280 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2281 || !is_gimple_min_invariant (max) 2282 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2283 break; 2284 2285 if (val[i]) 2286 { 2287 if (!is_gimple_min_invariant (val[i]) 2288 || (TREE_OVERFLOW (val[i]) 2289 && !is_overflow_infinity (val[i]))) 2290 { 2291 /* If we found an overflowed value, set MIN and MAX 2292 to it so that we set the resulting range to 2293 VARYING. */ 2294 min = max = val[i]; 2295 break; 2296 } 2297 2298 if (compare_values (val[i], min) == -1) 2299 min = val[i]; 2300 2301 if (compare_values (val[i], max) == 1) 2302 max = val[i]; 2303 } 2304 } 2305 2306 /* If either MIN or MAX overflowed, then set the resulting range to 2307 VARYING. But we do accept an overflow infinity 2308 representation.
*/ 2309 if (min == NULL_TREE 2310 || !is_gimple_min_invariant (min) 2311 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2312 || max == NULL_TREE 2313 || !is_gimple_min_invariant (max) 2314 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2315 { 2316 set_value_range_to_varying (vr); 2317 return; 2318 } 2319 2320 /* We punt if: 2321 1) [-INF, +INF] 2322 2) [-INF, +-INF(OVF)] 2323 3) [+-INF(OVF), +INF] 2324 4) [+-INF(OVF), +-INF(OVF)] 2325 We learn nothing when we have INF and INF(OVF) on both sides. 2326 Note that we do accept [-INF, -INF] and [+INF, +INF] without 2327 overflow. */ 2328 if ((vrp_val_is_min (min) || is_overflow_infinity (min)) 2329 && (vrp_val_is_max (max) || is_overflow_infinity (max))) 2330 { 2331 set_value_range_to_varying (vr); 2332 return; 2333 } 2334 2335 cmp = compare_values (min, max); 2336 if (cmp == -2 || cmp == 1) 2337 { 2338 /* If the new range has its limits swapped around (MIN > MAX), 2339 then the operation caused one of them to wrap around, mark 2340 the new range VARYING. */ 2341 set_value_range_to_varying (vr); 2342 } 2343 else 2344 set_value_range (vr, type, min, max, NULL); 2345 } 2346 2347 /* Extract range information from a binary operation CODE based on 2348 the ranges of each of its operands, *VR0 and *VR1 with resulting 2349 type EXPR_TYPE. The resulting range is stored in *VR. */ 2350 2351 static void 2352 extract_range_from_binary_expr_1 (value_range_t *vr, 2353 enum tree_code code, tree expr_type, 2354 value_range_t *vr0_, value_range_t *vr1_) 2355 { 2356 value_range_t vr0 = *vr0_, vr1 = *vr1_; 2357 enum value_range_type type; 2358 tree min = NULL_TREE, max = NULL_TREE; 2359 int cmp; 2360 2361 if (!INTEGRAL_TYPE_P (expr_type) 2362 && !POINTER_TYPE_P (expr_type)) 2363 { 2364 set_value_range_to_varying (vr); 2365 return; 2366 } 2367 2368 /* Not all binary expressions can be applied to ranges in a 2369 meaningful way. Handle only arithmetic operations. */ 2370 if (code != PLUS_EXPR 2371 && code != MINUS_EXPR 2372 && code != POINTER_PLUS_EXPR 2373 && code != MULT_EXPR 2374 && code != TRUNC_DIV_EXPR 2375 && code != FLOOR_DIV_EXPR 2376 && code != CEIL_DIV_EXPR 2377 && code != EXACT_DIV_EXPR 2378 && code != ROUND_DIV_EXPR 2379 && code != TRUNC_MOD_EXPR 2380 && code != RSHIFT_EXPR 2381 && code != MIN_EXPR 2382 && code != MAX_EXPR 2383 && code != BIT_AND_EXPR 2384 && code != BIT_IOR_EXPR 2385 && code != BIT_XOR_EXPR) 2386 { 2387 set_value_range_to_varying (vr); 2388 return; 2389 } 2390 2391 /* If both ranges are UNDEFINED, so is the result. */ 2392 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED) 2393 { 2394 set_value_range_to_undefined (vr); 2395 return; 2396 } 2397 /* If one of the ranges is UNDEFINED drop it to VARYING for the following 2398 code. At some point we may want to special-case operations that 2399 have UNDEFINED result for all or some value-ranges of the not UNDEFINED 2400 operand. */ 2401 else if (vr0.type == VR_UNDEFINED) 2402 set_value_range_to_varying (&vr0); 2403 else if (vr1.type == VR_UNDEFINED) 2404 set_value_range_to_varying (&vr1); 2405 2406 /* The type of the resulting value range defaults to VR0.TYPE. */ 2407 type = vr0.type; 2408 2409 /* Refuse to operate on VARYING ranges, ranges of different kinds 2410 and symbolic ranges. As an exception, we allow BIT_AND_EXPR 2411 because we may be able to derive a useful range even if one of 2412 the operands is VR_VARYING or symbolic range. Similarly for 2413 divisions. TODO, we may be able to derive anti-ranges in 2414 some cases. 
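As an example of the BIT_AND_EXPR exception, x & 0xff for an unsigned x still yields a range within [0, 255] even when nothing is known about the range of x.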
*/ 2415 if (code != BIT_AND_EXPR 2416 && code != BIT_IOR_EXPR 2417 && code != TRUNC_DIV_EXPR 2418 && code != FLOOR_DIV_EXPR 2419 && code != CEIL_DIV_EXPR 2420 && code != EXACT_DIV_EXPR 2421 && code != ROUND_DIV_EXPR 2422 && code != TRUNC_MOD_EXPR 2423 && (vr0.type == VR_VARYING 2424 || vr1.type == VR_VARYING 2425 || vr0.type != vr1.type 2426 || symbolic_range_p (&vr0) 2427 || symbolic_range_p (&vr1))) 2428 { 2429 set_value_range_to_varying (vr); 2430 return; 2431 } 2432 2433 /* Now evaluate the expression to determine the new range. */ 2434 if (POINTER_TYPE_P (expr_type)) 2435 { 2436 if (code == MIN_EXPR || code == MAX_EXPR) 2437 { 2438 /* For MIN/MAX expressions with pointers, we only care about 2439 nullness, if both are non null, then the result is nonnull. 2440 If both are null, then the result is null. Otherwise they 2441 are varying. */ 2442 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) 2443 set_value_range_to_nonnull (vr, expr_type); 2444 else if (range_is_null (&vr0) && range_is_null (&vr1)) 2445 set_value_range_to_null (vr, expr_type); 2446 else 2447 set_value_range_to_varying (vr); 2448 } 2449 else if (code == POINTER_PLUS_EXPR) 2450 { 2451 /* For pointer types, we are really only interested in asserting 2452 whether the expression evaluates to non-NULL. */ 2453 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) 2454 set_value_range_to_nonnull (vr, expr_type); 2455 else if (range_is_null (&vr0) && range_is_null (&vr1)) 2456 set_value_range_to_null (vr, expr_type); 2457 else 2458 set_value_range_to_varying (vr); 2459 } 2460 else if (code == BIT_AND_EXPR) 2461 { 2462 /* For pointer types, we are really only interested in asserting 2463 whether the expression evaluates to non-NULL. */ 2464 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) 2465 set_value_range_to_nonnull (vr, expr_type); 2466 else if (range_is_null (&vr0) || range_is_null (&vr1)) 2467 set_value_range_to_null (vr, expr_type); 2468 else 2469 set_value_range_to_varying (vr); 2470 } 2471 else 2472 set_value_range_to_varying (vr); 2473 2474 return; 2475 } 2476 2477 /* For integer ranges, apply the operation to each end of the 2478 range and see what we end up with. */ 2479 if (code == PLUS_EXPR) 2480 { 2481 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to 2482 VR_VARYING. It would take more effort to compute a precise 2483 range for such a case. For example, if we have op0 == 1 and 2484 op1 == -1 with their ranges both being ~[0,0], we would have 2485 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0]. 2486 Note that we are guaranteed to have vr0.type == vr1.type at 2487 this point. */ 2488 if (vr0.type == VR_ANTI_RANGE) 2489 { 2490 set_value_range_to_varying (vr); 2491 return; 2492 } 2493 2494 /* For operations that make the resulting range directly 2495 proportional to the original ranges, apply the operation to 2496 the same end of each range. */ 2497 min = vrp_int_const_binop (code, vr0.min, vr1.min); 2498 max = vrp_int_const_binop (code, vr0.max, vr1.max); 2499 2500 /* If both additions overflowed the range kind is still correct. 2501 This happens regularly with subtracting something in unsigned 2502 arithmetic. 2503 ??? See PR30318 for all the cases we do not handle. 
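For instance, with 8-bit unsigned operands, [250, 250] + [10, 20] wraps both bounds and the resulting [4, 14] still describes every possible sum.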
*/ 2504 if ((TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2505 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2506 { 2507 min = build_int_cst_wide (TREE_TYPE (min), 2508 TREE_INT_CST_LOW (min), 2509 TREE_INT_CST_HIGH (min)); 2510 max = build_int_cst_wide (TREE_TYPE (max), 2511 TREE_INT_CST_LOW (max), 2512 TREE_INT_CST_HIGH (max)); 2513 } 2514 } 2515 else if (code == MIN_EXPR 2516 || code == MAX_EXPR) 2517 { 2518 if (vr0.type == VR_ANTI_RANGE) 2519 { 2520 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs, 2521 the resulting VR_ANTI_RANGE is the same - intersection 2522 of the two ranges. */ 2523 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min); 2524 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max); 2525 } 2526 else 2527 { 2528 /* For operations that make the resulting range directly 2529 proportional to the original ranges, apply the operation to 2530 the same end of each range. */ 2531 min = vrp_int_const_binop (code, vr0.min, vr1.min); 2532 max = vrp_int_const_binop (code, vr0.max, vr1.max); 2533 } 2534 } 2535 else if (code == MULT_EXPR) 2536 { 2537 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, 2538 drop to VR_VARYING. It would take more effort to compute a 2539 precise range for such a case. For example, if we have 2540 op0 == 65536 and op1 == 65536 with their ranges both being 2541 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so 2542 we cannot claim that the product is in ~[0,0]. Note that we 2543 are guaranteed to have vr0.type == vr1.type at this 2544 point. */ 2545 if (vr0.type == VR_ANTI_RANGE 2546 && !TYPE_OVERFLOW_UNDEFINED (expr_type)) 2547 { 2548 set_value_range_to_varying (vr); 2549 return; 2550 } 2551 2552 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); 2553 return; 2554 } 2555 else if (code == RSHIFT_EXPR) 2556 { 2557 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], 2558 then drop to VR_VARYING. Outside of this range we get undefined 2559 behavior from the shift operation. We cannot even trust 2560 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl 2561 shifts, and the operation at the tree level may be widened. */ 2562 if (vr1.type != VR_RANGE 2563 || !value_range_nonnegative_p (&vr1) 2564 || TREE_CODE (vr1.max) != INTEGER_CST 2565 || compare_tree_int (vr1.max, TYPE_PRECISION (expr_type) - 1) == 1) 2566 { 2567 set_value_range_to_varying (vr); 2568 return; 2569 } 2570 2571 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); 2572 return; 2573 } 2574 else if (code == TRUNC_DIV_EXPR 2575 || code == FLOOR_DIV_EXPR 2576 || code == CEIL_DIV_EXPR 2577 || code == EXACT_DIV_EXPR 2578 || code == ROUND_DIV_EXPR) 2579 { 2580 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0)) 2581 { 2582 /* For division, if op1 has VR_RANGE but op0 does not, something 2583 can be deduced just from that range. Say [min, max] / [4, max] 2584 gives [min / 4, max / 4] range. */ 2585 if (vr1.type == VR_RANGE 2586 && !symbolic_range_p (&vr1) 2587 && range_includes_zero_p (vr1.min, vr1.max) == 0) 2588 { 2589 vr0.type = type = VR_RANGE; 2590 vr0.min = vrp_val_min (expr_type); 2591 vr0.max = vrp_val_max (expr_type); 2592 } 2593 else 2594 { 2595 set_value_range_to_varying (vr); 2596 return; 2597 } 2598 } 2599 2600 /* For divisions, if flag_non_call_exceptions is true, we must 2601 not eliminate a division by zero. 
*/ 2602 if (cfun->can_throw_non_call_exceptions 2603 && (vr1.type != VR_RANGE 2604 || range_includes_zero_p (vr1.min, vr1.max) != 0)) 2605 { 2606 set_value_range_to_varying (vr); 2607 return; 2608 } 2609 2610 /* For divisions, if op0 is VR_RANGE, we can deduce a range 2611 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can 2612 include 0. */ 2613 if (vr0.type == VR_RANGE 2614 && (vr1.type != VR_RANGE 2615 || range_includes_zero_p (vr1.min, vr1.max) != 0)) 2616 { 2617 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); 2618 int cmp; 2619 2620 min = NULL_TREE; 2621 max = NULL_TREE; 2622 if (TYPE_UNSIGNED (expr_type) 2623 || value_range_nonnegative_p (&vr1)) 2624 { 2625 /* For unsigned division or when divisor is known 2626 to be non-negative, the range has to cover 2627 all numbers from 0 to max for positive max 2628 and all numbers from min to 0 for negative min. */ 2629 cmp = compare_values (vr0.max, zero); 2630 if (cmp == -1) 2631 max = zero; 2632 else if (cmp == 0 || cmp == 1) 2633 max = vr0.max; 2634 else 2635 type = VR_VARYING; 2636 cmp = compare_values (vr0.min, zero); 2637 if (cmp == 1) 2638 min = zero; 2639 else if (cmp == 0 || cmp == -1) 2640 min = vr0.min; 2641 else 2642 type = VR_VARYING; 2643 } 2644 else 2645 { 2646 /* Otherwise the range is -max .. max or min .. -min 2647 depending on which bound is bigger in absolute value, 2648 as the division can change the sign. */ 2649 abs_extent_range (vr, vr0.min, vr0.max); 2650 return; 2651 } 2652 if (type == VR_VARYING) 2653 { 2654 set_value_range_to_varying (vr); 2655 return; 2656 } 2657 } 2658 else 2659 { 2660 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1); 2661 return; 2662 } 2663 } 2664 else if (code == TRUNC_MOD_EXPR) 2665 { 2666 if (vr1.type != VR_RANGE 2667 || range_includes_zero_p (vr1.min, vr1.max) != 0 2668 || vrp_val_is_min (vr1.min)) 2669 { 2670 set_value_range_to_varying (vr); 2671 return; 2672 } 2673 type = VR_RANGE; 2674 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */ 2675 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min); 2676 if (tree_int_cst_lt (max, vr1.max)) 2677 max = vr1.max; 2678 max = int_const_binop (MINUS_EXPR, max, integer_one_node); 2679 /* If the dividend is non-negative the modulus will be 2680 non-negative as well. */ 2681 if (TYPE_UNSIGNED (expr_type) 2682 || value_range_nonnegative_p (&vr0)) 2683 min = build_int_cst (TREE_TYPE (max), 0); 2684 else 2685 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max); 2686 } 2687 else if (code == MINUS_EXPR) 2688 { 2689 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to 2690 VR_VARYING. It would take more effort to compute a precise 2691 range for such a case. For example, if we have op0 == 1 and 2692 op1 == 1 with their ranges both being ~[0,0], we would have 2693 op0 - op1 == 0, so we cannot claim that the difference is in 2694 ~[0,0]. Note that we are guaranteed to have 2695 vr0.type == vr1.type at this point. */ 2696 if (vr0.type == VR_ANTI_RANGE) 2697 { 2698 set_value_range_to_varying (vr); 2699 return; 2700 } 2701 2702 /* For MINUS_EXPR, apply the operation to the opposite ends of 2703 each range. 
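For example, [1, 5] - [2, 8] gives [1 - 8, 5 - 2] = [-7, 3].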
*/ 2704 min = vrp_int_const_binop (code, vr0.min, vr1.max); 2705 max = vrp_int_const_binop (code, vr0.max, vr1.min); 2706 } 2707 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) 2708 { 2709 bool int_cst_range0, int_cst_range1; 2710 double_int may_be_nonzero0, may_be_nonzero1; 2711 double_int must_be_nonzero0, must_be_nonzero1; 2712 2713 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, 2714 &must_be_nonzero0); 2715 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, 2716 &must_be_nonzero1); 2717 2718 type = VR_RANGE; 2719 if (code == BIT_AND_EXPR) 2720 { 2721 double_int dmax; 2722 min = double_int_to_tree (expr_type, 2723 double_int_and (must_be_nonzero0, 2724 must_be_nonzero1)); 2725 dmax = double_int_and (may_be_nonzero0, may_be_nonzero1); 2726 /* If both input ranges contain only negative values we can 2727 truncate the result range maximum to the minimum of the 2728 input range maxima. */ 2729 if (int_cst_range0 && int_cst_range1 2730 && tree_int_cst_sgn (vr0.max) < 0 2731 && tree_int_cst_sgn (vr1.max) < 0) 2732 { 2733 dmax = double_int_min (dmax, tree_to_double_int (vr0.max), 2734 TYPE_UNSIGNED (expr_type)); 2735 dmax = double_int_min (dmax, tree_to_double_int (vr1.max), 2736 TYPE_UNSIGNED (expr_type)); 2737 } 2738 /* If either input range contains only non-negative values 2739 we can truncate the result range maximum to the respective 2740 maximum of the input range. */ 2741 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) 2742 dmax = double_int_min (dmax, tree_to_double_int (vr0.max), 2743 TYPE_UNSIGNED (expr_type)); 2744 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) 2745 dmax = double_int_min (dmax, tree_to_double_int (vr1.max), 2746 TYPE_UNSIGNED (expr_type)); 2747 max = double_int_to_tree (expr_type, dmax); 2748 } 2749 else if (code == BIT_IOR_EXPR) 2750 { 2751 double_int dmin; 2752 max = double_int_to_tree (expr_type, 2753 double_int_ior (may_be_nonzero0, 2754 may_be_nonzero1)); 2755 dmin = double_int_ior (must_be_nonzero0, must_be_nonzero1); 2756 /* If the input ranges contain only positive values we can 2757 truncate the minimum of the result range to the maximum 2758 of the input range minima. */ 2759 if (int_cst_range0 && int_cst_range1 2760 && tree_int_cst_sgn (vr0.min) >= 0 2761 && tree_int_cst_sgn (vr1.min) >= 0) 2762 { 2763 dmin = double_int_max (dmin, tree_to_double_int (vr0.min), 2764 TYPE_UNSIGNED (expr_type)); 2765 dmin = double_int_max (dmin, tree_to_double_int (vr1.min), 2766 TYPE_UNSIGNED (expr_type)); 2767 } 2768 /* If either input range contains only negative values 2769 we can truncate the minimum of the result range to the 2770 respective minimum range. 
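(IOR can only set bits, and setting additional bits in a value whose sign bit is already set never decreases it.)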
*/ 2771 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) 2772 dmin = double_int_max (dmin, tree_to_double_int (vr0.min), 2773 TYPE_UNSIGNED (expr_type)); 2774 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) 2775 dmin = double_int_max (dmin, tree_to_double_int (vr1.min), 2776 TYPE_UNSIGNED (expr_type)); 2777 min = double_int_to_tree (expr_type, dmin); 2778 } 2779 else if (code == BIT_XOR_EXPR) 2780 { 2781 double_int result_zero_bits, result_one_bits; 2782 result_zero_bits 2783 = double_int_ior (double_int_and (must_be_nonzero0, 2784 must_be_nonzero1), 2785 double_int_not 2786 (double_int_ior (may_be_nonzero0, 2787 may_be_nonzero1))); 2788 result_one_bits 2789 = double_int_ior (double_int_and 2790 (must_be_nonzero0, 2791 double_int_not (may_be_nonzero1)), 2792 double_int_and 2793 (must_be_nonzero1, 2794 double_int_not (may_be_nonzero0))); 2795 max = double_int_to_tree (expr_type, 2796 double_int_not (result_zero_bits)); 2797 min = double_int_to_tree (expr_type, result_one_bits); 2798 /* If the range has all positive or all negative values the 2799 result is better than VARYING. */ 2800 if (tree_int_cst_sgn (min) < 0 2801 || tree_int_cst_sgn (max) >= 0) 2802 ; 2803 else 2804 max = min = NULL_TREE; 2805 } 2806 } 2807 else 2808 gcc_unreachable (); 2809 2810 /* If either MIN or MAX overflowed, then set the resulting range to 2811 VARYING. But we do accept an overflow infinity 2812 representation. */ 2813 if (min == NULL_TREE 2814 || !is_gimple_min_invariant (min) 2815 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2816 || max == NULL_TREE 2817 || !is_gimple_min_invariant (max) 2818 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2819 { 2820 set_value_range_to_varying (vr); 2821 return; 2822 } 2823 2824 /* We punt if: 2825 1) [-INF, +INF] 2826 2) [-INF, +-INF(OVF)] 2827 3) [+-INF(OVF), +INF] 2828 4) [+-INF(OVF), +-INF(OVF)] 2829 We learn nothing when we have INF and INF(OVF) on both sides. 2830 Note that we do accept [-INF, -INF] and [+INF, +INF] without 2831 overflow. */ 2832 if ((vrp_val_is_min (min) || is_overflow_infinity (min)) 2833 && (vrp_val_is_max (max) || is_overflow_infinity (max))) 2834 { 2835 set_value_range_to_varying (vr); 2836 return; 2837 } 2838 2839 cmp = compare_values (min, max); 2840 if (cmp == -2 || cmp == 1) 2841 { 2842 /* If the new range has its limits swapped around (MIN > MAX), 2843 then the operation caused one of them to wrap around, mark 2844 the new range VARYING. */ 2845 set_value_range_to_varying (vr); 2846 } 2847 else 2848 set_value_range (vr, type, min, max, NULL); 2849 } 2850 2851 /* Extract range information from a binary expression OP0 CODE OP1 based on 2852 the ranges of each of its operands with resulting type EXPR_TYPE. 2853 The resulting range is stored in *VR. */ 2854 2855 static void 2856 extract_range_from_binary_expr (value_range_t *vr, 2857 enum tree_code code, 2858 tree expr_type, tree op0, tree op1) 2859 { 2860 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 2861 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 2862 2863 /* Get value ranges for each operand. For constant operands, create 2864 a new value range with the operand to simplify processing. 
*/ 2865 if (TREE_CODE (op0) == SSA_NAME) 2866 vr0 = *(get_value_range (op0)); 2867 else if (is_gimple_min_invariant (op0)) 2868 set_value_range_to_value (&vr0, op0, NULL); 2869 else 2870 set_value_range_to_varying (&vr0); 2871 2872 if (TREE_CODE (op1) == SSA_NAME) 2873 vr1 = *(get_value_range (op1)); 2874 else if (is_gimple_min_invariant (op1)) 2875 set_value_range_to_value (&vr1, op1, NULL); 2876 else 2877 set_value_range_to_varying (&vr1); 2878 2879 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1); 2880 } 2881 2882 /* Extract range information from a unary operation CODE based on 2883 the range of its operand *VR0 with type OP0_TYPE and resulting type TYPE. 2884 The resulting range is stored in *VR. */ 2885 2886 static void 2887 extract_range_from_unary_expr_1 (value_range_t *vr, 2888 enum tree_code code, tree type, 2889 value_range_t *vr0_, tree op0_type) 2890 { 2891 value_range_t vr0 = *vr0_; 2892 2893 /* VRP only operates on integral and pointer types. */ 2894 if (!(INTEGRAL_TYPE_P (op0_type) 2895 || POINTER_TYPE_P (op0_type)) 2896 || !(INTEGRAL_TYPE_P (type) 2897 || POINTER_TYPE_P (type))) 2898 { 2899 set_value_range_to_varying (vr); 2900 return; 2901 } 2902 2903 /* If VR0 is UNDEFINED, so is the result. */ 2904 if (vr0.type == VR_UNDEFINED) 2905 { 2906 set_value_range_to_undefined (vr); 2907 return; 2908 } 2909 2910 if (CONVERT_EXPR_CODE_P (code)) 2911 { 2912 tree inner_type = op0_type; 2913 tree outer_type = type; 2914 2915 /* If the expression evaluates to a pointer, we are only interested in 2916 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ 2917 if (POINTER_TYPE_P (type)) 2918 { 2919 if (range_is_nonnull (&vr0)) 2920 set_value_range_to_nonnull (vr, type); 2921 else if (range_is_null (&vr0)) 2922 set_value_range_to_null (vr, type); 2923 else 2924 set_value_range_to_varying (vr); 2925 return; 2926 } 2927 2928 /* If VR0 is varying and we increase the type precision, assume 2929 a full range for the following transformation. */ 2930 if (vr0.type == VR_VARYING 2931 && INTEGRAL_TYPE_P (inner_type) 2932 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) 2933 { 2934 vr0.type = VR_RANGE; 2935 vr0.min = TYPE_MIN_VALUE (inner_type); 2936 vr0.max = TYPE_MAX_VALUE (inner_type); 2937 } 2938 2939 /* If VR0 is a constant range or anti-range and the conversion is 2940 not truncating we can convert the min and max values and 2941 canonicalize the resulting range. Otherwise we can do the 2942 conversion if the size of the range is less than what the 2943 precision of the target type can represent and the range is 2944 not an anti-range.
*/ 2945 if ((vr0.type == VR_RANGE 2946 || vr0.type == VR_ANTI_RANGE) 2947 && TREE_CODE (vr0.min) == INTEGER_CST 2948 && TREE_CODE (vr0.max) == INTEGER_CST 2949 && (!is_overflow_infinity (vr0.min) 2950 || (vr0.type == VR_RANGE 2951 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) 2952 && needs_overflow_infinity (outer_type) 2953 && supports_overflow_infinity (outer_type))) 2954 && (!is_overflow_infinity (vr0.max) 2955 || (vr0.type == VR_RANGE 2956 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) 2957 && needs_overflow_infinity (outer_type) 2958 && supports_overflow_infinity (outer_type))) 2959 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) 2960 || (vr0.type == VR_RANGE 2961 && integer_zerop (int_const_binop (RSHIFT_EXPR, 2962 int_const_binop (MINUS_EXPR, vr0.max, vr0.min), 2963 size_int (TYPE_PRECISION (outer_type))))))) 2964 { 2965 tree new_min, new_max; 2966 if (is_overflow_infinity (vr0.min)) 2967 new_min = negative_overflow_infinity (outer_type); 2968 else 2969 new_min = force_fit_type_double (outer_type, 2970 tree_to_double_int (vr0.min), 2971 0, false); 2972 if (is_overflow_infinity (vr0.max)) 2973 new_max = positive_overflow_infinity (outer_type); 2974 else 2975 new_max = force_fit_type_double (outer_type, 2976 tree_to_double_int (vr0.max), 2977 0, false); 2978 set_and_canonicalize_value_range (vr, vr0.type, 2979 new_min, new_max, NULL); 2980 return; 2981 } 2982 2983 set_value_range_to_varying (vr); 2984 return; 2985 } 2986 else if (code == NEGATE_EXPR) 2987 { 2988 /* -X is simply 0 - X, so re-use existing code that also handles 2989 anti-ranges fine. */ 2990 value_range_t zero = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 2991 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL); 2992 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0); 2993 return; 2994 } 2995 else if (code == ABS_EXPR) 2996 { 2997 tree min, max; 2998 int cmp; 2999 3000 /* Pass through vr0 in the easy cases. */ 3001 if (TYPE_UNSIGNED (type) 3002 || value_range_nonnegative_p (&vr0)) 3003 { 3004 copy_value_range (vr, &vr0); 3005 return; 3006 } 3007 3008 /* For the remaining varying or symbolic ranges we can't do anything 3009 useful. */ 3010 if (vr0.type == VR_VARYING 3011 || symbolic_range_p (&vr0)) 3012 { 3013 set_value_range_to_varying (vr); 3014 return; 3015 } 3016 3017 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a 3018 useful range. */ 3019 if (!TYPE_OVERFLOW_UNDEFINED (type) 3020 && ((vr0.type == VR_RANGE 3021 && vrp_val_is_min (vr0.min)) 3022 || (vr0.type == VR_ANTI_RANGE 3023 && !vrp_val_is_min (vr0.min)))) 3024 { 3025 set_value_range_to_varying (vr); 3026 return; 3027 } 3028 3029 /* ABS_EXPR may flip the range around, if the original range 3030 included negative values. 
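For instance, [-7, 3] becomes [0, 7] and [-7, -3] becomes [3, 7].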
*/ 3031 if (is_overflow_infinity (vr0.min)) 3032 min = positive_overflow_infinity (type); 3033 else if (!vrp_val_is_min (vr0.min)) 3034 min = fold_unary_to_constant (code, type, vr0.min); 3035 else if (!needs_overflow_infinity (type)) 3036 min = TYPE_MAX_VALUE (type); 3037 else if (supports_overflow_infinity (type)) 3038 min = positive_overflow_infinity (type); 3039 else 3040 { 3041 set_value_range_to_varying (vr); 3042 return; 3043 } 3044 3045 if (is_overflow_infinity (vr0.max)) 3046 max = positive_overflow_infinity (type); 3047 else if (!vrp_val_is_min (vr0.max)) 3048 max = fold_unary_to_constant (code, type, vr0.max); 3049 else if (!needs_overflow_infinity (type)) 3050 max = TYPE_MAX_VALUE (type); 3051 else if (supports_overflow_infinity (type) 3052 /* We shouldn't generate [+INF, +INF] as set_value_range 3053 doesn't like this and ICEs. */ 3054 && !is_positive_overflow_infinity (min)) 3055 max = positive_overflow_infinity (type); 3056 else 3057 { 3058 set_value_range_to_varying (vr); 3059 return; 3060 } 3061 3062 cmp = compare_values (min, max); 3063 3064 /* If a VR_ANTI_RANGEs contains zero, then we have 3065 ~[-INF, min(MIN, MAX)]. */ 3066 if (vr0.type == VR_ANTI_RANGE) 3067 { 3068 if (range_includes_zero_p (vr0.min, vr0.max) == 1) 3069 { 3070 /* Take the lower of the two values. */ 3071 if (cmp != 1) 3072 max = min; 3073 3074 /* Create ~[-INF, min (abs(MIN), abs(MAX))] 3075 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when 3076 flag_wrapv is set and the original anti-range doesn't include 3077 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ 3078 if (TYPE_OVERFLOW_WRAPS (type)) 3079 { 3080 tree type_min_value = TYPE_MIN_VALUE (type); 3081 3082 min = (vr0.min != type_min_value 3083 ? int_const_binop (PLUS_EXPR, type_min_value, 3084 integer_one_node) 3085 : type_min_value); 3086 } 3087 else 3088 { 3089 if (overflow_infinity_range_p (&vr0)) 3090 min = negative_overflow_infinity (type); 3091 else 3092 min = TYPE_MIN_VALUE (type); 3093 } 3094 } 3095 else 3096 { 3097 /* All else has failed, so create the range [0, INF], even for 3098 flag_wrapv since TYPE_MIN_VALUE is in the original 3099 anti-range. */ 3100 vr0.type = VR_RANGE; 3101 min = build_int_cst (type, 0); 3102 if (needs_overflow_infinity (type)) 3103 { 3104 if (supports_overflow_infinity (type)) 3105 max = positive_overflow_infinity (type); 3106 else 3107 { 3108 set_value_range_to_varying (vr); 3109 return; 3110 } 3111 } 3112 else 3113 max = TYPE_MAX_VALUE (type); 3114 } 3115 } 3116 3117 /* If the range contains zero then we know that the minimum value in the 3118 range will be zero. */ 3119 else if (range_includes_zero_p (vr0.min, vr0.max) == 1) 3120 { 3121 if (cmp == 1) 3122 max = min; 3123 min = build_int_cst (type, 0); 3124 } 3125 else 3126 { 3127 /* If the range was reversed, swap MIN and MAX. */ 3128 if (cmp == 1) 3129 { 3130 tree t = min; 3131 min = max; 3132 max = t; 3133 } 3134 } 3135 3136 cmp = compare_values (min, max); 3137 if (cmp == -2 || cmp == 1) 3138 { 3139 /* If the new range has its limits swapped around (MIN > MAX), 3140 then the operation caused one of them to wrap around, mark 3141 the new range VARYING. */ 3142 set_value_range_to_varying (vr); 3143 } 3144 else 3145 set_value_range (vr, vr0.type, min, max, NULL); 3146 return; 3147 } 3148 else if (code == BIT_NOT_EXPR) 3149 { 3150 /* ~X is simply -1 - X, so re-use existing code that also handles 3151 anti-ranges fine. 
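For example, if X has the range [3, 10] then ~X has the range [-11, -4].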
*/ 3152 value_range_t minusone = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 3153 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL); 3154 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, 3155 type, &minusone, &vr0); 3156 return; 3157 } 3158 else if (code == PAREN_EXPR) 3159 { 3160 copy_value_range (vr, &vr0); 3161 return; 3162 } 3163 3164 /* For unhandled operations fall back to varying. */ 3165 set_value_range_to_varying (vr); 3166 return; 3167 } 3168 3169 3170 /* Extract range information from a unary expression CODE OP0 based on 3171 the range of its operand with resulting type TYPE. 3172 The resulting range is stored in *VR. */ 3173 3174 static void 3175 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code, 3176 tree type, tree op0) 3177 { 3178 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 3179 3180 /* Get value ranges for the operand. For constant operands, create 3181 a new value range with the operand to simplify processing. */ 3182 if (TREE_CODE (op0) == SSA_NAME) 3183 vr0 = *(get_value_range (op0)); 3184 else if (is_gimple_min_invariant (op0)) 3185 set_value_range_to_value (&vr0, op0, NULL); 3186 else 3187 set_value_range_to_varying (&vr0); 3188 3189 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0)); 3190 } 3191 3192 3193 /* Extract range information from a conditional expression STMT based on 3194 the ranges of each of its operands and the expression code. */ 3195 3196 static void 3197 extract_range_from_cond_expr (value_range_t *vr, gimple stmt) 3198 { 3199 tree op0, op1; 3200 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 3201 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 3202 3203 /* Get value ranges for each operand. For constant operands, create 3204 a new value range with the operand to simplify processing. */ 3205 op0 = gimple_assign_rhs2 (stmt); 3206 if (TREE_CODE (op0) == SSA_NAME) 3207 vr0 = *(get_value_range (op0)); 3208 else if (is_gimple_min_invariant (op0)) 3209 set_value_range_to_value (&vr0, op0, NULL); 3210 else 3211 set_value_range_to_varying (&vr0); 3212 3213 op1 = gimple_assign_rhs3 (stmt); 3214 if (TREE_CODE (op1) == SSA_NAME) 3215 vr1 = *(get_value_range (op1)); 3216 else if (is_gimple_min_invariant (op1)) 3217 set_value_range_to_value (&vr1, op1, NULL); 3218 else 3219 set_value_range_to_varying (&vr1); 3220 3221 /* The resulting value range is the union of the operand ranges */ 3222 copy_value_range (vr, &vr0); 3223 vrp_meet (vr, &vr1); 3224 } 3225 3226 3227 /* Extract range information from a comparison expression EXPR based 3228 on the range of its operand and the expression code. */ 3229 3230 static void 3231 extract_range_from_comparison (value_range_t *vr, enum tree_code code, 3232 tree type, tree op0, tree op1) 3233 { 3234 bool sop = false; 3235 tree val; 3236 3237 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, 3238 NULL); 3239 3240 /* A disadvantage of using a special infinity as an overflow 3241 representation is that we lose the ability to record overflow 3242 when we don't have an infinity. So we have to ignore a result 3243 which relies on overflow. */ 3244 3245 if (val && !is_overflow_infinity (val) && !sop) 3246 { 3247 /* Since this expression was found on the RHS of an assignment, 3248 its type may be different from _Bool. Convert VAL to EXPR's 3249 type. 
*/ 3250 val = fold_convert (type, val); 3251 if (is_gimple_min_invariant (val)) 3252 set_value_range_to_value (vr, val, vr->equiv); 3253 else 3254 set_value_range (vr, VR_RANGE, val, val, vr->equiv); 3255 } 3256 else 3257 /* The result of a comparison is always true or false. */ 3258 set_value_range_to_truthvalue (vr, type); 3259 } 3260 3261 /* Try to derive a nonnegative or nonzero range out of STMT relying 3262 primarily on generic routines in fold in conjunction with range data. 3263 Store the result in *VR */ 3264 3265 static void 3266 extract_range_basic (value_range_t *vr, gimple stmt) 3267 { 3268 bool sop = false; 3269 tree type = gimple_expr_type (stmt); 3270 3271 /* If the call is __builtin_constant_p and the argument is a 3272 function parameter resolve it to false. This avoids bogus 3273 array bound warnings. 3274 ??? We could do this as early as inlining is finished. */ 3275 if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P)) 3276 { 3277 tree arg = gimple_call_arg (stmt, 0); 3278 if (TREE_CODE (arg) == SSA_NAME 3279 && SSA_NAME_IS_DEFAULT_DEF (arg) 3280 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL) 3281 set_value_range_to_null (vr, type); 3282 } 3283 else if (INTEGRAL_TYPE_P (type) 3284 && gimple_stmt_nonnegative_warnv_p (stmt, &sop)) 3285 set_value_range_to_nonnegative (vr, type, 3286 sop || stmt_overflow_infinity (stmt)); 3287 else if (vrp_stmt_computes_nonzero (stmt, &sop) 3288 && !sop) 3289 set_value_range_to_nonnull (vr, type); 3290 else 3291 set_value_range_to_varying (vr); 3292 } 3293 3294 3295 /* Try to compute a useful range out of assignment STMT and store it 3296 in *VR. */ 3297 3298 static void 3299 extract_range_from_assignment (value_range_t *vr, gimple stmt) 3300 { 3301 enum tree_code code = gimple_assign_rhs_code (stmt); 3302 3303 if (code == ASSERT_EXPR) 3304 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); 3305 else if (code == SSA_NAME) 3306 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); 3307 else if (TREE_CODE_CLASS (code) == tcc_binary) 3308 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), 3309 gimple_expr_type (stmt), 3310 gimple_assign_rhs1 (stmt), 3311 gimple_assign_rhs2 (stmt)); 3312 else if (TREE_CODE_CLASS (code) == tcc_unary) 3313 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), 3314 gimple_expr_type (stmt), 3315 gimple_assign_rhs1 (stmt)); 3316 else if (code == COND_EXPR) 3317 extract_range_from_cond_expr (vr, stmt); 3318 else if (TREE_CODE_CLASS (code) == tcc_comparison) 3319 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), 3320 gimple_expr_type (stmt), 3321 gimple_assign_rhs1 (stmt), 3322 gimple_assign_rhs2 (stmt)); 3323 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS 3324 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) 3325 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); 3326 else 3327 set_value_range_to_varying (vr); 3328 3329 if (vr->type == VR_VARYING) 3330 extract_range_basic (vr, stmt); 3331 } 3332 3333 /* Given a range VR, a LOOP and a variable VAR, determine whether it 3334 would be profitable to adjust VR using scalar evolution information 3335 for VAR. If so, update VR with the new limits. */ 3336 3337 static void 3338 adjust_range_with_scev (value_range_t *vr, struct loop *loop, 3339 gimple stmt, tree var) 3340 { 3341 tree init, step, chrec, tmin, tmax, min, max, type, tem; 3342 enum ev_direction dir; 3343 3344 /* TODO. Don't adjust anti-ranges. 
An anti-range may provide 3345 better opportunities than a regular range, but I'm not sure. */ 3346 if (vr->type == VR_ANTI_RANGE) 3347 return; 3348 3349 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var)); 3350 3351 /* Like in PR19590, scev can return a constant function. */ 3352 if (is_gimple_min_invariant (chrec)) 3353 { 3354 set_value_range_to_value (vr, chrec, vr->equiv); 3355 return; 3356 } 3357 3358 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) 3359 return; 3360 3361 init = initial_condition_in_loop_num (chrec, loop->num); 3362 tem = op_with_constant_singleton_value_range (init); 3363 if (tem) 3364 init = tem; 3365 step = evolution_part_in_loop_num (chrec, loop->num); 3366 tem = op_with_constant_singleton_value_range (step); 3367 if (tem) 3368 step = tem; 3369 3370 /* If STEP is symbolic, we can't know whether INIT will be the 3371 minimum or maximum value in the range. Also, unless INIT is 3372 a simple expression, compare_values and possibly other functions 3373 in tree-vrp won't be able to handle it. */ 3374 if (step == NULL_TREE 3375 || !is_gimple_min_invariant (step) 3376 || !valid_value_p (init)) 3377 return; 3378 3379 dir = scev_direction (chrec); 3380 if (/* Do not adjust ranges if we do not know whether the iv increases 3381 or decreases, ... */ 3382 dir == EV_DIR_UNKNOWN 3383 /* ... or if it may wrap. */ 3384 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), 3385 true)) 3386 return; 3387 3388 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of 3389 negative_overflow_infinity and positive_overflow_infinity, 3390 because we have concluded that the loop probably does not 3391 wrap. */ 3392 3393 type = TREE_TYPE (var); 3394 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type)) 3395 tmin = lower_bound_in_type (type, type); 3396 else 3397 tmin = TYPE_MIN_VALUE (type); 3398 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type)) 3399 tmax = upper_bound_in_type (type, type); 3400 else 3401 tmax = TYPE_MAX_VALUE (type); 3402 3403 /* Try to use estimated number of iterations for the loop to constrain the 3404 final value in the evolution. */ 3405 if (TREE_CODE (step) == INTEGER_CST 3406 && is_gimple_val (init) 3407 && (TREE_CODE (init) != SSA_NAME 3408 || get_value_range (init)->type == VR_RANGE)) 3409 { 3410 double_int nit; 3411 3412 if (estimated_loop_iterations (loop, true, &nit)) 3413 { 3414 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 3415 double_int dtmp; 3416 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step)); 3417 int overflow = 0; 3418 3419 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit, 3420 unsigned_p, &overflow); 3421 /* If the multiplication overflowed we can't do a meaningful 3422 adjustment. Likewise if the result doesn't fit in the type 3423 of the induction variable. For a signed type we have to 3424 check whether the result has the expected signedness which 3425 is that of the step as number of iterations is unsigned. */ 3426 if (!overflow 3427 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp) 3428 && (unsigned_p 3429 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0))) 3430 { 3431 tem = double_int_to_tree (TREE_TYPE (init), dtmp); 3432 extract_range_from_binary_expr (&maxvr, PLUS_EXPR, 3433 TREE_TYPE (init), init, tem); 3434 /* Likewise if the addition did. 
*/ 3435 if (maxvr.type == VR_RANGE) 3436 { 3437 tmin = maxvr.min; 3438 tmax = maxvr.max; 3439 } 3440 } 3441 } 3442 } 3443 3444 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) 3445 { 3446 min = tmin; 3447 max = tmax; 3448 3449 /* For VARYING or UNDEFINED ranges, just about anything we get 3450 from scalar evolutions should be better. */ 3451 3452 if (dir == EV_DIR_DECREASES) 3453 max = init; 3454 else 3455 min = init; 3456 3457 /* If we would create an invalid range, then just assume we 3458 know absolutely nothing. This may be over-conservative, 3459 but it's clearly safe, and should happen only in unreachable 3460 parts of code, or for invalid programs. */ 3461 if (compare_values (min, max) == 1) 3462 return; 3463 3464 set_value_range (vr, VR_RANGE, min, max, vr->equiv); 3465 } 3466 else if (vr->type == VR_RANGE) 3467 { 3468 min = vr->min; 3469 max = vr->max; 3470 3471 if (dir == EV_DIR_DECREASES) 3472 { 3473 /* INIT is the maximum value. If INIT is lower than VR->MAX 3474 but no smaller than VR->MIN, set VR->MAX to INIT. */ 3475 if (compare_values (init, max) == -1) 3476 max = init; 3477 3478 /* According to the loop information, the variable does not 3479 overflow. If we think it does, probably because of an 3480 overflow due to arithmetic on a different INF value, 3481 reset now. */ 3482 if (is_negative_overflow_infinity (min) 3483 || compare_values (min, tmin) == -1) 3484 min = tmin; 3485 3486 } 3487 else 3488 { 3489 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */ 3490 if (compare_values (init, min) == 1) 3491 min = init; 3492 3493 if (is_positive_overflow_infinity (max) 3494 || compare_values (tmax, max) == -1) 3495 max = tmax; 3496 } 3497 3498 /* If we just created an invalid range with the minimum 3499 greater than the maximum, we fail conservatively. 3500 This should happen only in unreachable 3501 parts of code, or for invalid programs. */ 3502 if (compare_values (min, max) == 1) 3503 return; 3504 3505 set_value_range (vr, VR_RANGE, min, max, vr->equiv); 3506 } 3507 } 3508 3509 /* Return true if VAR may overflow at STMT. This checks any available 3510 loop information to see if we can determine that VAR does not 3511 overflow. */ 3512 3513 static bool 3514 vrp_var_may_overflow (tree var, gimple stmt) 3515 { 3516 struct loop *l; 3517 tree chrec, init, step; 3518 3519 if (current_loops == NULL) 3520 return true; 3521 3522 l = loop_containing_stmt (stmt); 3523 if (l == NULL 3524 || !loop_outer (l)) 3525 return true; 3526 3527 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var)); 3528 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) 3529 return true; 3530 3531 init = initial_condition_in_loop_num (chrec, l->num); 3532 step = evolution_part_in_loop_num (chrec, l->num); 3533 3534 if (step == NULL_TREE 3535 || !is_gimple_min_invariant (step) 3536 || !valid_value_p (init)) 3537 return true; 3538 3539 /* If we get here, we know something useful about VAR based on the 3540 loop information. If it wraps, it may overflow. */ 3541 3542 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), 3543 true)) 3544 return true; 3545 3546 if (dump_file && (dump_flags & TDF_DETAILS) != 0) 3547 { 3548 print_generic_expr (dump_file, var, 0); 3549 fprintf (dump_file, ": loop information indicates does not overflow\n"); 3550 } 3551 3552 return false; 3553 } 3554 3555 3556 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP: 3557 3558 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for 3559 all the values in the ranges. 
3560 3561 - Return BOOLEAN_FALSE_NODE if the comparison always returns false. 3562 3563 - Return NULL_TREE if it is not always possible to determine the 3564 value of the comparison. 3565 3566 Also set *STRICT_OVERFLOW_P to indicate whether a range with an 3567 overflow infinity was used in the test. */ 3568 3569 3570 static tree 3571 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1, 3572 bool *strict_overflow_p) 3573 { 3574 /* VARYING or UNDEFINED ranges cannot be compared. */ 3575 if (vr0->type == VR_VARYING 3576 || vr0->type == VR_UNDEFINED 3577 || vr1->type == VR_VARYING 3578 || vr1->type == VR_UNDEFINED) 3579 return NULL_TREE; 3580 3581 /* Anti-ranges need to be handled separately. */ 3582 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE) 3583 { 3584 /* If both are anti-ranges, then we cannot compute any 3585 comparison. */ 3586 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE) 3587 return NULL_TREE; 3588 3589 /* These comparisons are never statically computable. */ 3590 if (comp == GT_EXPR 3591 || comp == GE_EXPR 3592 || comp == LT_EXPR 3593 || comp == LE_EXPR) 3594 return NULL_TREE; 3595 3596 /* Equality can be computed only between a range and an 3597 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */ 3598 if (vr0->type == VR_RANGE) 3599 { 3600 /* To simplify processing, make VR0 the anti-range. */ 3601 value_range_t *tmp = vr0; 3602 vr0 = vr1; 3603 vr1 = tmp; 3604 } 3605 3606 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR); 3607 3608 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0 3609 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0) 3610 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; 3611 3612 return NULL_TREE; 3613 } 3614 3615 if (!usable_range_p (vr0, strict_overflow_p) 3616 || !usable_range_p (vr1, strict_overflow_p)) 3617 return NULL_TREE; 3618 3619 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the 3620 operands around and change the comparison code. */ 3621 if (comp == GT_EXPR || comp == GE_EXPR) 3622 { 3623 value_range_t *tmp; 3624 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR; 3625 tmp = vr0; 3626 vr0 = vr1; 3627 vr1 = tmp; 3628 } 3629 3630 if (comp == EQ_EXPR) 3631 { 3632 /* Equality may only be computed if both ranges represent 3633 exactly one value. */ 3634 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0 3635 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0) 3636 { 3637 int cmp_min = compare_values_warnv (vr0->min, vr1->min, 3638 strict_overflow_p); 3639 int cmp_max = compare_values_warnv (vr0->max, vr1->max, 3640 strict_overflow_p); 3641 if (cmp_min == 0 && cmp_max == 0) 3642 return boolean_true_node; 3643 else if (cmp_min != -2 && cmp_max != -2) 3644 return boolean_false_node; 3645 } 3646 /* If the ranges [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] are disjoint, then V0 != V1. */ 3647 else if (compare_values_warnv (vr0->min, vr1->max, 3648 strict_overflow_p) == 1 3649 || compare_values_warnv (vr1->min, vr0->max, 3650 strict_overflow_p) == 1) 3651 return boolean_false_node; 3652 3653 return NULL_TREE; 3654 } 3655 else if (comp == NE_EXPR) 3656 { 3657 int cmp1, cmp2; 3658 3659 /* If VR0 is completely to the left or completely to the right 3660 of VR1, they are always different. Notice that we need to 3661 make sure that both comparisons yield similar results to 3662 avoid comparing values that cannot be compared at 3663 compile-time.
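For instance, [1, 5] != [10, 20] is always true because the two ranges are disjoint.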
*/ 3664 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); 3665 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); 3666 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1)) 3667 return boolean_true_node; 3668 3669 /* If VR0 and VR1 represent a single value and are identical, 3670 return false. */ 3671 else if (compare_values_warnv (vr0->min, vr0->max, 3672 strict_overflow_p) == 0 3673 && compare_values_warnv (vr1->min, vr1->max, 3674 strict_overflow_p) == 0 3675 && compare_values_warnv (vr0->min, vr1->min, 3676 strict_overflow_p) == 0 3677 && compare_values_warnv (vr0->max, vr1->max, 3678 strict_overflow_p) == 0) 3679 return boolean_false_node; 3680 3681 /* Otherwise, they may or may not be different. */ 3682 else 3683 return NULL_TREE; 3684 } 3685 else if (comp == LT_EXPR || comp == LE_EXPR) 3686 { 3687 int tst; 3688 3689 /* If VR0 is to the left of VR1, return true. */ 3690 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); 3691 if ((comp == LT_EXPR && tst == -1) 3692 || (comp == LE_EXPR && (tst == -1 || tst == 0))) 3693 { 3694 if (overflow_infinity_range_p (vr0) 3695 || overflow_infinity_range_p (vr1)) 3696 *strict_overflow_p = true; 3697 return boolean_true_node; 3698 } 3699 3700 /* If VR0 is to the right of VR1, return false. */ 3701 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); 3702 if ((comp == LT_EXPR && (tst == 0 || tst == 1)) 3703 || (comp == LE_EXPR && tst == 1)) 3704 { 3705 if (overflow_infinity_range_p (vr0) 3706 || overflow_infinity_range_p (vr1)) 3707 *strict_overflow_p = true; 3708 return boolean_false_node; 3709 } 3710 3711 /* Otherwise, we don't know. */ 3712 return NULL_TREE; 3713 } 3714 3715 gcc_unreachable (); 3716 } 3717 3718 3719 /* Given a value range VR, a value VAL and a comparison code COMP, return 3720 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the 3721 values in VR. Return BOOLEAN_FALSE_NODE if the comparison 3722 always returns false. Return NULL_TREE if it is not always 3723 possible to determine the value of the comparison. Also set 3724 *STRICT_OVERFLOW_P to indicate whether a range with an overflow 3725 infinity was used in the test. */ 3726 3727 static tree 3728 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val, 3729 bool *strict_overflow_p) 3730 { 3731 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) 3732 return NULL_TREE; 3733 3734 /* Anti-ranges need to be handled separately. */ 3735 if (vr->type == VR_ANTI_RANGE) 3736 { 3737 /* For anti-ranges, the only predicates that we can compute at 3738 compile time are equality and inequality. */ 3739 if (comp == GT_EXPR 3740 || comp == GE_EXPR 3741 || comp == LT_EXPR 3742 || comp == LE_EXPR) 3743 return NULL_TREE; 3744 3745 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */ 3746 if (value_inside_range (val, vr->min, vr->max) == 1) 3747 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; 3748 3749 return NULL_TREE; 3750 } 3751 3752 if (!usable_range_p (vr, strict_overflow_p)) 3753 return NULL_TREE; 3754 3755 if (comp == EQ_EXPR) 3756 { 3757 /* EQ_EXPR may only be computed if VR represents exactly 3758 one value. 
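     For instance (hypothetical values): the singleton range [7, 7]
     compared against 7 yields true and against 9 yields false, whereas
     [0, 10] compared against 5 cannot be decided, so NULL_TREE is
     returned unless VAL lies entirely outside VR.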
*/ 3759 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0) 3760 { 3761 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p); 3762 if (cmp == 0) 3763 return boolean_true_node; 3764 else if (cmp == -1 || cmp == 1 || cmp == 2) 3765 return boolean_false_node; 3766 } 3767 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1 3768 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1) 3769 return boolean_false_node; 3770 3771 return NULL_TREE; 3772 } 3773 else if (comp == NE_EXPR) 3774 { 3775 /* If VAL is not inside VR, then they are always different. */ 3776 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1 3777 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1) 3778 return boolean_true_node; 3779 3780 /* If VR represents exactly one value equal to VAL, then return 3781 false. */ 3782 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0 3783 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0) 3784 return boolean_false_node; 3785 3786 /* Otherwise, they may or may not be different. */ 3787 return NULL_TREE; 3788 } 3789 else if (comp == LT_EXPR || comp == LE_EXPR) 3790 { 3791 int tst; 3792 3793 /* If VR is to the left of VAL, return true. */ 3794 tst = compare_values_warnv (vr->max, val, strict_overflow_p); 3795 if ((comp == LT_EXPR && tst == -1) 3796 || (comp == LE_EXPR && (tst == -1 || tst == 0))) 3797 { 3798 if (overflow_infinity_range_p (vr)) 3799 *strict_overflow_p = true; 3800 return boolean_true_node; 3801 } 3802 3803 /* If VR is to the right of VAL, return false. */ 3804 tst = compare_values_warnv (vr->min, val, strict_overflow_p); 3805 if ((comp == LT_EXPR && (tst == 0 || tst == 1)) 3806 || (comp == LE_EXPR && tst == 1)) 3807 { 3808 if (overflow_infinity_range_p (vr)) 3809 *strict_overflow_p = true; 3810 return boolean_false_node; 3811 } 3812 3813 /* Otherwise, we don't know. */ 3814 return NULL_TREE; 3815 } 3816 else if (comp == GT_EXPR || comp == GE_EXPR) 3817 { 3818 int tst; 3819 3820 /* If VR is to the right of VAL, return true. */ 3821 tst = compare_values_warnv (vr->min, val, strict_overflow_p); 3822 if ((comp == GT_EXPR && tst == 1) 3823 || (comp == GE_EXPR && (tst == 0 || tst == 1))) 3824 { 3825 if (overflow_infinity_range_p (vr)) 3826 *strict_overflow_p = true; 3827 return boolean_true_node; 3828 } 3829 3830 /* If VR is to the left of VAL, return false. */ 3831 tst = compare_values_warnv (vr->max, val, strict_overflow_p); 3832 if ((comp == GT_EXPR && (tst == -1 || tst == 0)) 3833 || (comp == GE_EXPR && tst == -1)) 3834 { 3835 if (overflow_infinity_range_p (vr)) 3836 *strict_overflow_p = true; 3837 return boolean_false_node; 3838 } 3839 3840 /* Otherwise, we don't know. */ 3841 return NULL_TREE; 3842 } 3843 3844 gcc_unreachable (); 3845 } 3846 3847 3848 /* Debugging dumps. */ 3849 3850 void dump_value_range (FILE *, value_range_t *); 3851 void debug_value_range (value_range_t *); 3852 void dump_all_value_ranges (FILE *); 3853 void debug_all_value_ranges (void); 3854 void dump_vr_equiv (FILE *, bitmap); 3855 void debug_vr_equiv (bitmap); 3856 3857 3858 /* Dump value range VR to FILE. */ 3859 3860 void 3861 dump_value_range (FILE *file, value_range_t *vr) 3862 { 3863 if (vr == NULL) 3864 fprintf (file, "[]"); 3865 else if (vr->type == VR_UNDEFINED) 3866 fprintf (file, "UNDEFINED"); 3867 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) 3868 { 3869 tree type = TREE_TYPE (vr->min); 3870 3871 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? 
"~" : ""); 3872 3873 if (is_negative_overflow_infinity (vr->min)) 3874 fprintf (file, "-INF(OVF)"); 3875 else if (INTEGRAL_TYPE_P (type) 3876 && !TYPE_UNSIGNED (type) 3877 && vrp_val_is_min (vr->min)) 3878 fprintf (file, "-INF"); 3879 else 3880 print_generic_expr (file, vr->min, 0); 3881 3882 fprintf (file, ", "); 3883 3884 if (is_positive_overflow_infinity (vr->max)) 3885 fprintf (file, "+INF(OVF)"); 3886 else if (INTEGRAL_TYPE_P (type) 3887 && vrp_val_is_max (vr->max)) 3888 fprintf (file, "+INF"); 3889 else 3890 print_generic_expr (file, vr->max, 0); 3891 3892 fprintf (file, "]"); 3893 3894 if (vr->equiv) 3895 { 3896 bitmap_iterator bi; 3897 unsigned i, c = 0; 3898 3899 fprintf (file, " EQUIVALENCES: { "); 3900 3901 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) 3902 { 3903 print_generic_expr (file, ssa_name (i), 0); 3904 fprintf (file, " "); 3905 c++; 3906 } 3907 3908 fprintf (file, "} (%u elements)", c); 3909 } 3910 } 3911 else if (vr->type == VR_VARYING) 3912 fprintf (file, "VARYING"); 3913 else 3914 fprintf (file, "INVALID RANGE"); 3915 } 3916 3917 3918 /* Dump value range VR to stderr. */ 3919 3920 DEBUG_FUNCTION void 3921 debug_value_range (value_range_t *vr) 3922 { 3923 dump_value_range (stderr, vr); 3924 fprintf (stderr, "\n"); 3925 } 3926 3927 3928 /* Dump value ranges of all SSA_NAMEs to FILE. */ 3929 3930 void 3931 dump_all_value_ranges (FILE *file) 3932 { 3933 size_t i; 3934 3935 for (i = 0; i < num_vr_values; i++) 3936 { 3937 if (vr_value[i]) 3938 { 3939 print_generic_expr (file, ssa_name (i), 0); 3940 fprintf (file, ": "); 3941 dump_value_range (file, vr_value[i]); 3942 fprintf (file, "\n"); 3943 } 3944 } 3945 3946 fprintf (file, "\n"); 3947 } 3948 3949 3950 /* Dump all value ranges to stderr. */ 3951 3952 DEBUG_FUNCTION void 3953 debug_all_value_ranges (void) 3954 { 3955 dump_all_value_ranges (stderr); 3956 } 3957 3958 3959 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, 3960 create a new SSA name N and return the assertion assignment 3961 'V = ASSERT_EXPR <V, V OP W>'. */ 3962 3963 static gimple 3964 build_assert_expr_for (tree cond, tree v) 3965 { 3966 tree n; 3967 gimple assertion; 3968 3969 gcc_assert (TREE_CODE (v) == SSA_NAME); 3970 n = duplicate_ssa_name (v, NULL); 3971 3972 if (COMPARISON_CLASS_P (cond)) 3973 { 3974 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); 3975 assertion = gimple_build_assign (n, a); 3976 } 3977 else if (TREE_CODE (cond) == SSA_NAME) 3978 { 3979 /* Given V, build the assignment N = true. */ 3980 gcc_assert (v == cond); 3981 assertion = gimple_build_assign (n, boolean_true_node); 3982 } 3983 else 3984 gcc_unreachable (); 3985 3986 SSA_NAME_DEF_STMT (n) = assertion; 3987 3988 /* The new ASSERT_EXPR, creates a new SSA name that replaces the 3989 operand of the ASSERT_EXPR. Register the new name and the old one 3990 in the replacement table so that we can fix the SSA web after 3991 adding all the ASSERT_EXPRs. */ 3992 register_new_name_mapping (n, v); 3993 3994 return assertion; 3995 } 3996 3997 3998 /* Return false if EXPR is a predicate expression involving floating 3999 point values. */ 4000 4001 static inline bool 4002 fp_predicate (gimple stmt) 4003 { 4004 GIMPLE_CHECK (stmt, GIMPLE_COND); 4005 4006 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); 4007 } 4008 4009 4010 /* If the range of values taken by OP can be inferred after STMT executes, 4011 return the comparison code (COMP_CODE_P) and value (VAL_P) that 4012 describes the inferred range. Return true if a range could be 4013 inferred. 
*/ 4014 4015 static bool 4016 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p) 4017 { 4018 *val_p = NULL_TREE; 4019 *comp_code_p = ERROR_MARK; 4020 4021 /* Do not attempt to infer anything in names that flow through 4022 abnormal edges. */ 4023 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) 4024 return false; 4025 4026 /* Similarly, don't infer anything from statements that may throw 4027 exceptions. */ 4028 if (stmt_could_throw_p (stmt)) 4029 return false; 4030 4031 /* If STMT is the last statement of a basic block with no 4032 successors, there is no point inferring anything about any of its 4033 operands. We would not be able to find a proper insertion point 4034 for the assertion, anyway. */ 4035 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0) 4036 return false; 4037 4038 /* We can only assume that a pointer dereference will yield 4039 non-NULL if -fdelete-null-pointer-checks is enabled. */ 4040 if (flag_delete_null_pointer_checks 4041 && POINTER_TYPE_P (TREE_TYPE (op)) 4042 && gimple_code (stmt) != GIMPLE_ASM) 4043 { 4044 unsigned num_uses, num_loads, num_stores; 4045 4046 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores); 4047 if (num_loads + num_stores > 0) 4048 { 4049 *val_p = build_int_cst (TREE_TYPE (op), 0); 4050 *comp_code_p = NE_EXPR; 4051 return true; 4052 } 4053 } 4054 4055 return false; 4056 } 4057 4058 4059 void dump_asserts_for (FILE *, tree); 4060 void debug_asserts_for (tree); 4061 void dump_all_asserts (FILE *); 4062 void debug_all_asserts (void); 4063 4064 /* Dump all the registered assertions for NAME to FILE. */ 4065 4066 void 4067 dump_asserts_for (FILE *file, tree name) 4068 { 4069 assert_locus_t loc; 4070 4071 fprintf (file, "Assertions to be inserted for "); 4072 print_generic_expr (file, name, 0); 4073 fprintf (file, "\n"); 4074 4075 loc = asserts_for[SSA_NAME_VERSION (name)]; 4076 while (loc) 4077 { 4078 fprintf (file, "\t"); 4079 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0); 4080 fprintf (file, "\n\tBB #%d", loc->bb->index); 4081 if (loc->e) 4082 { 4083 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, 4084 loc->e->dest->index); 4085 dump_edge_info (file, loc->e, 0); 4086 } 4087 fprintf (file, "\n\tPREDICATE: "); 4088 print_generic_expr (file, name, 0); 4089 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]); 4090 print_generic_expr (file, loc->val, 0); 4091 fprintf (file, "\n\n"); 4092 loc = loc->next; 4093 } 4094 4095 fprintf (file, "\n"); 4096 } 4097 4098 4099 /* Dump all the registered assertions for NAME to stderr. */ 4100 4101 DEBUG_FUNCTION void 4102 debug_asserts_for (tree name) 4103 { 4104 dump_asserts_for (stderr, name); 4105 } 4106 4107 4108 /* Dump all the registered assertions for all the names to FILE. */ 4109 4110 void 4111 dump_all_asserts (FILE *file) 4112 { 4113 unsigned i; 4114 bitmap_iterator bi; 4115 4116 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); 4117 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) 4118 dump_asserts_for (file, ssa_name (i)); 4119 fprintf (file, "\n"); 4120 } 4121 4122 4123 /* Dump all the registered assertions for all the names to stderr. */ 4124 4125 DEBUG_FUNCTION void 4126 debug_all_asserts (void) 4127 { 4128 dump_all_asserts (stderr); 4129 } 4130 4131 4132 /* If NAME doesn't have an ASSERT_EXPR registered for asserting 4133 'EXPR COMP_CODE VAL' at a location that dominates block BB or 4134 E->DEST, then register this location as a possible insertion point 4135 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. 
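   For instance, if NE_EXPR against a zero pointer constant has already
   been registered for a hypothetical name p_1 at a block dominating BB,
   a second dereference of p_1 reaching this point registers nothing
   new.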
4136 4137 BB, E and SI provide the exact insertion point for the new 4138 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted 4139 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on 4140 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E 4141 must not be NULL. */ 4142 4143 static void 4144 register_new_assert_for (tree name, tree expr, 4145 enum tree_code comp_code, 4146 tree val, 4147 basic_block bb, 4148 edge e, 4149 gimple_stmt_iterator si) 4150 { 4151 assert_locus_t n, loc, last_loc; 4152 basic_block dest_bb; 4153 4154 gcc_checking_assert (bb == NULL || e == NULL); 4155 4156 if (e == NULL) 4157 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND 4158 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); 4159 4160 /* Never build an assert comparing against an integer constant with 4161 TREE_OVERFLOW set. This confuses our undefined overflow warning 4162 machinery. */ 4163 if (TREE_CODE (val) == INTEGER_CST 4164 && TREE_OVERFLOW (val)) 4165 val = build_int_cst_wide (TREE_TYPE (val), 4166 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val)); 4167 4168 /* The new assertion A will be inserted at BB or E. We need to 4169 determine if the new location is dominated by a previously 4170 registered location for A. If we are doing an edge insertion, 4171 assume that A will be inserted at E->DEST. Note that this is not 4172 necessarily true. 4173 4174 If E is a critical edge, it will be split. But even if E is 4175 split, the new block will dominate the same set of blocks that 4176 E->DEST dominates. 4177 4178 The reverse, however, is not true, blocks dominated by E->DEST 4179 will not be dominated by the new block created to split E. So, 4180 if the insertion location is on a critical edge, we will not use 4181 the new location to move another assertion previously registered 4182 at a block dominated by E->DEST. */ 4183 dest_bb = (bb) ? bb : e->dest; 4184 4185 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and 4186 VAL at a block dominating DEST_BB, then we don't need to insert a new 4187 one. Similarly, if the same assertion already exists at a block 4188 dominated by DEST_BB and the new location is not on a critical 4189 edge, then update the existing location for the assertion (i.e., 4190 move the assertion up in the dominance tree). 4191 4192 Note, this is implemented as a simple linked list because there 4193 should not be more than a handful of assertions registered per 4194 name. If this becomes a performance problem, a table hashed by 4195 COMP_CODE and VAL could be implemented. */ 4196 loc = asserts_for[SSA_NAME_VERSION (name)]; 4197 last_loc = loc; 4198 while (loc) 4199 { 4200 if (loc->comp_code == comp_code 4201 && (loc->val == val 4202 || operand_equal_p (loc->val, val, 0)) 4203 && (loc->expr == expr 4204 || operand_equal_p (loc->expr, expr, 0))) 4205 { 4206 /* If the assertion NAME COMP_CODE VAL has already been 4207 registered at a basic block that dominates DEST_BB, then 4208 we don't need to insert the same assertion again. Note 4209 that we don't check strict dominance here to avoid 4210 replicating the same assertion inside the same basic 4211 block more than once (e.g., when a pointer is 4212 dereferenced several times inside a block). 4213 4214 An exception to this rule are edge insertions. If the 4215 new assertion is to be inserted on edge E, then it will 4216 dominate all the other insertions that we may want to 4217 insert in DEST_BB. 
So, if we are doing an edge 4218 insertion, don't do this dominance check. */ 4219 if (e == NULL 4220 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb)) 4221 return; 4222 4223 /* Otherwise, if E is not a critical edge and DEST_BB 4224 dominates the existing location for the assertion, move 4225 the assertion up in the dominance tree by updating its 4226 location information. */ 4227 if ((e == NULL || !EDGE_CRITICAL_P (e)) 4228 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) 4229 { 4230 loc->bb = dest_bb; 4231 loc->e = e; 4232 loc->si = si; 4233 return; 4234 } 4235 } 4236 4237 /* Update the last node of the list and move to the next one. */ 4238 last_loc = loc; 4239 loc = loc->next; 4240 } 4241 4242 /* If we didn't find an assertion already registered for 4243 NAME COMP_CODE VAL, add a new one at the end of the list of 4244 assertions associated with NAME. */ 4245 n = XNEW (struct assert_locus_d); 4246 n->bb = dest_bb; 4247 n->e = e; 4248 n->si = si; 4249 n->comp_code = comp_code; 4250 n->val = val; 4251 n->expr = expr; 4252 n->next = NULL; 4253 4254 if (last_loc) 4255 last_loc->next = n; 4256 else 4257 asserts_for[SSA_NAME_VERSION (name)] = n; 4258 4259 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); 4260 } 4261 4262 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. 4263 Extract a suitable test code and value and store them into *CODE_P and 4264 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. 4265 4266 If no extraction was possible, return FALSE, otherwise return TRUE. 4267 4268 If INVERT is true, then we invert the result stored into *CODE_P. */ 4269 4270 static bool 4271 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, 4272 tree cond_op0, tree cond_op1, 4273 bool invert, enum tree_code *code_p, 4274 tree *val_p) 4275 { 4276 enum tree_code comp_code; 4277 tree val; 4278 4279 /* Otherwise, we have a comparison of the form NAME COMP VAL 4280 or VAL COMP NAME. */ 4281 if (name == cond_op1) 4282 { 4283 /* If the predicate is of the form VAL COMP NAME, flip 4284 COMP around because we need to register NAME as the 4285 first operand in the predicate. */ 4286 comp_code = swap_tree_comparison (cond_code); 4287 val = cond_op0; 4288 } 4289 else 4290 { 4291 /* The comparison is of the form NAME COMP VAL, so the 4292 comparison code remains unchanged. */ 4293 comp_code = cond_code; 4294 val = cond_op1; 4295 } 4296 4297 /* Invert the comparison code as necessary. */ 4298 if (invert) 4299 comp_code = invert_tree_comparison (comp_code, 0); 4300 4301 /* VRP does not handle float types. */ 4302 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val))) 4303 return false; 4304 4305 /* Do not register always-false predicates. 4306 FIXME: this works around a limitation in fold() when dealing with 4307 enumerations. Given 'enum { N1, N2 } x;', fold will not 4308 fold 'if (x > N2)' to 'if (0)'. */ 4309 if ((comp_code == GT_EXPR || comp_code == LT_EXPR) 4310 && INTEGRAL_TYPE_P (TREE_TYPE (val))) 4311 { 4312 tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); 4313 tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); 4314 4315 if (comp_code == GT_EXPR 4316 && (!max 4317 || compare_values (val, max) == 0)) 4318 return false; 4319 4320 if (comp_code == LT_EXPR 4321 && (!min 4322 || compare_values (val, min) == 0)) 4323 return false; 4324 } 4325 *code_p = comp_code; 4326 *val_p = val; 4327 return true; 4328 } 4329 4330 /* Try to register an edge assertion for SSA name NAME on edge E for 4331 the condition COND contributing to the conditional jump pointed to by BSI. 
4332 Invert the condition COND if INVERT is true. 4333 Return true if an assertion for NAME could be registered. */ 4334 4335 static bool 4336 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, 4337 enum tree_code cond_code, 4338 tree cond_op0, tree cond_op1, bool invert) 4339 { 4340 tree val; 4341 enum tree_code comp_code; 4342 bool retval = false; 4343 4344 if (!extract_code_and_val_from_cond_with_ops (name, cond_code, 4345 cond_op0, 4346 cond_op1, 4347 invert, &comp_code, &val)) 4348 return false; 4349 4350 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph 4351 reachable from E. */ 4352 if (live_on_edge (e, name) 4353 && !has_single_use (name)) 4354 { 4355 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi); 4356 retval = true; 4357 } 4358 4359 /* In the case of NAME <= CST and NAME being defined as 4360 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 4361 and NAME2 <= CST - CST2. We can do the same for NAME > CST. 4362 This catches range and anti-range tests. */ 4363 if ((comp_code == LE_EXPR 4364 || comp_code == GT_EXPR) 4365 && TREE_CODE (val) == INTEGER_CST 4366 && TYPE_UNSIGNED (TREE_TYPE (val))) 4367 { 4368 gimple def_stmt = SSA_NAME_DEF_STMT (name); 4369 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; 4370 4371 /* Extract CST2 from the (optional) addition. */ 4372 if (is_gimple_assign (def_stmt) 4373 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) 4374 { 4375 name2 = gimple_assign_rhs1 (def_stmt); 4376 cst2 = gimple_assign_rhs2 (def_stmt); 4377 if (TREE_CODE (name2) == SSA_NAME 4378 && TREE_CODE (cst2) == INTEGER_CST) 4379 def_stmt = SSA_NAME_DEF_STMT (name2); 4380 } 4381 4382 /* Extract NAME2 from the (optional) sign-changing cast. */ 4383 if (gimple_assign_cast_p (def_stmt)) 4384 { 4385 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) 4386 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) 4387 && (TYPE_PRECISION (gimple_expr_type (def_stmt)) 4388 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) 4389 name3 = gimple_assign_rhs1 (def_stmt); 4390 } 4391 4392 /* If name3 is used later, create an ASSERT_EXPR for it. */ 4393 if (name3 != NULL_TREE 4394 && TREE_CODE (name3) == SSA_NAME 4395 && (cst2 == NULL_TREE 4396 || TREE_CODE (cst2) == INTEGER_CST) 4397 && INTEGRAL_TYPE_P (TREE_TYPE (name3)) 4398 && live_on_edge (e, name3) 4399 && !has_single_use (name3)) 4400 { 4401 tree tmp; 4402 4403 /* Build an expression for the range test. */ 4404 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); 4405 if (cst2 != NULL_TREE) 4406 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); 4407 4408 if (dump_file) 4409 { 4410 fprintf (dump_file, "Adding assert for "); 4411 print_generic_expr (dump_file, name3, 0); 4412 fprintf (dump_file, " from "); 4413 print_generic_expr (dump_file, tmp, 0); 4414 fprintf (dump_file, "\n"); 4415 } 4416 4417 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi); 4418 4419 retval = true; 4420 } 4421 4422 /* If name2 is used later, create an ASSERT_EXPR for it. */ 4423 if (name2 != NULL_TREE 4424 && TREE_CODE (name2) == SSA_NAME 4425 && TREE_CODE (cst2) == INTEGER_CST 4426 && INTEGRAL_TYPE_P (TREE_TYPE (name2)) 4427 && live_on_edge (e, name2) 4428 && !has_single_use (name2)) 4429 { 4430 tree tmp; 4431 4432 /* Build an expression for the range test. 
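        For instance, assuming NAME was defined as
        NAME = (unsigned) NAME2 + CST2 and the edge test is NAME <= CST
        (see the comment above), the expression built here is the
        possibly converted NAME2 plus CST2, so the registered assertion
        effectively records NAME2 + CST2 <= CST.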
*/ 4433 tmp = name2; 4434 if (TREE_TYPE (name) != TREE_TYPE (name2)) 4435 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); 4436 if (cst2 != NULL_TREE) 4437 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); 4438 4439 if (dump_file) 4440 { 4441 fprintf (dump_file, "Adding assert for "); 4442 print_generic_expr (dump_file, name2, 0); 4443 fprintf (dump_file, " from "); 4444 print_generic_expr (dump_file, tmp, 0); 4445 fprintf (dump_file, "\n"); 4446 } 4447 4448 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi); 4449 4450 retval = true; 4451 } 4452 } 4453 4454 return retval; 4455 } 4456 4457 /* OP is an operand of a truth value expression which is known to have 4458 a particular value. Register any asserts for OP and for any 4459 operands in OP's defining statement. 4460 4461 If CODE is EQ_EXPR, then we want to register OP is zero (false), 4462 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */ 4463 4464 static bool 4465 register_edge_assert_for_1 (tree op, enum tree_code code, 4466 edge e, gimple_stmt_iterator bsi) 4467 { 4468 bool retval = false; 4469 gimple op_def; 4470 tree val; 4471 enum tree_code rhs_code; 4472 4473 /* We only care about SSA_NAMEs. */ 4474 if (TREE_CODE (op) != SSA_NAME) 4475 return false; 4476 4477 /* We know that OP will have a zero or nonzero value. If OP is used 4478 more than once go ahead and register an assert for OP. 4479 4480 The FOUND_IN_SUBGRAPH support is not helpful in this situation as 4481 it will always be set for OP (because OP is used in a COND_EXPR in 4482 the subgraph). */ 4483 if (!has_single_use (op)) 4484 { 4485 val = build_int_cst (TREE_TYPE (op), 0); 4486 register_new_assert_for (op, op, code, val, NULL, e, bsi); 4487 retval = true; 4488 } 4489 4490 /* Now look at how OP is set. If it's set from a comparison, 4491 a truth operation or some bit operations, then we may be able 4492 to register information about the operands of that assignment. */ 4493 op_def = SSA_NAME_DEF_STMT (op); 4494 if (gimple_code (op_def) != GIMPLE_ASSIGN) 4495 return retval; 4496 4497 rhs_code = gimple_assign_rhs_code (op_def); 4498 4499 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) 4500 { 4501 bool invert = (code == EQ_EXPR ? true : false); 4502 tree op0 = gimple_assign_rhs1 (op_def); 4503 tree op1 = gimple_assign_rhs2 (op_def); 4504 4505 if (TREE_CODE (op0) == SSA_NAME) 4506 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, 4507 invert); 4508 if (TREE_CODE (op1) == SSA_NAME) 4509 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, 4510 invert); 4511 } 4512 else if ((code == NE_EXPR 4513 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR) 4514 || (code == EQ_EXPR 4515 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)) 4516 { 4517 /* Recurse on each operand. */ 4518 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4519 code, e, bsi); 4520 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def), 4521 code, e, bsi); 4522 } 4523 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR 4524 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1) 4525 { 4526 /* Recurse, flipping CODE. */ 4527 code = invert_tree_comparison (code, false); 4528 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4529 code, e, bsi); 4530 } 4531 else if (gimple_assign_rhs_code (op_def) == SSA_NAME) 4532 { 4533 /* Recurse through the copy. 
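         For instance, if OP was set by a hypothetical copy OP = OP2,
         any zero/nonzero fact established for OP equally holds for OP2,
         so we try to register the same assertion for OP2 as well.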
*/ 4534 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4535 code, e, bsi); 4536 } 4537 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) 4538 { 4539 /* Recurse through the type conversion. */ 4540 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4541 code, e, bsi); 4542 } 4543 4544 return retval; 4545 } 4546 4547 /* Try to register an edge assertion for SSA name NAME on edge E for 4548 the condition COND contributing to the conditional jump pointed to by SI. 4549 Return true if an assertion for NAME could be registered. */ 4550 4551 static bool 4552 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si, 4553 enum tree_code cond_code, tree cond_op0, 4554 tree cond_op1) 4555 { 4556 tree val; 4557 enum tree_code comp_code; 4558 bool retval = false; 4559 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0; 4560 4561 /* Do not attempt to infer anything in names that flow through 4562 abnormal edges. */ 4563 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) 4564 return false; 4565 4566 if (!extract_code_and_val_from_cond_with_ops (name, cond_code, 4567 cond_op0, cond_op1, 4568 is_else_edge, 4569 &comp_code, &val)) 4570 return false; 4571 4572 /* Register ASSERT_EXPRs for name. */ 4573 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0, 4574 cond_op1, is_else_edge); 4575 4576 4577 /* If COND is effectively an equality test of an SSA_NAME against 4578 the value zero or one, then we may be able to assert values 4579 for SSA_NAMEs which flow into COND. */ 4580 4581 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining 4582 statement of NAME we can assert both operands of the BIT_AND_EXPR 4583 have nonzero value. */ 4584 if (((comp_code == EQ_EXPR && integer_onep (val)) 4585 || (comp_code == NE_EXPR && integer_zerop (val)))) 4586 { 4587 gimple def_stmt = SSA_NAME_DEF_STMT (name); 4588 4589 if (is_gimple_assign (def_stmt) 4590 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR) 4591 { 4592 tree op0 = gimple_assign_rhs1 (def_stmt); 4593 tree op1 = gimple_assign_rhs2 (def_stmt); 4594 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si); 4595 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si); 4596 } 4597 } 4598 4599 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining 4600 statement of NAME we can assert both operands of the BIT_IOR_EXPR 4601 have zero value. */ 4602 if (((comp_code == EQ_EXPR && integer_zerop (val)) 4603 || (comp_code == NE_EXPR && integer_onep (val)))) 4604 { 4605 gimple def_stmt = SSA_NAME_DEF_STMT (name); 4606 4607 /* For BIT_IOR_EXPR only if NAME == 0 both operands have 4608 necessarily zero value, or if type-precision is one. */ 4609 if (is_gimple_assign (def_stmt) 4610 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR 4611 && (TYPE_PRECISION (TREE_TYPE (name)) == 1 4612 || comp_code == EQ_EXPR))) 4613 { 4614 tree op0 = gimple_assign_rhs1 (def_stmt); 4615 tree op1 = gimple_assign_rhs2 (def_stmt); 4616 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si); 4617 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si); 4618 } 4619 } 4620 4621 return retval; 4622 } 4623 4624 4625 /* Determine whether the outgoing edges of BB should receive an 4626 ASSERT_EXPR for each of the operands of BB's LAST statement. 4627 The last statement of BB must be a COND_EXPR. 
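   For instance, for a hypothetical block ending in 'if (x_1 > 10)',
   the edge taken when the predicate is true may receive
   ASSERT_EXPR <x_1, x_1 > 10> and the false edge may receive
   ASSERT_EXPR <x_1, x_1 <= 10>, provided x_1 is live on the edge and
   has more than a single use.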
4628 4629 If any of the sub-graphs rooted at BB have an interesting use of 4630 the predicate operands, an assert location node is added to the 4631 list of assertions for the corresponding operands. */ 4632 4633 static bool 4634 find_conditional_asserts (basic_block bb, gimple last) 4635 { 4636 bool need_assert; 4637 gimple_stmt_iterator bsi; 4638 tree op; 4639 edge_iterator ei; 4640 edge e; 4641 ssa_op_iter iter; 4642 4643 need_assert = false; 4644 bsi = gsi_for_stmt (last); 4645 4646 /* Look for uses of the operands in each of the sub-graphs 4647 rooted at BB. We need to check each of the outgoing edges 4648 separately, so that we know what kind of ASSERT_EXPR to 4649 insert. */ 4650 FOR_EACH_EDGE (e, ei, bb->succs) 4651 { 4652 if (e->dest == bb) 4653 continue; 4654 4655 /* Register the necessary assertions for each operand in the 4656 conditional predicate. */ 4657 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) 4658 { 4659 need_assert |= register_edge_assert_for (op, e, bsi, 4660 gimple_cond_code (last), 4661 gimple_cond_lhs (last), 4662 gimple_cond_rhs (last)); 4663 } 4664 } 4665 4666 return need_assert; 4667 } 4668 4669 struct case_info 4670 { 4671 tree expr; 4672 basic_block bb; 4673 }; 4674 4675 /* Compare two case labels sorting first by the destination bb index 4676 and then by the case value. */ 4677 4678 static int 4679 compare_case_labels (const void *p1, const void *p2) 4680 { 4681 const struct case_info *ci1 = (const struct case_info *) p1; 4682 const struct case_info *ci2 = (const struct case_info *) p2; 4683 int idx1 = ci1->bb->index; 4684 int idx2 = ci2->bb->index; 4685 4686 if (idx1 < idx2) 4687 return -1; 4688 else if (idx1 == idx2) 4689 { 4690 /* Make sure the default label is first in a group. */ 4691 if (!CASE_LOW (ci1->expr)) 4692 return -1; 4693 else if (!CASE_LOW (ci2->expr)) 4694 return 1; 4695 else 4696 return tree_int_cst_compare (CASE_LOW (ci1->expr), 4697 CASE_LOW (ci2->expr)); 4698 } 4699 else 4700 return 1; 4701 } 4702 4703 /* Determine whether the outgoing edges of BB should receive an 4704 ASSERT_EXPR for each of the operands of BB's LAST statement. 4705 The last statement of BB must be a SWITCH_EXPR. 4706 4707 If any of the sub-graphs rooted at BB have an interesting use of 4708 the predicate operands, an assert location node is added to the 4709 list of assertions for the corresponding operands. */ 4710 4711 static bool 4712 find_switch_asserts (basic_block bb, gimple last) 4713 { 4714 bool need_assert; 4715 gimple_stmt_iterator bsi; 4716 tree op; 4717 edge e; 4718 struct case_info *ci; 4719 size_t n = gimple_switch_num_labels (last); 4720 #if GCC_VERSION >= 4000 4721 unsigned int idx; 4722 #else 4723 /* Work around GCC 3.4 bug (PR 37086). */ 4724 volatile unsigned int idx; 4725 #endif 4726 4727 need_assert = false; 4728 bsi = gsi_for_stmt (last); 4729 op = gimple_switch_index (last); 4730 if (TREE_CODE (op) != SSA_NAME) 4731 return false; 4732 4733 /* Build a vector of case labels sorted by destination label. 
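     For instance, if the hypothetical labels 'case 1:' and 'case 7:'
     both branch to the same block, sorting groups them together and the
     assertion registered on that edge covers the combined range [1, 7].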
*/ 4734 ci = XNEWVEC (struct case_info, n); 4735 for (idx = 0; idx < n; ++idx) 4736 { 4737 ci[idx].expr = gimple_switch_label (last, idx); 4738 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr)); 4739 } 4740 qsort (ci, n, sizeof (struct case_info), compare_case_labels); 4741 4742 for (idx = 0; idx < n; ++idx) 4743 { 4744 tree min, max; 4745 tree cl = ci[idx].expr; 4746 basic_block cbb = ci[idx].bb; 4747 4748 min = CASE_LOW (cl); 4749 max = CASE_HIGH (cl); 4750 4751 /* If there are multiple case labels with the same destination 4752 we need to combine them to a single value range for the edge. */ 4753 if (idx + 1 < n && cbb == ci[idx + 1].bb) 4754 { 4755 /* Skip labels until the last of the group. */ 4756 do { 4757 ++idx; 4758 } while (idx < n && cbb == ci[idx].bb); 4759 --idx; 4760 4761 /* Pick up the maximum of the case label range. */ 4762 if (CASE_HIGH (ci[idx].expr)) 4763 max = CASE_HIGH (ci[idx].expr); 4764 else 4765 max = CASE_LOW (ci[idx].expr); 4766 } 4767 4768 /* Nothing to do if the range includes the default label until we 4769 can register anti-ranges. */ 4770 if (min == NULL_TREE) 4771 continue; 4772 4773 /* Find the edge to register the assert expr on. */ 4774 e = find_edge (bb, cbb); 4775 4776 /* Register the necessary assertions for the operand in the 4777 SWITCH_EXPR. */ 4778 need_assert |= register_edge_assert_for (op, e, bsi, 4779 max ? GE_EXPR : EQ_EXPR, 4780 op, 4781 fold_convert (TREE_TYPE (op), 4782 min)); 4783 if (max) 4784 { 4785 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR, 4786 op, 4787 fold_convert (TREE_TYPE (op), 4788 max)); 4789 } 4790 } 4791 4792 XDELETEVEC (ci); 4793 return need_assert; 4794 } 4795 4796 4797 /* Traverse all the statements in block BB looking for statements that 4798 may generate useful assertions for the SSA names in their operand. 4799 If a statement produces a useful assertion A for name N_i, then the 4800 list of assertions already generated for N_i is scanned to 4801 determine if A is actually needed. 4802 4803 If N_i already had the assertion A at a location dominating the 4804 current location, then nothing needs to be done. Otherwise, the 4805 new location for A is recorded instead. 4806 4807 1- For every statement S in BB, all the variables used by S are 4808 added to bitmap FOUND_IN_SUBGRAPH. 4809 4810 2- If statement S uses an operand N in a way that exposes a known 4811 value range for N, then if N was not already generated by an 4812 ASSERT_EXPR, create a new assert location for N. For instance, 4813 if N is a pointer and the statement dereferences it, we can 4814 assume that N is not NULL. 4815 4816 3- COND_EXPRs are a special case of #2. We can derive range 4817 information from the predicate but need to insert different 4818 ASSERT_EXPRs for each of the sub-graphs rooted at the 4819 conditional block. If the last statement of BB is a conditional 4820 expression of the form 'X op Y', then 4821 4822 a) Remove X and Y from the set FOUND_IN_SUBGRAPH. 4823 4824 b) If the conditional is the only entry point to the sub-graph 4825 corresponding to the THEN_CLAUSE, recurse into it. On 4826 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then 4827 an ASSERT_EXPR is added for the corresponding variable. 4828 4829 c) Repeat step (b) on the ELSE_CLAUSE. 4830 4831 d) Mark X and Y in FOUND_IN_SUBGRAPH. 4832 4833 For instance, 4834 4835 if (a == 9) 4836 b = a; 4837 else 4838 b = c + 1; 4839 4840 In this case, an assertion on the THEN clause is useful to 4841 determine that 'a' is always 9 on that edge. 
However, an assertion 4842 on the ELSE clause would be unnecessary. 4843 4844 4- If BB does not end in a conditional expression, then we recurse 4845 into BB's dominator children. 4846 4847 At the end of the recursive traversal, every SSA name will have a 4848 list of locations where ASSERT_EXPRs should be added. When a new 4849 location for name N is found, it is registered by calling 4850 register_new_assert_for. That function keeps track of all the 4851 registered assertions to prevent adding unnecessary assertions. 4852 For instance, if a pointer P_4 is dereferenced more than once in a 4853 dominator tree, only the location dominating all the dereference of 4854 P_4 will receive an ASSERT_EXPR. 4855 4856 If this function returns true, then it means that there are names 4857 for which we need to generate ASSERT_EXPRs. Those assertions are 4858 inserted by process_assert_insertions. */ 4859 4860 static bool 4861 find_assert_locations_1 (basic_block bb, sbitmap live) 4862 { 4863 gimple_stmt_iterator si; 4864 gimple last; 4865 gimple phi; 4866 bool need_assert; 4867 4868 need_assert = false; 4869 last = last_stmt (bb); 4870 4871 /* If BB's last statement is a conditional statement involving integer 4872 operands, determine if we need to add ASSERT_EXPRs. */ 4873 if (last 4874 && gimple_code (last) == GIMPLE_COND 4875 && !fp_predicate (last) 4876 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) 4877 need_assert |= find_conditional_asserts (bb, last); 4878 4879 /* If BB's last statement is a switch statement involving integer 4880 operands, determine if we need to add ASSERT_EXPRs. */ 4881 if (last 4882 && gimple_code (last) == GIMPLE_SWITCH 4883 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) 4884 need_assert |= find_switch_asserts (bb, last); 4885 4886 /* Traverse all the statements in BB marking used names and looking 4887 for statements that may infer assertions for their used operands. */ 4888 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) 4889 { 4890 gimple stmt; 4891 tree op; 4892 ssa_op_iter i; 4893 4894 stmt = gsi_stmt (si); 4895 4896 if (is_gimple_debug (stmt)) 4897 continue; 4898 4899 /* See if we can derive an assertion for any of STMT's operands. */ 4900 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) 4901 { 4902 tree value; 4903 enum tree_code comp_code; 4904 4905 /* Mark OP in our live bitmap. */ 4906 SET_BIT (live, SSA_NAME_VERSION (op)); 4907 4908 /* If OP is used in such a way that we can infer a value 4909 range for it, and we don't find a previous assertion for 4910 it, create a new assertion location node for OP. */ 4911 if (infer_value_range (stmt, op, &comp_code, &value)) 4912 { 4913 /* If we are able to infer a nonzero value range for OP, 4914 then walk backwards through the use-def chain to see if OP 4915 was set via a typecast. 4916 4917 If so, then we can also infer a nonzero value range 4918 for the operand of the NOP_EXPR. */ 4919 if (comp_code == NE_EXPR && integer_zerop (value)) 4920 { 4921 tree t = op; 4922 gimple def_stmt = SSA_NAME_DEF_STMT (t); 4923 4924 while (is_gimple_assign (def_stmt) 4925 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR 4926 && TREE_CODE 4927 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME 4928 && POINTER_TYPE_P 4929 (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) 4930 { 4931 t = gimple_assign_rhs1 (def_stmt); 4932 def_stmt = SSA_NAME_DEF_STMT (t); 4933 4934 /* Note we want to register the assert for the 4935 operand of the NOP_EXPR after SI, not after the 4936 conversion. */ 4937 if (! 
has_single_use (t)) 4938 { 4939 register_new_assert_for (t, t, comp_code, value, 4940 bb, NULL, si); 4941 need_assert = true; 4942 } 4943 } 4944 } 4945 4946 /* If OP is used only once, namely in this STMT, don't 4947 bother creating an ASSERT_EXPR for it. Such an 4948 ASSERT_EXPR would do nothing but increase compile time. */ 4949 if (!has_single_use (op)) 4950 { 4951 register_new_assert_for (op, op, comp_code, value, 4952 bb, NULL, si); 4953 need_assert = true; 4954 } 4955 } 4956 } 4957 } 4958 4959 /* Traverse all PHI nodes in BB marking used operands. */ 4960 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si)) 4961 { 4962 use_operand_p arg_p; 4963 ssa_op_iter i; 4964 phi = gsi_stmt (si); 4965 4966 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE) 4967 { 4968 tree arg = USE_FROM_PTR (arg_p); 4969 if (TREE_CODE (arg) == SSA_NAME) 4970 SET_BIT (live, SSA_NAME_VERSION (arg)); 4971 } 4972 } 4973 4974 return need_assert; 4975 } 4976 4977 /* Do an RPO walk over the function computing SSA name liveness 4978 on-the-fly and deciding on assert expressions to insert. 4979 Returns true if there are assert expressions to be inserted. */ 4980 4981 static bool 4982 find_assert_locations (void) 4983 { 4984 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS); 4985 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS); 4986 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS); 4987 int rpo_cnt, i; 4988 bool need_asserts; 4989 4990 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS); 4991 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false); 4992 for (i = 0; i < rpo_cnt; ++i) 4993 bb_rpo[rpo[i]] = i; 4994 4995 need_asserts = false; 4996 for (i = rpo_cnt-1; i >= 0; --i) 4997 { 4998 basic_block bb = BASIC_BLOCK (rpo[i]); 4999 edge e; 5000 edge_iterator ei; 5001 5002 if (!live[rpo[i]]) 5003 { 5004 live[rpo[i]] = sbitmap_alloc (num_ssa_names); 5005 sbitmap_zero (live[rpo[i]]); 5006 } 5007 5008 /* Process BB and update the live information with uses in 5009 this block. */ 5010 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]); 5011 5012 /* Merge liveness into the predecessor blocks and free it. */ 5013 if (!sbitmap_empty_p (live[rpo[i]])) 5014 { 5015 int pred_rpo = i; 5016 FOR_EACH_EDGE (e, ei, bb->preds) 5017 { 5018 int pred = e->src->index; 5019 if (e->flags & EDGE_DFS_BACK) 5020 continue; 5021 5022 if (!live[pred]) 5023 { 5024 live[pred] = sbitmap_alloc (num_ssa_names); 5025 sbitmap_zero (live[pred]); 5026 } 5027 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]); 5028 5029 if (bb_rpo[pred] < pred_rpo) 5030 pred_rpo = bb_rpo[pred]; 5031 } 5032 5033 /* Record the RPO number of the last visited block that needs 5034 live information from this block. */ 5035 last_rpo[rpo[i]] = pred_rpo; 5036 } 5037 else 5038 { 5039 sbitmap_free (live[rpo[i]]); 5040 live[rpo[i]] = NULL; 5041 } 5042 5043 /* We can free all successors live bitmaps if all their 5044 predecessors have been visited already. */ 5045 FOR_EACH_EDGE (e, ei, bb->succs) 5046 if (last_rpo[e->dest->index] == i 5047 && live[e->dest->index]) 5048 { 5049 sbitmap_free (live[e->dest->index]); 5050 live[e->dest->index] = NULL; 5051 } 5052 } 5053 5054 XDELETEVEC (rpo); 5055 XDELETEVEC (bb_rpo); 5056 XDELETEVEC (last_rpo); 5057 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i) 5058 if (live[i]) 5059 sbitmap_free (live[i]); 5060 XDELETEVEC (live); 5061 5062 return need_asserts; 5063 } 5064 5065 /* Create an ASSERT_EXPR for NAME and insert it in the location 5066 indicated by LOC. 
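   For instance, an assertion generated by a GIMPLE_COND or
   GIMPLE_SWITCH is inserted on the corresponding outgoing edge, while
   one generated by, say, a pointer dereference is inserted right after
   the dereferencing statement (or on the single non-abnormal outgoing
   edge when that statement must end its block).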
Return true if we made any edge insertions. */ 5067 5068 static bool 5069 process_assert_insertions_for (tree name, assert_locus_t loc) 5070 { 5071 /* Build the comparison expression NAME_i COMP_CODE VAL. */ 5072 gimple stmt; 5073 tree cond; 5074 gimple assert_stmt; 5075 edge_iterator ei; 5076 edge e; 5077 5078 /* If we have X <=> X do not insert an assert expr for that. */ 5079 if (loc->expr == loc->val) 5080 return false; 5081 5082 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val); 5083 assert_stmt = build_assert_expr_for (cond, name); 5084 if (loc->e) 5085 { 5086 /* We have been asked to insert the assertion on an edge. This 5087 is used only by COND_EXPR and SWITCH_EXPR assertions. */ 5088 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND 5089 || (gimple_code (gsi_stmt (loc->si)) 5090 == GIMPLE_SWITCH)); 5091 5092 gsi_insert_on_edge (loc->e, assert_stmt); 5093 return true; 5094 } 5095 5096 /* Otherwise, we can insert right after LOC->SI iff the 5097 statement must not be the last statement in the block. */ 5098 stmt = gsi_stmt (loc->si); 5099 if (!stmt_ends_bb_p (stmt)) 5100 { 5101 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT); 5102 return false; 5103 } 5104 5105 /* If STMT must be the last statement in BB, we can only insert new 5106 assertions on the non-abnormal edge out of BB. Note that since 5107 STMT is not control flow, there may only be one non-abnormal edge 5108 out of BB. */ 5109 FOR_EACH_EDGE (e, ei, loc->bb->succs) 5110 if (!(e->flags & EDGE_ABNORMAL)) 5111 { 5112 gsi_insert_on_edge (e, assert_stmt); 5113 return true; 5114 } 5115 5116 gcc_unreachable (); 5117 } 5118 5119 5120 /* Process all the insertions registered for every name N_i registered 5121 in NEED_ASSERT_FOR. The list of assertions to be inserted are 5122 found in ASSERTS_FOR[i]. */ 5123 5124 static void 5125 process_assert_insertions (void) 5126 { 5127 unsigned i; 5128 bitmap_iterator bi; 5129 bool update_edges_p = false; 5130 int num_asserts = 0; 5131 5132 if (dump_file && (dump_flags & TDF_DETAILS)) 5133 dump_all_asserts (dump_file); 5134 5135 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) 5136 { 5137 assert_locus_t loc = asserts_for[i]; 5138 gcc_assert (loc); 5139 5140 while (loc) 5141 { 5142 assert_locus_t next = loc->next; 5143 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc); 5144 free (loc); 5145 loc = next; 5146 num_asserts++; 5147 } 5148 } 5149 5150 if (update_edges_p) 5151 gsi_commit_edge_inserts (); 5152 5153 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted", 5154 num_asserts); 5155 } 5156 5157 5158 /* Traverse the flowgraph looking for conditional jumps to insert range 5159 expressions. These range expressions are meant to provide information 5160 to optimizations that need to reason in terms of value ranges. They 5161 will not be expanded into RTL. For instance, given: 5162 5163 x = ... 5164 y = ... 5165 if (x < y) 5166 y = x - 2; 5167 else 5168 x = y + 3; 5169 5170 this pass will transform the code into: 5171 5172 x = ... 5173 y = ... 5174 if (x < y) 5175 { 5176 x = ASSERT_EXPR <x, x < y> 5177 y = x - 2 5178 } 5179 else 5180 { 5181 y = ASSERT_EXPR <y, x <= y> 5182 x = y + 3 5183 } 5184 5185 The idea is that once copy and constant propagation have run, other 5186 optimizations will be able to determine what ranges of values can 'x' 5187 take in different paths of the code, simply by checking the reaching 5188 definition of 'x'. 
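   For instance, in the THEN arm above the reaching definition of 'x' is
   ASSERT_EXPR <x, x < y>, so a pass evaluating 'y = x - 2' may assume
   x < y without re-deriving that fact from the control flow.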
*/ 5189 5190 static void 5191 insert_range_assertions (void) 5192 { 5193 need_assert_for = BITMAP_ALLOC (NULL); 5194 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names); 5195 5196 calculate_dominance_info (CDI_DOMINATORS); 5197 5198 if (find_assert_locations ()) 5199 { 5200 process_assert_insertions (); 5201 update_ssa (TODO_update_ssa_no_phi); 5202 } 5203 5204 if (dump_file && (dump_flags & TDF_DETAILS)) 5205 { 5206 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n"); 5207 dump_function_to_file (current_function_decl, dump_file, dump_flags); 5208 } 5209 5210 free (asserts_for); 5211 BITMAP_FREE (need_assert_for); 5212 } 5213 5214 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays 5215 and "struct" hacks. If VRP can determine that the 5216 array subscript is a constant, check if it is outside valid 5217 range. If the array subscript is a RANGE, warn if it is 5218 non-overlapping with valid range. 5219 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */ 5220 5221 static void 5222 check_array_ref (location_t location, tree ref, bool ignore_off_by_one) 5223 { 5224 value_range_t* vr = NULL; 5225 tree low_sub, up_sub; 5226 tree low_bound, up_bound, up_bound_p1; 5227 tree base; 5228 5229 if (TREE_NO_WARNING (ref)) 5230 return; 5231 5232 low_sub = up_sub = TREE_OPERAND (ref, 1); 5233 up_bound = array_ref_up_bound (ref); 5234 5235 /* Can not check flexible arrays. */ 5236 if (!up_bound 5237 || TREE_CODE (up_bound) != INTEGER_CST) 5238 return; 5239 5240 /* Accesses to trailing arrays via pointers may access storage 5241 beyond the types array bounds. */ 5242 base = get_base_address (ref); 5243 if (base && TREE_CODE (base) == MEM_REF) 5244 { 5245 tree cref, next = NULL_TREE; 5246 5247 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF) 5248 return; 5249 5250 cref = TREE_OPERAND (ref, 0); 5251 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE) 5252 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1)); 5253 next && TREE_CODE (next) != FIELD_DECL; 5254 next = DECL_CHAIN (next)) 5255 ; 5256 5257 /* If this is the last field in a struct type or a field in a 5258 union type do not warn. */ 5259 if (!next) 5260 return; 5261 } 5262 5263 low_bound = array_ref_low_bound (ref); 5264 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node); 5265 5266 if (TREE_CODE (low_sub) == SSA_NAME) 5267 { 5268 vr = get_value_range (low_sub); 5269 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) 5270 { 5271 low_sub = vr->type == VR_RANGE ? vr->max : vr->min; 5272 up_sub = vr->type == VR_RANGE ? vr->min : vr->max; 5273 } 5274 } 5275 5276 if (vr && vr->type == VR_ANTI_RANGE) 5277 { 5278 if (TREE_CODE (up_sub) == INTEGER_CST 5279 && tree_int_cst_lt (up_bound, up_sub) 5280 && TREE_CODE (low_sub) == INTEGER_CST 5281 && tree_int_cst_lt (low_sub, low_bound)) 5282 { 5283 warning_at (location, OPT_Warray_bounds, 5284 "array subscript is outside array bounds"); 5285 TREE_NO_WARNING (ref) = 1; 5286 } 5287 } 5288 else if (TREE_CODE (up_sub) == INTEGER_CST 5289 && (ignore_off_by_one 5290 ? 
(tree_int_cst_lt (up_bound, up_sub) 5291 && !tree_int_cst_equal (up_bound_p1, up_sub)) 5292 : (tree_int_cst_lt (up_bound, up_sub) 5293 || tree_int_cst_equal (up_bound_p1, up_sub)))) 5294 { 5295 warning_at (location, OPT_Warray_bounds, 5296 "array subscript is above array bounds"); 5297 TREE_NO_WARNING (ref) = 1; 5298 } 5299 else if (TREE_CODE (low_sub) == INTEGER_CST 5300 && tree_int_cst_lt (low_sub, low_bound)) 5301 { 5302 warning_at (location, OPT_Warray_bounds, 5303 "array subscript is below array bounds"); 5304 TREE_NO_WARNING (ref) = 1; 5305 } 5306 } 5307 5308 /* Searches if the expr T, located at LOCATION computes 5309 address of an ARRAY_REF, and call check_array_ref on it. */ 5310 5311 static void 5312 search_for_addr_array (tree t, location_t location) 5313 { 5314 while (TREE_CODE (t) == SSA_NAME) 5315 { 5316 gimple g = SSA_NAME_DEF_STMT (t); 5317 5318 if (gimple_code (g) != GIMPLE_ASSIGN) 5319 return; 5320 5321 if (get_gimple_rhs_class (gimple_assign_rhs_code (g)) 5322 != GIMPLE_SINGLE_RHS) 5323 return; 5324 5325 t = gimple_assign_rhs1 (g); 5326 } 5327 5328 5329 /* We are only interested in addresses of ARRAY_REF's. */ 5330 if (TREE_CODE (t) != ADDR_EXPR) 5331 return; 5332 5333 /* Check each ARRAY_REFs in the reference chain. */ 5334 do 5335 { 5336 if (TREE_CODE (t) == ARRAY_REF) 5337 check_array_ref (location, t, true /*ignore_off_by_one*/); 5338 5339 t = TREE_OPERAND (t, 0); 5340 } 5341 while (handled_component_p (t)); 5342 5343 if (TREE_CODE (t) == MEM_REF 5344 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR 5345 && !TREE_NO_WARNING (t)) 5346 { 5347 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0); 5348 tree low_bound, up_bound, el_sz; 5349 double_int idx; 5350 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE 5351 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE 5352 || !TYPE_DOMAIN (TREE_TYPE (tem))) 5353 return; 5354 5355 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); 5356 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem))); 5357 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem))); 5358 if (!low_bound 5359 || TREE_CODE (low_bound) != INTEGER_CST 5360 || !up_bound 5361 || TREE_CODE (up_bound) != INTEGER_CST 5362 || !el_sz 5363 || TREE_CODE (el_sz) != INTEGER_CST) 5364 return; 5365 5366 idx = mem_ref_offset (t); 5367 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR); 5368 if (double_int_scmp (idx, double_int_zero) < 0) 5369 { 5370 warning_at (location, OPT_Warray_bounds, 5371 "array subscript is below array bounds"); 5372 TREE_NO_WARNING (t) = 1; 5373 } 5374 else if (double_int_scmp (idx, 5375 double_int_add 5376 (double_int_add 5377 (tree_to_double_int (up_bound), 5378 double_int_neg 5379 (tree_to_double_int (low_bound))), 5380 double_int_one)) > 0) 5381 { 5382 warning_at (location, OPT_Warray_bounds, 5383 "array subscript is above array bounds"); 5384 TREE_NO_WARNING (t) = 1; 5385 } 5386 } 5387 } 5388 5389 /* walk_tree() callback that checks if *TP is 5390 an ARRAY_REF inside an ADDR_EXPR (in which an array 5391 subscript one outside the valid range is allowed). Call 5392 check_array_ref for each ARRAY_REF found. The location is 5393 passed in DATA. 
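   For instance, for a hypothetical declaration 'int a[10]', taking the
   address '&a[10]' (one past the end) is accepted, while the access
   'a[10]' is diagnosed with -Warray-bounds.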
*/ 5394 5395 static tree 5396 check_array_bounds (tree *tp, int *walk_subtree, void *data) 5397 { 5398 tree t = *tp; 5399 struct walk_stmt_info *wi = (struct walk_stmt_info *) data; 5400 location_t location; 5401 5402 if (EXPR_HAS_LOCATION (t)) 5403 location = EXPR_LOCATION (t); 5404 else 5405 { 5406 location_t *locp = (location_t *) wi->info; 5407 location = *locp; 5408 } 5409 5410 *walk_subtree = TRUE; 5411 5412 if (TREE_CODE (t) == ARRAY_REF) 5413 check_array_ref (location, t, false /*ignore_off_by_one*/); 5414 5415 if (TREE_CODE (t) == MEM_REF 5416 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0))) 5417 search_for_addr_array (TREE_OPERAND (t, 0), location); 5418 5419 if (TREE_CODE (t) == ADDR_EXPR) 5420 *walk_subtree = FALSE; 5421 5422 return NULL_TREE; 5423 } 5424 5425 /* Walk over all statements of all reachable BBs and call check_array_bounds 5426 on them. */ 5427 5428 static void 5429 check_all_array_refs (void) 5430 { 5431 basic_block bb; 5432 gimple_stmt_iterator si; 5433 5434 FOR_EACH_BB (bb) 5435 { 5436 edge_iterator ei; 5437 edge e; 5438 bool executable = false; 5439 5440 /* Skip blocks that were found to be unreachable. */ 5441 FOR_EACH_EDGE (e, ei, bb->preds) 5442 executable |= !!(e->flags & EDGE_EXECUTABLE); 5443 if (!executable) 5444 continue; 5445 5446 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) 5447 { 5448 gimple stmt = gsi_stmt (si); 5449 struct walk_stmt_info wi; 5450 if (!gimple_has_location (stmt)) 5451 continue; 5452 5453 if (is_gimple_call (stmt)) 5454 { 5455 size_t i; 5456 size_t n = gimple_call_num_args (stmt); 5457 for (i = 0; i < n; i++) 5458 { 5459 tree arg = gimple_call_arg (stmt, i); 5460 search_for_addr_array (arg, gimple_location (stmt)); 5461 } 5462 } 5463 else 5464 { 5465 memset (&wi, 0, sizeof (wi)); 5466 wi.info = CONST_CAST (void *, (const void *) 5467 gimple_location_ptr (stmt)); 5468 5469 walk_gimple_op (gsi_stmt (si), 5470 check_array_bounds, 5471 &wi); 5472 } 5473 } 5474 } 5475 } 5476 5477 /* Convert range assertion expressions into the implied copies and 5478 copy propagate away the copies. Doing the trivial copy propagation 5479 here avoids the need to run the full copy propagation pass after 5480 VRP. 5481 5482 FIXME, this will eventually lead to copy propagation removing the 5483 names that had useful range information attached to them. For 5484 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>, 5485 then N_i will have the range [3, +INF]. 5486 5487 However, by converting the assertion into the implied copy 5488 operation N_i = N_j, we will then copy-propagate N_j into the uses 5489 of N_i and lose the range information. We may want to hold on to 5490 ASSERT_EXPRs a little while longer as the ranges could be used in 5491 things like jump threading. 5492 5493 The problem with keeping ASSERT_EXPRs around is that passes after 5494 VRP need to handle them appropriately. 5495 5496 Another approach would be to make the range information a first 5497 class property of the SSA_NAME so that it can be queried from 5498 any pass. This is made somewhat more complex by the need for 5499 multiple ranges to be associated with one SSA_NAME. */ 5500 5501 static void 5502 remove_range_assertions (void) 5503 { 5504 basic_block bb; 5505 gimple_stmt_iterator si; 5506 5507 /* Note that the BSI iterator bump happens at the bottom of the 5508 loop and no bump is necessary if we're removing the statement 5509 referenced by the current BSI. 
*/ 5510 FOR_EACH_BB (bb) 5511 for (si = gsi_start_bb (bb); !gsi_end_p (si);) 5512 { 5513 gimple stmt = gsi_stmt (si); 5514 gimple use_stmt; 5515 5516 if (is_gimple_assign (stmt) 5517 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR) 5518 { 5519 tree rhs = gimple_assign_rhs1 (stmt); 5520 tree var; 5521 tree cond = fold (ASSERT_EXPR_COND (rhs)); 5522 use_operand_p use_p; 5523 imm_use_iterator iter; 5524 5525 gcc_assert (cond != boolean_false_node); 5526 5527 /* Propagate the RHS into every use of the LHS. */ 5528 var = ASSERT_EXPR_VAR (rhs); 5529 FOR_EACH_IMM_USE_STMT (use_stmt, iter, 5530 gimple_assign_lhs (stmt)) 5531 FOR_EACH_IMM_USE_ON_STMT (use_p, iter) 5532 { 5533 SET_USE (use_p, var); 5534 gcc_assert (TREE_CODE (var) == SSA_NAME); 5535 } 5536 5537 /* And finally, remove the copy, it is not needed. */ 5538 gsi_remove (&si, true); 5539 release_defs (stmt); 5540 } 5541 else 5542 gsi_next (&si); 5543 } 5544 } 5545 5546 5547 /* Return true if STMT is interesting for VRP. */ 5548 5549 static bool 5550 stmt_interesting_for_vrp (gimple stmt) 5551 { 5552 if (gimple_code (stmt) == GIMPLE_PHI 5553 && is_gimple_reg (gimple_phi_result (stmt)) 5554 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt))) 5555 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt))))) 5556 return true; 5557 else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) 5558 { 5559 tree lhs = gimple_get_lhs (stmt); 5560 5561 /* In general, assignments with virtual operands are not useful 5562 for deriving ranges, with the obvious exception of calls to 5563 builtin functions. */ 5564 if (lhs && TREE_CODE (lhs) == SSA_NAME 5565 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) 5566 || POINTER_TYPE_P (TREE_TYPE (lhs))) 5567 && ((is_gimple_call (stmt) 5568 && gimple_call_fndecl (stmt) != NULL_TREE 5569 && DECL_BUILT_IN (gimple_call_fndecl (stmt))) 5570 || !gimple_vuse (stmt))) 5571 return true; 5572 } 5573 else if (gimple_code (stmt) == GIMPLE_COND 5574 || gimple_code (stmt) == GIMPLE_SWITCH) 5575 return true; 5576 5577 return false; 5578 } 5579 5580 5581 /* Initialize local data structures for VRP. */ 5582 5583 static void 5584 vrp_initialize (void) 5585 { 5586 basic_block bb; 5587 5588 values_propagated = false; 5589 num_vr_values = num_ssa_names; 5590 vr_value = XCNEWVEC (value_range_t *, num_vr_values); 5591 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names); 5592 5593 FOR_EACH_BB (bb) 5594 { 5595 gimple_stmt_iterator si; 5596 5597 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) 5598 { 5599 gimple phi = gsi_stmt (si); 5600 if (!stmt_interesting_for_vrp (phi)) 5601 { 5602 tree lhs = PHI_RESULT (phi); 5603 set_value_range_to_varying (get_value_range (lhs)); 5604 prop_set_simulate_again (phi, false); 5605 } 5606 else 5607 prop_set_simulate_again (phi, true); 5608 } 5609 5610 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) 5611 { 5612 gimple stmt = gsi_stmt (si); 5613 5614 /* If the statement is a control insn, then we do not 5615 want to avoid simulating the statement once. Failure 5616 to do so means that those edges will never get added. */ 5617 if (stmt_ends_bb_p (stmt)) 5618 prop_set_simulate_again (stmt, true); 5619 else if (!stmt_interesting_for_vrp (stmt)) 5620 { 5621 ssa_op_iter i; 5622 tree def; 5623 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF) 5624 set_value_range_to_varying (get_value_range (def)); 5625 prop_set_simulate_again (stmt, false); 5626 } 5627 else 5628 prop_set_simulate_again (stmt, true); 5629 } 5630 } 5631 } 5632 5633 /* Return the singleton value-range for NAME or NAME. 
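   For example (values illustrative only), if the lattice records the
   range [42, 42] for x_2, this returns the constant 42; if x_2 has a
   non-singleton range such as [0, 10], or NAME is not an SSA name at
   all, NAME itself is returned unchanged.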
*/ 5634 5635 static inline tree 5636 vrp_valueize (tree name) 5637 { 5638 if (TREE_CODE (name) == SSA_NAME) 5639 { 5640 value_range_t *vr = get_value_range (name); 5641 if (vr->type == VR_RANGE 5642 && (vr->min == vr->max 5643 || operand_equal_p (vr->min, vr->max, 0))) 5644 return vr->min; 5645 } 5646 return name; 5647 } 5648 5649 /* Visit assignment STMT. If it produces an interesting range, record 5650 the SSA name in *OUTPUT_P. */ 5651 5652 static enum ssa_prop_result 5653 vrp_visit_assignment_or_call (gimple stmt, tree *output_p) 5654 { 5655 tree def, lhs; 5656 ssa_op_iter iter; 5657 enum gimple_code code = gimple_code (stmt); 5658 lhs = gimple_get_lhs (stmt); 5659 5660 /* We only keep track of ranges in integral and pointer types. */ 5661 if (TREE_CODE (lhs) == SSA_NAME 5662 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs)) 5663 /* It is valid to have NULL MIN/MAX values on a type. See 5664 build_range_type. */ 5665 && TYPE_MIN_VALUE (TREE_TYPE (lhs)) 5666 && TYPE_MAX_VALUE (TREE_TYPE (lhs))) 5667 || POINTER_TYPE_P (TREE_TYPE (lhs)))) 5668 { 5669 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 5670 5671 /* Try folding the statement to a constant first. */ 5672 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize); 5673 if (tem && !is_overflow_infinity (tem)) 5674 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL); 5675 /* Then dispatch to value-range extracting functions. */ 5676 else if (code == GIMPLE_CALL) 5677 extract_range_basic (&new_vr, stmt); 5678 else 5679 extract_range_from_assignment (&new_vr, stmt); 5680 5681 if (update_value_range (lhs, &new_vr)) 5682 { 5683 *output_p = lhs; 5684 5685 if (dump_file && (dump_flags & TDF_DETAILS)) 5686 { 5687 fprintf (dump_file, "Found new range for "); 5688 print_generic_expr (dump_file, lhs, 0); 5689 fprintf (dump_file, ": "); 5690 dump_value_range (dump_file, &new_vr); 5691 fprintf (dump_file, "\n\n"); 5692 } 5693 5694 if (new_vr.type == VR_VARYING) 5695 return SSA_PROP_VARYING; 5696 5697 return SSA_PROP_INTERESTING; 5698 } 5699 5700 return SSA_PROP_NOT_INTERESTING; 5701 } 5702 5703 /* Every other statement produces no useful ranges. */ 5704 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) 5705 set_value_range_to_varying (get_value_range (def)); 5706 5707 return SSA_PROP_VARYING; 5708 } 5709 5710 /* Helper that gets the value range of the SSA_NAME with version I 5711 or a symbolic range containing the SSA_NAME only if the value range 5712 is varying or undefined. */ 5713 5714 static inline value_range_t 5715 get_vr_for_comparison (int i) 5716 { 5717 value_range_t vr = *get_value_range (ssa_name (i)); 5718 5719 /* If name N_i does not have a valid range, use N_i as its own 5720 range. This allows us to compare against names that may 5721 have N_i in their ranges. */ 5722 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED) 5723 { 5724 vr.type = VR_RANGE; 5725 vr.min = ssa_name (i); 5726 vr.max = ssa_name (i); 5727 } 5728 5729 return vr; 5730 } 5731 5732 /* Compare all the value ranges for names equivalent to VAR with VAL 5733 using comparison code COMP. Return the same value returned by 5734 compare_range_with_value, including the setting of 5735 *STRICT_OVERFLOW_P. */ 5736 5737 static tree 5738 compare_name_with_value (enum tree_code comp, tree var, tree val, 5739 bool *strict_overflow_p) 5740 { 5741 bitmap_iterator bi; 5742 unsigned i; 5743 bitmap e; 5744 tree retval, t; 5745 int used_strict_overflow; 5746 bool sop; 5747 value_range_t equiv_vr; 5748 5749 /* Get the set of equivalences for VAR. 
*/ 5750 e = get_value_range (var)->equiv; 5751 5752 /* Start at -1. Set it to 0 if we do a comparison without relying 5753 on overflow, or 1 if all comparisons rely on overflow. */ 5754 used_strict_overflow = -1; 5755 5756 /* Compare vars' value range with val. */ 5757 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var)); 5758 sop = false; 5759 retval = compare_range_with_value (comp, &equiv_vr, val, &sop); 5760 if (retval) 5761 used_strict_overflow = sop ? 1 : 0; 5762 5763 /* If the equiv set is empty we have done all work we need to do. */ 5764 if (e == NULL) 5765 { 5766 if (retval 5767 && used_strict_overflow > 0) 5768 *strict_overflow_p = true; 5769 return retval; 5770 } 5771 5772 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi) 5773 { 5774 equiv_vr = get_vr_for_comparison (i); 5775 sop = false; 5776 t = compare_range_with_value (comp, &equiv_vr, val, &sop); 5777 if (t) 5778 { 5779 /* If we get different answers from different members 5780 of the equivalence set this check must be in a dead 5781 code region. Folding it to a trap representation 5782 would be correct here. For now just return don't-know. */ 5783 if (retval != NULL 5784 && t != retval) 5785 { 5786 retval = NULL_TREE; 5787 break; 5788 } 5789 retval = t; 5790 5791 if (!sop) 5792 used_strict_overflow = 0; 5793 else if (used_strict_overflow < 0) 5794 used_strict_overflow = 1; 5795 } 5796 } 5797 5798 if (retval 5799 && used_strict_overflow > 0) 5800 *strict_overflow_p = true; 5801 5802 return retval; 5803 } 5804 5805 5806 /* Given a comparison code COMP and names N1 and N2, compare all the 5807 ranges equivalent to N1 against all the ranges equivalent to N2 5808 to determine the value of N1 COMP N2. Return the same value 5809 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate 5810 whether we relied on an overflow infinity in the comparison. */ 5811 5812 5813 static tree 5814 compare_names (enum tree_code comp, tree n1, tree n2, 5815 bool *strict_overflow_p) 5816 { 5817 tree t, retval; 5818 bitmap e1, e2; 5819 bitmap_iterator bi1, bi2; 5820 unsigned i1, i2; 5821 int used_strict_overflow; 5822 static bitmap_obstack *s_obstack = NULL; 5823 static bitmap s_e1 = NULL, s_e2 = NULL; 5824 5825 /* Compare the ranges of every name equivalent to N1 against the 5826 ranges of every name equivalent to N2. */ 5827 e1 = get_value_range (n1)->equiv; 5828 e2 = get_value_range (n2)->equiv; 5829 5830 /* Use the fake bitmaps if e1 or e2 are not available. */ 5831 if (s_obstack == NULL) 5832 { 5833 s_obstack = XNEW (bitmap_obstack); 5834 bitmap_obstack_initialize (s_obstack); 5835 s_e1 = BITMAP_ALLOC (s_obstack); 5836 s_e2 = BITMAP_ALLOC (s_obstack); 5837 } 5838 if (e1 == NULL) 5839 e1 = s_e1; 5840 if (e2 == NULL) 5841 e2 = s_e2; 5842 5843 /* Add N1 and N2 to their own set of equivalences to avoid 5844 duplicating the body of the loop just to check N1 and N2 5845 ranges. */ 5846 bitmap_set_bit (e1, SSA_NAME_VERSION (n1)); 5847 bitmap_set_bit (e2, SSA_NAME_VERSION (n2)); 5848 5849 /* If the equivalence sets have a common intersection, then the two 5850 names can be compared without checking their ranges. */ 5851 if (bitmap_intersect_p (e1, e2)) 5852 { 5853 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5854 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5855 5856 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR) 5857 ? boolean_true_node 5858 : boolean_false_node; 5859 } 5860 5861 /* Start at -1. Set it to 0 if we do a comparison without relying 5862 on overflow, or 1 if all comparisons rely on overflow. 
*/ 5863 used_strict_overflow = -1; 5864 5865 /* Otherwise, compare all the equivalent ranges. First, add N1 and 5866 N2 to their own set of equivalences to avoid duplicating the body 5867 of the loop just to check N1 and N2 ranges. */ 5868 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1) 5869 { 5870 value_range_t vr1 = get_vr_for_comparison (i1); 5871 5872 t = retval = NULL_TREE; 5873 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2) 5874 { 5875 bool sop = false; 5876 5877 value_range_t vr2 = get_vr_for_comparison (i2); 5878 5879 t = compare_ranges (comp, &vr1, &vr2, &sop); 5880 if (t) 5881 { 5882 /* If we get different answers from different members 5883 of the equivalence set this check must be in a dead 5884 code region. Folding it to a trap representation 5885 would be correct here. For now just return don't-know. */ 5886 if (retval != NULL 5887 && t != retval) 5888 { 5889 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5890 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5891 return NULL_TREE; 5892 } 5893 retval = t; 5894 5895 if (!sop) 5896 used_strict_overflow = 0; 5897 else if (used_strict_overflow < 0) 5898 used_strict_overflow = 1; 5899 } 5900 } 5901 5902 if (retval) 5903 { 5904 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5905 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5906 if (used_strict_overflow > 0) 5907 *strict_overflow_p = true; 5908 return retval; 5909 } 5910 } 5911 5912 /* None of the equivalent ranges are useful in computing this 5913 comparison. */ 5914 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5915 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5916 return NULL_TREE; 5917 } 5918 5919 /* Helper function for vrp_evaluate_conditional_warnv. */ 5920 5921 static tree 5922 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, 5923 tree op0, tree op1, 5924 bool * strict_overflow_p) 5925 { 5926 value_range_t *vr0, *vr1; 5927 5928 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; 5929 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; 5930 5931 if (vr0 && vr1) 5932 return compare_ranges (code, vr0, vr1, strict_overflow_p); 5933 else if (vr0 && vr1 == NULL) 5934 return compare_range_with_value (code, vr0, op1, strict_overflow_p); 5935 else if (vr0 == NULL && vr1) 5936 return (compare_range_with_value 5937 (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); 5938 return NULL; 5939 } 5940 5941 /* Helper function for vrp_evaluate_conditional_warnv. */ 5942 5943 static tree 5944 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, 5945 tree op1, bool use_equiv_p, 5946 bool *strict_overflow_p, bool *only_ranges) 5947 { 5948 tree ret; 5949 if (only_ranges) 5950 *only_ranges = true; 5951 5952 /* We only deal with integral and pointer types. 
*/ 5953 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) 5954 && !POINTER_TYPE_P (TREE_TYPE (op0))) 5955 return NULL_TREE; 5956 5957 if (use_equiv_p) 5958 { 5959 if (only_ranges 5960 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges 5961 (code, op0, op1, strict_overflow_p))) 5962 return ret; 5963 *only_ranges = false; 5964 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME) 5965 return compare_names (code, op0, op1, strict_overflow_p); 5966 else if (TREE_CODE (op0) == SSA_NAME) 5967 return compare_name_with_value (code, op0, op1, strict_overflow_p); 5968 else if (TREE_CODE (op1) == SSA_NAME) 5969 return (compare_name_with_value 5970 (swap_tree_comparison (code), op1, op0, strict_overflow_p)); 5971 } 5972 else 5973 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1, 5974 strict_overflow_p); 5975 return NULL_TREE; 5976 } 5977 5978 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range 5979 information. Return NULL if the conditional can not be evaluated. 5980 The ranges of all the names equivalent with the operands in COND 5981 will be used when trying to compute the value. If the result is 5982 based on undefined signed overflow, issue a warning if 5983 appropriate. */ 5984 5985 static tree 5986 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt) 5987 { 5988 bool sop; 5989 tree ret; 5990 bool only_ranges; 5991 5992 /* Some passes and foldings leak constants with overflow flag set 5993 into the IL. Avoid doing wrong things with these and bail out. */ 5994 if ((TREE_CODE (op0) == INTEGER_CST 5995 && TREE_OVERFLOW (op0)) 5996 || (TREE_CODE (op1) == INTEGER_CST 5997 && TREE_OVERFLOW (op1))) 5998 return NULL_TREE; 5999 6000 sop = false; 6001 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop, 6002 &only_ranges); 6003 6004 if (ret && sop) 6005 { 6006 enum warn_strict_overflow_code wc; 6007 const char* warnmsg; 6008 6009 if (is_gimple_min_invariant (ret)) 6010 { 6011 wc = WARN_STRICT_OVERFLOW_CONDITIONAL; 6012 warnmsg = G_("assuming signed overflow does not occur when " 6013 "simplifying conditional to constant"); 6014 } 6015 else 6016 { 6017 wc = WARN_STRICT_OVERFLOW_COMPARISON; 6018 warnmsg = G_("assuming signed overflow does not occur when " 6019 "simplifying conditional"); 6020 } 6021 6022 if (issue_strict_overflow_warning (wc)) 6023 { 6024 location_t location; 6025 6026 if (!gimple_has_location (stmt)) 6027 location = input_location; 6028 else 6029 location = gimple_location (stmt); 6030 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg); 6031 } 6032 } 6033 6034 if (warn_type_limits 6035 && ret && only_ranges 6036 && TREE_CODE_CLASS (code) == tcc_comparison 6037 && TREE_CODE (op0) == SSA_NAME) 6038 { 6039 /* If the comparison is being folded and the operand on the LHS 6040 is being compared against a constant value that is outside of 6041 the natural range of OP0's type, then the predicate will 6042 always fold regardless of the value of OP0. If -Wtype-limits 6043 was specified, emit a warning. 
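   A typical (illustrative) instance is

     unsigned char c;
     ...
     if (c < 0)     <- always false
     if (c <= 255)  <- always true

   where the outcome is decided purely by the limited range of the
   type of C, not by its actual value.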
*/ 6044 tree type = TREE_TYPE (op0); 6045 value_range_t *vr0 = get_value_range (op0); 6046 6047 if (vr0->type != VR_VARYING 6048 && INTEGRAL_TYPE_P (type) 6049 && vrp_val_is_min (vr0->min) 6050 && vrp_val_is_max (vr0->max) 6051 && is_gimple_min_invariant (op1)) 6052 { 6053 location_t location; 6054 6055 if (!gimple_has_location (stmt)) 6056 location = input_location; 6057 else 6058 location = gimple_location (stmt); 6059 6060 warning_at (location, OPT_Wtype_limits, 6061 integer_zerop (ret) 6062 ? G_("comparison always false " 6063 "due to limited range of data type") 6064 : G_("comparison always true " 6065 "due to limited range of data type")); 6066 } 6067 } 6068 6069 return ret; 6070 } 6071 6072 6073 /* Visit conditional statement STMT. If we can determine which edge 6074 will be taken out of STMT's basic block, record it in 6075 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return 6076 SSA_PROP_VARYING. */ 6077 6078 static enum ssa_prop_result 6079 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p) 6080 { 6081 tree val; 6082 bool sop; 6083 6084 *taken_edge_p = NULL; 6085 6086 if (dump_file && (dump_flags & TDF_DETAILS)) 6087 { 6088 tree use; 6089 ssa_op_iter i; 6090 6091 fprintf (dump_file, "\nVisiting conditional with predicate: "); 6092 print_gimple_stmt (dump_file, stmt, 0, 0); 6093 fprintf (dump_file, "\nWith known ranges\n"); 6094 6095 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE) 6096 { 6097 fprintf (dump_file, "\t"); 6098 print_generic_expr (dump_file, use, 0); 6099 fprintf (dump_file, ": "); 6100 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]); 6101 } 6102 6103 fprintf (dump_file, "\n"); 6104 } 6105 6106 /* Compute the value of the predicate COND by checking the known 6107 ranges of each of its operands. 6108 6109 Note that we cannot evaluate all the equivalent ranges here 6110 because those ranges may not yet be final and with the current 6111 propagation strategy, we cannot determine when the value ranges 6112 of the names in the equivalence set have changed. 6113 6114 For instance, given the following code fragment 6115 6116 i_5 = PHI <8, i_13> 6117 ... 6118 i_14 = ASSERT_EXPR <i_5, i_5 != 0> 6119 if (i_14 == 1) 6120 ... 6121 6122 Assume that on the first visit to i_14, i_5 has the temporary 6123 range [8, 8] because the second argument to the PHI function is 6124 not yet executable. We derive the range ~[0, 0] for i_14 and the 6125 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for 6126 the first time, since i_14 is equivalent to the range [8, 8], we 6127 determine that the predicate is always false. 6128 6129 On the next round of propagation, i_13 is determined to be 6130 VARYING, which causes i_5 to drop down to VARYING. So, another 6131 visit to i_14 is scheduled. In this second visit, we compute the 6132 exact same range and equivalence set for i_14, namely ~[0, 0] and 6133 { i_5 }. But we did not have the previous range for i_5 6134 registered, so vrp_visit_assignment thinks that the range for 6135 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)' 6136 is not visited again, which stops propagation from visiting 6137 statements in the THEN clause of that if(). 6138 6139 To properly fix this we would need to keep the previous range 6140 value for the names in the equivalence set. This way we would've 6141 discovered that from one visit to the other i_5 changed from 6142 range [8, 8] to VR_VARYING. 6143 6144 However, fixing this apparent limitation may not be worth the 6145 additional checking. 
Testing on several code bases (GCC, DLV, 6146 MICO, TRAMP3D and SPEC2000) showed that doing this results in 6147 4 more predicates folded in SPEC. */ 6148 sop = false; 6149 6150 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt), 6151 gimple_cond_lhs (stmt), 6152 gimple_cond_rhs (stmt), 6153 false, &sop, NULL); 6154 if (val) 6155 { 6156 if (!sop) 6157 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val); 6158 else 6159 { 6160 if (dump_file && (dump_flags & TDF_DETAILS)) 6161 fprintf (dump_file, 6162 "\nIgnoring predicate evaluation because " 6163 "it assumes that signed overflow is undefined"); 6164 val = NULL_TREE; 6165 } 6166 } 6167 6168 if (dump_file && (dump_flags & TDF_DETAILS)) 6169 { 6170 fprintf (dump_file, "\nPredicate evaluates to: "); 6171 if (val == NULL_TREE) 6172 fprintf (dump_file, "DON'T KNOW\n"); 6173 else 6174 print_generic_stmt (dump_file, val, 0); 6175 } 6176 6177 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING; 6178 } 6179 6180 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL 6181 that includes the value VAL. The search is restricted to the range 6182 [START_IDX, n - 1] where n is the size of VEC. 6183 6184 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is 6185 returned. 6186 6187 If there is no CASE_LABEL for VAL and there is one that is larger than VAL, 6188 it is placed in IDX and false is returned. 6189 6190 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is 6191 returned. */ 6192 6193 static bool 6194 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx) 6195 { 6196 size_t n = gimple_switch_num_labels (stmt); 6197 size_t low, high; 6198 6199 /* Find case label for minimum of the value range or the next one. 6200 At each iteration we are searching in [low, high - 1]. */ 6201 6202 for (low = start_idx, high = n; high != low; ) 6203 { 6204 tree t; 6205 int cmp; 6206 /* Note that i != high, so we never ask for n. */ 6207 size_t i = (high + low) / 2; 6208 t = gimple_switch_label (stmt, i); 6209 6210 /* Cache the result of comparing CASE_LOW and val. */ 6211 cmp = tree_int_cst_compare (CASE_LOW (t), val); 6212 6213 if (cmp == 0) 6214 { 6215 /* Ranges cannot be empty. */ 6216 *idx = i; 6217 return true; 6218 } 6219 else if (cmp > 0) 6220 high = i; 6221 else 6222 { 6223 low = i + 1; 6224 if (CASE_HIGH (t) != NULL 6225 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) 6226 { 6227 *idx = i; 6228 return true; 6229 } 6230 } 6231 } 6232 6233 *idx = high; 6234 return false; 6235 } 6236 6237 /* Searches the case label vector VEC for the range of CASE_LABELs that is used 6238 for values between MIN and MAX. The first index is placed in MIN_IDX. The 6239 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty 6240 then MAX_IDX < MIN_IDX. 6241 Returns true if the default label is not needed. */ 6242 6243 static bool 6244 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx, 6245 size_t *max_idx) 6246 { 6247 size_t i, j; 6248 bool min_take_default = !find_case_label_index (stmt, 1, min, &i); 6249 bool max_take_default = !find_case_label_index (stmt, i, max, &j); 6250 6251 if (i == j 6252 && min_take_default 6253 && max_take_default) 6254 { 6255 /* Only the default case label reached. 6256 Return an empty range. 
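   For example (label values illustrative), a switch with case labels
   3 and 7 whose index is known to lie in [4, 5] reaches neither
   label: MIN_IDX/MAX_IDX are set to 1/0, an empty range, and false
   is returned because the default label is still required.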
*/ 6257 *min_idx = 1; 6258 *max_idx = 0; 6259 return false; 6260 } 6261 else 6262 { 6263 bool take_default = min_take_default || max_take_default; 6264 tree low, high; 6265 size_t k; 6266 6267 if (max_take_default) 6268 j--; 6269 6270 /* If the case label range is continuous, we do not need 6271 the default case label. Verify that. */ 6272 high = CASE_LOW (gimple_switch_label (stmt, i)); 6273 if (CASE_HIGH (gimple_switch_label (stmt, i))) 6274 high = CASE_HIGH (gimple_switch_label (stmt, i)); 6275 for (k = i + 1; k <= j; ++k) 6276 { 6277 low = CASE_LOW (gimple_switch_label (stmt, k)); 6278 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high))) 6279 { 6280 take_default = true; 6281 break; 6282 } 6283 high = low; 6284 if (CASE_HIGH (gimple_switch_label (stmt, k))) 6285 high = CASE_HIGH (gimple_switch_label (stmt, k)); 6286 } 6287 6288 *min_idx = i; 6289 *max_idx = j; 6290 return !take_default; 6291 } 6292 } 6293 6294 /* Visit switch statement STMT. If we can determine which edge 6295 will be taken out of STMT's basic block, record it in 6296 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return 6297 SSA_PROP_VARYING. */ 6298 6299 static enum ssa_prop_result 6300 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p) 6301 { 6302 tree op, val; 6303 value_range_t *vr; 6304 size_t i = 0, j = 0; 6305 bool take_default; 6306 6307 *taken_edge_p = NULL; 6308 op = gimple_switch_index (stmt); 6309 if (TREE_CODE (op) != SSA_NAME) 6310 return SSA_PROP_VARYING; 6311 6312 vr = get_value_range (op); 6313 if (dump_file && (dump_flags & TDF_DETAILS)) 6314 { 6315 fprintf (dump_file, "\nVisiting switch expression with operand "); 6316 print_generic_expr (dump_file, op, 0); 6317 fprintf (dump_file, " with known range "); 6318 dump_value_range (dump_file, vr); 6319 fprintf (dump_file, "\n"); 6320 } 6321 6322 if (vr->type != VR_RANGE 6323 || symbolic_range_p (vr)) 6324 return SSA_PROP_VARYING; 6325 6326 /* Find the single edge that is taken from the switch expression. */ 6327 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j); 6328 6329 /* Check if the range spans no CASE_LABEL. If so, we only reach the default 6330 label */ 6331 if (j < i) 6332 { 6333 gcc_assert (take_default); 6334 val = gimple_switch_default_label (stmt); 6335 } 6336 else 6337 { 6338 /* Check if labels with index i to j and maybe the default label 6339 are all reaching the same label. */ 6340 6341 val = gimple_switch_label (stmt, i); 6342 if (take_default 6343 && CASE_LABEL (gimple_switch_default_label (stmt)) 6344 != CASE_LABEL (val)) 6345 { 6346 if (dump_file && (dump_flags & TDF_DETAILS)) 6347 fprintf (dump_file, " not a single destination for this " 6348 "range\n"); 6349 return SSA_PROP_VARYING; 6350 } 6351 for (++i; i <= j; ++i) 6352 { 6353 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val)) 6354 { 6355 if (dump_file && (dump_flags & TDF_DETAILS)) 6356 fprintf (dump_file, " not a single destination for this " 6357 "range\n"); 6358 return SSA_PROP_VARYING; 6359 } 6360 } 6361 } 6362 6363 *taken_edge_p = find_edge (gimple_bb (stmt), 6364 label_to_block (CASE_LABEL (val))); 6365 6366 if (dump_file && (dump_flags & TDF_DETAILS)) 6367 { 6368 fprintf (dump_file, " will take edge to "); 6369 print_generic_stmt (dump_file, CASE_LABEL (val), 0); 6370 } 6371 6372 return SSA_PROP_INTERESTING; 6373 } 6374 6375 6376 /* Evaluate statement STMT. If the statement produces a useful range, 6377 return SSA_PROP_INTERESTING and record the SSA name with the 6378 interesting range into *OUTPUT_P. 
6379 6380 If STMT is a conditional branch and we can determine its truth 6381 value, the taken edge is recorded in *TAKEN_EDGE_P. 6382 6383 If STMT produces a varying value, return SSA_PROP_VARYING. */ 6384 6385 static enum ssa_prop_result 6386 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) 6387 { 6388 tree def; 6389 ssa_op_iter iter; 6390 6391 if (dump_file && (dump_flags & TDF_DETAILS)) 6392 { 6393 fprintf (dump_file, "\nVisiting statement:\n"); 6394 print_gimple_stmt (dump_file, stmt, 0, dump_flags); 6395 fprintf (dump_file, "\n"); 6396 } 6397 6398 if (!stmt_interesting_for_vrp (stmt)) 6399 gcc_assert (stmt_ends_bb_p (stmt)); 6400 else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) 6401 { 6402 /* In general, assignments with virtual operands are not useful 6403 for deriving ranges, with the obvious exception of calls to 6404 builtin functions. */ 6405 if ((is_gimple_call (stmt) 6406 && gimple_call_fndecl (stmt) != NULL_TREE 6407 && DECL_BUILT_IN (gimple_call_fndecl (stmt))) 6408 || !gimple_vuse (stmt)) 6409 return vrp_visit_assignment_or_call (stmt, output_p); 6410 } 6411 else if (gimple_code (stmt) == GIMPLE_COND) 6412 return vrp_visit_cond_stmt (stmt, taken_edge_p); 6413 else if (gimple_code (stmt) == GIMPLE_SWITCH) 6414 return vrp_visit_switch_stmt (stmt, taken_edge_p); 6415 6416 /* All other statements produce nothing of interest for VRP, so mark 6417 their outputs varying and prevent further simulation. */ 6418 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) 6419 set_value_range_to_varying (get_value_range (def)); 6420 6421 return SSA_PROP_VARYING; 6422 } 6423 6424 6425 /* Meet operation for value ranges. Given two value ranges VR0 and 6426 VR1, store in VR0 a range that contains both VR0 and VR1. This 6427 may not be the smallest possible such range. */ 6428 6429 static void 6430 vrp_meet (value_range_t *vr0, value_range_t *vr1) 6431 { 6432 if (vr0->type == VR_UNDEFINED) 6433 { 6434 /* Drop equivalences. See PR53465. */ 6435 set_value_range (vr0, vr1->type, vr1->min, vr1->max, NULL); 6436 return; 6437 } 6438 6439 if (vr1->type == VR_UNDEFINED) 6440 { 6441 /* VR0 already has the resulting range, just drop equivalences. 6442 See PR53465. */ 6443 if (vr0->equiv) 6444 bitmap_clear (vr0->equiv); 6445 return; 6446 } 6447 6448 if (vr0->type == VR_VARYING) 6449 { 6450 /* Nothing to do. VR0 already has the resulting range. */ 6451 return; 6452 } 6453 6454 if (vr1->type == VR_VARYING) 6455 { 6456 set_value_range_to_varying (vr0); 6457 return; 6458 } 6459 6460 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE) 6461 { 6462 int cmp; 6463 tree min, max; 6464 6465 /* Compute the convex hull of the ranges. The lower limit of 6466 the new range is the minimum of the two ranges. If they 6467 cannot be compared, then give up. */ 6468 cmp = compare_values (vr0->min, vr1->min); 6469 if (cmp == 0 || cmp == 1) 6470 min = vr1->min; 6471 else if (cmp == -1) 6472 min = vr0->min; 6473 else 6474 goto give_up; 6475 6476 /* Similarly, the upper limit of the new range is the maximum 6477 of the two ranges. If they cannot be compared, then 6478 give up. */ 6479 cmp = compare_values (vr0->max, vr1->max); 6480 if (cmp == 0 || cmp == -1) 6481 max = vr1->max; 6482 else if (cmp == 1) 6483 max = vr0->max; 6484 else 6485 goto give_up; 6486 6487 /* Check for useless ranges. 
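   A hull covering the whole type is useless; e.g. (illustrative)
   meeting [INT_MIN, 0] with [1, INT_MAX] yields [INT_MIN, INT_MAX],
   which says nothing about the value, so we branch to the give_up
   handling below instead of recording it.  (INT_MIN/INT_MAX stand
   for TYPE_MIN_VALUE/TYPE_MAX_VALUE of the range's type.)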
*/ 6488 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) 6489 && ((vrp_val_is_min (min) || is_overflow_infinity (min)) 6490 && (vrp_val_is_max (max) || is_overflow_infinity (max)))) 6491 goto give_up; 6492 6493 /* The resulting set of equivalences is the intersection of 6494 the two sets. */ 6495 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 6496 bitmap_and_into (vr0->equiv, vr1->equiv); 6497 else if (vr0->equiv && !vr1->equiv) 6498 bitmap_clear (vr0->equiv); 6499 6500 set_value_range (vr0, vr0->type, min, max, vr0->equiv); 6501 } 6502 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE) 6503 { 6504 /* Two anti-ranges meet only if their complements intersect. 6505 Only handle the case of identical ranges. */ 6506 if (compare_values (vr0->min, vr1->min) == 0 6507 && compare_values (vr0->max, vr1->max) == 0 6508 && compare_values (vr0->min, vr0->max) == 0) 6509 { 6510 /* The resulting set of equivalences is the intersection of 6511 the two sets. */ 6512 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 6513 bitmap_and_into (vr0->equiv, vr1->equiv); 6514 else if (vr0->equiv && !vr1->equiv) 6515 bitmap_clear (vr0->equiv); 6516 } 6517 else 6518 goto give_up; 6519 } 6520 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE) 6521 { 6522 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4], 6523 only handle the case where the ranges have an empty intersection. 6524 The result of the meet operation is the anti-range. */ 6525 if (!symbolic_range_p (vr0) 6526 && !symbolic_range_p (vr1) 6527 && !value_ranges_intersect_p (vr0, vr1)) 6528 { 6529 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence 6530 set. We need to compute the intersection of the two 6531 equivalence sets. */ 6532 if (vr1->type == VR_ANTI_RANGE) 6533 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv); 6534 6535 /* The resulting set of equivalences is the intersection of 6536 the two sets. */ 6537 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 6538 bitmap_and_into (vr0->equiv, vr1->equiv); 6539 else if (vr0->equiv && !vr1->equiv) 6540 bitmap_clear (vr0->equiv); 6541 } 6542 else 6543 goto give_up; 6544 } 6545 else 6546 gcc_unreachable (); 6547 6548 return; 6549 6550 give_up: 6551 /* Failed to find an efficient meet. Before giving up and setting 6552 the result to VARYING, see if we can at least derive a useful 6553 anti-range. FIXME, all this nonsense about distinguishing 6554 anti-ranges from ranges is necessary because of the odd 6555 semantics of range_includes_zero_p and friends. */ 6556 if (!symbolic_range_p (vr0) 6557 && ((vr0->type == VR_RANGE 6558 && range_includes_zero_p (vr0->min, vr0->max) == 0) 6559 || (vr0->type == VR_ANTI_RANGE 6560 && range_includes_zero_p (vr0->min, vr0->max) == 1)) 6561 && !symbolic_range_p (vr1) 6562 && ((vr1->type == VR_RANGE 6563 && range_includes_zero_p (vr1->min, vr1->max) == 0) 6564 || (vr1->type == VR_ANTI_RANGE 6565 && range_includes_zero_p (vr1->min, vr1->max) == 1))) 6566 { 6567 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min)); 6568 6569 /* Since this meet operation did not result from the meeting of 6570 two equivalent names, VR0 cannot have any equivalences. */ 6571 if (vr0->equiv) 6572 bitmap_clear (vr0->equiv); 6573 } 6574 else 6575 set_value_range_to_varying (vr0); 6576 } 6577 6578 6579 /* Visit all arguments for PHI node PHI that flow through executable 6580 edges. If a valid value range can be derived from all the incoming 6581 value ranges, set a new range for the LHS of PHI. 
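   For instance (values purely illustrative), given

     i_3 = PHI <0(2), i_7(4)>

   where the edge from block 2 contributes the range [0, 0] and i_7
   has the range [1, 10], the meet of the two arguments gives i_3
   the range [0, 10], provided both incoming edges are executable.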
*/ 6582 6583 static enum ssa_prop_result 6584 vrp_visit_phi_node (gimple phi) 6585 { 6586 size_t i; 6587 tree lhs = PHI_RESULT (phi); 6588 value_range_t *lhs_vr = get_value_range (lhs); 6589 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 6590 bool first = true; 6591 int edges, old_edges; 6592 struct loop *l; 6593 6594 if (dump_file && (dump_flags & TDF_DETAILS)) 6595 { 6596 fprintf (dump_file, "\nVisiting PHI node: "); 6597 print_gimple_stmt (dump_file, phi, 0, dump_flags); 6598 } 6599 6600 edges = 0; 6601 for (i = 0; i < gimple_phi_num_args (phi); i++) 6602 { 6603 edge e = gimple_phi_arg_edge (phi, i); 6604 6605 if (dump_file && (dump_flags & TDF_DETAILS)) 6606 { 6607 fprintf (dump_file, 6608 "\n Argument #%d (%d -> %d %sexecutable)\n", 6609 (int) i, e->src->index, e->dest->index, 6610 (e->flags & EDGE_EXECUTABLE) ? "" : "not "); 6611 } 6612 6613 if (e->flags & EDGE_EXECUTABLE) 6614 { 6615 tree arg = PHI_ARG_DEF (phi, i); 6616 value_range_t vr_arg; 6617 6618 ++edges; 6619 6620 if (TREE_CODE (arg) == SSA_NAME) 6621 { 6622 vr_arg = *(get_value_range (arg)); 6623 /* Do not allow equivalences or symbolic ranges to leak in from 6624 backedges. That creates invalid equivalencies. */ 6625 if (e->flags & EDGE_DFS_BACK 6626 && (vr_arg.type == VR_RANGE 6627 || vr_arg.type == VR_ANTI_RANGE)) 6628 { 6629 vr_arg.equiv = NULL; 6630 if (symbolic_range_p (&vr_arg)) 6631 { 6632 vr_arg.type = VR_VARYING; 6633 vr_arg.min = NULL_TREE; 6634 vr_arg.max = NULL_TREE; 6635 } 6636 } 6637 } 6638 else 6639 { 6640 if (is_overflow_infinity (arg)) 6641 { 6642 arg = copy_node (arg); 6643 TREE_OVERFLOW (arg) = 0; 6644 } 6645 6646 vr_arg.type = VR_RANGE; 6647 vr_arg.min = arg; 6648 vr_arg.max = arg; 6649 vr_arg.equiv = NULL; 6650 } 6651 6652 if (dump_file && (dump_flags & TDF_DETAILS)) 6653 { 6654 fprintf (dump_file, "\t"); 6655 print_generic_expr (dump_file, arg, dump_flags); 6656 fprintf (dump_file, "\n\tValue: "); 6657 dump_value_range (dump_file, &vr_arg); 6658 fprintf (dump_file, "\n"); 6659 } 6660 6661 if (first) 6662 copy_value_range (&vr_result, &vr_arg); 6663 else 6664 vrp_meet (&vr_result, &vr_arg); 6665 first = false; 6666 6667 if (vr_result.type == VR_VARYING) 6668 break; 6669 } 6670 } 6671 6672 if (vr_result.type == VR_VARYING) 6673 goto varying; 6674 else if (vr_result.type == VR_UNDEFINED) 6675 goto update_range; 6676 6677 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)]; 6678 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges; 6679 6680 /* To prevent infinite iterations in the algorithm, derive ranges 6681 when the new value is slightly bigger or smaller than the 6682 previous one. We don't do this if we have seen a new executable 6683 edge; this helps us avoid an overflow infinity for conditionals 6684 which are not in a loop. */ 6685 if (edges > 0 6686 && gimple_phi_num_args (phi) > 1 6687 && edges == old_edges) 6688 { 6689 int cmp_min = compare_values (lhs_vr->min, vr_result.min); 6690 int cmp_max = compare_values (lhs_vr->max, vr_result.max); 6691 6692 /* For non VR_RANGE or for pointers fall back to varying if 6693 the range changed. */ 6694 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE 6695 || POINTER_TYPE_P (TREE_TYPE (lhs))) 6696 && (cmp_min != 0 || cmp_max != 0)) 6697 goto varying; 6698 6699 /* If the new minimum is smaller or larger than the previous 6700 one, go all the way to -INF. In the first case, to avoid 6701 iterating millions of times to reach -INF, and in the 6702 other case to avoid infinite bouncing between different 6703 minimums. 
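   The classic case (illustrative) is a loop counter

     i_1 = PHI <0(preheader), i_5(latch)>
     i_5 = i_1 + 1

   where successive propagation rounds would only nudge one bound by
   a single step ([0, 0], [0, 1], [0, 2], ...); rather than iterate
   until the bound reaches the end of its type, it is pushed to -INF
   or +INF (or to an overflow infinity) in one step.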
*/ 6704 if (cmp_min > 0 || cmp_min < 0) 6705 { 6706 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min)) 6707 || !vrp_var_may_overflow (lhs, phi)) 6708 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)); 6709 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min))) 6710 vr_result.min = 6711 negative_overflow_infinity (TREE_TYPE (vr_result.min)); 6712 } 6713 6714 /* Similarly, if the new maximum is smaller or larger than 6715 the previous one, go all the way to +INF. */ 6716 if (cmp_max < 0 || cmp_max > 0) 6717 { 6718 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max)) 6719 || !vrp_var_may_overflow (lhs, phi)) 6720 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)); 6721 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max))) 6722 vr_result.max = 6723 positive_overflow_infinity (TREE_TYPE (vr_result.max)); 6724 } 6725 6726 /* If we dropped either bound to +-INF then if this is a loop 6727 PHI node SCEV may known more about its value-range. */ 6728 if ((cmp_min > 0 || cmp_min < 0 6729 || cmp_max < 0 || cmp_max > 0) 6730 && current_loops 6731 && (l = loop_containing_stmt (phi)) 6732 && l->header == gimple_bb (phi)) 6733 adjust_range_with_scev (&vr_result, l, phi, lhs); 6734 6735 /* If we will end up with a (-INF, +INF) range, set it to 6736 VARYING. Same if the previous max value was invalid for 6737 the type and we end up with vr_result.min > vr_result.max. */ 6738 if ((vrp_val_is_max (vr_result.max) 6739 && vrp_val_is_min (vr_result.min)) 6740 || compare_values (vr_result.min, 6741 vr_result.max) > 0) 6742 goto varying; 6743 } 6744 6745 /* If the new range is different than the previous value, keep 6746 iterating. */ 6747 update_range: 6748 if (update_value_range (lhs, &vr_result)) 6749 { 6750 if (dump_file && (dump_flags & TDF_DETAILS)) 6751 { 6752 fprintf (dump_file, "Found new range for "); 6753 print_generic_expr (dump_file, lhs, 0); 6754 fprintf (dump_file, ": "); 6755 dump_value_range (dump_file, &vr_result); 6756 fprintf (dump_file, "\n\n"); 6757 } 6758 6759 return SSA_PROP_INTERESTING; 6760 } 6761 6762 /* Nothing changed, don't add outgoing edges. */ 6763 return SSA_PROP_NOT_INTERESTING; 6764 6765 /* No match found. Set the LHS to VARYING. */ 6766 varying: 6767 set_value_range_to_varying (lhs_vr); 6768 return SSA_PROP_VARYING; 6769 } 6770 6771 /* Simplify boolean operations if the source is known 6772 to be already a boolean. */ 6773 static bool 6774 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 6775 { 6776 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 6777 tree lhs, op0, op1; 6778 bool need_conversion; 6779 6780 /* We handle only !=/== case here. */ 6781 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR); 6782 6783 op0 = gimple_assign_rhs1 (stmt); 6784 if (!op_with_boolean_value_range_p (op0)) 6785 return false; 6786 6787 op1 = gimple_assign_rhs2 (stmt); 6788 if (!op_with_boolean_value_range_p (op1)) 6789 return false; 6790 6791 /* Reduce number of cases to handle to NE_EXPR. As there is no 6792 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */ 6793 if (rhs_code == EQ_EXPR) 6794 { 6795 if (TREE_CODE (op1) == INTEGER_CST) 6796 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node); 6797 else 6798 return false; 6799 } 6800 6801 lhs = gimple_assign_lhs (stmt); 6802 need_conversion 6803 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0)); 6804 6805 /* Make sure to not sign-extend a 1-bit 1 when converting the result. 
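   In a signed 1-bit type the only nonzero value is -1, so widening
   the 1-bit result would produce -1 rather than the expected 1
   (e.g. a signed 1-bit bit-field converted to int yields 0 or -1,
   never 1); give up on the simplification in that case rather than
   produce a wrong value.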
*/ 6806 if (need_conversion 6807 && !TYPE_UNSIGNED (TREE_TYPE (op0)) 6808 && TYPE_PRECISION (TREE_TYPE (op0)) == 1 6809 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1) 6810 return false; 6811 6812 /* For A != 0 we can substitute A itself. */ 6813 if (integer_zerop (op1)) 6814 gimple_assign_set_rhs_with_ops (gsi, 6815 need_conversion 6816 ? NOP_EXPR : TREE_CODE (op0), 6817 op0, NULL_TREE); 6818 /* For A != B we substitute A ^ B. Either with conversion. */ 6819 else if (need_conversion) 6820 { 6821 gimple newop; 6822 tree tem = create_tmp_reg (TREE_TYPE (op0), NULL); 6823 newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1); 6824 tem = make_ssa_name (tem, newop); 6825 gimple_assign_set_lhs (newop, tem); 6826 gsi_insert_before (gsi, newop, GSI_SAME_STMT); 6827 update_stmt (newop); 6828 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE); 6829 } 6830 /* Or without. */ 6831 else 6832 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1); 6833 update_stmt (gsi_stmt (*gsi)); 6834 6835 return true; 6836 } 6837 6838 /* Simplify a division or modulo operator to a right shift or 6839 bitwise and if the first operand is unsigned or is greater 6840 than zero and the second operand is an exact power of two. */ 6841 6842 static bool 6843 simplify_div_or_mod_using_ranges (gimple stmt) 6844 { 6845 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 6846 tree val = NULL; 6847 tree op0 = gimple_assign_rhs1 (stmt); 6848 tree op1 = gimple_assign_rhs2 (stmt); 6849 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt)); 6850 6851 if (TYPE_UNSIGNED (TREE_TYPE (op0))) 6852 { 6853 val = integer_one_node; 6854 } 6855 else 6856 { 6857 bool sop = false; 6858 6859 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); 6860 6861 if (val 6862 && sop 6863 && integer_onep (val) 6864 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) 6865 { 6866 location_t location; 6867 6868 if (!gimple_has_location (stmt)) 6869 location = input_location; 6870 else 6871 location = gimple_location (stmt); 6872 warning_at (location, OPT_Wstrict_overflow, 6873 "assuming signed overflow does not occur when " 6874 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>"); 6875 } 6876 } 6877 6878 if (val && integer_onep (val)) 6879 { 6880 tree t; 6881 6882 if (rhs_code == TRUNC_DIV_EXPR) 6883 { 6884 t = build_int_cst (integer_type_node, tree_log2 (op1)); 6885 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR); 6886 gimple_assign_set_rhs1 (stmt, op0); 6887 gimple_assign_set_rhs2 (stmt, t); 6888 } 6889 else 6890 { 6891 t = build_int_cst (TREE_TYPE (op1), 1); 6892 t = int_const_binop (MINUS_EXPR, op1, t); 6893 t = fold_convert (TREE_TYPE (op0), t); 6894 6895 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR); 6896 gimple_assign_set_rhs1 (stmt, op0); 6897 gimple_assign_set_rhs2 (stmt, t); 6898 } 6899 6900 update_stmt (stmt); 6901 return true; 6902 } 6903 6904 return false; 6905 } 6906 6907 /* If the operand to an ABS_EXPR is >= 0, then eliminate the 6908 ABS_EXPR. If the operand is <= 0, then simplify the 6909 ABS_EXPR into a NEGATE_EXPR. 
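   For example (ranges illustrative), with x_2 known to be in [3, 17]

     y_4 = ABS_EXPR <x_2>;    becomes    y_4 = x_2;

   and with x_2 in [-17, -3] it becomes y_4 = -x_2 instead.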
*/ 6910 6911 static bool 6912 simplify_abs_using_ranges (gimple stmt) 6913 { 6914 tree val = NULL; 6915 tree op = gimple_assign_rhs1 (stmt); 6916 tree type = TREE_TYPE (op); 6917 value_range_t *vr = get_value_range (op); 6918 6919 if (TYPE_UNSIGNED (type)) 6920 { 6921 val = integer_zero_node; 6922 } 6923 else if (vr) 6924 { 6925 bool sop = false; 6926 6927 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop); 6928 if (!val) 6929 { 6930 sop = false; 6931 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, 6932 &sop); 6933 6934 if (val) 6935 { 6936 if (integer_zerop (val)) 6937 val = integer_one_node; 6938 else if (integer_onep (val)) 6939 val = integer_zero_node; 6940 } 6941 } 6942 6943 if (val 6944 && (integer_onep (val) || integer_zerop (val))) 6945 { 6946 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) 6947 { 6948 location_t location; 6949 6950 if (!gimple_has_location (stmt)) 6951 location = input_location; 6952 else 6953 location = gimple_location (stmt); 6954 warning_at (location, OPT_Wstrict_overflow, 6955 "assuming signed overflow does not occur when " 6956 "simplifying %<abs (X)%> to %<X%> or %<-X%>"); 6957 } 6958 6959 gimple_assign_set_rhs1 (stmt, op); 6960 if (integer_onep (val)) 6961 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR); 6962 else 6963 gimple_assign_set_rhs_code (stmt, SSA_NAME); 6964 update_stmt (stmt); 6965 return true; 6966 } 6967 } 6968 6969 return false; 6970 } 6971 6972 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR. 6973 If all the bits that are being cleared by & are already 6974 known to be zero from VR, or all the bits that are being 6975 set by | are already known to be one from VR, the bit 6976 operation is redundant. */ 6977 6978 static bool 6979 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 6980 { 6981 tree op0 = gimple_assign_rhs1 (stmt); 6982 tree op1 = gimple_assign_rhs2 (stmt); 6983 tree op = NULL_TREE; 6984 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 6985 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 6986 double_int may_be_nonzero0, may_be_nonzero1; 6987 double_int must_be_nonzero0, must_be_nonzero1; 6988 double_int mask; 6989 6990 if (TREE_CODE (op0) == SSA_NAME) 6991 vr0 = *(get_value_range (op0)); 6992 else if (is_gimple_min_invariant (op0)) 6993 set_value_range_to_value (&vr0, op0, NULL); 6994 else 6995 return false; 6996 6997 if (TREE_CODE (op1) == SSA_NAME) 6998 vr1 = *(get_value_range (op1)); 6999 else if (is_gimple_min_invariant (op1)) 7000 set_value_range_to_value (&vr1, op1, NULL); 7001 else 7002 return false; 7003 7004 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0)) 7005 return false; 7006 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1)) 7007 return false; 7008 7009 switch (gimple_assign_rhs_code (stmt)) 7010 { 7011 case BIT_AND_EXPR: 7012 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1); 7013 if (double_int_zero_p (mask)) 7014 { 7015 op = op0; 7016 break; 7017 } 7018 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0); 7019 if (double_int_zero_p (mask)) 7020 { 7021 op = op1; 7022 break; 7023 } 7024 break; 7025 case BIT_IOR_EXPR: 7026 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1); 7027 if (double_int_zero_p (mask)) 7028 { 7029 op = op1; 7030 break; 7031 } 7032 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0); 7033 if (double_int_zero_p (mask)) 7034 { 7035 op = op0; 7036 break; 7037 } 7038 break; 7039 default: 
7040 gcc_unreachable (); 7041 } 7042 7043 if (op == NULL_TREE) 7044 return false; 7045 7046 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL); 7047 update_stmt (gsi_stmt (*gsi)); 7048 return true; 7049 } 7050 7051 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has 7052 a known value range VR. 7053 7054 If there is one and only one value which will satisfy the 7055 conditional, then return that value. Else return NULL. */ 7056 7057 static tree 7058 test_for_singularity (enum tree_code cond_code, tree op0, 7059 tree op1, value_range_t *vr) 7060 { 7061 tree min = NULL; 7062 tree max = NULL; 7063 7064 /* Extract minimum/maximum values which satisfy the 7065 the conditional as it was written. */ 7066 if (cond_code == LE_EXPR || cond_code == LT_EXPR) 7067 { 7068 /* This should not be negative infinity; there is no overflow 7069 here. */ 7070 min = TYPE_MIN_VALUE (TREE_TYPE (op0)); 7071 7072 max = op1; 7073 if (cond_code == LT_EXPR && !is_overflow_infinity (max)) 7074 { 7075 tree one = build_int_cst (TREE_TYPE (op0), 1); 7076 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one); 7077 if (EXPR_P (max)) 7078 TREE_NO_WARNING (max) = 1; 7079 } 7080 } 7081 else if (cond_code == GE_EXPR || cond_code == GT_EXPR) 7082 { 7083 /* This should not be positive infinity; there is no overflow 7084 here. */ 7085 max = TYPE_MAX_VALUE (TREE_TYPE (op0)); 7086 7087 min = op1; 7088 if (cond_code == GT_EXPR && !is_overflow_infinity (min)) 7089 { 7090 tree one = build_int_cst (TREE_TYPE (op0), 1); 7091 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one); 7092 if (EXPR_P (min)) 7093 TREE_NO_WARNING (min) = 1; 7094 } 7095 } 7096 7097 /* Now refine the minimum and maximum values using any 7098 value range information we have for op0. */ 7099 if (min && max) 7100 { 7101 if (compare_values (vr->min, min) == 1) 7102 min = vr->min; 7103 if (compare_values (vr->max, max) == -1) 7104 max = vr->max; 7105 7106 /* If the new min/max values have converged to a single value, 7107 then there is only one value which can satisfy the condition, 7108 return that value. */ 7109 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min)) 7110 return min; 7111 } 7112 return NULL; 7113 } 7114 7115 /* Simplify a conditional using a relational operator to an equality 7116 test if the range information indicates only one value can satisfy 7117 the original conditional. */ 7118 7119 static bool 7120 simplify_cond_using_ranges (gimple stmt) 7121 { 7122 tree op0 = gimple_cond_lhs (stmt); 7123 tree op1 = gimple_cond_rhs (stmt); 7124 enum tree_code cond_code = gimple_cond_code (stmt); 7125 7126 if (cond_code != NE_EXPR 7127 && cond_code != EQ_EXPR 7128 && TREE_CODE (op0) == SSA_NAME 7129 && INTEGRAL_TYPE_P (TREE_TYPE (op0)) 7130 && is_gimple_min_invariant (op1)) 7131 { 7132 value_range_t *vr = get_value_range (op0); 7133 7134 /* If we have range information for OP0, then we might be 7135 able to simplify this conditional. 
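   For instance (illustrative), if x_3 is known to be in [0, 1],
   the conditional

     if (x_3 > 0)

   can only be satisfied by the single value 1 and is rewritten as
   the equality test

     if (x_3 == 1)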
*/ 7136 if (vr->type == VR_RANGE) 7137 { 7138 tree new_tree = test_for_singularity (cond_code, op0, op1, vr); 7139 7140 if (new_tree) 7141 { 7142 if (dump_file) 7143 { 7144 fprintf (dump_file, "Simplified relational "); 7145 print_gimple_stmt (dump_file, stmt, 0, 0); 7146 fprintf (dump_file, " into "); 7147 } 7148 7149 gimple_cond_set_code (stmt, EQ_EXPR); 7150 gimple_cond_set_lhs (stmt, op0); 7151 gimple_cond_set_rhs (stmt, new_tree); 7152 7153 update_stmt (stmt); 7154 7155 if (dump_file) 7156 { 7157 print_gimple_stmt (dump_file, stmt, 0, 0); 7158 fprintf (dump_file, "\n"); 7159 } 7160 7161 return true; 7162 } 7163 7164 /* Try again after inverting the condition. We only deal 7165 with integral types here, so no need to worry about 7166 issues with inverting FP comparisons. */ 7167 cond_code = invert_tree_comparison (cond_code, false); 7168 new_tree = test_for_singularity (cond_code, op0, op1, vr); 7169 7170 if (new_tree) 7171 { 7172 if (dump_file) 7173 { 7174 fprintf (dump_file, "Simplified relational "); 7175 print_gimple_stmt (dump_file, stmt, 0, 0); 7176 fprintf (dump_file, " into "); 7177 } 7178 7179 gimple_cond_set_code (stmt, NE_EXPR); 7180 gimple_cond_set_lhs (stmt, op0); 7181 gimple_cond_set_rhs (stmt, new_tree); 7182 7183 update_stmt (stmt); 7184 7185 if (dump_file) 7186 { 7187 print_gimple_stmt (dump_file, stmt, 0, 0); 7188 fprintf (dump_file, "\n"); 7189 } 7190 7191 return true; 7192 } 7193 } 7194 } 7195 7196 return false; 7197 } 7198 7199 /* Simplify a switch statement using the value range of the switch 7200 argument. */ 7201 7202 static bool 7203 simplify_switch_using_ranges (gimple stmt) 7204 { 7205 tree op = gimple_switch_index (stmt); 7206 value_range_t *vr; 7207 bool take_default; 7208 edge e; 7209 edge_iterator ei; 7210 size_t i = 0, j = 0, n, n2; 7211 tree vec2; 7212 switch_update su; 7213 7214 if (TREE_CODE (op) == SSA_NAME) 7215 { 7216 vr = get_value_range (op); 7217 7218 /* We can only handle integer ranges. */ 7219 if (vr->type != VR_RANGE 7220 || symbolic_range_p (vr)) 7221 return false; 7222 7223 /* Find case label for min/max of the value range. */ 7224 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j); 7225 } 7226 else if (TREE_CODE (op) == INTEGER_CST) 7227 { 7228 take_default = !find_case_label_index (stmt, 1, op, &i); 7229 if (take_default) 7230 { 7231 i = 1; 7232 j = 0; 7233 } 7234 else 7235 { 7236 j = i; 7237 } 7238 } 7239 else 7240 return false; 7241 7242 n = gimple_switch_num_labels (stmt); 7243 7244 /* Bail out if this is just all edges taken. */ 7245 if (i == 1 7246 && j == n - 1 7247 && take_default) 7248 return false; 7249 7250 /* Build a new vector of taken case labels. */ 7251 vec2 = make_tree_vec (j - i + 1 + (int)take_default); 7252 n2 = 0; 7253 7254 /* Add the default edge, if necessary. */ 7255 if (take_default) 7256 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt); 7257 7258 for (; i <= j; ++i, ++n2) 7259 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i); 7260 7261 /* Mark needed edges. */ 7262 for (i = 0; i < n2; ++i) 7263 { 7264 e = find_edge (gimple_bb (stmt), 7265 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i)))); 7266 e->aux = (void *)-1; 7267 } 7268 7269 /* Queue not needed edges for later removal. 
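   (The edges pushed onto TO_REMOVE_EDGES, and the case vector update
   queued in TO_UPDATE_SWITCH_STMTS just below, are not applied here;
   they are recorded so that the CFG and the switch statement can be
   updated after propagation and folding have finished.)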
*/ 7270 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) 7271 { 7272 if (e->aux == (void *)-1) 7273 { 7274 e->aux = NULL; 7275 continue; 7276 } 7277 7278 if (dump_file && (dump_flags & TDF_DETAILS)) 7279 { 7280 fprintf (dump_file, "removing unreachable case label\n"); 7281 } 7282 VEC_safe_push (edge, heap, to_remove_edges, e); 7283 e->flags &= ~EDGE_EXECUTABLE; 7284 } 7285 7286 /* And queue an update for the stmt. */ 7287 su.stmt = stmt; 7288 su.vec = vec2; 7289 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su); 7290 return false; 7291 } 7292 7293 /* Simplify an integral conversion from an SSA name in STMT. */ 7294 7295 static bool 7296 simplify_conversion_using_ranges (gimple stmt) 7297 { 7298 tree innerop, middleop, finaltype; 7299 gimple def_stmt; 7300 value_range_t *innervr; 7301 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p; 7302 unsigned inner_prec, middle_prec, final_prec; 7303 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax; 7304 7305 finaltype = TREE_TYPE (gimple_assign_lhs (stmt)); 7306 if (!INTEGRAL_TYPE_P (finaltype)) 7307 return false; 7308 middleop = gimple_assign_rhs1 (stmt); 7309 def_stmt = SSA_NAME_DEF_STMT (middleop); 7310 if (!is_gimple_assign (def_stmt) 7311 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) 7312 return false; 7313 innerop = gimple_assign_rhs1 (def_stmt); 7314 if (TREE_CODE (innerop) != SSA_NAME) 7315 return false; 7316 7317 /* Get the value-range of the inner operand. */ 7318 innervr = get_value_range (innerop); 7319 if (innervr->type != VR_RANGE 7320 || TREE_CODE (innervr->min) != INTEGER_CST 7321 || TREE_CODE (innervr->max) != INTEGER_CST) 7322 return false; 7323 7324 /* Simulate the conversion chain to check if the result is equal if 7325 the middle conversion is removed. */ 7326 innermin = tree_to_double_int (innervr->min); 7327 innermax = tree_to_double_int (innervr->max); 7328 7329 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop)); 7330 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop)); 7331 final_prec = TYPE_PRECISION (finaltype); 7332 7333 /* If the first conversion is not injective, the second must not 7334 be widening. */ 7335 if (double_int_cmp (double_int_sub (innermax, innermin), 7336 double_int_mask (middle_prec), true) > 0 7337 && middle_prec < final_prec) 7338 return false; 7339 /* We also want a medium value so that we can track the effect that 7340 narrowing conversions with sign change have. */ 7341 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop)); 7342 if (inner_unsigned_p) 7343 innermed = double_int_rshift (double_int_mask (inner_prec), 7344 1, inner_prec, false); 7345 else 7346 innermed = double_int_zero; 7347 if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0 7348 || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0) 7349 innermed = innermin; 7350 7351 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop)); 7352 middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p); 7353 middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p); 7354 middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p); 7355 7356 /* Require that the final conversion applied to both the original 7357 and the intermediate range produces the same result. 
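   As a purely illustrative example, for the conversion chain

     s_2 = (short int) c_1;
     i_3 = (int) s_2;

   where c_1 is a char known to be in [0, 100], extending through
   short int and then to int yields the same values as converting
   c_1 to int directly, so the statement is changed to
   i_3 = (int) c_1; and the intermediate conversion becomes dead.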
/* Simplify an integral conversion from an SSA name in STMT.  */

static bool
simplify_conversion_using_ranges (gimple stmt)
{
  tree innerop, middleop, finaltype;
  gimple def_stmt;
  value_range_t *innervr;
  bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
  unsigned inner_prec, middle_prec, final_prec;
  double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME)
    return false;

  /* Get the value-range of the inner operand.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check whether the result stays
     the same if the middle conversion is removed.  */
  innermin = tree_to_double_int (innervr->min);
  innermax = tree_to_double_int (innervr->max);

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if (double_int_cmp (double_int_sub (innermax, innermin),
                      double_int_mask (middle_prec), true) > 0
      && middle_prec < final_prec)
    return false;
  /* We also want a value in the middle of the range so that we can
     track the effect that narrowing conversions with a sign change
     have.  */
  inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
  if (inner_unsigned_p)
    innermed = double_int_rshift (double_int_mask (inner_prec),
                                  1, inner_prec, false);
  else
    innermed = double_int_zero;
  if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0
      || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0)
    innermed = innermin;

  middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
  middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p);
  middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p);
  middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_unsigned_p = TYPE_UNSIGNED (finaltype);
  if (!double_int_equal_p (double_int_ext (middlemin,
                                           final_prec, final_unsigned_p),
                           double_int_ext (innermin,
                                           final_prec, final_unsigned_p))
      || !double_int_equal_p (double_int_ext (middlemed,
                                              final_prec, final_unsigned_p),
                              double_int_ext (innermed,
                                              final_prec, final_unsigned_p))
      || !double_int_equal_p (double_int_ext (middlemax,
                                              final_prec, final_unsigned_p),
                              double_int_ext (innermax,
                                              final_prec, final_unsigned_p)))
    return false;

  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}

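/* For illustration only, not part of the pass: assume a conversion chain
   such as (types, names and the range are made up for the example)

     short s = ...;          s_1 has the range [0, 100]
     signed char c = s;      c_2 = (signed char) s_1
     int i = c;              i_3 = (int) c_2

   Every value in [0, 100] survives the narrowing to signed char and the
   widening back to int unchanged, so the middle conversion is redundant
   and i_3 = (int) c_2 is rewritten to i_3 = (int) s_1.  If s_1 instead
   had the range [0, 200], the narrowing would change values above 127
   and the statement would be left alone.  */
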
/* Return whether the value range *VR fits in an integer type specified
   by PRECISION and UNSIGNED_P.  */

static bool
range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
{
  tree src_type;
  unsigned src_precision;
  double_int tem;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is always fine, and so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  if (src_precision < precision
      || (src_precision == precision
          && TYPE_UNSIGNED (src_type) == unsigned_p))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For precision-preserving sign-changes the MSB of the double-int
     has to be clear.  */
  if (src_precision == precision
      && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  */
  tem = double_int_ext (tree_to_double_int (vr->min), precision, unsigned_p);
  if (!double_int_equal_p (tree_to_double_int (vr->min), tem))
    return false;
  tem = double_int_ext (tree_to_double_int (vr->max), precision, unsigned_p);
  if (!double_int_equal_p (tree_to_double_int (vr->max), tem))
    return false;

  return true;
}

/* Simplify a conversion from integral SSA name to float in STMT.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range_t *vr = get_value_range (rhs1);
  enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  enum machine_mode mode;
  tree tem;
  gimple conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
          != CODE_FOR_nothing)
      && range_fits_type_p (vr, GET_MODE_PRECISION
                            (TYPE_MODE (TREE_TYPE (rhs1))), 0))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode, do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
                        TYPE_UNSIGNED (TREE_TYPE (rhs1))))
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
        {
          /* If we cannot do a signed conversion to float from mode,
             or if the value-range does not fit in the signed type,
             try with a wider mode.  */
          if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
              && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
            break;

          mode = GET_MODE_WIDER_MODE (mode);
          /* But do not widen the input.  Instead leave that to the
             optabs expansion code.  */
          if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
            return false;
        }
      while (mode != VOIDmode);
      if (mode == VOIDmode)
        return false;
    }

  /* It works; insert a truncation or sign-change before the
     float conversion.  */
  tem = create_tmp_var (build_nonstandard_integer_type
                          (GET_MODE_PRECISION (mode), 0), NULL);
  conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
  tem = make_ssa_name (tem, conv);
  gimple_assign_set_lhs (conv, tem);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}

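/* For illustration only, not part of the pass: on a target that provides
   a signed int-to-float instruction but no unsigned one, a conversion
   like (names and the range are made up for the example)

     unsigned int u = ...;   u_1 has the range [0, 1000]
     double d = u;           d_2 = (double) u_1

   would otherwise need the more expensive unsigned-to-float expansion.
   Since every value in [0, 1000] also fits in the signed type, the pass
   inserts a sign-change first,

     tmp_3 = (int) u_1;
     d_2 = (double) tmp_3;

   so that the cheap signed conversion can be used.  */
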
/* Simplify STMT using ranges if possible.  */

static bool
simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  if (is_gimple_assign (stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
      tree rhs1 = gimple_assign_rhs1 (stmt);

      switch (rhs_code)
        {
        case EQ_EXPR:
        case NE_EXPR:
          /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
             if the RHS is zero or one, and the LHS is known to be a
             boolean value.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_truth_ops_using_ranges (gsi, stmt);
          break;

        /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
           and BIT_AND_EXPR respectively if the first operand is greater
           than zero and the second operand is an exact power of two.  */
        case TRUNC_DIV_EXPR:
        case TRUNC_MOD_EXPR:
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
              && integer_pow2p (gimple_assign_rhs2 (stmt)))
            return simplify_div_or_mod_using_ranges (stmt);
          break;

        /* Transform ABS (X) into X or -X as appropriate.  */
        case ABS_EXPR:
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_abs_using_ranges (stmt);
          break;

        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
          /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
             if all the bits being cleared are already cleared or
             all the bits being set are already set.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_bit_ops_using_ranges (gsi, stmt);
          break;

        CASE_CONVERT:
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_conversion_using_ranges (stmt);
          break;

        case FLOAT_EXPR:
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
            return simplify_float_conversion_using_ranges (gsi, stmt);
          break;

        default:
          break;
        }
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges (stmt);
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (stmt);

  return false;
}

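/* For illustration only, not part of the pass: a sketch of the
   division/modulus rewrite dispatched above, assuming VRP has derived
   that x_1 is non-negative (the names and range are made up for the
   example).  With x_1 = [0, 1000]:

     y_2 = x_1 / 16;         becomes   y_2 = x_1 >> 4;
     z_3 = x_1 % 16;         becomes   z_3 = x_1 & 15;

   The rewrite is not valid when x_1 may be negative, because TRUNC_DIV
   rounds towards zero while an arithmetic right shift rounds towards
   negative infinity.  */
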
/* If the statement pointed to by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
                                      gimple_assign_rhs1 (stmt),
                                      gimple_assign_rhs2 (stmt),
                                      stmt);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    val = vrp_evaluate_conditional (gimple_cond_code (stmt),
                                    gimple_cond_lhs (stmt),
                                    gimple_cond_rhs (stmt),
                                    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
        val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
        {
          fprintf (dump_file, "Folding predicate ");
          print_gimple_expr (dump_file, stmt, 0, 0);
          fprintf (dump_file, " to ");
          print_generic_expr (dump_file, val, 0);
          fprintf (dump_file, "\n");
        }

      if (is_gimple_assign (stmt))
        gimple_assign_set_rhs_from_tree (si, val);
      else
        {
          gcc_assert (gimple_code (stmt) == GIMPLE_COND);
          if (integer_zerop (val))
            gimple_cond_make_false (stmt);
          else if (integer_onep (val))
            gimple_cond_make_true (stmt);
          else
            gcc_unreachable ();
        }

      return true;
    }

  return false;
}

/* Callback for substitute_and_fold folding the stmt at *SI.  */

static bool
vrp_fold_stmt (gimple_stmt_iterator *si)
{
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}

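/* For illustration only, not part of the pass: a sketch of what
   fold_predicate_in does, assuming VRP has derived the range shown
   (names and range are made up for the example).  With x_1 = [0, 9]:

     b_2 = x_1 < 10;         becomes   b_2 = 1;
     if (x_1 > 20)           becomes   if (0)
       ...

   The now-dead branch is removed by the CFG cleanup scheduled in the
   pass's todo_flags_finish below.  */
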
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored.  */
static VEC(tree,heap) *stack;

/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  */

static tree
simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
{
  /* We only use VRP information to simplify conditionals.  This is
     overly conservative, but it's unclear if doing more would be
     worth the compile time cost.  */
  if (gimple_code (stmt) != GIMPLE_COND)
    return NULL;

  return vrp_evaluate_conditional (gimple_cond_code (stmt),
                                   gimple_cond_lhs (stmt),
                                   gimple_cond_rhs (stmt), within_stmt);
}

/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities: when such
   a block is reached from a specific predecessor, we may be able
   to determine which of the outgoing edges will be traversed.
   When this optimization applies, we are able to avoid
   conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gimple dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_DFS_BACK will do.  */
  FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
    e->flags |= EDGE_DFS_BACK;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  stack = VEC_alloc (tree, heap, 20);

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
                             integer_zero_node, integer_zero_node,
                             NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB (bb)
    {
      gimple last;

      /* If the generic jump threading code does not find this block
         interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
        continue;

      /* We mostly care about blocks ending in a COND_EXPR, although
         blocks ending in a SWITCH_EXPR are handled as well.  */
      last = gsi_stmt (gsi_last_bb (bb));

      /* We're basically looking for a switch or any kind of conditional with
         integral or pointer type arguments.  Note the type of the second
         argument will be the same as the first argument, so no need to
         check it explicitly.  */
      if (gimple_code (last) == GIMPLE_SWITCH
          || (gimple_code (last) == GIMPLE_COND
              && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
              && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
                  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
              && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
        {
          edge_iterator ei;

          /* We've got a block with multiple predecessors and multiple
             successors which also ends in a suitable conditional or
             switch statement.  For each predecessor, see if we can thread
             it to a specific successor.  */
          FOR_EACH_EDGE (e, ei, bb->preds)
            {
              /* Do not thread across back edges or abnormal edges
                 in the CFG.  */
              if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
                continue;

              thread_across_edge (dummy, e, true, &stack,
                                  simplify_stmt_for_jump_threading);
            }
        }
    }

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}

/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */

static void
finalize_jump_threads (void)
{
  thread_through_all_blocks (false);
  VEC_free (tree, heap, stack);
}

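/* For illustration only, not part of the pass: a sketch of the kind of
   opportunity handed to the generic jump threader (block numbers and
   SSA names are made up for the example).

     bb2:  if (a_1 > 10) goto bb4; else goto bb3;
     ...
     bb4:  # reached from bb2 and from other blocks
           if (a_1 > 5) goto bb6; else goto bb5;

   When bb4 is considered with bb2's true edge as the incoming edge, the
   range known for a_1 on that path (via the ASSERT_EXPRs inserted
   earlier) lets simplify_stmt_for_jump_threading evaluate the second
   condition to true, so the incoming edge can be threaded directly to
   bb6 and the test in bb4 is bypassed on that path.  */
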
/* Traverse all the blocks folding conditionals with known ranges.  */

static void
vrp_finalize (void)
{
  size_t i;

  values_propagated = true;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  substitute_and_fold (op_with_constant_singleton_value_range,
                       vrp_fold_stmt, false);

  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Free allocated memory.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
        BITMAP_FREE (vr_value[i]->equiv);
        free (vr_value[i]);
      }

  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}


/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

        1  x_9 = p_3->a;
        2  p_4 = ASSERT_EXPR <p_3, p_3 != 0>
        3  if (p_4 == q_2)
        4    p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
        5  endif
        6  if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO: the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  /* Estimate the number of iterations, but do not use undefined behavior
     for this.  We cannot do this lazily, as other functions may compute
     this using undefined behavior.  */
  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations (false);

  to_remove_edges = VEC_alloc (edge, heap, 10);
  to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  free_numbers_of_iterations_estimates ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
        gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one,
         make sure to make it a real default label again.  This ensures
         optimal expansion.  */
      label = gimple_switch_default_label (su->stmt);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  if (VEC_length (edge, to_remove_edges) > 0)
    free_dominance_info (CDI_DOMINATORS);

  VEC_free (edge, heap, to_remove_edges);
  VEC_free (switch_update, heap, to_update_switch_stmts);
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

static bool
gate_vrp (void)
{
  return flag_tree_vrp != 0;
}

struct gimple_opt_pass pass_vrp =
{
 {
  GIMPLE_PASS,
  "vrp",				/* name */
  gate_vrp,				/* gate */
  execute_vrp,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_VRP,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow
    | TODO_ggc_collect			/* todo_flags_finish */
 }
};
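
/* For illustration only, not part of the pass: a small sketch of the
   end-to-end effect of -ftree-vrp (enabled at -O2).  In

     void
     f (int *p)
     {
       if (p != 0)
         {
           *p = 1;
           if (p == 0)      p is known non-null here, so this test
             abort ();      folds to false and the call is removed
         }
     }

   the inner test is folded during substitute_and_fold and the dead
   block is cleaned up by TODO_cleanup_cfg.  The ranges the pass derives
   can be inspected with -fdump-tree-vrp.  */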