1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "diagnostic-core.h"
37 #include "intl.h"
38 #include "cfgloop.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-chrec.h"
42 #include "gimple-fold.h"
43 #include "expr.h"
44 #include "optabs.h"
45
46
47 /* Type of value ranges. See value_range_d for a description of these
48 types. */
49 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
50
51 /* Range of values that can be associated with an SSA_NAME after VRP
52 has executed. */
53 struct value_range_d
54 {
55 /* Lattice value represented by this range. */
56 enum value_range_type type;
57
58 /* Minimum and maximum values represented by this range. These
59 values should be interpreted as follows:
60
61 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
62 be NULL.
63
64 - If TYPE == VR_RANGE then MIN holds the minimum value and
65 MAX holds the maximum value of the range [MIN, MAX].
66
67 - If TYPE == VR_ANTI_RANGE the variable is known to NOT
68 take any values in the range [MIN, MAX]. */
69 tree min;
70 tree max;
71
72 /* Set of SSA names whose value ranges are equivalent to this one.
73 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
74 bitmap equiv;
75 };
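/* For illustration: for a signed int SSA name i_1, { VR_RANGE, 1, 10 }
   means i_1 is in [1, 10]; { VR_ANTI_RANGE, 0, 0 } means i_1 != 0 (the
   ~[0, 0] form used below for non-NULL pointers); VR_VARYING means any
   value of the type is possible and VR_UNDEFINED means no value has
   been computed yet.  */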
76
77 typedef struct value_range_d value_range_t;
78
79 /* Set of SSA names found live during the RPO traversal of the function
80 for still active basic-blocks. */
81 static sbitmap *live;
82
83 /* Return true if the SSA name NAME is live on the edge E. */
84
85 static bool
86 live_on_edge (edge e, tree name)
87 {
88 return (live[e->dest->index]
89 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
90 }
91
92 /* Local functions. */
93 static int compare_values (tree val1, tree val2);
94 static int compare_values_warnv (tree val1, tree val2, bool *);
95 static void vrp_meet (value_range_t *, value_range_t *);
96 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
97 tree, tree, bool, bool *,
98 bool *);
99
100 /* Location information for ASSERT_EXPRs. Each instance of this
101 structure describes an ASSERT_EXPR for an SSA name. Since a single
102 SSA name may have more than one assertion associated with it, these
103 locations are kept in a linked list attached to the corresponding
104 SSA name. */
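/* For instance, for the test 'if (i_5 > 10)' VRP arranges for the true
   arm to see i_6 = ASSERT_EXPR <i_5, i_5 > 10>; each such assertion is
   described by one assert_locus_d node on the list for i_5.  */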
105 struct assert_locus_d
106 {
107 /* Basic block where the assertion would be inserted. */
108 basic_block bb;
109
110 /* Some assertions need to be inserted on an edge (e.g., assertions
111 generated by COND_EXPRs). In those cases, BB will be NULL. */
112 edge e;
113
114 /* Pointer to the statement that generated this assertion. */
115 gimple_stmt_iterator si;
116
117 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
118 enum tree_code comp_code;
119
120 /* Value being compared against. */
121 tree val;
122
123 /* Expression to compare. */
124 tree expr;
125
126 /* Next node in the linked list. */
127 struct assert_locus_d *next;
128 };
129
130 typedef struct assert_locus_d *assert_locus_t;
131
132 /* If bit I is present, it means that SSA name N_i has a list of
133 assertions that should be inserted in the IL. */
134 static bitmap need_assert_for;
135
136 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
137 holds a list of ASSERT_LOCUS_T nodes that describe where
138 ASSERT_EXPRs for SSA name N_I should be inserted. */
139 static assert_locus_t *asserts_for;
140
141 /* Value range array. After propagation, VR_VALUE[I] holds the range
142 of values that SSA name N_I may take. */
143 static unsigned num_vr_values;
144 static value_range_t **vr_value;
145 static bool values_propagated;
146
147 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
148 number of executable edges we saw the last time we visited the
149 node. */
150 static int *vr_phi_edge_counts;
151
152 typedef struct {
153 gimple stmt;
154 tree vec;
155 } switch_update;
156
157 static VEC (edge, heap) *to_remove_edges;
158 DEF_VEC_O(switch_update);
159 DEF_VEC_ALLOC_O(switch_update, heap);
160 static VEC (switch_update, heap) *to_update_switch_stmts;
161
162
163 /* Return the maximum value for TYPE. */
164
165 static inline tree
166 vrp_val_max (const_tree type)
167 {
168 if (!INTEGRAL_TYPE_P (type))
169 return NULL_TREE;
170
171 return TYPE_MAX_VALUE (type);
172 }
173
174 /* Return the minimum value for TYPE. */
175
176 static inline tree
177 vrp_val_min (const_tree type)
178 {
179 if (!INTEGRAL_TYPE_P (type))
180 return NULL_TREE;
181
182 return TYPE_MIN_VALUE (type);
183 }
184
185 /* Return whether VAL is equal to the maximum value of its type. This
186 will be true for a positive overflow infinity. We can't do a
187 simple equality comparison with TYPE_MAX_VALUE because C typedefs
188 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
189 to the integer constant with the same value in the type. */
190
191 static inline bool
192 vrp_val_is_max (const_tree val)
193 {
194 tree type_max = vrp_val_max (TREE_TYPE (val));
195 return (val == type_max
196 || (type_max != NULL_TREE
197 && operand_equal_p (val, type_max, 0)));
198 }
199
200 /* Return whether VAL is equal to the minimum value of its type. This
201 will be true for a negative overflow infinity. */
202
203 static inline bool
204 vrp_val_is_min (const_tree val)
205 {
206 tree type_min = vrp_val_min (TREE_TYPE (val));
207 return (val == type_min
208 || (type_min != NULL_TREE
209 && operand_equal_p (val, type_min, 0)));
210 }
211
212
213 /* Return whether TYPE should use an overflow infinity distinct from
214 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
215 represent a signed overflow during VRP computations. An infinity
216 is distinct from a half-range, which will go from some number to
217 TYPE_{MIN,MAX}_VALUE. */
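/* For example, if a signed int i_1 is known to be in [0, INT_MAX] and
   signed overflow is treated as undefined, the range computed for
   i_1 + 1 is [1, +INF] where +INF is a positive overflow infinity
   rather than INT_MAX; the bound records that the addition may have
   overflowed instead of silently pretending the result fits.  */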
218
219 static inline bool
220 needs_overflow_infinity (const_tree type)
221 {
222 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
223 }
224
225 /* Return whether TYPE can support our overflow infinity
226 representation: we use the TREE_OVERFLOW flag, which only exists
227 for constants. If TYPE doesn't support this, we don't optimize
228 cases which would require signed overflow--we drop them to
229 VARYING. */
230
231 static inline bool
232 supports_overflow_infinity (const_tree type)
233 {
234 tree min = vrp_val_min (type), max = vrp_val_max (type);
235 #ifdef ENABLE_CHECKING
236 gcc_assert (needs_overflow_infinity (type));
237 #endif
238 return (min != NULL_TREE
239 && CONSTANT_CLASS_P (min)
240 && max != NULL_TREE
241 && CONSTANT_CLASS_P (max));
242 }
243
244 /* VAL is the maximum or minimum value of a type. Return a
245 corresponding overflow infinity. */
246
247 static inline tree
248 make_overflow_infinity (tree val)
249 {
250 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
251 val = copy_node (val);
252 TREE_OVERFLOW (val) = 1;
253 return val;
254 }
255
256 /* Return a negative overflow infinity for TYPE. */
257
258 static inline tree
259 negative_overflow_infinity (tree type)
260 {
261 gcc_checking_assert (supports_overflow_infinity (type));
262 return make_overflow_infinity (vrp_val_min (type));
263 }
264
265 /* Return a positive overflow infinity for TYPE. */
266
267 static inline tree
268 positive_overflow_infinity (tree type)
269 {
270 gcc_checking_assert (supports_overflow_infinity (type));
271 return make_overflow_infinity (vrp_val_max (type));
272 }
273
274 /* Return whether VAL is a negative overflow infinity. */
275
276 static inline bool
277 is_negative_overflow_infinity (const_tree val)
278 {
279 return (needs_overflow_infinity (TREE_TYPE (val))
280 && CONSTANT_CLASS_P (val)
281 && TREE_OVERFLOW (val)
282 && vrp_val_is_min (val));
283 }
284
285 /* Return whether VAL is a positive overflow infinity. */
286
287 static inline bool
288 is_positive_overflow_infinity (const_tree val)
289 {
290 return (needs_overflow_infinity (TREE_TYPE (val))
291 && CONSTANT_CLASS_P (val)
292 && TREE_OVERFLOW (val)
293 && vrp_val_is_max (val));
294 }
295
296 /* Return whether VAL is a positive or negative overflow infinity. */
297
298 static inline bool
299 is_overflow_infinity (const_tree val)
300 {
301 return (needs_overflow_infinity (TREE_TYPE (val))
302 && CONSTANT_CLASS_P (val)
303 && TREE_OVERFLOW (val)
304 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
305 }
306
307 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
308
309 static inline bool
310 stmt_overflow_infinity (gimple stmt)
311 {
312 if (is_gimple_assign (stmt)
313 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
314 GIMPLE_SINGLE_RHS)
315 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
316 return false;
317 }
318
319 /* If VAL is an overflow infinity, return the equivalent value with
320 TREE_OVERFLOW clear; otherwise return VAL unchanged. This can be
321 used to avoid confusing a regular value with an overflow value. */
322
323 static inline tree
324 avoid_overflow_infinity (tree val)
325 {
326 if (!is_overflow_infinity (val))
327 return val;
328
329 if (vrp_val_is_max (val))
330 return vrp_val_max (TREE_TYPE (val));
331 else
332 {
333 gcc_checking_assert (vrp_val_is_min (val));
334 return vrp_val_min (TREE_TYPE (val));
335 }
336 }
337
338
339 /* Return true if ARG is marked with the nonnull attribute in the
340 current function signature. */
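/* For example, given a declaration such as
     void f (char *p, char *q) __attribute__ ((nonnull (1)));
   nonnull_arg_p returns true for P (argument 1 is listed in the
   attribute) and false for Q; with a bare __attribute__ ((nonnull))
   it returns true for every pointer argument.  */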
341
342 static bool
343 nonnull_arg_p (const_tree arg)
344 {
345 tree t, attrs, fntype;
346 unsigned HOST_WIDE_INT arg_num;
347
348 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
349
350 /* The static chain decl is always non-null. */
351 if (arg == cfun->static_chain_decl)
352 return true;
353
354 fntype = TREE_TYPE (current_function_decl);
355 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
356
357 /* If "nonnull" wasn't specified, we know nothing about the argument. */
358 if (attrs == NULL_TREE)
359 return false;
360
361 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
362 if (TREE_VALUE (attrs) == NULL_TREE)
363 return true;
364
365 /* Get the position number for ARG in the function signature. */
366 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
367 t;
368 t = DECL_CHAIN (t), arg_num++)
369 {
370 if (t == arg)
371 break;
372 }
373
374 gcc_assert (t == arg);
375
376 /* Now see if ARG_NUM is mentioned in the nonnull list. */
377 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
378 {
379 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
380 return true;
381 }
382
383 return false;
384 }
385
386
387 /* Set value range VR to VR_VARYING. */
388
389 static inline void
390 set_value_range_to_varying (value_range_t *vr)
391 {
392 vr->type = VR_VARYING;
393 vr->min = vr->max = NULL_TREE;
394 if (vr->equiv)
395 bitmap_clear (vr->equiv);
396 }
397
398
399 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
400
401 static void
402 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
403 tree max, bitmap equiv)
404 {
405 #if defined ENABLE_CHECKING
406 /* Check the validity of the range. */
407 if (t == VR_RANGE || t == VR_ANTI_RANGE)
408 {
409 int cmp;
410
411 gcc_assert (min && max);
412
413 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
414 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
415
416 cmp = compare_values (min, max);
417 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
418
419 if (needs_overflow_infinity (TREE_TYPE (min)))
420 gcc_assert (!is_overflow_infinity (min)
421 || !is_overflow_infinity (max));
422 }
423
424 if (t == VR_UNDEFINED || t == VR_VARYING)
425 gcc_assert (min == NULL_TREE && max == NULL_TREE);
426
427 if (t == VR_UNDEFINED || t == VR_VARYING)
428 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
429 #endif
430
431 vr->type = t;
432 vr->min = min;
433 vr->max = max;
434
435 /* Since updating the equivalence set involves deep copying the
436 bitmaps, only do it if absolutely necessary. */
437 if (vr->equiv == NULL
438 && equiv != NULL)
439 vr->equiv = BITMAP_ALLOC (NULL);
440
441 if (equiv != vr->equiv)
442 {
443 if (equiv && !bitmap_empty_p (equiv))
444 bitmap_copy (vr->equiv, equiv);
445 else
446 bitmap_clear (vr->equiv);
447 }
448 }
449
450
451 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
452 This means adjusting T, MIN and MAX representing the case of a
453 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
454 as the anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
455 In corner cases where MAX+1 or MIN-1 wraps this will fall back
456 to varying.
457 This routine exists to ease canonicalization in the case where we
458 extract ranges from var + CST op limit. */
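/* A worked example for an unsigned char type: the wrapping "range"
   [250, 5], i.e. the set [250, 255] U [0, 5], is canonicalized to the
   anti-range ~[6, 249]; conversely the anti-range ~[0, 254] becomes
   the plain range [255, 255].  */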
459
460 static void
461 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
462 tree min, tree max, bitmap equiv)
463 {
464 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
465 if ((t != VR_RANGE
466 && t != VR_ANTI_RANGE)
467 || TREE_CODE (min) != INTEGER_CST
468 || TREE_CODE (max) != INTEGER_CST)
469 {
470 set_value_range (vr, t, min, max, equiv);
471 return;
472 }
473
474 /* If MIN and MAX are in the wrong order the range wraps; represent it
475 by swapping to [MAX + 1, MIN - 1] and inverting the VR type. */
476 if (tree_int_cst_lt (max, min))
477 {
478 tree one = build_int_cst (TREE_TYPE (min), 1);
479 tree tmp = int_const_binop (PLUS_EXPR, max, one);
480 max = int_const_binop (MINUS_EXPR, min, one);
481 min = tmp;
482
483 /* There's one corner case, if we had [C+1, C] before we now have
484 that again. But this represents an empty value range, so drop
485 to varying in this case. */
486 if (tree_int_cst_lt (max, min))
487 {
488 set_value_range_to_varying (vr);
489 return;
490 }
491
492 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
493 }
494
495 /* Anti-ranges that can be represented as ranges should be so. */
496 if (t == VR_ANTI_RANGE)
497 {
498 bool is_min = vrp_val_is_min (min);
499 bool is_max = vrp_val_is_max (max);
500
501 if (is_min && is_max)
502 {
503 /* We cannot deal with empty ranges, drop to varying. */
504 set_value_range_to_varying (vr);
505 return;
506 }
507 else if (is_min
508 /* As a special exception preserve non-null ranges. */
509 && !(TYPE_UNSIGNED (TREE_TYPE (min))
510 && integer_zerop (max)))
511 {
512 tree one = build_int_cst (TREE_TYPE (max), 1);
513 min = int_const_binop (PLUS_EXPR, max, one);
514 max = vrp_val_max (TREE_TYPE (max));
515 t = VR_RANGE;
516 }
517 else if (is_max)
518 {
519 tree one = build_int_cst (TREE_TYPE (min), 1);
520 max = int_const_binop (MINUS_EXPR, min, one);
521 min = vrp_val_min (TREE_TYPE (min));
522 t = VR_RANGE;
523 }
524 }
525
526 set_value_range (vr, t, min, max, equiv);
527 }
528
529 /* Copy value range FROM into value range TO. */
530
531 static inline void
532 copy_value_range (value_range_t *to, value_range_t *from)
533 {
534 set_value_range (to, from->type, from->min, from->max, from->equiv);
535 }
536
537 /* Set value range VR to a single value. This function is only called
538 with values we get from statements, and exists to clear the
539 TREE_OVERFLOW flag so that we don't think we have an overflow
540 infinity when we shouldn't. */
541
542 static inline void
543 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
544 {
545 gcc_assert (is_gimple_min_invariant (val));
546 val = avoid_overflow_infinity (val);
547 set_value_range (vr, VR_RANGE, val, val, equiv);
548 }
549
550 /* Set value range VR to a non-negative range of type TYPE.
551 OVERFLOW_INFINITY indicates whether to use an overflow infinity
552 rather than TYPE_MAX_VALUE; this should be true if we determine
553 that the range is nonnegative based on the assumption that signed
554 overflow does not occur. */
555
556 static inline void
557 set_value_range_to_nonnegative (value_range_t *vr, tree type,
558 bool overflow_infinity)
559 {
560 tree zero;
561
562 if (overflow_infinity && !supports_overflow_infinity (type))
563 {
564 set_value_range_to_varying (vr);
565 return;
566 }
567
568 zero = build_int_cst (type, 0);
569 set_value_range (vr, VR_RANGE, zero,
570 (overflow_infinity
571 ? positive_overflow_infinity (type)
572 : TYPE_MAX_VALUE (type)),
573 vr->equiv);
574 }
575
576 /* Set value range VR to a non-NULL range of type TYPE. */
577
578 static inline void
579 set_value_range_to_nonnull (value_range_t *vr, tree type)
580 {
581 tree zero = build_int_cst (type, 0);
582 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
583 }
584
585
586 /* Set value range VR to a NULL range of type TYPE. */
587
588 static inline void
589 set_value_range_to_null (value_range_t *vr, tree type)
590 {
591 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
592 }
593
594
595 /* Set value range VR to a range of a truthvalue of type TYPE. */
596
597 static inline void
598 set_value_range_to_truthvalue (value_range_t *vr, tree type)
599 {
600 if (TYPE_PRECISION (type) == 1)
601 set_value_range_to_varying (vr);
602 else
603 set_value_range (vr, VR_RANGE,
604 build_int_cst (type, 0), build_int_cst (type, 1),
605 vr->equiv);
606 }
607
608
609 /* Set value range VR to VR_UNDEFINED. */
610
611 static inline void
612 set_value_range_to_undefined (value_range_t *vr)
613 {
614 vr->type = VR_UNDEFINED;
615 vr->min = vr->max = NULL_TREE;
616 if (vr->equiv)
617 bitmap_clear (vr->equiv);
618 }
619
620
621 /* If abs (min) < abs (max), set VR to [-max, max], if
622 abs (min) >= abs (max), set VR to [-min, min]. */
623
624 static void
625 abs_extent_range (value_range_t *vr, tree min, tree max)
626 {
627 int cmp;
628
629 gcc_assert (TREE_CODE (min) == INTEGER_CST);
630 gcc_assert (TREE_CODE (max) == INTEGER_CST);
631 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
632 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
633 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
634 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
635 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
636 {
637 set_value_range_to_varying (vr);
638 return;
639 }
640 cmp = compare_values (min, max);
641 if (cmp == -1)
642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
643 else if (cmp == 0 || cmp == 1)
644 {
645 max = min;
646 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
647 }
648 else
649 {
650 set_value_range_to_varying (vr);
651 return;
652 }
653 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
654 }
655
656
657 /* Return value range information for VAR.
658
659 If we have no value ranges recorded (i.e., VRP is not running), then
660 return NULL. Otherwise create an empty range if none existed for VAR. */
661
662 static value_range_t *
663 get_value_range (const_tree var)
664 {
665 static const struct value_range_d vr_const_varying
666 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
667 value_range_t *vr;
668 tree sym;
669 unsigned ver = SSA_NAME_VERSION (var);
670
671 /* If we have no recorded ranges, then return NULL. */
672 if (! vr_value)
673 return NULL;
674
675 /* If we query the range for a new SSA name return an unmodifiable VARYING.
676 We should get here at most from the substitute-and-fold stage which
677 will never try to change values. */
678 if (ver >= num_vr_values)
679 return CONST_CAST (value_range_t *, &vr_const_varying);
680
681 vr = vr_value[ver];
682 if (vr)
683 return vr;
684
685 /* After propagation finished do not allocate new value-ranges. */
686 if (values_propagated)
687 return CONST_CAST (value_range_t *, &vr_const_varying);
688
689 /* Create a default value range. */
690 vr_value[ver] = vr = XCNEW (value_range_t);
691
692 /* Defer allocating the equivalence set. */
693 vr->equiv = NULL;
694
695 /* If VAR is a default definition of a parameter, the variable can
696 take any value in VAR's type. */
697 sym = SSA_NAME_VAR (var);
698 if (SSA_NAME_IS_DEFAULT_DEF (var))
699 {
700 if (TREE_CODE (sym) == PARM_DECL)
701 {
702 /* Try to use the "nonnull" attribute to create ~[0, 0]
703 anti-ranges for pointers. Note that this is only valid with
704 default definitions of PARM_DECLs. */
705 if (POINTER_TYPE_P (TREE_TYPE (sym))
706 && nonnull_arg_p (sym))
707 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
708 else
709 set_value_range_to_varying (vr);
710 }
711 else if (TREE_CODE (sym) == RESULT_DECL
712 && DECL_BY_REFERENCE (sym))
713 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
714 }
715
716 return vr;
717 }
718
719 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
720
721 static inline bool
722 vrp_operand_equal_p (const_tree val1, const_tree val2)
723 {
724 if (val1 == val2)
725 return true;
726 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
727 return false;
728 if (is_overflow_infinity (val1))
729 return is_overflow_infinity (val2);
730 return true;
731 }
732
733 /* Return true if the bitmaps B1 and B2 are equal. */
734
735 static inline bool
736 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
737 {
738 return (b1 == b2
739 || ((!b1 || bitmap_empty_p (b1))
740 && (!b2 || bitmap_empty_p (b2)))
741 || (b1 && b2
742 && bitmap_equal_p (b1, b2)));
743 }
744
745 /* Update the value range and equivalence set for variable VAR to
746 NEW_VR. Return true if NEW_VR is different from VAR's previous
747 value.
748
749 NOTE: This function assumes that NEW_VR is a temporary value range
750 object created for the sole purpose of updating VAR's range. The
751 storage used by the equivalence set from NEW_VR will be freed by
752 this function. Do not call update_value_range when NEW_VR
753 is the range object associated with another SSA name. */
754
755 static inline bool
756 update_value_range (const_tree var, value_range_t *new_vr)
757 {
758 value_range_t *old_vr;
759 bool is_new;
760
761 /* Update the value range, if necessary. */
762 old_vr = get_value_range (var);
763 is_new = old_vr->type != new_vr->type
764 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
765 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
766 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
767
768 if (is_new)
769 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
770 new_vr->equiv);
771
772 BITMAP_FREE (new_vr->equiv);
773
774 return is_new;
775 }
776
777
778 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
779 point where equivalence processing can be turned on/off. */
780
781 static void
782 add_equivalence (bitmap *equiv, const_tree var)
783 {
784 unsigned ver = SSA_NAME_VERSION (var);
785 value_range_t *vr = vr_value[ver];
786
787 if (*equiv == NULL)
788 *equiv = BITMAP_ALLOC (NULL);
789 bitmap_set_bit (*equiv, ver);
790 if (vr && vr->equiv)
791 bitmap_ior_into (*equiv, vr->equiv);
792 }
793
794
795 /* Return true if VR is ~[0, 0]. */
796
797 static inline bool
798 range_is_nonnull (value_range_t *vr)
799 {
800 return vr->type == VR_ANTI_RANGE
801 && integer_zerop (vr->min)
802 && integer_zerop (vr->max);
803 }
804
805
806 /* Return true if VR is [0, 0]. */
807
808 static inline bool
809 range_is_null (value_range_t *vr)
810 {
811 return vr->type == VR_RANGE
812 && integer_zerop (vr->min)
813 && integer_zerop (vr->max);
814 }
815
816 /* Return true if the max and min of VR are INTEGER_CST. The range is
817 not necessarily a singleton. */
818
819 static inline bool
820 range_int_cst_p (value_range_t *vr)
821 {
822 return (vr->type == VR_RANGE
823 && TREE_CODE (vr->max) == INTEGER_CST
824 && TREE_CODE (vr->min) == INTEGER_CST
825 && !TREE_OVERFLOW (vr->max)
826 && !TREE_OVERFLOW (vr->min));
827 }
828
829 /* Return true if VR is an INTEGER_CST singleton. */
830
831 static inline bool
832 range_int_cst_singleton_p (value_range_t *vr)
833 {
834 return (range_int_cst_p (vr)
835 && tree_int_cst_equal (vr->min, vr->max));
836 }
837
838 /* Return true if value range VR involves at least one symbol. */
839
840 static inline bool
841 symbolic_range_p (value_range_t *vr)
842 {
843 return (!is_gimple_min_invariant (vr->min)
844 || !is_gimple_min_invariant (vr->max));
845 }
846
847 /* Return true if value range VR uses an overflow infinity. */
848
849 static inline bool
850 overflow_infinity_range_p (value_range_t *vr)
851 {
852 return (vr->type == VR_RANGE
853 && (is_overflow_infinity (vr->min)
854 || is_overflow_infinity (vr->max)));
855 }
856
857 /* Return false if we cannot make a valid comparison based on VR;
858 this will be the case if it uses an overflow infinity and overflow
859 is not undefined (i.e., -fno-strict-overflow is in effect).
860 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
861 uses an overflow infinity. */
862
863 static bool
864 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
865 {
866 gcc_assert (vr->type == VR_RANGE);
867 if (is_overflow_infinity (vr->min))
868 {
869 *strict_overflow_p = true;
870 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
871 return false;
872 }
873 if (is_overflow_infinity (vr->max))
874 {
875 *strict_overflow_p = true;
876 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
877 return false;
878 }
879 return true;
880 }
881
882
883 /* Return true if the result of assignment STMT is known to be non-negative.
884 If the return value is based on the assumption that signed overflow is
885 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
886 *STRICT_OVERFLOW_P.*/
887
888 static bool
889 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
890 {
891 enum tree_code code = gimple_assign_rhs_code (stmt);
892 switch (get_gimple_rhs_class (code))
893 {
894 case GIMPLE_UNARY_RHS:
895 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
896 gimple_expr_type (stmt),
897 gimple_assign_rhs1 (stmt),
898 strict_overflow_p);
899 case GIMPLE_BINARY_RHS:
900 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
901 gimple_expr_type (stmt),
902 gimple_assign_rhs1 (stmt),
903 gimple_assign_rhs2 (stmt),
904 strict_overflow_p);
905 case GIMPLE_TERNARY_RHS:
906 return false;
907 case GIMPLE_SINGLE_RHS:
908 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
909 strict_overflow_p);
910 case GIMPLE_INVALID_RHS:
911 gcc_unreachable ();
912 default:
913 gcc_unreachable ();
914 }
915 }
916
917 /* Return true if the return value of call STMT is known to be non-negative.
918 If the return value is based on the assumption that signed overflow is
919 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
920 *STRICT_OVERFLOW_P.*/
921
922 static bool
923 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
924 {
925 tree arg0 = gimple_call_num_args (stmt) > 0 ?
926 gimple_call_arg (stmt, 0) : NULL_TREE;
927 tree arg1 = gimple_call_num_args (stmt) > 1 ?
928 gimple_call_arg (stmt, 1) : NULL_TREE;
929
930 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
931 gimple_call_fndecl (stmt),
932 arg0,
933 arg1,
934 strict_overflow_p);
935 }
936
937 /* Return true if STMT is known to compute a non-negative value.
938 If the return value is based on the assumption that signed overflow is
939 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
940 *STRICT_OVERFLOW_P.*/
941
942 static bool
943 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
944 {
945 switch (gimple_code (stmt))
946 {
947 case GIMPLE_ASSIGN:
948 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
949 case GIMPLE_CALL:
950 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
951 default:
952 gcc_unreachable ();
953 }
954 }
955
956 /* Return true if the result of assignment STMT is known to be non-zero.
957 If the return value is based on the assumption that signed overflow is
958 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
959 *STRICT_OVERFLOW_P.*/
960
961 static bool
962 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
963 {
964 enum tree_code code = gimple_assign_rhs_code (stmt);
965 switch (get_gimple_rhs_class (code))
966 {
967 case GIMPLE_UNARY_RHS:
968 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
969 gimple_expr_type (stmt),
970 gimple_assign_rhs1 (stmt),
971 strict_overflow_p);
972 case GIMPLE_BINARY_RHS:
973 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
974 gimple_expr_type (stmt),
975 gimple_assign_rhs1 (stmt),
976 gimple_assign_rhs2 (stmt),
977 strict_overflow_p);
978 case GIMPLE_TERNARY_RHS:
979 return false;
980 case GIMPLE_SINGLE_RHS:
981 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
982 strict_overflow_p);
983 case GIMPLE_INVALID_RHS:
984 gcc_unreachable ();
985 default:
986 gcc_unreachable ();
987 }
988 }
989
990 /* Return true if STMT is known to compute a non-zero value.
991 If the return value is based on the assumption that signed overflow is
992 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
993 *STRICT_OVERFLOW_P.*/
994
995 static bool
996 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
997 {
998 switch (gimple_code (stmt))
999 {
1000 case GIMPLE_ASSIGN:
1001 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1002 case GIMPLE_CALL:
1003 return gimple_alloca_call_p (stmt);
1004 default:
1005 gcc_unreachable ();
1006 }
1007 }
1008
1009 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1010 obtained so far. */
1011
1012 static bool
1013 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1014 {
1015 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1016 return true;
1017
1018 /* If we have an expression of the form &X->a, then the expression
1019 is nonnull if X is nonnull. */
1020 if (is_gimple_assign (stmt)
1021 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1022 {
1023 tree expr = gimple_assign_rhs1 (stmt);
1024 tree base = get_base_address (TREE_OPERAND (expr, 0));
1025
1026 if (base != NULL_TREE
1027 && TREE_CODE (base) == MEM_REF
1028 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1029 {
1030 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1031 if (range_is_nonnull (vr))
1032 return true;
1033 }
1034 }
1035
1036 return false;
1037 }
1038
1039 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1040 a gimple invariant, or SSA_NAME +- CST. */
1041
1042 static bool
1043 valid_value_p (tree expr)
1044 {
1045 if (TREE_CODE (expr) == SSA_NAME)
1046 return true;
1047
1048 if (TREE_CODE (expr) == PLUS_EXPR
1049 || TREE_CODE (expr) == MINUS_EXPR)
1050 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1051 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1052
1053 return is_gimple_min_invariant (expr);
1054 }
1055
1056 /* Return
1057 1 if VAL < VAL2
1058 0 if !(VAL < VAL2)
1059 -2 if those are incomparable. */
1060 static inline int
1061 operand_less_p (tree val, tree val2)
1062 {
1063 /* LT is folded faster than GE and others. Inline the common case. */
1064 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1065 {
1066 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1067 return INT_CST_LT_UNSIGNED (val, val2);
1068 else
1069 {
1070 if (INT_CST_LT (val, val2))
1071 return 1;
1072 }
1073 }
1074 else
1075 {
1076 tree tcmp;
1077
1078 fold_defer_overflow_warnings ();
1079
1080 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1081
1082 fold_undefer_and_ignore_overflow_warnings ();
1083
1084 if (!tcmp
1085 || TREE_CODE (tcmp) != INTEGER_CST)
1086 return -2;
1087
1088 if (!integer_zerop (tcmp))
1089 return 1;
1090 }
1091
1092 /* val >= val2, not considering overflow infinity. */
1093 if (is_negative_overflow_infinity (val))
1094 return is_negative_overflow_infinity (val2) ? 0 : 1;
1095 else if (is_positive_overflow_infinity (val2))
1096 return is_positive_overflow_infinity (val) ? 0 : 1;
1097
1098 return 0;
1099 }
1100
1101 /* Compare two values VAL1 and VAL2. Return
1102
1103 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1104 -1 if VAL1 < VAL2,
1105 0 if VAL1 == VAL2,
1106 +1 if VAL1 > VAL2, and
1107 +2 if VAL1 != VAL2
1108
1109 This is similar to tree_int_cst_compare but supports pointer values
1110 and values that cannot be compared at compile time.
1111
1112 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1113 true if the return value is only valid if we assume that signed
1114 overflow is undefined. */
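/* Examples: when signed overflow is undefined,
   compare_values_warnv (n_1, n_1 + 5, &sop) returns -1 (and normally
   sets *STRICT_OVERFLOW_P, since the result only holds if n_1 + 5 does
   not wrap), while compare_values_warnv (n_1 + 2, m_3 + 2, &sop)
   returns -2 because the two operands use different SSA names.  */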
1115
1116 static int
1117 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1118 {
1119 if (val1 == val2)
1120 return 0;
1121
1122 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1123 both integers. */
1124 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1125 == POINTER_TYPE_P (TREE_TYPE (val2)));
1126 /* Convert the two values into the same type. This is needed because
1127 sizetype causes sign extension even for unsigned types. */
1128 val2 = fold_convert (TREE_TYPE (val1), val2);
1129 STRIP_USELESS_TYPE_CONVERSION (val2);
1130
1131 if ((TREE_CODE (val1) == SSA_NAME
1132 || TREE_CODE (val1) == PLUS_EXPR
1133 || TREE_CODE (val1) == MINUS_EXPR)
1134 && (TREE_CODE (val2) == SSA_NAME
1135 || TREE_CODE (val2) == PLUS_EXPR
1136 || TREE_CODE (val2) == MINUS_EXPR))
1137 {
1138 tree n1, c1, n2, c2;
1139 enum tree_code code1, code2;
1140
1141 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1142 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1143 same name, return -2. */
1144 if (TREE_CODE (val1) == SSA_NAME)
1145 {
1146 code1 = SSA_NAME;
1147 n1 = val1;
1148 c1 = NULL_TREE;
1149 }
1150 else
1151 {
1152 code1 = TREE_CODE (val1);
1153 n1 = TREE_OPERAND (val1, 0);
1154 c1 = TREE_OPERAND (val1, 1);
1155 if (tree_int_cst_sgn (c1) == -1)
1156 {
1157 if (is_negative_overflow_infinity (c1))
1158 return -2;
1159 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1160 if (!c1)
1161 return -2;
1162 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1163 }
1164 }
1165
1166 if (TREE_CODE (val2) == SSA_NAME)
1167 {
1168 code2 = SSA_NAME;
1169 n2 = val2;
1170 c2 = NULL_TREE;
1171 }
1172 else
1173 {
1174 code2 = TREE_CODE (val2);
1175 n2 = TREE_OPERAND (val2, 0);
1176 c2 = TREE_OPERAND (val2, 1);
1177 if (tree_int_cst_sgn (c2) == -1)
1178 {
1179 if (is_negative_overflow_infinity (c2))
1180 return -2;
1181 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1182 if (!c2)
1183 return -2;
1184 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1185 }
1186 }
1187
1188 /* Both values must use the same name. */
1189 if (n1 != n2)
1190 return -2;
1191
1192 if (code1 == SSA_NAME
1193 && code2 == SSA_NAME)
1194 /* NAME == NAME */
1195 return 0;
1196
1197 /* If overflow is defined we cannot simplify more. */
1198 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1199 return -2;
1200
1201 if (strict_overflow_p != NULL
1202 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1203 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1204 *strict_overflow_p = true;
1205
1206 if (code1 == SSA_NAME)
1207 {
1208 if (code2 == PLUS_EXPR)
1209 /* NAME < NAME + CST */
1210 return -1;
1211 else if (code2 == MINUS_EXPR)
1212 /* NAME > NAME - CST */
1213 return 1;
1214 }
1215 else if (code1 == PLUS_EXPR)
1216 {
1217 if (code2 == SSA_NAME)
1218 /* NAME + CST > NAME */
1219 return 1;
1220 else if (code2 == PLUS_EXPR)
1221 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1222 return compare_values_warnv (c1, c2, strict_overflow_p);
1223 else if (code2 == MINUS_EXPR)
1224 /* NAME + CST1 > NAME - CST2 */
1225 return 1;
1226 }
1227 else if (code1 == MINUS_EXPR)
1228 {
1229 if (code2 == SSA_NAME)
1230 /* NAME - CST < NAME */
1231 return -1;
1232 else if (code2 == PLUS_EXPR)
1233 /* NAME - CST1 < NAME + CST2 */
1234 return -1;
1235 else if (code2 == MINUS_EXPR)
1236 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1237 C1 and C2 are swapped in the call to compare_values. */
1238 return compare_values_warnv (c2, c1, strict_overflow_p);
1239 }
1240
1241 gcc_unreachable ();
1242 }
1243
1244 /* We cannot compare non-constants. */
1245 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1246 return -2;
1247
1248 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1249 {
1250 /* We cannot compare overflowed values, except for overflow
1251 infinities. */
1252 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1253 {
1254 if (strict_overflow_p != NULL)
1255 *strict_overflow_p = true;
1256 if (is_negative_overflow_infinity (val1))
1257 return is_negative_overflow_infinity (val2) ? 0 : -1;
1258 else if (is_negative_overflow_infinity (val2))
1259 return 1;
1260 else if (is_positive_overflow_infinity (val1))
1261 return is_positive_overflow_infinity (val2) ? 0 : 1;
1262 else if (is_positive_overflow_infinity (val2))
1263 return -1;
1264 return -2;
1265 }
1266
1267 return tree_int_cst_compare (val1, val2);
1268 }
1269 else
1270 {
1271 tree t;
1272
1273 /* First see if VAL1 and VAL2 are not the same. */
1274 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1275 return 0;
1276
1277 /* If VAL1 is a lower address than VAL2, return -1. */
1278 if (operand_less_p (val1, val2) == 1)
1279 return -1;
1280
1281 /* If VAL1 is a higher address than VAL2, return +1. */
1282 if (operand_less_p (val2, val1) == 1)
1283 return 1;
1284
1285 /* If VAL1 is different than VAL2, return +2.
1286 For integer constants we either have already returned -1 or 1
1287 or they are equivalent. We still might succeed in proving
1288 something about non-trivial operands. */
1289 if (TREE_CODE (val1) != INTEGER_CST
1290 || TREE_CODE (val2) != INTEGER_CST)
1291 {
1292 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1293 if (t && integer_onep (t))
1294 return 2;
1295 }
1296
1297 return -2;
1298 }
1299 }
1300
1301 /* Compare values like compare_values_warnv, but treat comparisons of
1302 nonconstants which rely on undefined overflow as incomparable. */
1303
1304 static int
1305 compare_values (tree val1, tree val2)
1306 {
1307 bool sop;
1308 int ret;
1309
1310 sop = false;
1311 ret = compare_values_warnv (val1, val2, &sop);
1312 if (sop
1313 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1314 ret = -2;
1315 return ret;
1316 }
1317
1318
1319 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1320 0 if VAL is not inside [MIN, MAX],
1321 -2 if we cannot tell either way.
1322
1323 Benchmark compile/20001226-1.c compilation time after changing this
1324 function. */
1325
1326 static inline int
1327 value_inside_range (tree val, tree min, tree max)
1328 {
1329 int cmp1, cmp2;
1330
1331 cmp1 = operand_less_p (val, min);
1332 if (cmp1 == -2)
1333 return -2;
1334 if (cmp1 == 1)
1335 return 0;
1336
1337 cmp2 = operand_less_p (max, val);
1338 if (cmp2 == -2)
1339 return -2;
1340
1341 return !cmp2;
1342 }
1343
1344
1345 /* Return true if value ranges VR0 and VR1 have a non-empty
1346 intersection.
1347
1348 Benchmark compile/20001226-1.c compilation time after changing this
1349 function.
1350 */
1351
1352 static inline bool
1353 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1354 {
1355 /* The value ranges do not intersect if the maximum of the first range is
1356 less than the minimum of the second range or vice versa.
1357 When those relations are unknown, we can't do any better. */
1358 if (operand_less_p (vr0->max, vr1->min) != 0)
1359 return false;
1360 if (operand_less_p (vr1->max, vr0->min) != 0)
1361 return false;
1362 return true;
1363 }
1364
1365
1366 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1367 include the value zero, -2 if we cannot tell. */
1368
1369 static inline int
1370 range_includes_zero_p (tree min, tree max)
1371 {
1372 tree zero = build_int_cst (TREE_TYPE (min), 0);
1373 return value_inside_range (zero, min, max);
1374 }
1375
1376 /* Return true if *VR is known to contain only nonnegative values. */
1377
1378 static inline bool
1379 value_range_nonnegative_p (value_range_t *vr)
1380 {
1381 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1382 which would return a useful value should be encoded as a
1383 VR_RANGE. */
1384 if (vr->type == VR_RANGE)
1385 {
1386 int result = compare_values (vr->min, integer_zero_node);
1387 return (result == 0 || result == 1);
1388 }
1389
1390 return false;
1391 }
1392
1393 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1394 false otherwise or if no value range information is available. */
1395
1396 bool
1397 ssa_name_nonnegative_p (const_tree t)
1398 {
1399 value_range_t *vr = get_value_range (t);
1400
1401 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
1402 && TYPE_UNSIGNED (TREE_TYPE (t)))
1403 return true;
1404
1405 if (!vr)
1406 return false;
1407
1408 return value_range_nonnegative_p (vr);
1409 }
1410
1411 /* If *VR has a value range that is a single constant value, return that;
1412 otherwise return NULL_TREE. */
1413
1414 static tree
1415 value_range_constant_singleton (value_range_t *vr)
1416 {
1417 if (vr->type == VR_RANGE
1418 && operand_equal_p (vr->min, vr->max, 0)
1419 && is_gimple_min_invariant (vr->min))
1420 return vr->min;
1421
1422 return NULL_TREE;
1423 }
1424
1425 /* If OP has a value range with a single constant value return that,
1426 otherwise return NULL_TREE. This returns OP itself if OP is a
1427 constant. */
1428
1429 static tree
1430 op_with_constant_singleton_value_range (tree op)
1431 {
1432 if (is_gimple_min_invariant (op))
1433 return op;
1434
1435 if (TREE_CODE (op) != SSA_NAME)
1436 return NULL_TREE;
1437
1438 return value_range_constant_singleton (get_value_range (op));
1439 }
1440
1441 /* Return true if op is in a boolean [0, 1] value-range. */
1442
1443 static bool
1444 op_with_boolean_value_range_p (tree op)
1445 {
1446 value_range_t *vr;
1447
1448 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1449 return true;
1450
1451 if (integer_zerop (op)
1452 || integer_onep (op))
1453 return true;
1454
1455 if (TREE_CODE (op) != SSA_NAME)
1456 return false;
1457
1458 vr = get_value_range (op);
1459 return (vr->type == VR_RANGE
1460 && integer_zerop (vr->min)
1461 && integer_onep (vr->max));
1462 }
1463
1464 /* Extract value range information from an ASSERT_EXPR EXPR and store
1465 it in *VR_P. */
1466
1467 static void
1468 extract_range_from_assert (value_range_t *vr_p, tree expr)
1469 {
1470 tree var, cond, limit, min, max, type;
1471 value_range_t *var_vr, *limit_vr;
1472 enum tree_code cond_code;
1473
1474 var = ASSERT_EXPR_VAR (expr);
1475 cond = ASSERT_EXPR_COND (expr);
1476
1477 gcc_assert (COMPARISON_CLASS_P (cond));
1478
1479 /* Find VAR in the ASSERT_EXPR conditional. */
1480 if (var == TREE_OPERAND (cond, 0)
1481 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1482 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1483 {
1484 /* If the predicate is of the form VAR COMP LIMIT, then we just
1485 take LIMIT from the RHS and use the same comparison code. */
1486 cond_code = TREE_CODE (cond);
1487 limit = TREE_OPERAND (cond, 1);
1488 cond = TREE_OPERAND (cond, 0);
1489 }
1490 else
1491 {
1492 /* If the predicate is of the form LIMIT COMP VAR, then we need
1493 to flip around the comparison code to create the proper range
1494 for VAR. */
1495 cond_code = swap_tree_comparison (TREE_CODE (cond));
1496 limit = TREE_OPERAND (cond, 0);
1497 cond = TREE_OPERAND (cond, 1);
1498 }
1499
1500 limit = avoid_overflow_infinity (limit);
1501
1502 type = TREE_TYPE (var);
1503 gcc_assert (limit != var);
1504
1505 /* For pointer arithmetic, we only keep track of pointer equality
1506 and inequality. */
1507 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1508 {
1509 set_value_range_to_varying (vr_p);
1510 return;
1511 }
1512
1513 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1514 try to use LIMIT's range to avoid creating symbolic ranges
1515 unnecessarily. */
1516 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1517
1518 /* LIMIT's range is only interesting if it has any useful information. */
1519 if (limit_vr
1520 && (limit_vr->type == VR_UNDEFINED
1521 || limit_vr->type == VR_VARYING
1522 || symbolic_range_p (limit_vr)))
1523 limit_vr = NULL;
1524
1525 /* Initially, the new range has the same set of equivalences of
1526 VAR's range. This will be revised before returning the final
1527 value. Since assertions may be chained via mutually exclusive
1528 predicates, we will need to trim the set of equivalences before
1529 we are done. */
1530 gcc_assert (vr_p->equiv == NULL);
1531 add_equivalence (&vr_p->equiv, var);
1532
1533 /* Extract a new range based on the asserted comparison for VAR and
1534 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1535 will only use it for equality comparisons (EQ_EXPR). For any
1536 other kind of assertion, we cannot derive a range from LIMIT's
1537 anti-range that can be used to describe the new range. For
1538 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1539 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1540 no single range for x_2 that could describe LE_EXPR, so we might
1541 as well build the range [-INF, b_4] for it.
1542 One special case we handle is extracting a range from a
1543 range test encoded as (unsigned)var + CST <= limit. */
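/* For instance, the assertion (unsigned int) i_2 + 10 <= 19, where i_2
   is a signed int, constrains i_2 to [-10, 9]: MIN becomes -10, MAX
   becomes 19 - 10 = 9, and the LE_EXPR case below records that
   range.  */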
1544 if (TREE_CODE (cond) == NOP_EXPR
1545 || TREE_CODE (cond) == PLUS_EXPR)
1546 {
1547 if (TREE_CODE (cond) == PLUS_EXPR)
1548 {
1549 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1550 TREE_OPERAND (cond, 1));
1551 max = int_const_binop (PLUS_EXPR, limit, min);
1552 cond = TREE_OPERAND (cond, 0);
1553 }
1554 else
1555 {
1556 min = build_int_cst (TREE_TYPE (var), 0);
1557 max = limit;
1558 }
1559
1560 /* Make sure to not set TREE_OVERFLOW on the final type
1561 conversion. We are willingly interpreting large positive
1562 unsigned values as negative signed values here. */
1563 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1564 0, false);
1565 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1566 0, false);
1567
1568 /* We can transform a max, min range to an anti-range or
1569 vice-versa. Use set_and_canonicalize_value_range which does
1570 this for us. */
1571 if (cond_code == LE_EXPR)
1572 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1573 min, max, vr_p->equiv);
1574 else if (cond_code == GT_EXPR)
1575 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1576 min, max, vr_p->equiv);
1577 else
1578 gcc_unreachable ();
1579 }
1580 else if (cond_code == EQ_EXPR)
1581 {
1582 enum value_range_type range_type;
1583
1584 if (limit_vr)
1585 {
1586 range_type = limit_vr->type;
1587 min = limit_vr->min;
1588 max = limit_vr->max;
1589 }
1590 else
1591 {
1592 range_type = VR_RANGE;
1593 min = limit;
1594 max = limit;
1595 }
1596
1597 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1598
1599 /* When asserting the equality VAR == LIMIT and LIMIT is another
1600 SSA name, the new range will also inherit the equivalence set
1601 from LIMIT. */
1602 if (TREE_CODE (limit) == SSA_NAME)
1603 add_equivalence (&vr_p->equiv, limit);
1604 }
1605 else if (cond_code == NE_EXPR)
1606 {
1607 /* As described above, when LIMIT's range is an anti-range and
1608 this assertion is an inequality (NE_EXPR), then we cannot
1609 derive anything from the anti-range. For instance, if
1610 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1611 not imply that VAR's range is [0, 0]. So, in the case of
1612 anti-ranges, we just assert the inequality using LIMIT and
1613 not its anti-range.
1614
1615 If LIMIT_VR is a range, we can only use it to build a new
1616 anti-range if LIMIT_VR is a single-valued range. For
1617 instance, if LIMIT_VR is [0, 1], the predicate
1618 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1619 Rather, it means that for value 0 VAR should be ~[0, 0]
1620 and for value 1, VAR should be ~[1, 1]. We cannot
1621 represent these ranges.
1622
1623 The only situation in which we can build a valid
1624 anti-range is when LIMIT_VR is a single-valued range
1625 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1626 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1627 if (limit_vr
1628 && limit_vr->type == VR_RANGE
1629 && compare_values (limit_vr->min, limit_vr->max) == 0)
1630 {
1631 min = limit_vr->min;
1632 max = limit_vr->max;
1633 }
1634 else
1635 {
1636 /* In any other case, we cannot use LIMIT's range to build a
1637 valid anti-range. */
1638 min = max = limit;
1639 }
1640
1641 /* If MIN and MAX cover the whole range for their type, then
1642 just use the original LIMIT. */
1643 if (INTEGRAL_TYPE_P (type)
1644 && vrp_val_is_min (min)
1645 && vrp_val_is_max (max))
1646 min = max = limit;
1647
1648 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1649 }
1650 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1651 {
1652 min = TYPE_MIN_VALUE (type);
1653
1654 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1655 max = limit;
1656 else
1657 {
1658 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1659 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1660 LT_EXPR. */
1661 max = limit_vr->max;
1662 }
1663
1664 /* If the maximum value forces us to be out of bounds, simply punt.
1665 It would be pointless to try and do anything more since this
1666 all should be optimized away above us. */
1667 if ((cond_code == LT_EXPR
1668 && compare_values (max, min) == 0)
1669 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1670 set_value_range_to_varying (vr_p);
1671 else
1672 {
1673 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1674 if (cond_code == LT_EXPR)
1675 {
1676 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1677 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1678 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1679 build_int_cst (TREE_TYPE (max), -1));
1680 else
1681 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1682 build_int_cst (TREE_TYPE (max), 1));
1683 if (EXPR_P (max))
1684 TREE_NO_WARNING (max) = 1;
1685 }
1686
1687 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1688 }
1689 }
1690 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1691 {
1692 max = TYPE_MAX_VALUE (type);
1693
1694 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1695 min = limit;
1696 else
1697 {
1698 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1699 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1700 GT_EXPR. */
1701 min = limit_vr->min;
1702 }
1703
1704 /* If the minimum value forces us to be out of bounds, simply punt.
1705 It would be pointless to try and do anything more since this
1706 all should be optimized away above us. */
1707 if ((cond_code == GT_EXPR
1708 && compare_values (min, max) == 0)
1709 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1710 set_value_range_to_varying (vr_p);
1711 else
1712 {
1713 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1714 if (cond_code == GT_EXPR)
1715 {
1716 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1717 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1718 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1719 build_int_cst (TREE_TYPE (min), -1));
1720 else
1721 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1722 build_int_cst (TREE_TYPE (min), 1));
1723 if (EXPR_P (min))
1724 TREE_NO_WARNING (min) = 1;
1725 }
1726
1727 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1728 }
1729 }
1730 else
1731 gcc_unreachable ();
1732
1733 /* If VAR already had a known range, it may happen that the new
1734 range we have computed and VAR's range are not compatible. For
1735 instance,
1736
1737 if (p_5 == NULL)
1738 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1739 x_7 = p_6->fld;
1740 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1741
1742 While the above comes from a faulty program, it will cause an ICE
1743 later because p_8 and p_6 will have incompatible ranges and at
1744 the same time will be considered equivalent. A similar situation
1745 would arise from
1746
1747 if (i_5 > 10)
1748 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1749 if (i_5 < 5)
1750 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1751
1752 Again i_6 and i_7 will have incompatible ranges. It would be
1753 pointless to try and do anything with i_7's range because
1754 anything dominated by 'if (i_5 < 5)' will be optimized away.
1755 Note that, due to the way in which simulation proceeds, the statement
1756 i_7 = ASSERT_EXPR <...> would never be visited because the
1757 conditional 'if (i_5 < 5)' always evaluates to false. However,
1758 this extra check does not hurt and may protect against future
1759 changes to VRP that may get into a situation similar to the
1760 NULL pointer dereference example.
1761
1762 Note that these compatibility tests are only needed when dealing
1763 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1764 are both anti-ranges, they will always be compatible, because two
1765 anti-ranges will always have a non-empty intersection. */
1766
1767 var_vr = get_value_range (var);
1768
1769 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1770 ranges or anti-ranges. */
1771 if (vr_p->type == VR_VARYING
1772 || vr_p->type == VR_UNDEFINED
1773 || var_vr->type == VR_VARYING
1774 || var_vr->type == VR_UNDEFINED
1775 || symbolic_range_p (vr_p)
1776 || symbolic_range_p (var_vr))
1777 return;
1778
1779 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1780 {
1781 /* If the two ranges have a non-empty intersection, we can
1782 refine the resulting range. Since the assert expression
1783 creates an equivalency and at the same time it asserts a
1784 predicate, we can take the intersection of the two ranges to
1785 get better precision. */
1786 if (value_ranges_intersect_p (var_vr, vr_p))
1787 {
1788 /* Use the larger of the two minimums. */
1789 if (compare_values (vr_p->min, var_vr->min) == -1)
1790 min = var_vr->min;
1791 else
1792 min = vr_p->min;
1793
1794 /* Use the smaller of the two maximums. */
1795 if (compare_values (vr_p->max, var_vr->max) == 1)
1796 max = var_vr->max;
1797 else
1798 max = vr_p->max;
1799
1800 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1801 }
1802 else
1803 {
1804 /* The two ranges do not intersect, set the new range to
1805 VARYING, because we will not be able to do anything
1806 meaningful with it. */
1807 set_value_range_to_varying (vr_p);
1808 }
1809 }
1810 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1811 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1812 {
1813 /* A range and an anti-range will cancel each other only if
1814 their ends are the same. For instance, in the example above,
1815 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1816 so VR_P should be set to VR_VARYING. */
1817 if (compare_values (var_vr->min, vr_p->min) == 0
1818 && compare_values (var_vr->max, vr_p->max) == 0)
1819 set_value_range_to_varying (vr_p);
1820 else
1821 {
1822 tree min, max, anti_min, anti_max, real_min, real_max;
1823 int cmp;
1824
1825 /* We want to compute the logical AND of the two ranges;
1826 there are three cases to consider.
1827
1828
1829 1. The VR_ANTI_RANGE range is completely within the
1830 VR_RANGE and the endpoints of the ranges are
1831 different. In that case the resulting range
1832 should be whichever range is more precise.
1833 Typically that will be the VR_RANGE.
1834
1835 2. The VR_ANTI_RANGE is completely disjoint from
1836 the VR_RANGE. In this case the resulting range
1837 should be the VR_RANGE.
1838
1839 3. There is some overlap between the VR_ANTI_RANGE
1840 and the VR_RANGE.
1841
1842 3a. If the high limit of the VR_ANTI_RANGE resides
1843 within the VR_RANGE, then the result is a new
1844 VR_RANGE starting at the high limit of the
1845 VR_ANTI_RANGE + 1 and extending to the
1846 high limit of the original VR_RANGE.
1847
1848 3b. If the low limit of the VR_ANTI_RANGE resides
1849 within the VR_RANGE, then the result is a new
1850 VR_RANGE starting at the low limit of the original
1851 VR_RANGE and extending to the low limit of the
1852 VR_ANTI_RANGE - 1. */
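/* A few concrete illustrations of the cases above (examples only,
   not an exhaustive enumeration):

   Case 1:  [1, 10] intersected with ~[4, 6]   keeps [1, 10].
   Case 2:  [1, 10] intersected with ~[20, 30] keeps [1, 10].
   Case 3a: [1, 10] intersected with ~[0, 4]   yields [5, 10].
   Case 3b: [1, 10] intersected with ~[7, 20]  yields [1, 6].  */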
1853 if (vr_p->type == VR_ANTI_RANGE)
1854 {
1855 anti_min = vr_p->min;
1856 anti_max = vr_p->max;
1857 real_min = var_vr->min;
1858 real_max = var_vr->max;
1859 }
1860 else
1861 {
1862 anti_min = var_vr->min;
1863 anti_max = var_vr->max;
1864 real_min = vr_p->min;
1865 real_max = vr_p->max;
1866 }
1867
1868
1869 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1870 not including any endpoints. */
1871 if (compare_values (anti_max, real_max) == -1
1872 && compare_values (anti_min, real_min) == 1)
1873 {
1874 /* If the range covers the whole valid range of
1875 the type, keep the anti-range. */
1876 if (!vrp_val_is_min (real_min)
1877 || !vrp_val_is_max (real_max))
1878 set_value_range (vr_p, VR_RANGE, real_min,
1879 real_max, vr_p->equiv);
1880 }
1881 /* Case 2, VR_ANTI_RANGE completely disjoint from
1882 VR_RANGE. */
1883 else if (compare_values (anti_min, real_max) == 1
1884 || compare_values (anti_max, real_min) == -1)
1885 {
1886 set_value_range (vr_p, VR_RANGE, real_min,
1887 real_max, vr_p->equiv);
1888 }
1889 /* Case 3a, the anti-range extends into the low
1890 part of the real range. Thus creating a new
1891 low for the real range. */
1892 else if (((cmp = compare_values (anti_max, real_min)) == 1
1893 || cmp == 0)
1894 && compare_values (anti_max, real_max) == -1)
1895 {
1896 gcc_assert (!is_positive_overflow_infinity (anti_max));
1897 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1898 && vrp_val_is_max (anti_max))
1899 {
1900 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1901 {
1902 set_value_range_to_varying (vr_p);
1903 return;
1904 }
1905 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1906 }
1907 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1908 {
1909 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1910 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1911 min = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1912 anti_max,
1913 build_int_cst (TREE_TYPE (var_vr->min),
1914 -1));
1915 else
1916 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1917 anti_max,
1918 build_int_cst (TREE_TYPE (var_vr->min),
1919 1));
1920 }
1921 else
1922 min = fold_build_pointer_plus_hwi (anti_max, 1);
1923 max = real_max;
1924 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1925 }
1926 /* Case 3b, the anti-range extends into the high
1927 part of the real range. Thus creating a new
1928 high for the real range. */
1929 else if (compare_values (anti_min, real_min) == 1
1930 && ((cmp = compare_values (anti_min, real_max)) == -1
1931 || cmp == 0))
1932 {
1933 gcc_assert (!is_negative_overflow_infinity (anti_min));
1934 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1935 && vrp_val_is_min (anti_min))
1936 {
1937 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1938 {
1939 set_value_range_to_varying (vr_p);
1940 return;
1941 }
1942 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1943 }
1944 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1945 {
1946 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1947 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1948 max = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1949 anti_min,
1950 build_int_cst (TREE_TYPE (var_vr->min),
1951 -1));
1952 else
1953 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1954 anti_min,
1955 build_int_cst (TREE_TYPE (var_vr->min),
1956 1));
1957 }
1958 else
1959 max = fold_build_pointer_plus_hwi (anti_min, -1);
1960 min = real_min;
1961 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1962 }
1963 }
1964 }
1965 }
1966
1967
1968 /* Extract range information from SSA name VAR and store it in VR. If
1969 VAR has an interesting range, use it. Otherwise, create the
1970 range [VAR, VAR] and return it. This is useful in situations where
1971 we may have conditionals testing values of VARYING names. For
1972 instance,
1973
1974 x_3 = y_5;
1975 if (x_3 > y_5)
1976 ...
1977
1978 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1979 always false. */
1980
1981 static void
1982 extract_range_from_ssa_name (value_range_t *vr, tree var)
1983 {
1984 value_range_t *var_vr = get_value_range (var);
1985
1986 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1987 copy_value_range (vr, var_vr);
1988 else
1989 set_value_range (vr, VR_RANGE, var, var, NULL);
1990
1991 add_equivalence (&vr->equiv, var);
1992 }
1993
1994
1995 /* Wrapper around int_const_binop. If the operation overflows and we
1996 are not using wrapping arithmetic, then adjust the result to be
1997 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1998 NULL_TREE if we need to use an overflow infinity representation but
1999 the type does not support it. */
2000
2001 static tree
2002 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
2003 {
2004 tree res;
2005
2006 res = int_const_binop (code, val1, val2);
2007
2008 /* If we are using unsigned arithmetic, operate symbolically
2009 on -INF and +INF as int_const_binop only handles signed overflow. */
2010 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
2011 {
2012 int checkz = compare_values (res, val1);
2013 bool overflow = false;
2014
2015 /* Ensure that res = val1 [+*] val2 >= val1
2016 or that res = val1 - val2 <= val1. */
2017 if ((code == PLUS_EXPR
2018 && !(checkz == 1 || checkz == 0))
2019 || (code == MINUS_EXPR
2020 && !(checkz == 0 || checkz == -1)))
2021 {
2022 overflow = true;
2023 }
2024 /* Checking for multiplication overflow is done by dividing the
2025 output of the multiplication by the first input of the
2026 multiplication. If the result of that division operation is
2027 not equal to the second input of the multiplication, then the
2028 multiplication overflowed. */
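/* For instance, with 8-bit unsigned operands, 20 * 20 = 400 wraps
   to 144, and 144 / 20 = 7 != 20, so the overflow is detected.
   (Purely illustrative; the real width is that of the operands.)  */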
2029 else if (code == MULT_EXPR && !integer_zerop (val1))
2030 {
2031 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
2032 res,
2033 val1);
2034 int check = compare_values (tmp, val2);
2035
2036 if (check != 0)
2037 overflow = true;
2038 }
2039
2040 if (overflow)
2041 {
2042 res = copy_node (res);
2043 TREE_OVERFLOW (res) = 1;
2044 }
2045
2046 }
2047 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
2048 /* If the signed operation wraps then int_const_binop has done
2049 everything we want. */
2050 ;
2051 else if ((TREE_OVERFLOW (res)
2052 && !TREE_OVERFLOW (val1)
2053 && !TREE_OVERFLOW (val2))
2054 || is_overflow_infinity (val1)
2055 || is_overflow_infinity (val2))
2056 {
2057 /* If the operation overflowed but neither VAL1 nor VAL2 are
2058 overflown, return -INF or +INF depending on the operation
2059 and the combination of signs of the operands. */
2060 int sgn1 = tree_int_cst_sgn (val1);
2061 int sgn2 = tree_int_cst_sgn (val2);
2062
2063 if (needs_overflow_infinity (TREE_TYPE (res))
2064 && !supports_overflow_infinity (TREE_TYPE (res)))
2065 return NULL_TREE;
2066
2067 /* We have to punt on adding infinities of different signs,
2068 since we can't tell what the sign of the result should be.
2069 Likewise for subtracting infinities of the same sign. */
2070 if (((code == PLUS_EXPR && sgn1 != sgn2)
2071 || (code == MINUS_EXPR && sgn1 == sgn2))
2072 && is_overflow_infinity (val1)
2073 && is_overflow_infinity (val2))
2074 return NULL_TREE;
2075
2076 /* Don't try to handle division or shifting of infinities. */
2077 if ((code == TRUNC_DIV_EXPR
2078 || code == FLOOR_DIV_EXPR
2079 || code == CEIL_DIV_EXPR
2080 || code == EXACT_DIV_EXPR
2081 || code == ROUND_DIV_EXPR
2082 || code == RSHIFT_EXPR)
2083 && (is_overflow_infinity (val1)
2084 || is_overflow_infinity (val2)))
2085 return NULL_TREE;
2086
2087 /* Notice that we only need to handle the restricted set of
2088 operations handled by extract_range_from_binary_expr.
2089 Among them, only multiplication, addition and subtraction
2090 can yield overflow without overflown operands because we
2091 are working with integral types only... except in the
2092 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2093 for division too. */
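/* As a purely illustrative example for MULT_EXPR on a narrow signed
   type: an overflowing 100 * 100 has operands of equal sign and so
   is treated as +INF below, while an overflowing -100 * 100 is
   treated as -INF.  */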
2094
2095 /* For multiplication, the sign of the overflow is given
2096 by the comparison of the signs of the operands. */
2097 if ((code == MULT_EXPR && sgn1 == sgn2)
2098 /* For addition, the operands must be of the same sign
2099 to yield an overflow. Its sign is therefore that
2100 of one of the operands, for example the first. For
2101 infinite operands X + -INF is negative, not positive. */
2102 || (code == PLUS_EXPR
2103 && (sgn1 >= 0
2104 ? !is_negative_overflow_infinity (val2)
2105 : is_positive_overflow_infinity (val2)))
2106 /* For subtraction, non-infinite operands must be of
2107 different signs to yield an overflow. Its sign is
2108 therefore that of the first operand or the opposite of
2109 that of the second operand. A first operand of 0 counts
2110 as positive here, for the corner case 0 - (-INF), which
2111 overflows, but must yield +INF. For infinite operands 0
2112 - INF is negative, not positive. */
2113 || (code == MINUS_EXPR
2114 && (sgn1 >= 0
2115 ? !is_positive_overflow_infinity (val2)
2116 : is_negative_overflow_infinity (val2)))
2117 /* We only get in here with positive shift count, so the
2118 overflow direction is the same as the sign of val1.
2119 Actually rshift does not overflow at all, but we only
2120 handle the case of shifting overflowed -INF and +INF. */
2121 || (code == RSHIFT_EXPR
2122 && sgn1 >= 0)
2123 /* For division, the only case is -INF / -1 = +INF. */
2124 || code == TRUNC_DIV_EXPR
2125 || code == FLOOR_DIV_EXPR
2126 || code == CEIL_DIV_EXPR
2127 || code == EXACT_DIV_EXPR
2128 || code == ROUND_DIV_EXPR)
2129 return (needs_overflow_infinity (TREE_TYPE (res))
2130 ? positive_overflow_infinity (TREE_TYPE (res))
2131 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2132 else
2133 return (needs_overflow_infinity (TREE_TYPE (res))
2134 ? negative_overflow_infinity (TREE_TYPE (res))
2135 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2136 }
2137
2138 return res;
2139 }
2140
2141
2142 /* For range VR compute two double_int bitmasks. If a bit is unset in
2143 the *MAY_BE_NONZERO bitmask, then for all numbers in the range that
2144 bit is 0; otherwise it might be 0 or 1. If a bit is set in the
2145 *MUST_BE_NONZERO bitmask, then for all numbers in the range that
2146 bit is 1; otherwise it might be 0 or 1. */
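/* For instance, for VR = [4, 7] (binary 100 .. 111) this computes
   *MUST_BE_NONZERO = 100 (bit 2 is set in every value in the range)
   and *MAY_BE_NONZERO = 111 (bits 0 and 1 may be either 0 or 1).  */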
2147
2148 static bool
2149 zero_nonzero_bits_from_vr (value_range_t *vr,
2150 double_int *may_be_nonzero,
2151 double_int *must_be_nonzero)
2152 {
2153 *may_be_nonzero = double_int_minus_one;
2154 *must_be_nonzero = double_int_zero;
2155 if (!range_int_cst_p (vr))
2156 return false;
2157
2158 if (range_int_cst_singleton_p (vr))
2159 {
2160 *may_be_nonzero = tree_to_double_int (vr->min);
2161 *must_be_nonzero = *may_be_nonzero;
2162 }
2163 else if (tree_int_cst_sgn (vr->min) >= 0
2164 || tree_int_cst_sgn (vr->max) < 0)
2165 {
2166 double_int dmin = tree_to_double_int (vr->min);
2167 double_int dmax = tree_to_double_int (vr->max);
2168 double_int xor_mask = double_int_xor (dmin, dmax);
2169 *may_be_nonzero = double_int_ior (dmin, dmax);
2170 *must_be_nonzero = double_int_and (dmin, dmax);
2171 if (xor_mask.high != 0)
2172 {
2173 unsigned HOST_WIDE_INT mask
2174 = ((unsigned HOST_WIDE_INT) 1
2175 << floor_log2 (xor_mask.high)) - 1;
2176 may_be_nonzero->low = ALL_ONES;
2177 may_be_nonzero->high |= mask;
2178 must_be_nonzero->low = 0;
2179 must_be_nonzero->high &= ~mask;
2180 }
2181 else if (xor_mask.low != 0)
2182 {
2183 unsigned HOST_WIDE_INT mask
2184 = ((unsigned HOST_WIDE_INT) 1
2185 << floor_log2 (xor_mask.low)) - 1;
2186 may_be_nonzero->low |= mask;
2187 must_be_nonzero->low &= ~mask;
2188 }
2189 }
2190
2191 return true;
2192 }
2193
2194 /* Helper to extract a value-range *VR for a multiplicative operation
2195 *VR0 CODE *VR1. */
2196
2197 static void
2198 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2199 enum tree_code code,
2200 value_range_t *vr0, value_range_t *vr1)
2201 {
2202 enum value_range_type type;
2203 tree val[4];
2204 size_t i;
2205 tree min, max;
2206 bool sop;
2207 int cmp;
2208
2209 /* Multiplications, divisions and shifts are a bit tricky to handle,
2210 depending on the mix of signs we have in the two ranges, we
2211 need to operate on different values to get the minimum and
2212 maximum values for the new range. One approach is to figure
2213 out all the variations of range combinations and do the
2214 operations.
2215
2216 However, this involves several calls to compare_values and it
2217 is pretty convoluted. It's simpler to do the 4 operations
2218 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2219 MAX1) and then figure the smallest and largest values to form
2220 the new range. */
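/* For instance, for [2, 3] * [5, 7] the four cross products are
   10, 14, 15 and 21, giving the resulting range [10, 21].  */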
2221 gcc_assert (code == MULT_EXPR
2222 || code == TRUNC_DIV_EXPR
2223 || code == FLOOR_DIV_EXPR
2224 || code == CEIL_DIV_EXPR
2225 || code == EXACT_DIV_EXPR
2226 || code == ROUND_DIV_EXPR
2227 || code == RSHIFT_EXPR);
2228 gcc_assert ((vr0->type == VR_RANGE
2229 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2230 && vr0->type == vr1->type);
2231
2232 type = vr0->type;
2233
2234 /* Compute the 4 cross operations. */
2235 sop = false;
2236 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2237 if (val[0] == NULL_TREE)
2238 sop = true;
2239
2240 if (vr1->max == vr1->min)
2241 val[1] = NULL_TREE;
2242 else
2243 {
2244 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2245 if (val[1] == NULL_TREE)
2246 sop = true;
2247 }
2248
2249 if (vr0->max == vr0->min)
2250 val[2] = NULL_TREE;
2251 else
2252 {
2253 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2254 if (val[2] == NULL_TREE)
2255 sop = true;
2256 }
2257
2258 if (vr0->min == vr0->max || vr1->min == vr1->max)
2259 val[3] = NULL_TREE;
2260 else
2261 {
2262 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2263 if (val[3] == NULL_TREE)
2264 sop = true;
2265 }
2266
2267 if (sop)
2268 {
2269 set_value_range_to_varying (vr);
2270 return;
2271 }
2272
2273 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2274 of VAL[i]. */
2275 min = val[0];
2276 max = val[0];
2277 for (i = 1; i < 4; i++)
2278 {
2279 if (!is_gimple_min_invariant (min)
2280 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2281 || !is_gimple_min_invariant (max)
2282 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2283 break;
2284
2285 if (val[i])
2286 {
2287 if (!is_gimple_min_invariant (val[i])
2288 || (TREE_OVERFLOW (val[i])
2289 && !is_overflow_infinity (val[i])))
2290 {
2291 /* If we found an overflowed value, set MIN and MAX
2292 to it so that we set the resulting range to
2293 VARYING. */
2294 min = max = val[i];
2295 break;
2296 }
2297
2298 if (compare_values (val[i], min) == -1)
2299 min = val[i];
2300
2301 if (compare_values (val[i], max) == 1)
2302 max = val[i];
2303 }
2304 }
2305
2306 /* If either MIN or MAX overflowed, then set the resulting range to
2307 VARYING. But we do accept an overflow infinity
2308 representation. */
2309 if (min == NULL_TREE
2310 || !is_gimple_min_invariant (min)
2311 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2312 || max == NULL_TREE
2313 || !is_gimple_min_invariant (max)
2314 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2315 {
2316 set_value_range_to_varying (vr);
2317 return;
2318 }
2319
2320 /* We punt if:
2321 1) [-INF, +INF]
2322 2) [-INF, +-INF(OVF)]
2323 3) [+-INF(OVF), +INF]
2324 4) [+-INF(OVF), +-INF(OVF)]
2325 We learn nothing when we have INF and INF(OVF) on both sides.
2326 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2327 overflow. */
2328 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2329 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2330 {
2331 set_value_range_to_varying (vr);
2332 return;
2333 }
2334
2335 cmp = compare_values (min, max);
2336 if (cmp == -2 || cmp == 1)
2337 {
2338 /* If the new range has its limits swapped around (MIN > MAX),
2339 then the operation caused one of them to wrap around, mark
2340 the new range VARYING. */
2341 set_value_range_to_varying (vr);
2342 }
2343 else
2344 set_value_range (vr, type, min, max, NULL);
2345 }
2346
2347 /* Extract range information from a binary operation CODE based on
2348 the ranges of each of its operands, *VR0 and *VR1 with resulting
2349 type EXPR_TYPE. The resulting range is stored in *VR. */
2350
2351 static void
2352 extract_range_from_binary_expr_1 (value_range_t *vr,
2353 enum tree_code code, tree expr_type,
2354 value_range_t *vr0_, value_range_t *vr1_)
2355 {
2356 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2357 enum value_range_type type;
2358 tree min = NULL_TREE, max = NULL_TREE;
2359 int cmp;
2360
2361 if (!INTEGRAL_TYPE_P (expr_type)
2362 && !POINTER_TYPE_P (expr_type))
2363 {
2364 set_value_range_to_varying (vr);
2365 return;
2366 }
2367
2368 /* Not all binary expressions can be applied to ranges in a
2369 meaningful way. Handle only arithmetic operations. */
2370 if (code != PLUS_EXPR
2371 && code != MINUS_EXPR
2372 && code != POINTER_PLUS_EXPR
2373 && code != MULT_EXPR
2374 && code != TRUNC_DIV_EXPR
2375 && code != FLOOR_DIV_EXPR
2376 && code != CEIL_DIV_EXPR
2377 && code != EXACT_DIV_EXPR
2378 && code != ROUND_DIV_EXPR
2379 && code != TRUNC_MOD_EXPR
2380 && code != RSHIFT_EXPR
2381 && code != MIN_EXPR
2382 && code != MAX_EXPR
2383 && code != BIT_AND_EXPR
2384 && code != BIT_IOR_EXPR
2385 && code != BIT_XOR_EXPR)
2386 {
2387 set_value_range_to_varying (vr);
2388 return;
2389 }
2390
2391 /* If both ranges are UNDEFINED, so is the result. */
2392 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2393 {
2394 set_value_range_to_undefined (vr);
2395 return;
2396 }
2397 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2398 code. At some point we may want to special-case operations that
2399 have UNDEFINED result for all or some value-ranges of the non-UNDEFINED
2400 operand. */
2401 else if (vr0.type == VR_UNDEFINED)
2402 set_value_range_to_varying (&vr0);
2403 else if (vr1.type == VR_UNDEFINED)
2404 set_value_range_to_varying (&vr1);
2405
2406 /* The type of the resulting value range defaults to VR0.TYPE. */
2407 type = vr0.type;
2408
2409 /* Refuse to operate on VARYING ranges, ranges of different kinds
2410 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2411 because we may be able to derive a useful range even if one of
2412 the operands is VR_VARYING or symbolic range. Similarly for
2413 divisions. TODO, we may be able to derive anti-ranges in
2414 some cases. */
2415 if (code != BIT_AND_EXPR
2416 && code != BIT_IOR_EXPR
2417 && code != TRUNC_DIV_EXPR
2418 && code != FLOOR_DIV_EXPR
2419 && code != CEIL_DIV_EXPR
2420 && code != EXACT_DIV_EXPR
2421 && code != ROUND_DIV_EXPR
2422 && code != TRUNC_MOD_EXPR
2423 && (vr0.type == VR_VARYING
2424 || vr1.type == VR_VARYING
2425 || vr0.type != vr1.type
2426 || symbolic_range_p (&vr0)
2427 || symbolic_range_p (&vr1)))
2428 {
2429 set_value_range_to_varying (vr);
2430 return;
2431 }
2432
2433 /* Now evaluate the expression to determine the new range. */
2434 if (POINTER_TYPE_P (expr_type))
2435 {
2436 if (code == MIN_EXPR || code == MAX_EXPR)
2437 {
2438 /* For MIN/MAX expressions with pointers, we only care about
2439 nullness: if both are non-null, then the result is nonnull.
2440 If both are null, then the result is null. Otherwise they
2441 are varying. */
2442 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2443 set_value_range_to_nonnull (vr, expr_type);
2444 else if (range_is_null (&vr0) && range_is_null (&vr1))
2445 set_value_range_to_null (vr, expr_type);
2446 else
2447 set_value_range_to_varying (vr);
2448 }
2449 else if (code == POINTER_PLUS_EXPR)
2450 {
2451 /* For pointer types, we are really only interested in asserting
2452 whether the expression evaluates to non-NULL. */
2453 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2454 set_value_range_to_nonnull (vr, expr_type);
2455 else if (range_is_null (&vr0) && range_is_null (&vr1))
2456 set_value_range_to_null (vr, expr_type);
2457 else
2458 set_value_range_to_varying (vr);
2459 }
2460 else if (code == BIT_AND_EXPR)
2461 {
2462 /* For pointer types, we are really only interested in asserting
2463 whether the expression evaluates to non-NULL. */
2464 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2465 set_value_range_to_nonnull (vr, expr_type);
2466 else if (range_is_null (&vr0) || range_is_null (&vr1))
2467 set_value_range_to_null (vr, expr_type);
2468 else
2469 set_value_range_to_varying (vr);
2470 }
2471 else
2472 set_value_range_to_varying (vr);
2473
2474 return;
2475 }
2476
2477 /* For integer ranges, apply the operation to each end of the
2478 range and see what we end up with. */
2479 if (code == PLUS_EXPR)
2480 {
2481 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2482 VR_VARYING. It would take more effort to compute a precise
2483 range for such a case. For example, if we have op0 == 1 and
2484 op1 == -1 with their ranges both being ~[0,0], we would have
2485 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2486 Note that we are guaranteed to have vr0.type == vr1.type at
2487 this point. */
2488 if (vr0.type == VR_ANTI_RANGE)
2489 {
2490 set_value_range_to_varying (vr);
2491 return;
2492 }
2493
2494 /* For operations that make the resulting range directly
2495 proportional to the original ranges, apply the operation to
2496 the same end of each range. */
2497 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2498 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2499
2500 /* If both additions overflowed the range kind is still correct.
2501 This happens regularly with subtracting something in unsigned
2502 arithmetic.
2503 ??? See PR30318 for all the cases we do not handle. */
2504 if ((TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2505 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2506 {
2507 min = build_int_cst_wide (TREE_TYPE (min),
2508 TREE_INT_CST_LOW (min),
2509 TREE_INT_CST_HIGH (min));
2510 max = build_int_cst_wide (TREE_TYPE (max),
2511 TREE_INT_CST_LOW (max),
2512 TREE_INT_CST_HIGH (max));
2513 }
2514 }
2515 else if (code == MIN_EXPR
2516 || code == MAX_EXPR)
2517 {
2518 if (vr0.type == VR_ANTI_RANGE)
2519 {
2520 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
2521 the resulting VR_ANTI_RANGE is the same - intersection
2522 of the two ranges. */
2523 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2524 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
2525 }
2526 else
2527 {
2528 /* For operations that make the resulting range directly
2529 proportional to the original ranges, apply the operation to
2530 the same end of each range. */
2531 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2532 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2533 }
2534 }
2535 else if (code == MULT_EXPR)
2536 {
2537 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2538 drop to VR_VARYING. It would take more effort to compute a
2539 precise range for such a case. For example, if we have
2540 op0 == 65536 and op1 == 65536 with their ranges both being
2541 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2542 we cannot claim that the product is in ~[0,0]. Note that we
2543 are guaranteed to have vr0.type == vr1.type at this
2544 point. */
2545 if (vr0.type == VR_ANTI_RANGE
2546 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2547 {
2548 set_value_range_to_varying (vr);
2549 return;
2550 }
2551
2552 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2553 return;
2554 }
2555 else if (code == RSHIFT_EXPR)
2556 {
2557 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2558 then drop to VR_VARYING. Outside of this range we get undefined
2559 behavior from the shift operation. We cannot even trust
2560 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2561 shifts, and the operation at the tree level may be widened. */
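/* With an in-range shift count the result follows from the cross
   operations below; e.g. [16, 64] >> [2, 2] yields [4, 16].  */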
2562 if (vr1.type != VR_RANGE
2563 || !value_range_nonnegative_p (&vr1)
2564 || TREE_CODE (vr1.max) != INTEGER_CST
2565 || compare_tree_int (vr1.max, TYPE_PRECISION (expr_type) - 1) == 1)
2566 {
2567 set_value_range_to_varying (vr);
2568 return;
2569 }
2570
2571 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2572 return;
2573 }
2574 else if (code == TRUNC_DIV_EXPR
2575 || code == FLOOR_DIV_EXPR
2576 || code == CEIL_DIV_EXPR
2577 || code == EXACT_DIV_EXPR
2578 || code == ROUND_DIV_EXPR)
2579 {
2580 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2581 {
2582 /* For division, if op1 has VR_RANGE but op0 does not, something
2583 can be deduced just from that range. Say [min, max] / [4, max]
2584 gives [min / 4, max / 4] range. */
2585 if (vr1.type == VR_RANGE
2586 && !symbolic_range_p (&vr1)
2587 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2588 {
2589 vr0.type = type = VR_RANGE;
2590 vr0.min = vrp_val_min (expr_type);
2591 vr0.max = vrp_val_max (expr_type);
2592 }
2593 else
2594 {
2595 set_value_range_to_varying (vr);
2596 return;
2597 }
2598 }
2599
2600 /* For divisions, if flag_non_call_exceptions is true, we must
2601 not eliminate a division by zero. */
2602 if (cfun->can_throw_non_call_exceptions
2603 && (vr1.type != VR_RANGE
2604 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2605 {
2606 set_value_range_to_varying (vr);
2607 return;
2608 }
2609
2610 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2611 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2612 include 0. */
2613 if (vr0.type == VR_RANGE
2614 && (vr1.type != VR_RANGE
2615 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2616 {
2617 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2618 int cmp;
2619
2620 min = NULL_TREE;
2621 max = NULL_TREE;
2622 if (TYPE_UNSIGNED (expr_type)
2623 || value_range_nonnegative_p (&vr1))
2624 {
2625 /* For unsigned division or when divisor is known
2626 to be non-negative, the range has to cover
2627 all numbers from 0 to max for positive max
2628 and all numbers from min to 0 for negative min. */
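/* E.g. dividing [4, 20] by a non-negative divisor that may be
   zero, such as [0, 9], widens the result to [0, 20].  */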
2629 cmp = compare_values (vr0.max, zero);
2630 if (cmp == -1)
2631 max = zero;
2632 else if (cmp == 0 || cmp == 1)
2633 max = vr0.max;
2634 else
2635 type = VR_VARYING;
2636 cmp = compare_values (vr0.min, zero);
2637 if (cmp == 1)
2638 min = zero;
2639 else if (cmp == 0 || cmp == -1)
2640 min = vr0.min;
2641 else
2642 type = VR_VARYING;
2643 }
2644 else
2645 {
2646 /* Otherwise the range is -max .. max or min .. -min
2647 depending on which bound is bigger in absolute value,
2648 as the division can change the sign. */
2649 abs_extent_range (vr, vr0.min, vr0.max);
2650 return;
2651 }
2652 if (type == VR_VARYING)
2653 {
2654 set_value_range_to_varying (vr);
2655 return;
2656 }
2657 }
2658 else
2659 {
2660 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2661 return;
2662 }
2663 }
2664 else if (code == TRUNC_MOD_EXPR)
2665 {
2666 if (vr1.type != VR_RANGE
2667 || range_includes_zero_p (vr1.min, vr1.max) != 0
2668 || vrp_val_is_min (vr1.min))
2669 {
2670 set_value_range_to_varying (vr);
2671 return;
2672 }
2673 type = VR_RANGE;
2674 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
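/* For instance, for x % [3, 5] this gives max = 5 - 1 = 4, and
   min is 0 for a non-negative dividend or -4 otherwise.  */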
2675 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2676 if (tree_int_cst_lt (max, vr1.max))
2677 max = vr1.max;
2678 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2679 /* If the dividend is non-negative the modulus will be
2680 non-negative as well. */
2681 if (TYPE_UNSIGNED (expr_type)
2682 || value_range_nonnegative_p (&vr0))
2683 min = build_int_cst (TREE_TYPE (max), 0);
2684 else
2685 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
2686 }
2687 else if (code == MINUS_EXPR)
2688 {
2689 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2690 VR_VARYING. It would take more effort to compute a precise
2691 range for such a case. For example, if we have op0 == 1 and
2692 op1 == 1 with their ranges both being ~[0,0], we would have
2693 op0 - op1 == 0, so we cannot claim that the difference is in
2694 ~[0,0]. Note that we are guaranteed to have
2695 vr0.type == vr1.type at this point. */
2696 if (vr0.type == VR_ANTI_RANGE)
2697 {
2698 set_value_range_to_varying (vr);
2699 return;
2700 }
2701
2702 /* For MINUS_EXPR, apply the operation to the opposite ends of
2703 each range. */
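/* E.g. [1, 5] - [2, 10] gives min = 1 - 10 = -9 and
   max = 5 - 2 = 3, i.e. the range [-9, 3].  */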
2704 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2705 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2706 }
2707 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2708 {
2709 bool int_cst_range0, int_cst_range1;
2710 double_int may_be_nonzero0, may_be_nonzero1;
2711 double_int must_be_nonzero0, must_be_nonzero1;
2712
2713 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2714 &must_be_nonzero0);
2715 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2716 &must_be_nonzero1);
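/* As an illustration, for vr0 = [4, 7] and vr1 = [6, 6] the
   BIT_AND_EXPR case below yields min = 4 & 6 = 4 and
   max = 7 & 6 = 6, i.e. [4, 6], which covers all possible results.  */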
2717
2718 type = VR_RANGE;
2719 if (code == BIT_AND_EXPR)
2720 {
2721 double_int dmax;
2722 min = double_int_to_tree (expr_type,
2723 double_int_and (must_be_nonzero0,
2724 must_be_nonzero1));
2725 dmax = double_int_and (may_be_nonzero0, may_be_nonzero1);
2726 /* If both input ranges contain only negative values we can
2727 truncate the result range maximum to the minimum of the
2728 input range maxima. */
2729 if (int_cst_range0 && int_cst_range1
2730 && tree_int_cst_sgn (vr0.max) < 0
2731 && tree_int_cst_sgn (vr1.max) < 0)
2732 {
2733 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2734 TYPE_UNSIGNED (expr_type));
2735 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2736 TYPE_UNSIGNED (expr_type));
2737 }
2738 /* If either input range contains only non-negative values
2739 we can truncate the result range maximum to the respective
2740 maximum of the input range. */
2741 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2742 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2743 TYPE_UNSIGNED (expr_type));
2744 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2745 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2746 TYPE_UNSIGNED (expr_type));
2747 max = double_int_to_tree (expr_type, dmax);
2748 }
2749 else if (code == BIT_IOR_EXPR)
2750 {
2751 double_int dmin;
2752 max = double_int_to_tree (expr_type,
2753 double_int_ior (may_be_nonzero0,
2754 may_be_nonzero1));
2755 dmin = double_int_ior (must_be_nonzero0, must_be_nonzero1);
2756 /* If the input ranges contain only non-negative values we can
2757 truncate the minimum of the result range to the maximum
2758 of the input range minima. */
2759 if (int_cst_range0 && int_cst_range1
2760 && tree_int_cst_sgn (vr0.min) >= 0
2761 && tree_int_cst_sgn (vr1.min) >= 0)
2762 {
2763 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2764 TYPE_UNSIGNED (expr_type));
2765 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2766 TYPE_UNSIGNED (expr_type));
2767 }
2768 /* If either input range contains only negative values
2769 we can truncate the minimum of the result range to the
2770 respective minimum of the input range. */
2771 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2772 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2773 TYPE_UNSIGNED (expr_type));
2774 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2775 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2776 TYPE_UNSIGNED (expr_type));
2777 min = double_int_to_tree (expr_type, dmin);
2778 }
2779 else if (code == BIT_XOR_EXPR)
2780 {
2781 double_int result_zero_bits, result_one_bits;
2782 result_zero_bits
2783 = double_int_ior (double_int_and (must_be_nonzero0,
2784 must_be_nonzero1),
2785 double_int_not
2786 (double_int_ior (may_be_nonzero0,
2787 may_be_nonzero1)));
2788 result_one_bits
2789 = double_int_ior (double_int_and
2790 (must_be_nonzero0,
2791 double_int_not (may_be_nonzero1)),
2792 double_int_and
2793 (must_be_nonzero1,
2794 double_int_not (may_be_nonzero0)));
2795 max = double_int_to_tree (expr_type,
2796 double_int_not (result_zero_bits));
2797 min = double_int_to_tree (expr_type, result_one_bits);
2798 /* If the range has all positive or all negative values the
2799 result is better than VARYING. */
2800 if (tree_int_cst_sgn (min) < 0
2801 || tree_int_cst_sgn (max) >= 0)
2802 ;
2803 else
2804 max = min = NULL_TREE;
2805 }
2806 }
2807 else
2808 gcc_unreachable ();
2809
2810 /* If either MIN or MAX overflowed, then set the resulting range to
2811 VARYING. But we do accept an overflow infinity
2812 representation. */
2813 if (min == NULL_TREE
2814 || !is_gimple_min_invariant (min)
2815 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2816 || max == NULL_TREE
2817 || !is_gimple_min_invariant (max)
2818 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2819 {
2820 set_value_range_to_varying (vr);
2821 return;
2822 }
2823
2824 /* We punt if:
2825 1) [-INF, +INF]
2826 2) [-INF, +-INF(OVF)]
2827 3) [+-INF(OVF), +INF]
2828 4) [+-INF(OVF), +-INF(OVF)]
2829 We learn nothing when we have INF and INF(OVF) on both sides.
2830 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2831 overflow. */
2832 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2833 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2834 {
2835 set_value_range_to_varying (vr);
2836 return;
2837 }
2838
2839 cmp = compare_values (min, max);
2840 if (cmp == -2 || cmp == 1)
2841 {
2842 /* If the new range has its limits swapped around (MIN > MAX),
2843 then the operation caused one of them to wrap around, mark
2844 the new range VARYING. */
2845 set_value_range_to_varying (vr);
2846 }
2847 else
2848 set_value_range (vr, type, min, max, NULL);
2849 }
2850
2851 /* Extract range information from a binary expression OP0 CODE OP1 based on
2852 the ranges of each of its operands with resulting type EXPR_TYPE.
2853 The resulting range is stored in *VR. */
2854
2855 static void
2856 extract_range_from_binary_expr (value_range_t *vr,
2857 enum tree_code code,
2858 tree expr_type, tree op0, tree op1)
2859 {
2860 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2861 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2862
2863 /* Get value ranges for each operand. For constant operands, create
2864 a new value range with the operand to simplify processing. */
2865 if (TREE_CODE (op0) == SSA_NAME)
2866 vr0 = *(get_value_range (op0));
2867 else if (is_gimple_min_invariant (op0))
2868 set_value_range_to_value (&vr0, op0, NULL);
2869 else
2870 set_value_range_to_varying (&vr0);
2871
2872 if (TREE_CODE (op1) == SSA_NAME)
2873 vr1 = *(get_value_range (op1));
2874 else if (is_gimple_min_invariant (op1))
2875 set_value_range_to_value (&vr1, op1, NULL);
2876 else
2877 set_value_range_to_varying (&vr1);
2878
2879 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
2880 }
2881
2882 /* Extract range information from a unary operation CODE based on
2883 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
2884 The resulting range is stored in *VR. */
2885
2886 static void
2887 extract_range_from_unary_expr_1 (value_range_t *vr,
2888 enum tree_code code, tree type,
2889 value_range_t *vr0_, tree op0_type)
2890 {
2891 value_range_t vr0 = *vr0_;
2892
2893 /* VRP only operates on integral and pointer types. */
2894 if (!(INTEGRAL_TYPE_P (op0_type)
2895 || POINTER_TYPE_P (op0_type))
2896 || !(INTEGRAL_TYPE_P (type)
2897 || POINTER_TYPE_P (type)))
2898 {
2899 set_value_range_to_varying (vr);
2900 return;
2901 }
2902
2903 /* If VR0 is UNDEFINED, so is the result. */
2904 if (vr0.type == VR_UNDEFINED)
2905 {
2906 set_value_range_to_undefined (vr);
2907 return;
2908 }
2909
2910 if (CONVERT_EXPR_CODE_P (code))
2911 {
2912 tree inner_type = op0_type;
2913 tree outer_type = type;
2914
2915 /* If the expression evaluates to a pointer, we are only interested in
2916 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2917 if (POINTER_TYPE_P (type))
2918 {
2919 if (range_is_nonnull (&vr0))
2920 set_value_range_to_nonnull (vr, type);
2921 else if (range_is_null (&vr0))
2922 set_value_range_to_null (vr, type);
2923 else
2924 set_value_range_to_varying (vr);
2925 return;
2926 }
2927
2928 /* If VR0 is varying and we increase the type precision, assume
2929 a full range for the following transformation. */
2930 if (vr0.type == VR_VARYING
2931 && INTEGRAL_TYPE_P (inner_type)
2932 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2933 {
2934 vr0.type = VR_RANGE;
2935 vr0.min = TYPE_MIN_VALUE (inner_type);
2936 vr0.max = TYPE_MAX_VALUE (inner_type);
2937 }
2938
2939 /* If VR0 is a constant range or anti-range and the conversion is
2940 not truncating we can convert the min and max values and
2941 canonicalize the resulting range. Otherwise we can do the
2942 conversion if the size of the range is less than what the
2943 precision of the target type can represent and the range is
2944 not an anti-range. */
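/* For instance, converting the int range [10, 20] to an 8-bit
   unsigned char yields [10, 20], while [0, 300] is rejected below
   because its size does not fit in the 8-bit target precision.  */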
2945 if ((vr0.type == VR_RANGE
2946 || vr0.type == VR_ANTI_RANGE)
2947 && TREE_CODE (vr0.min) == INTEGER_CST
2948 && TREE_CODE (vr0.max) == INTEGER_CST
2949 && (!is_overflow_infinity (vr0.min)
2950 || (vr0.type == VR_RANGE
2951 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2952 && needs_overflow_infinity (outer_type)
2953 && supports_overflow_infinity (outer_type)))
2954 && (!is_overflow_infinity (vr0.max)
2955 || (vr0.type == VR_RANGE
2956 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2957 && needs_overflow_infinity (outer_type)
2958 && supports_overflow_infinity (outer_type)))
2959 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2960 || (vr0.type == VR_RANGE
2961 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2962 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2963 size_int (TYPE_PRECISION (outer_type)))))))
2964 {
2965 tree new_min, new_max;
2966 if (is_overflow_infinity (vr0.min))
2967 new_min = negative_overflow_infinity (outer_type);
2968 else
2969 new_min = force_fit_type_double (outer_type,
2970 tree_to_double_int (vr0.min),
2971 0, false);
2972 if (is_overflow_infinity (vr0.max))
2973 new_max = positive_overflow_infinity (outer_type);
2974 else
2975 new_max = force_fit_type_double (outer_type,
2976 tree_to_double_int (vr0.max),
2977 0, false);
2978 set_and_canonicalize_value_range (vr, vr0.type,
2979 new_min, new_max, NULL);
2980 return;
2981 }
2982
2983 set_value_range_to_varying (vr);
2984 return;
2985 }
2986 else if (code == NEGATE_EXPR)
2987 {
2988 /* -X is simply 0 - X, so re-use existing code that also handles
2989 anti-ranges fine. */
2990 value_range_t zero = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2991 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
2992 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
2993 return;
2994 }
2995 else if (code == ABS_EXPR)
2996 {
2997 tree min, max;
2998 int cmp;
2999
3000 /* Pass through vr0 in the easy cases. */
3001 if (TYPE_UNSIGNED (type)
3002 || value_range_nonnegative_p (&vr0))
3003 {
3004 copy_value_range (vr, &vr0);
3005 return;
3006 }
3007
3008 /* For the remaining varying or symbolic ranges we can't do anything
3009 useful. */
3010 if (vr0.type == VR_VARYING
3011 || symbolic_range_p (&vr0))
3012 {
3013 set_value_range_to_varying (vr);
3014 return;
3015 }
3016
3017 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3018 useful range. */
3019 if (!TYPE_OVERFLOW_UNDEFINED (type)
3020 && ((vr0.type == VR_RANGE
3021 && vrp_val_is_min (vr0.min))
3022 || (vr0.type == VR_ANTI_RANGE
3023 && !vrp_val_is_min (vr0.min))))
3024 {
3025 set_value_range_to_varying (vr);
3026 return;
3027 }
3028
3029 /* ABS_EXPR may flip the range around, if the original range
3030 included negative values. */
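/* For instance, ABS_EXPR of [-7, -2] becomes [2, 7] after the
   swap below, while ABS_EXPR of [-3, 5] becomes [0, 5] because
   the original range contains zero.  */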
3031 if (is_overflow_infinity (vr0.min))
3032 min = positive_overflow_infinity (type);
3033 else if (!vrp_val_is_min (vr0.min))
3034 min = fold_unary_to_constant (code, type, vr0.min);
3035 else if (!needs_overflow_infinity (type))
3036 min = TYPE_MAX_VALUE (type);
3037 else if (supports_overflow_infinity (type))
3038 min = positive_overflow_infinity (type);
3039 else
3040 {
3041 set_value_range_to_varying (vr);
3042 return;
3043 }
3044
3045 if (is_overflow_infinity (vr0.max))
3046 max = positive_overflow_infinity (type);
3047 else if (!vrp_val_is_min (vr0.max))
3048 max = fold_unary_to_constant (code, type, vr0.max);
3049 else if (!needs_overflow_infinity (type))
3050 max = TYPE_MAX_VALUE (type);
3051 else if (supports_overflow_infinity (type)
3052 /* We shouldn't generate [+INF, +INF] as set_value_range
3053 doesn't like this and ICEs. */
3054 && !is_positive_overflow_infinity (min))
3055 max = positive_overflow_infinity (type);
3056 else
3057 {
3058 set_value_range_to_varying (vr);
3059 return;
3060 }
3061
3062 cmp = compare_values (min, max);
3063
3064 /* If a VR_ANTI_RANGE contains zero, then we have
3065 ~[-INF, min(MIN, MAX)]. */
3066 if (vr0.type == VR_ANTI_RANGE)
3067 {
3068 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3069 {
3070 /* Take the lower of the two values. */
3071 if (cmp != 1)
3072 max = min;
3073
3074 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3075 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3076 flag_wrapv is set and the original anti-range doesn't include
3077 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3078 if (TYPE_OVERFLOW_WRAPS (type))
3079 {
3080 tree type_min_value = TYPE_MIN_VALUE (type);
3081
3082 min = (vr0.min != type_min_value
3083 ? int_const_binop (PLUS_EXPR, type_min_value,
3084 integer_one_node)
3085 : type_min_value);
3086 }
3087 else
3088 {
3089 if (overflow_infinity_range_p (&vr0))
3090 min = negative_overflow_infinity (type);
3091 else
3092 min = TYPE_MIN_VALUE (type);
3093 }
3094 }
3095 else
3096 {
3097 /* All else has failed, so create the range [0, INF], even for
3098 flag_wrapv since TYPE_MIN_VALUE is in the original
3099 anti-range. */
3100 vr0.type = VR_RANGE;
3101 min = build_int_cst (type, 0);
3102 if (needs_overflow_infinity (type))
3103 {
3104 if (supports_overflow_infinity (type))
3105 max = positive_overflow_infinity (type);
3106 else
3107 {
3108 set_value_range_to_varying (vr);
3109 return;
3110 }
3111 }
3112 else
3113 max = TYPE_MAX_VALUE (type);
3114 }
3115 }
3116
3117 /* If the range contains zero then we know that the minimum value in the
3118 range will be zero. */
3119 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3120 {
3121 if (cmp == 1)
3122 max = min;
3123 min = build_int_cst (type, 0);
3124 }
3125 else
3126 {
3127 /* If the range was reversed, swap MIN and MAX. */
3128 if (cmp == 1)
3129 {
3130 tree t = min;
3131 min = max;
3132 max = t;
3133 }
3134 }
3135
3136 cmp = compare_values (min, max);
3137 if (cmp == -2 || cmp == 1)
3138 {
3139 /* If the new range has its limits swapped around (MIN > MAX),
3140 then the operation caused one of them to wrap around, mark
3141 the new range VARYING. */
3142 set_value_range_to_varying (vr);
3143 }
3144 else
3145 set_value_range (vr, vr0.type, min, max, NULL);
3146 return;
3147 }
3148 else if (code == BIT_NOT_EXPR)
3149 {
3150 /* ~X is simply -1 - X, so re-use existing code that also handles
3151 anti-ranges fine. */
3152 value_range_t minusone = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3153 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3154 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3155 type, &minusone, &vr0);
3156 return;
3157 }
3158 else if (code == PAREN_EXPR)
3159 {
3160 copy_value_range (vr, &vr0);
3161 return;
3162 }
3163
3164 /* For unhandled operations fall back to varying. */
3165 set_value_range_to_varying (vr);
3166 return;
3167 }
3168
3169
3170 /* Extract range information from a unary expression CODE OP0 based on
3171 the range of its operand with resulting type TYPE.
3172 The resulting range is stored in *VR. */
3173
3174 static void
3175 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3176 tree type, tree op0)
3177 {
3178 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3179
3180 /* Get value ranges for the operand. For constant operands, create
3181 a new value range with the operand to simplify processing. */
3182 if (TREE_CODE (op0) == SSA_NAME)
3183 vr0 = *(get_value_range (op0));
3184 else if (is_gimple_min_invariant (op0))
3185 set_value_range_to_value (&vr0, op0, NULL);
3186 else
3187 set_value_range_to_varying (&vr0);
3188
3189 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3190 }
3191
3192
3193 /* Extract range information from a conditional expression STMT based on
3194 the ranges of each of its operands and the expression code. */
3195
3196 static void
3197 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3198 {
3199 tree op0, op1;
3200 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3201 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3202
3203 /* Get value ranges for each operand. For constant operands, create
3204 a new value range with the operand to simplify processing. */
3205 op0 = gimple_assign_rhs2 (stmt);
3206 if (TREE_CODE (op0) == SSA_NAME)
3207 vr0 = *(get_value_range (op0));
3208 else if (is_gimple_min_invariant (op0))
3209 set_value_range_to_value (&vr0, op0, NULL);
3210 else
3211 set_value_range_to_varying (&vr0);
3212
3213 op1 = gimple_assign_rhs3 (stmt);
3214 if (TREE_CODE (op1) == SSA_NAME)
3215 vr1 = *(get_value_range (op1));
3216 else if (is_gimple_min_invariant (op1))
3217 set_value_range_to_value (&vr1, op1, NULL);
3218 else
3219 set_value_range_to_varying (&vr1);
3220
3221 /* The resulting value range is the union of the operand ranges. */
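/* E.g. if the two arms have the ranges [1, 5] and [10, 20], the
   meet is the enclosing range [1, 20].  */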
3222 copy_value_range (vr, &vr0);
3223 vrp_meet (vr, &vr1);
3224 }
3225
3226
3227 /* Extract range information from a comparison expression EXPR based
3228 on the range of its operand and the expression code. */
3229
3230 static void
3231 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3232 tree type, tree op0, tree op1)
3233 {
3234 bool sop = false;
3235 tree val;
3236
3237 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3238 NULL);
3239
3240 /* A disadvantage of using a special infinity as an overflow
3241 representation is that we lose the ability to record overflow
3242 when we don't have an infinity. So we have to ignore a result
3243 which relies on overflow. */
3244
3245 if (val && !is_overflow_infinity (val) && !sop)
3246 {
3247 /* Since this expression was found on the RHS of an assignment,
3248 its type may be different from _Bool. Convert VAL to EXPR's
3249 type. */
3250 val = fold_convert (type, val);
3251 if (is_gimple_min_invariant (val))
3252 set_value_range_to_value (vr, val, vr->equiv);
3253 else
3254 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3255 }
3256 else
3257 /* The result of a comparison is always true or false. */
3258 set_value_range_to_truthvalue (vr, type);
3259 }
3260
3261 /* Try to derive a nonnegative or nonzero range out of STMT relying
3262 primarily on generic routines in fold in conjunction with range data.
3263 Store the result in *VR. */
3264
3265 static void
3266 extract_range_basic (value_range_t *vr, gimple stmt)
3267 {
3268 bool sop = false;
3269 tree type = gimple_expr_type (stmt);
3270
3271 /* If the call is __builtin_constant_p and the argument is a
3272 function parameter resolve it to false. This avoids bogus
3273 array bound warnings.
3274 ??? We could do this as early as inlining is finished. */
3275 if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P))
3276 {
3277 tree arg = gimple_call_arg (stmt, 0);
3278 if (TREE_CODE (arg) == SSA_NAME
3279 && SSA_NAME_IS_DEFAULT_DEF (arg)
3280 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3281 set_value_range_to_null (vr, type);
3282 }
3283 else if (INTEGRAL_TYPE_P (type)
3284 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3285 set_value_range_to_nonnegative (vr, type,
3286 sop || stmt_overflow_infinity (stmt));
3287 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3288 && !sop)
3289 set_value_range_to_nonnull (vr, type);
3290 else
3291 set_value_range_to_varying (vr);
3292 }
3293
3294
3295 /* Try to compute a useful range out of assignment STMT and store it
3296 in *VR. */
3297
3298 static void
3299 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3300 {
3301 enum tree_code code = gimple_assign_rhs_code (stmt);
3302
3303 if (code == ASSERT_EXPR)
3304 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3305 else if (code == SSA_NAME)
3306 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3307 else if (TREE_CODE_CLASS (code) == tcc_binary)
3308 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3309 gimple_expr_type (stmt),
3310 gimple_assign_rhs1 (stmt),
3311 gimple_assign_rhs2 (stmt));
3312 else if (TREE_CODE_CLASS (code) == tcc_unary)
3313 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3314 gimple_expr_type (stmt),
3315 gimple_assign_rhs1 (stmt));
3316 else if (code == COND_EXPR)
3317 extract_range_from_cond_expr (vr, stmt);
3318 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3319 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3320 gimple_expr_type (stmt),
3321 gimple_assign_rhs1 (stmt),
3322 gimple_assign_rhs2 (stmt));
3323 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3324 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3325 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3326 else
3327 set_value_range_to_varying (vr);
3328
3329 if (vr->type == VR_VARYING)
3330 extract_range_basic (vr, stmt);
3331 }
3332
3333 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3334 would be profitable to adjust VR using scalar evolution information
3335 for VAR. If so, update VR with the new limits. */
3336
3337 static void
3338 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3339 gimple stmt, tree var)
3340 {
3341 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3342 enum ev_direction dir;
3343
3344 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3345 better opportunities than a regular range, but I'm not sure. */
3346 if (vr->type == VR_ANTI_RANGE)
3347 return;
3348
3349 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3350
3351 /* Like in PR19590, scev can return a constant function. */
3352 if (is_gimple_min_invariant (chrec))
3353 {
3354 set_value_range_to_value (vr, chrec, vr->equiv);
3355 return;
3356 }
3357
3358 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3359 return;
3360
3361 init = initial_condition_in_loop_num (chrec, loop->num);
3362 tem = op_with_constant_singleton_value_range (init);
3363 if (tem)
3364 init = tem;
3365 step = evolution_part_in_loop_num (chrec, loop->num);
3366 tem = op_with_constant_singleton_value_range (step);
3367 if (tem)
3368 step = tem;
3369
3370 /* If STEP is symbolic, we can't know whether INIT will be the
3371 minimum or maximum value in the range. Also, unless INIT is
3372 a simple expression, compare_values and possibly other functions
3373 in tree-vrp won't be able to handle it. */
3374 if (step == NULL_TREE
3375 || !is_gimple_min_invariant (step)
3376 || !valid_value_p (init))
3377 return;
3378
3379 dir = scev_direction (chrec);
3380 if (/* Do not adjust ranges if we do not know whether the iv increases
3381 or decreases, ... */
3382 dir == EV_DIR_UNKNOWN
3383 /* ... or if it may wrap. */
3384 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3385 true))
3386 return;
3387
3388 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3389 negative_overflow_infinity and positive_overflow_infinity,
3390 because we have concluded that the loop probably does not
3391 wrap. */
3392
3393 type = TREE_TYPE (var);
3394 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3395 tmin = lower_bound_in_type (type, type);
3396 else
3397 tmin = TYPE_MIN_VALUE (type);
3398 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3399 tmax = upper_bound_in_type (type, type);
3400 else
3401 tmax = TYPE_MAX_VALUE (type);
3402
3403 /* Try to use estimated number of iterations for the loop to constrain the
3404 final value in the evolution. */
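/* For instance, if INIT is 0, STEP is 4 and the loop is estimated
   to iterate at most 10 times, the final value cannot exceed
   0 + 4 * 10 = 40, so TMAX can be tightened to 40 for an
   increasing induction variable.  */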
3405 if (TREE_CODE (step) == INTEGER_CST
3406 && is_gimple_val (init)
3407 && (TREE_CODE (init) != SSA_NAME
3408 || get_value_range (init)->type == VR_RANGE))
3409 {
3410 double_int nit;
3411
3412 if (estimated_loop_iterations (loop, true, &nit))
3413 {
3414 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3415 double_int dtmp;
3416 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3417 int overflow = 0;
3418
3419 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
3420 unsigned_p, &overflow);
3421 /* If the multiplication overflowed we can't do a meaningful
3422 adjustment. Likewise if the result doesn't fit in the type
3423 of the induction variable. For a signed type we have to
3424 check whether the result has the expected signedness, which
3425 is that of the step, since the number of iterations is unsigned. */
3426 if (!overflow
3427 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3428 && (unsigned_p
3429 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3430 {
3431 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3432 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3433 TREE_TYPE (init), init, tem);
3434 /* Likewise if the addition did. */
3435 if (maxvr.type == VR_RANGE)
3436 {
3437 tmin = maxvr.min;
3438 tmax = maxvr.max;
3439 }
3440 }
3441 }
3442 }
3443
3444 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3445 {
3446 min = tmin;
3447 max = tmax;
3448
3449 /* For VARYING or UNDEFINED ranges, just about anything we get
3450 from scalar evolutions should be better. */
3451
3452 if (dir == EV_DIR_DECREASES)
3453 max = init;
3454 else
3455 min = init;
3456
3457 /* If we would create an invalid range, then just assume we
3458 know absolutely nothing. This may be over-conservative,
3459 but it's clearly safe, and should happen only in unreachable
3460 parts of code, or for invalid programs. */
3461 if (compare_values (min, max) == 1)
3462 return;
3463
3464 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3465 }
3466 else if (vr->type == VR_RANGE)
3467 {
3468 min = vr->min;
3469 max = vr->max;
3470
3471 if (dir == EV_DIR_DECREASES)
3472 {
3473 /* INIT is the maximum value. If INIT is lower than VR->MAX
3474 but no smaller than VR->MIN, set VR->MAX to INIT. */
3475 if (compare_values (init, max) == -1)
3476 max = init;
3477
3478 /* According to the loop information, the variable does not
3479 overflow. If we think it does, probably because of an
3480 overflow due to arithmetic on a different INF value,
3481 reset now. */
3482 if (is_negative_overflow_infinity (min)
3483 || compare_values (min, tmin) == -1)
3484 min = tmin;
3485
3486 }
3487 else
3488 {
3489 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3490 if (compare_values (init, min) == 1)
3491 min = init;
3492
3493 if (is_positive_overflow_infinity (max)
3494 || compare_values (tmax, max) == -1)
3495 max = tmax;
3496 }
3497
3498 /* If we just created an invalid range with the minimum
3499 greater than the maximum, we fail conservatively.
3500 This should happen only in unreachable
3501 parts of code, or for invalid programs. */
3502 if (compare_values (min, max) == 1)
3503 return;
3504
3505 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3506 }
3507 }
3508
3509 /* Return true if VAR may overflow at STMT. This checks any available
3510 loop information to see if we can determine that VAR does not
3511 overflow. */
3512
3513 static bool
3514 vrp_var_may_overflow (tree var, gimple stmt)
3515 {
3516 struct loop *l;
3517 tree chrec, init, step;
3518
3519 if (current_loops == NULL)
3520 return true;
3521
3522 l = loop_containing_stmt (stmt);
3523 if (l == NULL
3524 || !loop_outer (l))
3525 return true;
3526
3527 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3528 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3529 return true;
3530
3531 init = initial_condition_in_loop_num (chrec, l->num);
3532 step = evolution_part_in_loop_num (chrec, l->num);
3533
3534 if (step == NULL_TREE
3535 || !is_gimple_min_invariant (step)
3536 || !valid_value_p (init))
3537 return true;
3538
3539 /* If we get here, we know something useful about VAR based on the
3540 loop information. If it wraps, it may overflow. */
3541
3542 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3543 true))
3544 return true;
3545
3546 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3547 {
3548 print_generic_expr (dump_file, var, 0);
3549 fprintf (dump_file, ": loop information indicates it does not overflow\n");
3550 }
3551
3552 return false;
3553 }
3554
3555
3556 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3557
3558 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3559 all the values in the ranges.
3560
3561 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3562
3563 - Return NULL_TREE if it is not always possible to determine the
3564 value of the comparison.
3565
3566 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3567 overflow infinity was used in the test. */
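/* For instance (illustrative values only), [3, 5] < [10, 20] always
holds and yields boolean_true_node, whereas [3, 15] < [10, 20] cannot
be decided at compile time and yields NULL_TREE. */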
3568
3569
3570 static tree
3571 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3572 bool *strict_overflow_p)
3573 {
3574 /* VARYING or UNDEFINED ranges cannot be compared. */
3575 if (vr0->type == VR_VARYING
3576 || vr0->type == VR_UNDEFINED
3577 || vr1->type == VR_VARYING
3578 || vr1->type == VR_UNDEFINED)
3579 return NULL_TREE;
3580
3581 /* Anti-ranges need to be handled separately. */
3582 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3583 {
3584 /* If both are anti-ranges, then we cannot compute any
3585 comparison. */
3586 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3587 return NULL_TREE;
3588
3589 /* These comparisons are never statically computable. */
3590 if (comp == GT_EXPR
3591 || comp == GE_EXPR
3592 || comp == LT_EXPR
3593 || comp == LE_EXPR)
3594 return NULL_TREE;
3595
3596 /* Equality can be computed only between a range and an
3597 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3598 if (vr0->type == VR_RANGE)
3599 {
3600 /* To simplify processing, make VR0 the anti-range. */
3601 value_range_t *tmp = vr0;
3602 vr0 = vr1;
3603 vr1 = tmp;
3604 }
3605
3606 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3607
3608 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3609 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3610 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3611
3612 return NULL_TREE;
3613 }
3614
3615 if (!usable_range_p (vr0, strict_overflow_p)
3616 || !usable_range_p (vr1, strict_overflow_p))
3617 return NULL_TREE;
3618
3619 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3620 operands around and change the comparison code. */
3621 if (comp == GT_EXPR || comp == GE_EXPR)
3622 {
3623 value_range_t *tmp;
3624 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3625 tmp = vr0;
3626 vr0 = vr1;
3627 vr1 = tmp;
3628 }
3629
3630 if (comp == EQ_EXPR)
3631 {
3632 /* Equality may only be computed if both ranges represent
3633 exactly one value. */
3634 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3635 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3636 {
3637 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3638 strict_overflow_p);
3639 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3640 strict_overflow_p);
3641 if (cmp_min == 0 && cmp_max == 0)
3642 return boolean_true_node;
3643 else if (cmp_min != -2 && cmp_max != -2)
3644 return boolean_false_node;
3645 }
3646 /* If the ranges [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] are disjoint, then V0 != V1. */
3647 else if (compare_values_warnv (vr0->min, vr1->max,
3648 strict_overflow_p) == 1
3649 || compare_values_warnv (vr1->min, vr0->max,
3650 strict_overflow_p) == 1)
3651 return boolean_false_node;
3652
3653 return NULL_TREE;
3654 }
3655 else if (comp == NE_EXPR)
3656 {
3657 int cmp1, cmp2;
3658
3659 /* If VR0 is completely to the left or completely to the right
3660 of VR1, they are always different. Notice that we need to
3661 make sure that both comparisons yield similar results to
3662 avoid comparing values that cannot be compared at
3663 compile-time. */
3664 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3665 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3666 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3667 return boolean_true_node;
3668
3669 /* If VR0 and VR1 represent a single value and are identical,
3670 return false. */
3671 else if (compare_values_warnv (vr0->min, vr0->max,
3672 strict_overflow_p) == 0
3673 && compare_values_warnv (vr1->min, vr1->max,
3674 strict_overflow_p) == 0
3675 && compare_values_warnv (vr0->min, vr1->min,
3676 strict_overflow_p) == 0
3677 && compare_values_warnv (vr0->max, vr1->max,
3678 strict_overflow_p) == 0)
3679 return boolean_false_node;
3680
3681 /* Otherwise, they may or may not be different. */
3682 else
3683 return NULL_TREE;
3684 }
3685 else if (comp == LT_EXPR || comp == LE_EXPR)
3686 {
3687 int tst;
3688
3689 /* If VR0 is to the left of VR1, return true. */
3690 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3691 if ((comp == LT_EXPR && tst == -1)
3692 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3693 {
3694 if (overflow_infinity_range_p (vr0)
3695 || overflow_infinity_range_p (vr1))
3696 *strict_overflow_p = true;
3697 return boolean_true_node;
3698 }
3699
3700 /* If VR0 is to the right of VR1, return false. */
3701 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3702 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3703 || (comp == LE_EXPR && tst == 1))
3704 {
3705 if (overflow_infinity_range_p (vr0)
3706 || overflow_infinity_range_p (vr1))
3707 *strict_overflow_p = true;
3708 return boolean_false_node;
3709 }
3710
3711 /* Otherwise, we don't know. */
3712 return NULL_TREE;
3713 }
3714
3715 gcc_unreachable ();
3716 }
3717
3718
3719 /* Given a value range VR, a value VAL and a comparison code COMP, return
3720 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3721 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3722 always returns false. Return NULL_TREE if it is not always
3723 possible to determine the value of the comparison. Also set
3724 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3725 infinity was used in the test. */
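/* For instance (illustrative values only), [10, 20] >= 5 is always
true, [10, 20] == 5 is always false, and [10, 20] < 15 cannot be
decided at compile time, so NULL_TREE is returned. */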
3726
3727 static tree
3728 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3729 bool *strict_overflow_p)
3730 {
3731 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3732 return NULL_TREE;
3733
3734 /* Anti-ranges need to be handled separately. */
3735 if (vr->type == VR_ANTI_RANGE)
3736 {
3737 /* For anti-ranges, the only predicates that we can compute at
3738 compile time are equality and inequality. */
3739 if (comp == GT_EXPR
3740 || comp == GE_EXPR
3741 || comp == LT_EXPR
3742 || comp == LE_EXPR)
3743 return NULL_TREE;
3744
3745 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3746 if (value_inside_range (val, vr->min, vr->max) == 1)
3747 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3748
3749 return NULL_TREE;
3750 }
3751
3752 if (!usable_range_p (vr, strict_overflow_p))
3753 return NULL_TREE;
3754
3755 if (comp == EQ_EXPR)
3756 {
3757 /* EQ_EXPR may only be computed if VR represents exactly
3758 one value. */
3759 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3760 {
3761 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3762 if (cmp == 0)
3763 return boolean_true_node;
3764 else if (cmp == -1 || cmp == 1 || cmp == 2)
3765 return boolean_false_node;
3766 }
3767 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3768 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3769 return boolean_false_node;
3770
3771 return NULL_TREE;
3772 }
3773 else if (comp == NE_EXPR)
3774 {
3775 /* If VAL is not inside VR, then they are always different. */
3776 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3777 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3778 return boolean_true_node;
3779
3780 /* If VR represents exactly one value equal to VAL, then return
3781 false. */
3782 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3783 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3784 return boolean_false_node;
3785
3786 /* Otherwise, they may or may not be different. */
3787 return NULL_TREE;
3788 }
3789 else if (comp == LT_EXPR || comp == LE_EXPR)
3790 {
3791 int tst;
3792
3793 /* If VR is to the left of VAL, return true. */
3794 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3795 if ((comp == LT_EXPR && tst == -1)
3796 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3797 {
3798 if (overflow_infinity_range_p (vr))
3799 *strict_overflow_p = true;
3800 return boolean_true_node;
3801 }
3802
3803 /* If VR is to the right of VAL, return false. */
3804 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3805 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3806 || (comp == LE_EXPR && tst == 1))
3807 {
3808 if (overflow_infinity_range_p (vr))
3809 *strict_overflow_p = true;
3810 return boolean_false_node;
3811 }
3812
3813 /* Otherwise, we don't know. */
3814 return NULL_TREE;
3815 }
3816 else if (comp == GT_EXPR || comp == GE_EXPR)
3817 {
3818 int tst;
3819
3820 /* If VR is to the right of VAL, return true. */
3821 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3822 if ((comp == GT_EXPR && tst == 1)
3823 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3824 {
3825 if (overflow_infinity_range_p (vr))
3826 *strict_overflow_p = true;
3827 return boolean_true_node;
3828 }
3829
3830 /* If VR is to the left of VAL, return false. */
3831 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3832 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3833 || (comp == GE_EXPR && tst == -1))
3834 {
3835 if (overflow_infinity_range_p (vr))
3836 *strict_overflow_p = true;
3837 return boolean_false_node;
3838 }
3839
3840 /* Otherwise, we don't know. */
3841 return NULL_TREE;
3842 }
3843
3844 gcc_unreachable ();
3845 }
3846
3847
3848 /* Debugging dumps. */
3849
3850 void dump_value_range (FILE *, value_range_t *);
3851 void debug_value_range (value_range_t *);
3852 void dump_all_value_ranges (FILE *);
3853 void debug_all_value_ranges (void);
3854 void dump_vr_equiv (FILE *, bitmap);
3855 void debug_vr_equiv (bitmap);
3856
3857
3858 /* Dump value range VR to FILE. */
3859
3860 void
3861 dump_value_range (FILE *file, value_range_t *vr)
3862 {
3863 if (vr == NULL)
3864 fprintf (file, "[]");
3865 else if (vr->type == VR_UNDEFINED)
3866 fprintf (file, "UNDEFINED");
3867 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3868 {
3869 tree type = TREE_TYPE (vr->min);
3870
3871 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3872
3873 if (is_negative_overflow_infinity (vr->min))
3874 fprintf (file, "-INF(OVF)");
3875 else if (INTEGRAL_TYPE_P (type)
3876 && !TYPE_UNSIGNED (type)
3877 && vrp_val_is_min (vr->min))
3878 fprintf (file, "-INF");
3879 else
3880 print_generic_expr (file, vr->min, 0);
3881
3882 fprintf (file, ", ");
3883
3884 if (is_positive_overflow_infinity (vr->max))
3885 fprintf (file, "+INF(OVF)");
3886 else if (INTEGRAL_TYPE_P (type)
3887 && vrp_val_is_max (vr->max))
3888 fprintf (file, "+INF");
3889 else
3890 print_generic_expr (file, vr->max, 0);
3891
3892 fprintf (file, "]");
3893
3894 if (vr->equiv)
3895 {
3896 bitmap_iterator bi;
3897 unsigned i, c = 0;
3898
3899 fprintf (file, " EQUIVALENCES: { ");
3900
3901 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3902 {
3903 print_generic_expr (file, ssa_name (i), 0);
3904 fprintf (file, " ");
3905 c++;
3906 }
3907
3908 fprintf (file, "} (%u elements)", c);
3909 }
3910 }
3911 else if (vr->type == VR_VARYING)
3912 fprintf (file, "VARYING");
3913 else
3914 fprintf (file, "INVALID RANGE");
3915 }
3916
3917
3918 /* Dump value range VR to stderr. */
3919
3920 DEBUG_FUNCTION void
3921 debug_value_range (value_range_t *vr)
3922 {
3923 dump_value_range (stderr, vr);
3924 fprintf (stderr, "\n");
3925 }
3926
3927
3928 /* Dump value ranges of all SSA_NAMEs to FILE. */
3929
3930 void
3931 dump_all_value_ranges (FILE *file)
3932 {
3933 size_t i;
3934
3935 for (i = 0; i < num_vr_values; i++)
3936 {
3937 if (vr_value[i])
3938 {
3939 print_generic_expr (file, ssa_name (i), 0);
3940 fprintf (file, ": ");
3941 dump_value_range (file, vr_value[i]);
3942 fprintf (file, "\n");
3943 }
3944 }
3945
3946 fprintf (file, "\n");
3947 }
3948
3949
3950 /* Dump all value ranges to stderr. */
3951
3952 DEBUG_FUNCTION void
3953 debug_all_value_ranges (void)
3954 {
3955 dump_all_value_ranges (stderr);
3956 }
3957
3958
3959 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3960 create a new SSA name N and return the assertion assignment
3961 'V = ASSERT_EXPR <V, V OP W>'. */
3962
3963 static gimple
3964 build_assert_expr_for (tree cond, tree v)
3965 {
3966 tree n;
3967 gimple assertion;
3968
3969 gcc_assert (TREE_CODE (v) == SSA_NAME);
3970 n = duplicate_ssa_name (v, NULL);
3971
3972 if (COMPARISON_CLASS_P (cond))
3973 {
3974 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3975 assertion = gimple_build_assign (n, a);
3976 }
3977 else if (TREE_CODE (cond) == SSA_NAME)
3978 {
3979 /* Given V, build the assignment N = true. */
3980 gcc_assert (v == cond);
3981 assertion = gimple_build_assign (n, boolean_true_node);
3982 }
3983 else
3984 gcc_unreachable ();
3985
3986 SSA_NAME_DEF_STMT (n) = assertion;
3987
3988 /* The new ASSERT_EXPR creates a new SSA name that replaces the
3989 operand of the ASSERT_EXPR. Register the new name and the old one
3990 in the replacement table so that we can fix the SSA web after
3991 adding all the ASSERT_EXPRs. */
3992 register_new_name_mapping (n, v);
3993
3994 return assertion;
3995 }
3996
3997
3998 /* Return true if STMT is a GIMPLE_COND predicate involving floating
3999 point values. */
4000
4001 static inline bool
4002 fp_predicate (gimple stmt)
4003 {
4004 GIMPLE_CHECK (stmt, GIMPLE_COND);
4005
4006 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4007 }
4008
4009
4010 /* If the range of values taken by OP can be inferred after STMT executes,
4011 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4012 describes the inferred range. Return true if a range could be
4013 inferred. */
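/* For instance (illustrative GIMPLE), for the store '*p_1 = x_2' we can
infer p_1 != 0 (COMP_CODE_P set to NE_EXPR, VAL_P set to the null
pointer constant), provided -fdelete-null-pointer-checks is in effect
and the statement cannot throw. */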
4014
4015 static bool
4016 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4017 {
4018 *val_p = NULL_TREE;
4019 *comp_code_p = ERROR_MARK;
4020
4021 /* Do not attempt to infer anything in names that flow through
4022 abnormal edges. */
4023 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4024 return false;
4025
4026 /* Similarly, don't infer anything from statements that may throw
4027 exceptions. */
4028 if (stmt_could_throw_p (stmt))
4029 return false;
4030
4031 /* If STMT is the last statement of a basic block with no
4032 successors, there is no point inferring anything about any of its
4033 operands. We would not be able to find a proper insertion point
4034 for the assertion, anyway. */
4035 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4036 return false;
4037
4038 /* We can only assume that a pointer dereference will yield
4039 non-NULL if -fdelete-null-pointer-checks is enabled. */
4040 if (flag_delete_null_pointer_checks
4041 && POINTER_TYPE_P (TREE_TYPE (op))
4042 && gimple_code (stmt) != GIMPLE_ASM)
4043 {
4044 unsigned num_uses, num_loads, num_stores;
4045
4046 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4047 if (num_loads + num_stores > 0)
4048 {
4049 *val_p = build_int_cst (TREE_TYPE (op), 0);
4050 *comp_code_p = NE_EXPR;
4051 return true;
4052 }
4053 }
4054
4055 return false;
4056 }
4057
4058
4059 void dump_asserts_for (FILE *, tree);
4060 void debug_asserts_for (tree);
4061 void dump_all_asserts (FILE *);
4062 void debug_all_asserts (void);
4063
4064 /* Dump all the registered assertions for NAME to FILE. */
4065
4066 void
4067 dump_asserts_for (FILE *file, tree name)
4068 {
4069 assert_locus_t loc;
4070
4071 fprintf (file, "Assertions to be inserted for ");
4072 print_generic_expr (file, name, 0);
4073 fprintf (file, "\n");
4074
4075 loc = asserts_for[SSA_NAME_VERSION (name)];
4076 while (loc)
4077 {
4078 fprintf (file, "\t");
4079 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4080 fprintf (file, "\n\tBB #%d", loc->bb->index);
4081 if (loc->e)
4082 {
4083 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4084 loc->e->dest->index);
4085 dump_edge_info (file, loc->e, 0);
4086 }
4087 fprintf (file, "\n\tPREDICATE: ");
4088 print_generic_expr (file, name, 0);
4089 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4090 print_generic_expr (file, loc->val, 0);
4091 fprintf (file, "\n\n");
4092 loc = loc->next;
4093 }
4094
4095 fprintf (file, "\n");
4096 }
4097
4098
4099 /* Dump all the registered assertions for NAME to stderr. */
4100
4101 DEBUG_FUNCTION void
4102 debug_asserts_for (tree name)
4103 {
4104 dump_asserts_for (stderr, name);
4105 }
4106
4107
4108 /* Dump all the registered assertions for all the names to FILE. */
4109
4110 void
4111 dump_all_asserts (FILE *file)
4112 {
4113 unsigned i;
4114 bitmap_iterator bi;
4115
4116 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4117 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4118 dump_asserts_for (file, ssa_name (i));
4119 fprintf (file, "\n");
4120 }
4121
4122
4123 /* Dump all the registered assertions for all the names to stderr. */
4124
4125 DEBUG_FUNCTION void
4126 debug_all_asserts (void)
4127 {
4128 dump_all_asserts (stderr);
4129 }
4130
4131
4132 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4133 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4134 E->DEST, then register this location as a possible insertion point
4135 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4136
4137 BB, E and SI provide the exact insertion point for the new
4138 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4139 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4140 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4141 must not be NULL. */
4142
4143 static void
4144 register_new_assert_for (tree name, tree expr,
4145 enum tree_code comp_code,
4146 tree val,
4147 basic_block bb,
4148 edge e,
4149 gimple_stmt_iterator si)
4150 {
4151 assert_locus_t n, loc, last_loc;
4152 basic_block dest_bb;
4153
4154 gcc_checking_assert (bb == NULL || e == NULL);
4155
4156 if (e == NULL)
4157 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4158 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4159
4160 /* Never build an assert comparing against an integer constant with
4161 TREE_OVERFLOW set. This confuses our undefined overflow warning
4162 machinery. */
4163 if (TREE_CODE (val) == INTEGER_CST
4164 && TREE_OVERFLOW (val))
4165 val = build_int_cst_wide (TREE_TYPE (val),
4166 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4167
4168 /* The new assertion A will be inserted at BB or E. We need to
4169 determine if the new location is dominated by a previously
4170 registered location for A. If we are doing an edge insertion,
4171 assume that A will be inserted at E->DEST. Note that this is not
4172 necessarily true.
4173
4174 If E is a critical edge, it will be split. But even if E is
4175 split, the new block will dominate the same set of blocks that
4176 E->DEST dominates.
4177
4178 The reverse, however, is not true: blocks dominated by E->DEST
4179 will not be dominated by the new block created to split E. So,
4180 if the insertion location is on a critical edge, we will not use
4181 the new location to move another assertion previously registered
4182 at a block dominated by E->DEST. */
4183 dest_bb = (bb) ? bb : e->dest;
4184
4185 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4186 VAL at a block dominating DEST_BB, then we don't need to insert a new
4187 one. Similarly, if the same assertion already exists at a block
4188 dominated by DEST_BB and the new location is not on a critical
4189 edge, then update the existing location for the assertion (i.e.,
4190 move the assertion up in the dominance tree).
4191
4192 Note, this is implemented as a simple linked list because there
4193 should not be more than a handful of assertions registered per
4194 name. If this becomes a performance problem, a table hashed by
4195 COMP_CODE and VAL could be implemented. */
4196 loc = asserts_for[SSA_NAME_VERSION (name)];
4197 last_loc = loc;
4198 while (loc)
4199 {
4200 if (loc->comp_code == comp_code
4201 && (loc->val == val
4202 || operand_equal_p (loc->val, val, 0))
4203 && (loc->expr == expr
4204 || operand_equal_p (loc->expr, expr, 0)))
4205 {
4206 /* If the assertion NAME COMP_CODE VAL has already been
4207 registered at a basic block that dominates DEST_BB, then
4208 we don't need to insert the same assertion again. Note
4209 that we don't check strict dominance here to avoid
4210 replicating the same assertion inside the same basic
4211 block more than once (e.g., when a pointer is
4212 dereferenced several times inside a block).
4213
4214 Edge insertions are an exception to this rule. If the
4215 new assertion is to be inserted on edge E, then it will
4216 dominate all the other insertions that we may want to
4217 insert in DEST_BB. So, if we are doing an edge
4218 insertion, don't do this dominance check. */
4219 if (e == NULL
4220 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4221 return;
4222
4223 /* Otherwise, if E is not a critical edge and DEST_BB
4224 dominates the existing location for the assertion, move
4225 the assertion up in the dominance tree by updating its
4226 location information. */
4227 if ((e == NULL || !EDGE_CRITICAL_P (e))
4228 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4229 {
4230 loc->bb = dest_bb;
4231 loc->e = e;
4232 loc->si = si;
4233 return;
4234 }
4235 }
4236
4237 /* Update the last node of the list and move to the next one. */
4238 last_loc = loc;
4239 loc = loc->next;
4240 }
4241
4242 /* If we didn't find an assertion already registered for
4243 NAME COMP_CODE VAL, add a new one at the end of the list of
4244 assertions associated with NAME. */
4245 n = XNEW (struct assert_locus_d);
4246 n->bb = dest_bb;
4247 n->e = e;
4248 n->si = si;
4249 n->comp_code = comp_code;
4250 n->val = val;
4251 n->expr = expr;
4252 n->next = NULL;
4253
4254 if (last_loc)
4255 last_loc->next = n;
4256 else
4257 asserts_for[SSA_NAME_VERSION (name)] = n;
4258
4259 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4260 }
4261
4262 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4263 Extract a suitable test code and value and store them into *CODE_P and
4264 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4265
4266 If no extraction was possible, return FALSE, otherwise return TRUE.
4267
4268 If INVERT is true, then we invert the result stored into *CODE_P. */
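/* For instance (illustrative GIMPLE), for the predicate '5 < x_3' with
NAME == x_3 we return GT_EXPR and 5, normalizing it to 'x_3 > 5';
with INVERT true the result would be LE_EXPR, i.e. 'x_3 <= 5'. */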
4269
4270 static bool
4271 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4272 tree cond_op0, tree cond_op1,
4273 bool invert, enum tree_code *code_p,
4274 tree *val_p)
4275 {
4276 enum tree_code comp_code;
4277 tree val;
4278
4279 /* We have a comparison of the form NAME COMP VAL
4280 or VAL COMP NAME. */
4281 if (name == cond_op1)
4282 {
4283 /* If the predicate is of the form VAL COMP NAME, flip
4284 COMP around because we need to register NAME as the
4285 first operand in the predicate. */
4286 comp_code = swap_tree_comparison (cond_code);
4287 val = cond_op0;
4288 }
4289 else
4290 {
4291 /* The comparison is of the form NAME COMP VAL, so the
4292 comparison code remains unchanged. */
4293 comp_code = cond_code;
4294 val = cond_op1;
4295 }
4296
4297 /* Invert the comparison code as necessary. */
4298 if (invert)
4299 comp_code = invert_tree_comparison (comp_code, 0);
4300
4301 /* VRP does not handle float types. */
4302 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4303 return false;
4304
4305 /* Do not register always-false predicates.
4306 FIXME: this works around a limitation in fold() when dealing with
4307 enumerations. Given 'enum { N1, N2 } x;', fold will not
4308 fold 'if (x > N2)' to 'if (0)'. */
4309 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4310 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4311 {
4312 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4313 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4314
4315 if (comp_code == GT_EXPR
4316 && (!max
4317 || compare_values (val, max) == 0))
4318 return false;
4319
4320 if (comp_code == LT_EXPR
4321 && (!min
4322 || compare_values (val, min) == 0))
4323 return false;
4324 }
4325 *code_p = comp_code;
4326 *val_p = val;
4327 return true;
4328 }
4329
4330 /* Try to register an edge assertion for SSA name NAME on edge E for
4331 the condition COND contributing to the conditional jump pointed to by BSI.
4332 Invert the condition COND if INVERT is true.
4333 Return true if an assertion for NAME could be registered. */
4334
4335 static bool
4336 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4337 enum tree_code cond_code,
4338 tree cond_op0, tree cond_op1, bool invert)
4339 {
4340 tree val;
4341 enum tree_code comp_code;
4342 bool retval = false;
4343
4344 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4345 cond_op0,
4346 cond_op1,
4347 invert, &comp_code, &val))
4348 return false;
4349
4350 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4351 reachable from E. */
4352 if (live_on_edge (e, name)
4353 && !has_single_use (name))
4354 {
4355 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4356 retval = true;
4357 }
4358
4359 /* In the case of NAME <= CST and NAME being defined as
4360 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4361 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4362 This catches range and anti-range tests. */
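/* For instance (illustrative constants), given
NAME2 = ...;                         (signed int)
NAME = (unsigned int) NAME2 + 10;
if (NAME <= 19) ...
the test is equivalent to the range test -10 <= NAME2 <= 9, so we
register an assert for NAME2 against the rebuilt expression
(unsigned int) NAME2 + 10. */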
4363 if ((comp_code == LE_EXPR
4364 || comp_code == GT_EXPR)
4365 && TREE_CODE (val) == INTEGER_CST
4366 && TYPE_UNSIGNED (TREE_TYPE (val)))
4367 {
4368 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4369 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4370
4371 /* Extract CST2 from the (optional) addition. */
4372 if (is_gimple_assign (def_stmt)
4373 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4374 {
4375 name2 = gimple_assign_rhs1 (def_stmt);
4376 cst2 = gimple_assign_rhs2 (def_stmt);
4377 if (TREE_CODE (name2) == SSA_NAME
4378 && TREE_CODE (cst2) == INTEGER_CST)
4379 def_stmt = SSA_NAME_DEF_STMT (name2);
4380 }
4381
4382 /* Extract NAME2 from the (optional) sign-changing cast. */
4383 if (gimple_assign_cast_p (def_stmt))
4384 {
4385 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4386 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4387 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4388 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4389 name3 = gimple_assign_rhs1 (def_stmt);
4390 }
4391
4392 /* If name3 is used later, create an ASSERT_EXPR for it. */
4393 if (name3 != NULL_TREE
4394 && TREE_CODE (name3) == SSA_NAME
4395 && (cst2 == NULL_TREE
4396 || TREE_CODE (cst2) == INTEGER_CST)
4397 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4398 && live_on_edge (e, name3)
4399 && !has_single_use (name3))
4400 {
4401 tree tmp;
4402
4403 /* Build an expression for the range test. */
4404 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4405 if (cst2 != NULL_TREE)
4406 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4407
4408 if (dump_file)
4409 {
4410 fprintf (dump_file, "Adding assert for ");
4411 print_generic_expr (dump_file, name3, 0);
4412 fprintf (dump_file, " from ");
4413 print_generic_expr (dump_file, tmp, 0);
4414 fprintf (dump_file, "\n");
4415 }
4416
4417 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4418
4419 retval = true;
4420 }
4421
4422 /* If name2 is used later, create an ASSERT_EXPR for it. */
4423 if (name2 != NULL_TREE
4424 && TREE_CODE (name2) == SSA_NAME
4425 && TREE_CODE (cst2) == INTEGER_CST
4426 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4427 && live_on_edge (e, name2)
4428 && !has_single_use (name2))
4429 {
4430 tree tmp;
4431
4432 /* Build an expression for the range test. */
4433 tmp = name2;
4434 if (TREE_TYPE (name) != TREE_TYPE (name2))
4435 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4436 if (cst2 != NULL_TREE)
4437 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4438
4439 if (dump_file)
4440 {
4441 fprintf (dump_file, "Adding assert for ");
4442 print_generic_expr (dump_file, name2, 0);
4443 fprintf (dump_file, " from ");
4444 print_generic_expr (dump_file, tmp, 0);
4445 fprintf (dump_file, "\n");
4446 }
4447
4448 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4449
4450 retval = true;
4451 }
4452 }
4453
4454 return retval;
4455 }
4456
4457 /* OP is an operand of a truth value expression which is known to have
4458 a particular value. Register any asserts for OP and for any
4459 operands in OP's defining statement.
4460
4461 If CODE is EQ_EXPR, then we want to register OP is zero (false);
4462 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
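/* For instance (illustrative GIMPLE), if OP is defined as
'OP = a_1 < b_2' and CODE is NE_EXPR (OP is known to be true), we can
register the assertion 'a_1 < b_2'; if OP is defined as
'OP = x_1 & y_2' and CODE is NE_EXPR, both x_1 and y_2 must be
nonzero. */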
4463
4464 static bool
4465 register_edge_assert_for_1 (tree op, enum tree_code code,
4466 edge e, gimple_stmt_iterator bsi)
4467 {
4468 bool retval = false;
4469 gimple op_def;
4470 tree val;
4471 enum tree_code rhs_code;
4472
4473 /* We only care about SSA_NAMEs. */
4474 if (TREE_CODE (op) != SSA_NAME)
4475 return false;
4476
4477 /* We know that OP will have a zero or nonzero value. If OP is used
4478 more than once go ahead and register an assert for OP.
4479
4480 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4481 it will always be set for OP (because OP is used in a COND_EXPR in
4482 the subgraph). */
4483 if (!has_single_use (op))
4484 {
4485 val = build_int_cst (TREE_TYPE (op), 0);
4486 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4487 retval = true;
4488 }
4489
4490 /* Now look at how OP is set. If it's set from a comparison,
4491 a truth operation or some bit operations, then we may be able
4492 to register information about the operands of that assignment. */
4493 op_def = SSA_NAME_DEF_STMT (op);
4494 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4495 return retval;
4496
4497 rhs_code = gimple_assign_rhs_code (op_def);
4498
4499 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4500 {
4501 bool invert = (code == EQ_EXPR);
4502 tree op0 = gimple_assign_rhs1 (op_def);
4503 tree op1 = gimple_assign_rhs2 (op_def);
4504
4505 if (TREE_CODE (op0) == SSA_NAME)
4506 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4507 invert);
4508 if (TREE_CODE (op1) == SSA_NAME)
4509 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4510 invert);
4511 }
4512 else if ((code == NE_EXPR
4513 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
4514 || (code == EQ_EXPR
4515 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
4516 {
4517 /* Recurse on each operand. */
4518 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4519 code, e, bsi);
4520 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4521 code, e, bsi);
4522 }
4523 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
4524 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
4525 {
4526 /* Recurse, flipping CODE. */
4527 code = invert_tree_comparison (code, false);
4528 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4529 code, e, bsi);
4530 }
4531 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4532 {
4533 /* Recurse through the copy. */
4534 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4535 code, e, bsi);
4536 }
4537 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4538 {
4539 /* Recurse through the type conversion, unless it is a narrowing
4540 conversion or conversion from non-integral type. */
4541 tree rhs = gimple_assign_rhs1 (op_def);
4542 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
4543 && (TYPE_PRECISION (TREE_TYPE (rhs))
4544 <= TYPE_PRECISION (TREE_TYPE (op))))
4545 retval |= register_edge_assert_for_1 (rhs, code, e, bsi);
4546 }
4547
4548 return retval;
4549 }
4550
4551 /* Try to register an edge assertion for SSA name NAME on edge E for
4552 the condition COND contributing to the conditional jump pointed to by SI.
4553 Return true if an assertion for NAME could be registered. */
4554
4555 static bool
4556 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4557 enum tree_code cond_code, tree cond_op0,
4558 tree cond_op1)
4559 {
4560 tree val;
4561 enum tree_code comp_code;
4562 bool retval = false;
4563 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4564
4565 /* Do not attempt to infer anything in names that flow through
4566 abnormal edges. */
4567 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4568 return false;
4569
4570 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4571 cond_op0, cond_op1,
4572 is_else_edge,
4573 &comp_code, &val))
4574 return false;
4575
4576 /* Register ASSERT_EXPRs for name. */
4577 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4578 cond_op1, is_else_edge);
4579
4580
4581 /* If COND is effectively an equality test of an SSA_NAME against
4582 the value zero or one, then we may be able to assert values
4583 for SSA_NAMEs which flow into COND. */
4584
4585 /* In the case of NAME == 1 or NAME != 0, if the defining statement
4586 of NAME is a BIT_AND_EXPR, we can assert that both operands of the
4587 BIT_AND_EXPR have a nonzero value. */
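/* For instance (illustrative GIMPLE), for 'x_3 = a_1 & b_2' followed by
'if (x_3 != 0)', both a_1 and b_2 must be nonzero on the true edge. */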
4588 if (((comp_code == EQ_EXPR && integer_onep (val))
4589 || (comp_code == NE_EXPR && integer_zerop (val))))
4590 {
4591 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4592
4593 if (is_gimple_assign (def_stmt)
4594 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
4595 {
4596 tree op0 = gimple_assign_rhs1 (def_stmt);
4597 tree op1 = gimple_assign_rhs2 (def_stmt);
4598 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4599 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4600 }
4601 }
4602
4603 /* In the case of NAME == 0 or NAME != 1, if the defining statement
4604 of NAME is a BIT_IOR_EXPR, we can assert that both operands of the
4605 BIT_IOR_EXPR have a zero value. */
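/* For instance (illustrative GIMPLE), for 'x_3 = a_1 | b_2' followed by
'if (x_3 == 0)', both a_1 and b_2 must be zero on the true edge. */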
4606 if (((comp_code == EQ_EXPR && integer_zerop (val))
4607 || (comp_code == NE_EXPR && integer_onep (val))))
4608 {
4609 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4610
4611 /* For BIT_IOR_EXPR, both operands are necessarily zero only if
4612 NAME == 0, or if the type precision is one. */
4613 if (is_gimple_assign (def_stmt)
4614 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
4615 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
4616 || comp_code == EQ_EXPR)))
4617 {
4618 tree op0 = gimple_assign_rhs1 (def_stmt);
4619 tree op1 = gimple_assign_rhs2 (def_stmt);
4620 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4621 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4622 }
4623 }
4624
4625 return retval;
4626 }
4627
4628
4629 /* Determine whether the outgoing edges of BB should receive an
4630 ASSERT_EXPR for each of the operands of BB's LAST statement.
4631 The last statement of BB must be a COND_EXPR.
4632
4633 If any of the sub-graphs rooted at BB have an interesting use of
4634 the predicate operands, an assert location node is added to the
4635 list of assertions for the corresponding operands. */
4636
4637 static bool
4638 find_conditional_asserts (basic_block bb, gimple last)
4639 {
4640 bool need_assert;
4641 gimple_stmt_iterator bsi;
4642 tree op;
4643 edge_iterator ei;
4644 edge e;
4645 ssa_op_iter iter;
4646
4647 need_assert = false;
4648 bsi = gsi_for_stmt (last);
4649
4650 /* Look for uses of the operands in each of the sub-graphs
4651 rooted at BB. We need to check each of the outgoing edges
4652 separately, so that we know what kind of ASSERT_EXPR to
4653 insert. */
4654 FOR_EACH_EDGE (e, ei, bb->succs)
4655 {
4656 if (e->dest == bb)
4657 continue;
4658
4659 /* Register the necessary assertions for each operand in the
4660 conditional predicate. */
4661 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4662 {
4663 need_assert |= register_edge_assert_for (op, e, bsi,
4664 gimple_cond_code (last),
4665 gimple_cond_lhs (last),
4666 gimple_cond_rhs (last));
4667 }
4668 }
4669
4670 return need_assert;
4671 }
4672
4673 struct case_info
4674 {
4675 tree expr;
4676 basic_block bb;
4677 };
4678
4679 /* Compare two case labels sorting first by the destination bb index
4680 and then by the case value. */
4681
4682 static int
4683 compare_case_labels (const void *p1, const void *p2)
4684 {
4685 const struct case_info *ci1 = (const struct case_info *) p1;
4686 const struct case_info *ci2 = (const struct case_info *) p2;
4687 int idx1 = ci1->bb->index;
4688 int idx2 = ci2->bb->index;
4689
4690 if (idx1 < idx2)
4691 return -1;
4692 else if (idx1 == idx2)
4693 {
4694 /* Make sure the default label is first in a group. */
4695 if (!CASE_LOW (ci1->expr))
4696 return -1;
4697 else if (!CASE_LOW (ci2->expr))
4698 return 1;
4699 else
4700 return tree_int_cst_compare (CASE_LOW (ci1->expr),
4701 CASE_LOW (ci2->expr));
4702 }
4703 else
4704 return 1;
4705 }
4706
4707 /* Determine whether the outgoing edges of BB should receive an
4708 ASSERT_EXPR for each of the operands of BB's LAST statement.
4709 The last statement of BB must be a SWITCH_EXPR.
4710
4711 If any of the sub-graphs rooted at BB have an interesting use of
4712 the predicate operands, an assert location node is added to the
4713 list of assertions for the corresponding operands. */
4714
4715 static bool
4716 find_switch_asserts (basic_block bb, gimple last)
4717 {
4718 bool need_assert;
4719 gimple_stmt_iterator bsi;
4720 tree op;
4721 edge e;
4722 struct case_info *ci;
4723 size_t n = gimple_switch_num_labels (last);
4724 #if GCC_VERSION >= 4000
4725 unsigned int idx;
4726 #else
4727 /* Work around GCC 3.4 bug (PR 37086). */
4728 volatile unsigned int idx;
4729 #endif
4730
4731 need_assert = false;
4732 bsi = gsi_for_stmt (last);
4733 op = gimple_switch_index (last);
4734 if (TREE_CODE (op) != SSA_NAME)
4735 return false;
4736
4737 /* Build a vector of case labels sorted by destination label. */
4738 ci = XNEWVEC (struct case_info, n);
4739 for (idx = 0; idx < n; ++idx)
4740 {
4741 ci[idx].expr = gimple_switch_label (last, idx);
4742 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4743 }
4744 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4745
4746 for (idx = 0; idx < n; ++idx)
4747 {
4748 tree min, max;
4749 tree cl = ci[idx].expr;
4750 basic_block cbb = ci[idx].bb;
4751
4752 min = CASE_LOW (cl);
4753 max = CASE_HIGH (cl);
4754
4755 /* If there are multiple case labels with the same destination
4756 we need to combine them to a single value range for the edge. */
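/* For instance (illustrative values), 'case 3: case 5: case 7:' all
branching to the same block are combined into the single range [3, 7],
and the asserts OP >= 3 and OP <= 7 are registered on that edge. */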
4757 if (idx + 1 < n && cbb == ci[idx + 1].bb)
4758 {
4759 /* Skip labels until the last of the group. */
4760 do {
4761 ++idx;
4762 } while (idx < n && cbb == ci[idx].bb);
4763 --idx;
4764
4765 /* Pick up the maximum of the case label range. */
4766 if (CASE_HIGH (ci[idx].expr))
4767 max = CASE_HIGH (ci[idx].expr);
4768 else
4769 max = CASE_LOW (ci[idx].expr);
4770 }
4771
4772 /* Nothing to do if the range includes the default label until we
4773 can register anti-ranges. */
4774 if (min == NULL_TREE)
4775 continue;
4776
4777 /* Find the edge to register the assert expr on. */
4778 e = find_edge (bb, cbb);
4779
4780 /* Register the necessary assertions for the operand in the
4781 SWITCH_EXPR. */
4782 need_assert |= register_edge_assert_for (op, e, bsi,
4783 max ? GE_EXPR : EQ_EXPR,
4784 op,
4785 fold_convert (TREE_TYPE (op),
4786 min));
4787 if (max)
4788 {
4789 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4790 op,
4791 fold_convert (TREE_TYPE (op),
4792 max));
4793 }
4794 }
4795
4796 XDELETEVEC (ci);
4797 return need_assert;
4798 }
4799
4800
4801 /* Traverse all the statements in block BB looking for statements that
4802 may generate useful assertions for the SSA names in their operand.
4803 If a statement produces a useful assertion A for name N_i, then the
4804 list of assertions already generated for N_i is scanned to
4805 determine if A is actually needed.
4806
4807 If N_i already had the assertion A at a location dominating the
4808 current location, then nothing needs to be done. Otherwise, the
4809 new location for A is recorded instead.
4810
4811 1- For every statement S in BB, all the variables used by S are
4812 added to bitmap FOUND_IN_SUBGRAPH.
4813
4814 2- If statement S uses an operand N in a way that exposes a known
4815 value range for N, then if N was not already generated by an
4816 ASSERT_EXPR, create a new assert location for N. For instance,
4817 if N is a pointer and the statement dereferences it, we can
4818 assume that N is not NULL.
4819
4820 3- COND_EXPRs are a special case of #2. We can derive range
4821 information from the predicate but need to insert different
4822 ASSERT_EXPRs for each of the sub-graphs rooted at the
4823 conditional block. If the last statement of BB is a conditional
4824 expression of the form 'X op Y', then
4825
4826 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4827
4828 b) If the conditional is the only entry point to the sub-graph
4829 corresponding to the THEN_CLAUSE, recurse into it. On
4830 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4831 an ASSERT_EXPR is added for the corresponding variable.
4832
4833 c) Repeat step (b) on the ELSE_CLAUSE.
4834
4835 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4836
4837 For instance,
4838
4839 if (a == 9)
4840 b = a;
4841 else
4842 b = c + 1;
4843
4844 In this case, an assertion on the THEN clause is useful to
4845 determine that 'a' is always 9 on that edge. However, an assertion
4846 on the ELSE clause would be unnecessary.
4847
4848 4- If BB does not end in a conditional expression, then we recurse
4849 into BB's dominator children.
4850
4851 At the end of the recursive traversal, every SSA name will have a
4852 list of locations where ASSERT_EXPRs should be added. When a new
4853 location for name N is found, it is registered by calling
4854 register_new_assert_for. That function keeps track of all the
4855 registered assertions to prevent adding unnecessary assertions.
4856 For instance, if a pointer P_4 is dereferenced more than once in a
4857 dominator tree, only the location dominating all the dereference of
4858 P_4 will receive an ASSERT_EXPR.
4859
4860 If this function returns true, then it means that there are names
4861 for which we need to generate ASSERT_EXPRs. Those assertions are
4862 inserted by process_assert_insertions. */
4863
4864 static bool
4865 find_assert_locations_1 (basic_block bb, sbitmap live)
4866 {
4867 gimple_stmt_iterator si;
4868 gimple last;
4869 gimple phi;
4870 bool need_assert;
4871
4872 need_assert = false;
4873 last = last_stmt (bb);
4874
4875 /* If BB's last statement is a conditional statement involving integer
4876 operands, determine if we need to add ASSERT_EXPRs. */
4877 if (last
4878 && gimple_code (last) == GIMPLE_COND
4879 && !fp_predicate (last)
4880 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4881 need_assert |= find_conditional_asserts (bb, last);
4882
4883 /* If BB's last statement is a switch statement involving integer
4884 operands, determine if we need to add ASSERT_EXPRs. */
4885 if (last
4886 && gimple_code (last) == GIMPLE_SWITCH
4887 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4888 need_assert |= find_switch_asserts (bb, last);
4889
4890 /* Traverse all the statements in BB marking used names and looking
4891 for statements that may infer assertions for their used operands. */
4892 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4893 {
4894 gimple stmt;
4895 tree op;
4896 ssa_op_iter i;
4897
4898 stmt = gsi_stmt (si);
4899
4900 if (is_gimple_debug (stmt))
4901 continue;
4902
4903 /* See if we can derive an assertion for any of STMT's operands. */
4904 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4905 {
4906 tree value;
4907 enum tree_code comp_code;
4908
4909 /* Mark OP in our live bitmap. */
4910 SET_BIT (live, SSA_NAME_VERSION (op));
4911
4912 /* If OP is used in such a way that we can infer a value
4913 range for it, and we don't find a previous assertion for
4914 it, create a new assertion location node for OP. */
4915 if (infer_value_range (stmt, op, &comp_code, &value))
4916 {
4917 /* If we are able to infer a nonzero value range for OP,
4918 then walk backwards through the use-def chain to see if OP
4919 was set via a typecast.
4920
4921 If so, then we can also infer a nonzero value range
4922 for the operand of the NOP_EXPR. */
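/* For instance (illustrative GIMPLE), given
q_2 = (struct A *) p_1;
... = *q_2;
the dereference lets us record both q_2 != 0B and p_1 != 0B,
provided each name has more than a single use. */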
4923 if (comp_code == NE_EXPR && integer_zerop (value))
4924 {
4925 tree t = op;
4926 gimple def_stmt = SSA_NAME_DEF_STMT (t);
4927
4928 while (is_gimple_assign (def_stmt)
4929 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4930 && TREE_CODE
4931 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4932 && POINTER_TYPE_P
4933 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4934 {
4935 t = gimple_assign_rhs1 (def_stmt);
4936 def_stmt = SSA_NAME_DEF_STMT (t);
4937
4938 /* Note we want to register the assert for the
4939 operand of the NOP_EXPR after SI, not after the
4940 conversion. */
4941 if (! has_single_use (t))
4942 {
4943 register_new_assert_for (t, t, comp_code, value,
4944 bb, NULL, si);
4945 need_assert = true;
4946 }
4947 }
4948 }
4949
4950 /* If OP is used only once, namely in this STMT, don't
4951 bother creating an ASSERT_EXPR for it. Such an
4952 ASSERT_EXPR would do nothing but increase compile time. */
4953 if (!has_single_use (op))
4954 {
4955 register_new_assert_for (op, op, comp_code, value,
4956 bb, NULL, si);
4957 need_assert = true;
4958 }
4959 }
4960 }
4961 }
4962
4963 /* Traverse all PHI nodes in BB marking used operands. */
4964 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
4965 {
4966 use_operand_p arg_p;
4967 ssa_op_iter i;
4968 phi = gsi_stmt (si);
4969
4970 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4971 {
4972 tree arg = USE_FROM_PTR (arg_p);
4973 if (TREE_CODE (arg) == SSA_NAME)
4974 SET_BIT (live, SSA_NAME_VERSION (arg));
4975 }
4976 }
4977
4978 return need_assert;
4979 }
4980
4981 /* Do an RPO walk over the function computing SSA name liveness
4982 on-the-fly and deciding on assert expressions to insert.
4983 Returns true if there are assert expressions to be inserted. */
4984
4985 static bool
4986 find_assert_locations (void)
4987 {
4988 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4989 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4990 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4991 int rpo_cnt, i;
4992 bool need_asserts;
4993
4994 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4995 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4996 for (i = 0; i < rpo_cnt; ++i)
4997 bb_rpo[rpo[i]] = i;
4998
4999 need_asserts = false;
5000 for (i = rpo_cnt-1; i >= 0; --i)
5001 {
5002 basic_block bb = BASIC_BLOCK (rpo[i]);
5003 edge e;
5004 edge_iterator ei;
5005
5006 if (!live[rpo[i]])
5007 {
5008 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5009 sbitmap_zero (live[rpo[i]]);
5010 }
5011
5012 /* Process BB and update the live information with uses in
5013 this block. */
5014 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5015
5016 /* Merge liveness into the predecessor blocks and free it. */
5017 if (!sbitmap_empty_p (live[rpo[i]]))
5018 {
5019 int pred_rpo = i;
5020 FOR_EACH_EDGE (e, ei, bb->preds)
5021 {
5022 int pred = e->src->index;
5023 if (e->flags & EDGE_DFS_BACK)
5024 continue;
5025
5026 if (!live[pred])
5027 {
5028 live[pred] = sbitmap_alloc (num_ssa_names);
5029 sbitmap_zero (live[pred]);
5030 }
5031 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5032
5033 if (bb_rpo[pred] < pred_rpo)
5034 pred_rpo = bb_rpo[pred];
5035 }
5036
5037 /* Record the RPO number of the last visited block that needs
5038 live information from this block. */
5039 last_rpo[rpo[i]] = pred_rpo;
5040 }
5041 else
5042 {
5043 sbitmap_free (live[rpo[i]]);
5044 live[rpo[i]] = NULL;
5045 }
5046
5047 /* We can free all successors live bitmaps if all their
5048 predecessors have been visited already. */
5049 FOR_EACH_EDGE (e, ei, bb->succs)
5050 if (last_rpo[e->dest->index] == i
5051 && live[e->dest->index])
5052 {
5053 sbitmap_free (live[e->dest->index]);
5054 live[e->dest->index] = NULL;
5055 }
5056 }
5057
5058 XDELETEVEC (rpo);
5059 XDELETEVEC (bb_rpo);
5060 XDELETEVEC (last_rpo);
5061 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
5062 if (live[i])
5063 sbitmap_free (live[i]);
5064 XDELETEVEC (live);
5065
5066 return need_asserts;
5067 }
5068
5069 /* Create an ASSERT_EXPR for NAME and insert it in the location
5070 indicated by LOC. Return true if we made any edge insertions. */
5071
5072 static bool
5073 process_assert_insertions_for (tree name, assert_locus_t loc)
5074 {
5075 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5076 gimple stmt;
5077 tree cond;
5078 gimple assert_stmt;
5079 edge_iterator ei;
5080 edge e;
5081
5082 /* If we have X <=> X do not insert an assert expr for that. */
5083 if (loc->expr == loc->val)
5084 return false;
5085
5086 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5087 assert_stmt = build_assert_expr_for (cond, name);
5088 if (loc->e)
5089 {
5090 /* We have been asked to insert the assertion on an edge. This
5091 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5092 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5093 || (gimple_code (gsi_stmt (loc->si))
5094 == GIMPLE_SWITCH));
5095
5096 gsi_insert_on_edge (loc->e, assert_stmt);
5097 return true;
5098 }
5099
5100 /* Otherwise, we can insert right after LOC->SI, but only if the
5101 statement is not required to be the last statement in the block. */
5102 stmt = gsi_stmt (loc->si);
5103 if (!stmt_ends_bb_p (stmt))
5104 {
5105 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5106 return false;
5107 }
5108
5109 /* If STMT must be the last statement in BB, we can only insert new
5110 assertions on the non-abnormal edge out of BB. Note that since
5111 STMT is not control flow, there may only be one non-abnormal edge
5112 out of BB. */
5113 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5114 if (!(e->flags & EDGE_ABNORMAL))
5115 {
5116 gsi_insert_on_edge (e, assert_stmt);
5117 return true;
5118 }
5119
5120 gcc_unreachable ();
5121 }
5122
5123
5124 /* Process all the insertions registered for every name N_i registered
5125 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5126 found in ASSERTS_FOR[i]. */
5127
5128 static void
5129 process_assert_insertions (void)
5130 {
5131 unsigned i;
5132 bitmap_iterator bi;
5133 bool update_edges_p = false;
5134 int num_asserts = 0;
5135
5136 if (dump_file && (dump_flags & TDF_DETAILS))
5137 dump_all_asserts (dump_file);
5138
5139 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5140 {
5141 assert_locus_t loc = asserts_for[i];
5142 gcc_assert (loc);
5143
5144 while (loc)
5145 {
5146 assert_locus_t next = loc->next;
5147 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5148 free (loc);
5149 loc = next;
5150 num_asserts++;
5151 }
5152 }
5153
5154 if (update_edges_p)
5155 gsi_commit_edge_inserts ();
5156
5157 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5158 num_asserts);
5159 }
5160
5161
5162 /* Traverse the flowgraph looking for conditional jumps to insert range
5163 expressions. These range expressions are meant to provide information
5164 to optimizations that need to reason in terms of value ranges. They
5165 will not be expanded into RTL. For instance, given:
5166
5167 x = ...
5168 y = ...
5169 if (x < y)
5170 y = x - 2;
5171 else
5172 x = y + 3;
5173
5174 this pass will transform the code into:
5175
5176 x = ...
5177 y = ...
5178 if (x < y)
5179 {
5180 x = ASSERT_EXPR <x, x < y>
5181 y = x - 2
5182 }
5183 else
5184 {
5185 y = ASSERT_EXPR <y, x <= y>
5186 x = y + 3
5187 }
5188
5189 The idea is that once copy and constant propagation have run, other
5190 optimizations will be able to determine what ranges of values can 'x'
5191 take in different paths of the code, simply by checking the reaching
5192 definition of 'x'. */
5193
5194 static void
5195 insert_range_assertions (void)
5196 {
5197 need_assert_for = BITMAP_ALLOC (NULL);
5198 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5199
5200 calculate_dominance_info (CDI_DOMINATORS);
5201
5202 if (find_assert_locations ())
5203 {
5204 process_assert_insertions ();
5205 update_ssa (TODO_update_ssa_no_phi);
5206 }
5207
5208 if (dump_file && (dump_flags & TDF_DETAILS))
5209 {
5210 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5211 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5212 }
5213
5214 free (asserts_for);
5215 BITMAP_FREE (need_assert_for);
5216 }
5217
5218 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible arrays
5219 and "struct" hacks. If VRP can determine that the
5220 array subscript is a constant, check whether it is outside the valid
5221 range. If the array subscript is a range, warn if it does not
5222 overlap with the valid range.
5223 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
5224
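/* As an illustration (a made-up fragment, not from a real testcase),
   suppose VRP has computed the range [10, 20] for the index i_3 used in

     int a[10];
     ... = a[i_3];

   Even the smallest value in i_3's range lies past the last valid
   index 9, so the access would be flagged as "array subscript is
   above array bounds" by the checks below.  */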
5225 static void
5226 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5227 {
5228 value_range_t* vr = NULL;
5229 tree low_sub, up_sub;
5230 tree low_bound, up_bound, up_bound_p1;
5231 tree base;
5232
5233 if (TREE_NO_WARNING (ref))
5234 return;
5235
5236 low_sub = up_sub = TREE_OPERAND (ref, 1);
5237 up_bound = array_ref_up_bound (ref);
5238
5239 /* Cannot check flexible arrays. */
5240 if (!up_bound
5241 || TREE_CODE (up_bound) != INTEGER_CST)
5242 return;
5243
5244 /* Accesses to trailing arrays via pointers may access storage
5245 beyond the type's array bounds. */
5246 base = get_base_address (ref);
5247 if (base && TREE_CODE (base) == MEM_REF)
5248 {
5249 tree cref, next = NULL_TREE;
5250
5251 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5252 return;
5253
5254 cref = TREE_OPERAND (ref, 0);
5255 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5256 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5257 next && TREE_CODE (next) != FIELD_DECL;
5258 next = DECL_CHAIN (next))
5259 ;
5260
5261 /* If this is the last field in a struct type or a field in a
5262 union type do not warn. */
5263 if (!next)
5264 return;
5265 }
5266
5267 low_bound = array_ref_low_bound (ref);
5268 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
5269
5270 if (TREE_CODE (low_sub) == SSA_NAME)
5271 {
5272 vr = get_value_range (low_sub);
5273 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5274 {
5275 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5276 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5277 }
5278 }
5279
5280 if (vr && vr->type == VR_ANTI_RANGE)
5281 {
5282 if (TREE_CODE (up_sub) == INTEGER_CST
5283 && tree_int_cst_lt (up_bound, up_sub)
5284 && TREE_CODE (low_sub) == INTEGER_CST
5285 && tree_int_cst_lt (low_sub, low_bound))
5286 {
5287 warning_at (location, OPT_Warray_bounds,
5288 "array subscript is outside array bounds");
5289 TREE_NO_WARNING (ref) = 1;
5290 }
5291 }
5292 else if (TREE_CODE (up_sub) == INTEGER_CST
5293 && (ignore_off_by_one
5294 ? (tree_int_cst_lt (up_bound, up_sub)
5295 && !tree_int_cst_equal (up_bound_p1, up_sub))
5296 : (tree_int_cst_lt (up_bound, up_sub)
5297 || tree_int_cst_equal (up_bound_p1, up_sub))))
5298 {
5299 warning_at (location, OPT_Warray_bounds,
5300 "array subscript is above array bounds");
5301 TREE_NO_WARNING (ref) = 1;
5302 }
5303 else if (TREE_CODE (low_sub) == INTEGER_CST
5304 && tree_int_cst_lt (low_sub, low_bound))
5305 {
5306 warning_at (location, OPT_Warray_bounds,
5307 "array subscript is below array bounds");
5308 TREE_NO_WARNING (ref) = 1;
5309 }
5310 }
5311
5312 /* Check whether the expression T, located at LOCATION, computes the
5313 address of an ARRAY_REF, and if so call check_array_ref on it. */
5314
5315 static void
5316 search_for_addr_array (tree t, location_t location)
5317 {
5318 while (TREE_CODE (t) == SSA_NAME)
5319 {
5320 gimple g = SSA_NAME_DEF_STMT (t);
5321
5322 if (gimple_code (g) != GIMPLE_ASSIGN)
5323 return;
5324
5325 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5326 != GIMPLE_SINGLE_RHS)
5327 return;
5328
5329 t = gimple_assign_rhs1 (g);
5330 }
5331
5332
5333 /* We are only interested in addresses of ARRAY_REFs. */
5334 if (TREE_CODE (t) != ADDR_EXPR)
5335 return;
5336
5337 /* Check each ARRAY_REF in the reference chain. */
5338 do
5339 {
5340 if (TREE_CODE (t) == ARRAY_REF)
5341 check_array_ref (location, t, true /*ignore_off_by_one*/);
5342
5343 t = TREE_OPERAND (t, 0);
5344 }
5345 while (handled_component_p (t));
5346
5347 if (TREE_CODE (t) == MEM_REF
5348 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5349 && !TREE_NO_WARNING (t))
5350 {
5351 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5352 tree low_bound, up_bound, el_sz;
5353 double_int idx;
5354 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5355 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5356 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5357 return;
5358
5359 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5360 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5361 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5362 if (!low_bound
5363 || TREE_CODE (low_bound) != INTEGER_CST
5364 || !up_bound
5365 || TREE_CODE (up_bound) != INTEGER_CST
5366 || !el_sz
5367 || TREE_CODE (el_sz) != INTEGER_CST)
5368 return;
5369
5370 idx = mem_ref_offset (t);
5371 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5372 if (double_int_scmp (idx, double_int_zero) < 0)
5373 {
5374 warning_at (location, OPT_Warray_bounds,
5375 "array subscript is below array bounds");
5376 TREE_NO_WARNING (t) = 1;
5377 }
5378 else if (double_int_scmp (idx,
5379 double_int_add
5380 (double_int_add
5381 (tree_to_double_int (up_bound),
5382 double_int_neg
5383 (tree_to_double_int (low_bound))),
5384 double_int_one)) > 0)
5385 {
5386 warning_at (location, OPT_Warray_bounds,
5387 "array subscript is above array bounds");
5388 TREE_NO_WARNING (t) = 1;
5389 }
5390 }
5391 }
5392
5393 /* walk_tree() callback that checks if *TP is
5394 an ARRAY_REF inside an ADDR_EXPR (in which case an array
5395 subscript one past the end of the valid range is allowed). Call
5396 check_array_ref for each ARRAY_REF found. The location is
5397 passed in DATA. */
5398
5399 static tree
5400 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5401 {
5402 tree t = *tp;
5403 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5404 location_t location;
5405
5406 if (EXPR_HAS_LOCATION (t))
5407 location = EXPR_LOCATION (t);
5408 else
5409 {
5410 location_t *locp = (location_t *) wi->info;
5411 location = *locp;
5412 }
5413
5414 *walk_subtree = TRUE;
5415
5416 if (TREE_CODE (t) == ARRAY_REF)
5417 check_array_ref (location, t, false /*ignore_off_by_one*/);
5418
5419 if (TREE_CODE (t) == MEM_REF
5420 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5421 search_for_addr_array (TREE_OPERAND (t, 0), location);
5422
5423 if (TREE_CODE (t) == ADDR_EXPR)
5424 *walk_subtree = FALSE;
5425
5426 return NULL_TREE;
5427 }
5428
5429 /* Walk over all statements of all reachable BBs and call check_array_bounds
5430 on them. */
5431
5432 static void
5433 check_all_array_refs (void)
5434 {
5435 basic_block bb;
5436 gimple_stmt_iterator si;
5437
5438 FOR_EACH_BB (bb)
5439 {
5440 edge_iterator ei;
5441 edge e;
5442 bool executable = false;
5443
5444 /* Skip blocks that were found to be unreachable. */
5445 FOR_EACH_EDGE (e, ei, bb->preds)
5446 executable |= !!(e->flags & EDGE_EXECUTABLE);
5447 if (!executable)
5448 continue;
5449
5450 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5451 {
5452 gimple stmt = gsi_stmt (si);
5453 struct walk_stmt_info wi;
5454 if (!gimple_has_location (stmt))
5455 continue;
5456
5457 if (is_gimple_call (stmt))
5458 {
5459 size_t i;
5460 size_t n = gimple_call_num_args (stmt);
5461 for (i = 0; i < n; i++)
5462 {
5463 tree arg = gimple_call_arg (stmt, i);
5464 search_for_addr_array (arg, gimple_location (stmt));
5465 }
5466 }
5467 else
5468 {
5469 memset (&wi, 0, sizeof (wi));
5470 wi.info = CONST_CAST (void *, (const void *)
5471 gimple_location_ptr (stmt));
5472
5473 walk_gimple_op (gsi_stmt (si),
5474 check_array_bounds,
5475 &wi);
5476 }
5477 }
5478 }
5479 }
5480
5481 /* Convert range assertion expressions into the implied copies and
5482 copy propagate away the copies. Doing the trivial copy propagation
5483 here avoids the need to run the full copy propagation pass after
5484 VRP.
5485
5486 FIXME, this will eventually lead to copy propagation removing the
5487 names that had useful range information attached to them. For
5488 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5489 then N_i will have the range [3, +INF].
5490
5491 However, by converting the assertion into the implied copy
5492 operation N_i = N_j, we will then copy-propagate N_j into the uses
5493 of N_i and lose the range information. We may want to hold on to
5494 ASSERT_EXPRs a little while longer as the ranges could be used in
5495 things like jump threading.
5496
5497 The problem with keeping ASSERT_EXPRs around is that passes after
5498 VRP need to handle them appropriately.
5499
5500 Another approach would be to make the range information a first
5501 class property of the SSA_NAME so that it can be queried from
5502 any pass. This is made somewhat more complex by the need for
5503 multiple ranges to be associated with one SSA_NAME. */
5504
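/* As a sketch of the transformation done below, an assertion

     x_4 = ASSERT_EXPR <x_3, x_3 > 3>

   is eliminated by replacing every immediate use of x_4 with x_3 and
   then removing the defining statement altogether.  */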
5505 static void
5506 remove_range_assertions (void)
5507 {
5508 basic_block bb;
5509 gimple_stmt_iterator si;
5510
5511 /* Note that the GSI iterator bump happens at the bottom of the
5512 loop and no bump is necessary if we're removing the statement
5513 referenced by the current GSI. */
5514 FOR_EACH_BB (bb)
5515 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5516 {
5517 gimple stmt = gsi_stmt (si);
5518 gimple use_stmt;
5519
5520 if (is_gimple_assign (stmt)
5521 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5522 {
5523 tree rhs = gimple_assign_rhs1 (stmt);
5524 tree var;
5525 tree cond = fold (ASSERT_EXPR_COND (rhs));
5526 use_operand_p use_p;
5527 imm_use_iterator iter;
5528
5529 gcc_assert (cond != boolean_false_node);
5530
5531 /* Propagate the RHS into every use of the LHS. */
5532 var = ASSERT_EXPR_VAR (rhs);
5533 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5534 gimple_assign_lhs (stmt))
5535 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5536 {
5537 SET_USE (use_p, var);
5538 gcc_assert (TREE_CODE (var) == SSA_NAME);
5539 }
5540
5541 /* And finally, remove the copy, it is not needed. */
5542 gsi_remove (&si, true);
5543 release_defs (stmt);
5544 }
5545 else
5546 gsi_next (&si);
5547 }
5548 }
5549
5550
5551 /* Return true if STMT is interesting for VRP. */
5552
5553 static bool
5554 stmt_interesting_for_vrp (gimple stmt)
5555 {
5556 if (gimple_code (stmt) == GIMPLE_PHI
5557 && is_gimple_reg (gimple_phi_result (stmt))
5558 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5559 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5560 return true;
5561 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5562 {
5563 tree lhs = gimple_get_lhs (stmt);
5564
5565 /* In general, assignments with virtual operands are not useful
5566 for deriving ranges, with the obvious exception of calls to
5567 builtin functions. */
5568 if (lhs && TREE_CODE (lhs) == SSA_NAME
5569 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5570 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5571 && ((is_gimple_call (stmt)
5572 && gimple_call_fndecl (stmt) != NULL_TREE
5573 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
5574 || !gimple_vuse (stmt)))
5575 return true;
5576 }
5577 else if (gimple_code (stmt) == GIMPLE_COND
5578 || gimple_code (stmt) == GIMPLE_SWITCH)
5579 return true;
5580
5581 return false;
5582 }
5583
5584
5585 /* Initialize local data structures for VRP. */
5586
5587 static void
5588 vrp_initialize (void)
5589 {
5590 basic_block bb;
5591
5592 values_propagated = false;
5593 num_vr_values = num_ssa_names;
5594 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
5595 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5596
5597 FOR_EACH_BB (bb)
5598 {
5599 gimple_stmt_iterator si;
5600
5601 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5602 {
5603 gimple phi = gsi_stmt (si);
5604 if (!stmt_interesting_for_vrp (phi))
5605 {
5606 tree lhs = PHI_RESULT (phi);
5607 set_value_range_to_varying (get_value_range (lhs));
5608 prop_set_simulate_again (phi, false);
5609 }
5610 else
5611 prop_set_simulate_again (phi, true);
5612 }
5613
5614 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5615 {
5616 gimple stmt = gsi_stmt (si);
5617
5618 /* If the statement is a control insn, we always want to
5619 simulate it at least once; failing to do so means its
5620 outgoing edges will never get added. */
5621 if (stmt_ends_bb_p (stmt))
5622 prop_set_simulate_again (stmt, true);
5623 else if (!stmt_interesting_for_vrp (stmt))
5624 {
5625 ssa_op_iter i;
5626 tree def;
5627 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5628 set_value_range_to_varying (get_value_range (def));
5629 prop_set_simulate_again (stmt, false);
5630 }
5631 else
5632 prop_set_simulate_again (stmt, true);
5633 }
5634 }
5635 }
5636
5637 /* Return the singleton value of the range for NAME, or NAME itself. */
5638
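/* For example, if NAME has the singleton range [42, 42] the constant
   42 is returned; for a wider or unknown range, NAME itself is
   returned unchanged.  */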
5639 static inline tree
5640 vrp_valueize (tree name)
5641 {
5642 if (TREE_CODE (name) == SSA_NAME)
5643 {
5644 value_range_t *vr = get_value_range (name);
5645 if (vr->type == VR_RANGE
5646 && (vr->min == vr->max
5647 || operand_equal_p (vr->min, vr->max, 0)))
5648 return vr->min;
5649 }
5650 return name;
5651 }
5652
5653 /* Visit the assignment or call STMT. If it produces an interesting
5654 range, record the SSA name in *OUTPUT_P. */
5655
5656 static enum ssa_prop_result
5657 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5658 {
5659 tree def, lhs;
5660 ssa_op_iter iter;
5661 enum gimple_code code = gimple_code (stmt);
5662 lhs = gimple_get_lhs (stmt);
5663
5664 /* We only keep track of ranges in integral and pointer types. */
5665 if (TREE_CODE (lhs) == SSA_NAME
5666 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5667 /* It is valid to have NULL MIN/MAX values on a type. See
5668 build_range_type. */
5669 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5670 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5671 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5672 {
5673 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5674
5675 /* Try folding the statement to a constant first. */
5676 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
5677 if (tem && !is_overflow_infinity (tem))
5678 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
5679 /* Then dispatch to value-range extracting functions. */
5680 else if (code == GIMPLE_CALL)
5681 extract_range_basic (&new_vr, stmt);
5682 else
5683 extract_range_from_assignment (&new_vr, stmt);
5684
5685 if (update_value_range (lhs, &new_vr))
5686 {
5687 *output_p = lhs;
5688
5689 if (dump_file && (dump_flags & TDF_DETAILS))
5690 {
5691 fprintf (dump_file, "Found new range for ");
5692 print_generic_expr (dump_file, lhs, 0);
5693 fprintf (dump_file, ": ");
5694 dump_value_range (dump_file, &new_vr);
5695 fprintf (dump_file, "\n\n");
5696 }
5697
5698 if (new_vr.type == VR_VARYING)
5699 return SSA_PROP_VARYING;
5700
5701 return SSA_PROP_INTERESTING;
5702 }
5703
5704 return SSA_PROP_NOT_INTERESTING;
5705 }
5706
5707 /* Every other statement produces no useful ranges. */
5708 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5709 set_value_range_to_varying (get_value_range (def));
5710
5711 return SSA_PROP_VARYING;
5712 }
5713
5714 /* Helper that gets the value range of the SSA_NAME with version I,
5715 or, if that range is varying or undefined, a symbolic range
5716 containing only the SSA_NAME itself. */
5717
5718 static inline value_range_t
5719 get_vr_for_comparison (int i)
5720 {
5721 value_range_t vr = *get_value_range (ssa_name (i));
5722
5723 /* If name N_i does not have a valid range, use N_i as its own
5724 range. This allows us to compare against names that may
5725 have N_i in their ranges. */
5726 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5727 {
5728 vr.type = VR_RANGE;
5729 vr.min = ssa_name (i);
5730 vr.max = ssa_name (i);
5731 }
5732
5733 return vr;
5734 }
5735
5736 /* Compare all the value ranges for names equivalent to VAR with VAL
5737 using comparison code COMP. Return the same value returned by
5738 compare_range_with_value, including the setting of
5739 *STRICT_OVERFLOW_P. */
5740
5741 static tree
5742 compare_name_with_value (enum tree_code comp, tree var, tree val,
5743 bool *strict_overflow_p)
5744 {
5745 bitmap_iterator bi;
5746 unsigned i;
5747 bitmap e;
5748 tree retval, t;
5749 int used_strict_overflow;
5750 bool sop;
5751 value_range_t equiv_vr;
5752
5753 /* Get the set of equivalences for VAR. */
5754 e = get_value_range (var)->equiv;
5755
5756 /* Start at -1. Set it to 0 if we do a comparison without relying
5757 on overflow, or 1 if all comparisons rely on overflow. */
5758 used_strict_overflow = -1;
5759
5760 /* Compare vars' value range with val. */
5761 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5762 sop = false;
5763 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5764 if (retval)
5765 used_strict_overflow = sop ? 1 : 0;
5766
5767 /* If the equiv set is empty we have done all work we need to do. */
5768 if (e == NULL)
5769 {
5770 if (retval
5771 && used_strict_overflow > 0)
5772 *strict_overflow_p = true;
5773 return retval;
5774 }
5775
5776 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5777 {
5778 equiv_vr = get_vr_for_comparison (i);
5779 sop = false;
5780 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5781 if (t)
5782 {
5783 /* If we get different answers from different members
5784 of the equivalence set this check must be in a dead
5785 code region. Folding it to a trap representation
5786 would be correct here. For now just return don't-know. */
5787 if (retval != NULL
5788 && t != retval)
5789 {
5790 retval = NULL_TREE;
5791 break;
5792 }
5793 retval = t;
5794
5795 if (!sop)
5796 used_strict_overflow = 0;
5797 else if (used_strict_overflow < 0)
5798 used_strict_overflow = 1;
5799 }
5800 }
5801
5802 if (retval
5803 && used_strict_overflow > 0)
5804 *strict_overflow_p = true;
5805
5806 return retval;
5807 }
5808
5809
5810 /* Given a comparison code COMP and names N1 and N2, compare all the
5811 ranges equivalent to N1 against all the ranges equivalent to N2
5812 to determine the value of N1 COMP N2. Return the same value
5813 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5814 whether we relied on an overflow infinity in the comparison. */
5815
5816
5817 static tree
5818 compare_names (enum tree_code comp, tree n1, tree n2,
5819 bool *strict_overflow_p)
5820 {
5821 tree t, retval;
5822 bitmap e1, e2;
5823 bitmap_iterator bi1, bi2;
5824 unsigned i1, i2;
5825 int used_strict_overflow;
5826 static bitmap_obstack *s_obstack = NULL;
5827 static bitmap s_e1 = NULL, s_e2 = NULL;
5828
5829 /* Compare the ranges of every name equivalent to N1 against the
5830 ranges of every name equivalent to N2. */
5831 e1 = get_value_range (n1)->equiv;
5832 e2 = get_value_range (n2)->equiv;
5833
5834 /* Use the fake bitmaps if e1 or e2 are not available. */
5835 if (s_obstack == NULL)
5836 {
5837 s_obstack = XNEW (bitmap_obstack);
5838 bitmap_obstack_initialize (s_obstack);
5839 s_e1 = BITMAP_ALLOC (s_obstack);
5840 s_e2 = BITMAP_ALLOC (s_obstack);
5841 }
5842 if (e1 == NULL)
5843 e1 = s_e1;
5844 if (e2 == NULL)
5845 e2 = s_e2;
5846
5847 /* Add N1 and N2 to their own set of equivalences to avoid
5848 duplicating the body of the loop just to check N1 and N2
5849 ranges. */
5850 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5851 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5852
5853 /* If the equivalence sets have a common intersection, then the two
5854 names can be compared without checking their ranges. */
5855 if (bitmap_intersect_p (e1, e2))
5856 {
5857 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5858 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5859
5860 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5861 ? boolean_true_node
5862 : boolean_false_node;
5863 }
5864
5865 /* Start at -1. Set it to 0 if we do a comparison without relying
5866 on overflow, or 1 if all comparisons rely on overflow. */
5867 used_strict_overflow = -1;
5868
5869 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
5870 already added to their own equivalence sets above, so the loops
5871 below cover their ranges as well. */
5872 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5873 {
5874 value_range_t vr1 = get_vr_for_comparison (i1);
5875
5876 t = retval = NULL_TREE;
5877 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5878 {
5879 bool sop = false;
5880
5881 value_range_t vr2 = get_vr_for_comparison (i2);
5882
5883 t = compare_ranges (comp, &vr1, &vr2, &sop);
5884 if (t)
5885 {
5886 /* If we get different answers from different members
5887 of the equivalence set this check must be in a dead
5888 code region. Folding it to a trap representation
5889 would be correct here. For now just return don't-know. */
5890 if (retval != NULL
5891 && t != retval)
5892 {
5893 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5894 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5895 return NULL_TREE;
5896 }
5897 retval = t;
5898
5899 if (!sop)
5900 used_strict_overflow = 0;
5901 else if (used_strict_overflow < 0)
5902 used_strict_overflow = 1;
5903 }
5904 }
5905
5906 if (retval)
5907 {
5908 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5909 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5910 if (used_strict_overflow > 0)
5911 *strict_overflow_p = true;
5912 return retval;
5913 }
5914 }
5915
5916 /* None of the equivalent ranges are useful in computing this
5917 comparison. */
5918 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5919 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5920 return NULL_TREE;
5921 }
5922
5923 /* Helper function for vrp_evaluate_conditional_warnv. */
5924
5925 static tree
5926 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5927 tree op0, tree op1,
5928 bool * strict_overflow_p)
5929 {
5930 value_range_t *vr0, *vr1;
5931
5932 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5933 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5934
5935 if (vr0 && vr1)
5936 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5937 else if (vr0 && vr1 == NULL)
5938 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5939 else if (vr0 == NULL && vr1)
5940 return (compare_range_with_value
5941 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5942 return NULL;
5943 }
5944
5945 /* Helper function for vrp_evaluate_conditional_warnv. */
5946
5947 static tree
5948 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5949 tree op1, bool use_equiv_p,
5950 bool *strict_overflow_p, bool *only_ranges)
5951 {
5952 tree ret;
5953 if (only_ranges)
5954 *only_ranges = true;
5955
5956 /* We only deal with integral and pointer types. */
5957 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5958 && !POINTER_TYPE_P (TREE_TYPE (op0)))
5959 return NULL_TREE;
5960
5961 if (use_equiv_p)
5962 {
5963 if (only_ranges
5964 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5965 (code, op0, op1, strict_overflow_p)))
5966 return ret;
5967 *only_ranges = false;
5968 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5969 return compare_names (code, op0, op1, strict_overflow_p);
5970 else if (TREE_CODE (op0) == SSA_NAME)
5971 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5972 else if (TREE_CODE (op1) == SSA_NAME)
5973 return (compare_name_with_value
5974 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5975 }
5976 else
5977 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5978 strict_overflow_p);
5979 return NULL_TREE;
5980 }
5981
5982 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5983 information. Return NULL if the conditional cannot be evaluated.
5984 The ranges of all the names equivalent with the operands in COND
5985 will be used when trying to compute the value. If the result is
5986 based on undefined signed overflow, issue a warning if
5987 appropriate. */
5988
5989 static tree
5990 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5991 {
5992 bool sop;
5993 tree ret;
5994 bool only_ranges;
5995
5996 /* Some passes and foldings leak constants with overflow flag set
5997 into the IL. Avoid doing wrong things with these and bail out. */
5998 if ((TREE_CODE (op0) == INTEGER_CST
5999 && TREE_OVERFLOW (op0))
6000 || (TREE_CODE (op1) == INTEGER_CST
6001 && TREE_OVERFLOW (op1)))
6002 return NULL_TREE;
6003
6004 sop = false;
6005 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6006 &only_ranges);
6007
6008 if (ret && sop)
6009 {
6010 enum warn_strict_overflow_code wc;
6011 const char* warnmsg;
6012
6013 if (is_gimple_min_invariant (ret))
6014 {
6015 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6016 warnmsg = G_("assuming signed overflow does not occur when "
6017 "simplifying conditional to constant");
6018 }
6019 else
6020 {
6021 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6022 warnmsg = G_("assuming signed overflow does not occur when "
6023 "simplifying conditional");
6024 }
6025
6026 if (issue_strict_overflow_warning (wc))
6027 {
6028 location_t location;
6029
6030 if (!gimple_has_location (stmt))
6031 location = input_location;
6032 else
6033 location = gimple_location (stmt);
6034 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6035 }
6036 }
6037
6038 if (warn_type_limits
6039 && ret && only_ranges
6040 && TREE_CODE_CLASS (code) == tcc_comparison
6041 && TREE_CODE (op0) == SSA_NAME)
6042 {
6043 /* If the comparison is being folded and the operand on the LHS
6044 is being compared against a constant value that is outside of
6045 the natural range of OP0's type, then the predicate will
6046 always fold regardless of the value of OP0. If -Wtype-limits
6047 was specified, emit a warning. */
6048 tree type = TREE_TYPE (op0);
6049 value_range_t *vr0 = get_value_range (op0);
6050
6051 if (vr0->type != VR_VARYING
6052 && INTEGRAL_TYPE_P (type)
6053 && vrp_val_is_min (vr0->min)
6054 && vrp_val_is_max (vr0->max)
6055 && is_gimple_min_invariant (op1))
6056 {
6057 location_t location;
6058
6059 if (!gimple_has_location (stmt))
6060 location = input_location;
6061 else
6062 location = gimple_location (stmt);
6063
6064 warning_at (location, OPT_Wtype_limits,
6065 integer_zerop (ret)
6066 ? G_("comparison always false "
6067 "due to limited range of data type")
6068 : G_("comparison always true "
6069 "due to limited range of data type"));
6070 }
6071 }
6072
6073 return ret;
6074 }
6075
6076
6077 /* Visit conditional statement STMT. If we can determine which edge
6078 will be taken out of STMT's basic block, record it in
6079 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6080 SSA_PROP_VARYING. */
6081
6082 static enum ssa_prop_result
6083 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6084 {
6085 tree val;
6086 bool sop;
6087
6088 *taken_edge_p = NULL;
6089
6090 if (dump_file && (dump_flags & TDF_DETAILS))
6091 {
6092 tree use;
6093 ssa_op_iter i;
6094
6095 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6096 print_gimple_stmt (dump_file, stmt, 0, 0);
6097 fprintf (dump_file, "\nWith known ranges\n");
6098
6099 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6100 {
6101 fprintf (dump_file, "\t");
6102 print_generic_expr (dump_file, use, 0);
6103 fprintf (dump_file, ": ");
6104 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6105 }
6106
6107 fprintf (dump_file, "\n");
6108 }
6109
6110 /* Compute the value of the predicate COND by checking the known
6111 ranges of each of its operands.
6112
6113 Note that we cannot evaluate all the equivalent ranges here
6114 because those ranges may not yet be final and with the current
6115 propagation strategy, we cannot determine when the value ranges
6116 of the names in the equivalence set have changed.
6117
6118 For instance, given the following code fragment
6119
6120 i_5 = PHI <8, i_13>
6121 ...
6122 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6123 if (i_14 == 1)
6124 ...
6125
6126 Assume that on the first visit to i_14, i_5 has the temporary
6127 range [8, 8] because the second argument to the PHI function is
6128 not yet executable. We derive the range ~[0, 0] for i_14 and the
6129 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6130 the first time, since i_14 is equivalent to the range [8, 8], we
6131 determine that the predicate is always false.
6132
6133 On the next round of propagation, i_13 is determined to be
6134 VARYING, which causes i_5 to drop down to VARYING. So, another
6135 visit to i_14 is scheduled. In this second visit, we compute the
6136 exact same range and equivalence set for i_14, namely ~[0, 0] and
6137 { i_5 }. But we did not have the previous range for i_5
6138 registered, so vrp_visit_assignment thinks that the range for
6139 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6140 is not visited again, which stops propagation from visiting
6141 statements in the THEN clause of that if().
6142
6143 To properly fix this we would need to keep the previous range
6144 value for the names in the equivalence set. This way we would've
6145 discovered that from one visit to the other i_5 changed from
6146 range [8, 8] to VR_VARYING.
6147
6148 However, fixing this apparent limitation may not be worth the
6149 additional checking. Testing on several code bases (GCC, DLV,
6150 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6151 4 more predicates folded in SPEC. */
6152 sop = false;
6153
6154 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6155 gimple_cond_lhs (stmt),
6156 gimple_cond_rhs (stmt),
6157 false, &sop, NULL);
6158 if (val)
6159 {
6160 if (!sop)
6161 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6162 else
6163 {
6164 if (dump_file && (dump_flags & TDF_DETAILS))
6165 fprintf (dump_file,
6166 "\nIgnoring predicate evaluation because "
6167 "it assumes that signed overflow is undefined");
6168 val = NULL_TREE;
6169 }
6170 }
6171
6172 if (dump_file && (dump_flags & TDF_DETAILS))
6173 {
6174 fprintf (dump_file, "\nPredicate evaluates to: ");
6175 if (val == NULL_TREE)
6176 fprintf (dump_file, "DON'T KNOW\n");
6177 else
6178 print_generic_stmt (dump_file, val, 0);
6179 }
6180
6181 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6182 }
6183
6184 /* Search the case labels of switch statement STMT for the index *IDX of
6185 the CASE_LABEL that includes the value VAL. The search is restricted to
6186 the range [START_IDX, n - 1] where n is the number of labels.
6187
6188 If there is a CASE_LABEL for VAL, its index is placed in *IDX and true is
6189 returned.
6190
6191 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6192 its index is placed in *IDX and false is returned.
6193
6194 If VAL is larger than any CASE_LABEL, n is placed in *IDX and false is
6195 returned. */
6196
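/* For instance (an illustrative switch, not actual dump output), with
   the sorted labels "case 1:", "case 5 ... 7:" and "case 10:", a query
   for VAL == 6 finds the "case 5 ... 7:" label and returns true, while
   a query for VAL == 8 returns false and places the index of
   "case 10:" in *IDX.  */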
6197 static bool
6198 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6199 {
6200 size_t n = gimple_switch_num_labels (stmt);
6201 size_t low, high;
6202
6203 /* Find case label for minimum of the value range or the next one.
6204 At each iteration we are searching in [low, high - 1]. */
6205
6206 for (low = start_idx, high = n; high != low; )
6207 {
6208 tree t;
6209 int cmp;
6210 /* Note that i != high, so we never ask for n. */
6211 size_t i = (high + low) / 2;
6212 t = gimple_switch_label (stmt, i);
6213
6214 /* Cache the result of comparing CASE_LOW and val. */
6215 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6216
6217 if (cmp == 0)
6218 {
6219 /* Ranges cannot be empty. */
6220 *idx = i;
6221 return true;
6222 }
6223 else if (cmp > 0)
6224 high = i;
6225 else
6226 {
6227 low = i + 1;
6228 if (CASE_HIGH (t) != NULL
6229 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6230 {
6231 *idx = i;
6232 return true;
6233 }
6234 }
6235 }
6236
6237 *idx = high;
6238 return false;
6239 }
6240
6241 /* Search the case labels of switch statement STMT for the range of
6242 CASE_LABELs that is used for values between MIN and MAX. The first
6243 index is placed in MIN_IDX. The last index is placed in MAX_IDX. If
6244 the range of CASE_LABELs is empty then MAX_IDX < MIN_IDX.
6245 Returns true if the default label is not needed. */
6246
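/* Continuing the illustrative switch above, for MIN == 5 and MAX == 10
   both bounds land on explicit labels, but the gap between 7 and 10
   means the values 8 and 9 still fall through to the default label,
   so false is returned (the default label is still needed).  */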
6247 static bool
6248 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6249 size_t *max_idx)
6250 {
6251 size_t i, j;
6252 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6253 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6254
6255 if (i == j
6256 && min_take_default
6257 && max_take_default)
6258 {
6259 /* Only the default case label reached.
6260 Return an empty range. */
6261 *min_idx = 1;
6262 *max_idx = 0;
6263 return false;
6264 }
6265 else
6266 {
6267 bool take_default = min_take_default || max_take_default;
6268 tree low, high;
6269 size_t k;
6270
6271 if (max_take_default)
6272 j--;
6273
6274 /* If the case label range is continuous, we do not need
6275 the default case label. Verify that. */
6276 high = CASE_LOW (gimple_switch_label (stmt, i));
6277 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6278 high = CASE_HIGH (gimple_switch_label (stmt, i));
6279 for (k = i + 1; k <= j; ++k)
6280 {
6281 low = CASE_LOW (gimple_switch_label (stmt, k));
6282 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
6283 {
6284 take_default = true;
6285 break;
6286 }
6287 high = low;
6288 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6289 high = CASE_HIGH (gimple_switch_label (stmt, k));
6290 }
6291
6292 *min_idx = i;
6293 *max_idx = j;
6294 return !take_default;
6295 }
6296 }
6297
6298 /* Visit switch statement STMT. If we can determine which edge
6299 will be taken out of STMT's basic block, record it in
6300 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6301 SSA_PROP_VARYING. */
6302
6303 static enum ssa_prop_result
6304 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6305 {
6306 tree op, val;
6307 value_range_t *vr;
6308 size_t i = 0, j = 0;
6309 bool take_default;
6310
6311 *taken_edge_p = NULL;
6312 op = gimple_switch_index (stmt);
6313 if (TREE_CODE (op) != SSA_NAME)
6314 return SSA_PROP_VARYING;
6315
6316 vr = get_value_range (op);
6317 if (dump_file && (dump_flags & TDF_DETAILS))
6318 {
6319 fprintf (dump_file, "\nVisiting switch expression with operand ");
6320 print_generic_expr (dump_file, op, 0);
6321 fprintf (dump_file, " with known range ");
6322 dump_value_range (dump_file, vr);
6323 fprintf (dump_file, "\n");
6324 }
6325
6326 if (vr->type != VR_RANGE
6327 || symbolic_range_p (vr))
6328 return SSA_PROP_VARYING;
6329
6330 /* Find the single edge that is taken from the switch expression. */
6331 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6332
6333 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6334 label. */
6335 if (j < i)
6336 {
6337 gcc_assert (take_default);
6338 val = gimple_switch_default_label (stmt);
6339 }
6340 else
6341 {
6342 /* Check if labels with index i to j and maybe the default label
6343 are all reaching the same label. */
6344
6345 val = gimple_switch_label (stmt, i);
6346 if (take_default
6347 && CASE_LABEL (gimple_switch_default_label (stmt))
6348 != CASE_LABEL (val))
6349 {
6350 if (dump_file && (dump_flags & TDF_DETAILS))
6351 fprintf (dump_file, " not a single destination for this "
6352 "range\n");
6353 return SSA_PROP_VARYING;
6354 }
6355 for (++i; i <= j; ++i)
6356 {
6357 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6358 {
6359 if (dump_file && (dump_flags & TDF_DETAILS))
6360 fprintf (dump_file, " not a single destination for this "
6361 "range\n");
6362 return SSA_PROP_VARYING;
6363 }
6364 }
6365 }
6366
6367 *taken_edge_p = find_edge (gimple_bb (stmt),
6368 label_to_block (CASE_LABEL (val)));
6369
6370 if (dump_file && (dump_flags & TDF_DETAILS))
6371 {
6372 fprintf (dump_file, " will take edge to ");
6373 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6374 }
6375
6376 return SSA_PROP_INTERESTING;
6377 }
6378
6379
6380 /* Evaluate statement STMT. If the statement produces a useful range,
6381 return SSA_PROP_INTERESTING and record the SSA name with the
6382 interesting range into *OUTPUT_P.
6383
6384 If STMT is a conditional branch and we can determine its truth
6385 value, the taken edge is recorded in *TAKEN_EDGE_P.
6386
6387 If STMT produces a varying value, return SSA_PROP_VARYING. */
6388
6389 static enum ssa_prop_result
6390 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6391 {
6392 tree def;
6393 ssa_op_iter iter;
6394
6395 if (dump_file && (dump_flags & TDF_DETAILS))
6396 {
6397 fprintf (dump_file, "\nVisiting statement:\n");
6398 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6399 fprintf (dump_file, "\n");
6400 }
6401
6402 if (!stmt_interesting_for_vrp (stmt))
6403 gcc_assert (stmt_ends_bb_p (stmt));
6404 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6405 {
6406 /* In general, assignments with virtual operands are not useful
6407 for deriving ranges, with the obvious exception of calls to
6408 builtin functions. */
6409 if ((is_gimple_call (stmt)
6410 && gimple_call_fndecl (stmt) != NULL_TREE
6411 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6412 || !gimple_vuse (stmt))
6413 return vrp_visit_assignment_or_call (stmt, output_p);
6414 }
6415 else if (gimple_code (stmt) == GIMPLE_COND)
6416 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6417 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6418 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6419
6420 /* All other statements produce nothing of interest for VRP, so mark
6421 their outputs varying and prevent further simulation. */
6422 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6423 set_value_range_to_varying (get_value_range (def));
6424
6425 return SSA_PROP_VARYING;
6426 }
6427
6428
6429 /* Meet operation for value ranges. Given two value ranges VR0 and
6430 VR1, store in VR0 a range that contains both VR0 and VR1. This
6431 may not be the smallest possible such range. */
6432
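/* A few illustrative meets (sketches, not an exhaustive list):

     [1, 5]   meet  [10, 20]  -->  [1, 20]    (convex hull of the two)
     [1, 5]   meet  VARYING   -->  VARYING
     ~[0, 0]  meet  ~[0, 0]   -->  ~[0, 0]    (identical anti-ranges)  */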
6433 static void
6434 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6435 {
6436 if (vr0->type == VR_UNDEFINED)
6437 {
6438 /* Drop equivalences. See PR53465. */
6439 set_value_range (vr0, vr1->type, vr1->min, vr1->max, NULL);
6440 return;
6441 }
6442
6443 if (vr1->type == VR_UNDEFINED)
6444 {
6445 /* VR0 already has the resulting range, just drop equivalences.
6446 See PR53465. */
6447 if (vr0->equiv)
6448 bitmap_clear (vr0->equiv);
6449 return;
6450 }
6451
6452 if (vr0->type == VR_VARYING)
6453 {
6454 /* Nothing to do. VR0 already has the resulting range. */
6455 return;
6456 }
6457
6458 if (vr1->type == VR_VARYING)
6459 {
6460 set_value_range_to_varying (vr0);
6461 return;
6462 }
6463
6464 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6465 {
6466 int cmp;
6467 tree min, max;
6468
6469 /* Compute the convex hull of the ranges. The lower limit of
6470 the new range is the minimum of the two ranges. If they
6471 cannot be compared, then give up. */
6472 cmp = compare_values (vr0->min, vr1->min);
6473 if (cmp == 0 || cmp == 1)
6474 min = vr1->min;
6475 else if (cmp == -1)
6476 min = vr0->min;
6477 else
6478 goto give_up;
6479
6480 /* Similarly, the upper limit of the new range is the maximum
6481 of the two ranges. If they cannot be compared, then
6482 give up. */
6483 cmp = compare_values (vr0->max, vr1->max);
6484 if (cmp == 0 || cmp == -1)
6485 max = vr1->max;
6486 else if (cmp == 1)
6487 max = vr0->max;
6488 else
6489 goto give_up;
6490
6491 /* Check for useless ranges. */
6492 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6493 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6494 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6495 goto give_up;
6496
6497 /* The resulting set of equivalences is the intersection of
6498 the two sets. */
6499 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6500 bitmap_and_into (vr0->equiv, vr1->equiv);
6501 else if (vr0->equiv && !vr1->equiv)
6502 bitmap_clear (vr0->equiv);
6503
6504 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6505 }
6506 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6507 {
6508 /* Two anti-ranges meet only if their complements intersect.
6509 Only handle the case of identical ranges. */
6510 if (compare_values (vr0->min, vr1->min) == 0
6511 && compare_values (vr0->max, vr1->max) == 0
6512 && compare_values (vr0->min, vr0->max) == 0)
6513 {
6514 /* The resulting set of equivalences is the intersection of
6515 the two sets. */
6516 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6517 bitmap_and_into (vr0->equiv, vr1->equiv);
6518 else if (vr0->equiv && !vr1->equiv)
6519 bitmap_clear (vr0->equiv);
6520 }
6521 else
6522 goto give_up;
6523 }
6524 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6525 {
6526 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6527 only handle the case where the ranges have an empty intersection.
6528 The result of the meet operation is the anti-range. */
6529 if (!symbolic_range_p (vr0)
6530 && !symbolic_range_p (vr1)
6531 && !value_ranges_intersect_p (vr0, vr1))
6532 {
6533 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6534 set. We need to compute the intersection of the two
6535 equivalence sets. */
6536 if (vr1->type == VR_ANTI_RANGE)
6537 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6538
6539 /* The resulting set of equivalences is the intersection of
6540 the two sets. */
6541 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6542 bitmap_and_into (vr0->equiv, vr1->equiv);
6543 else if (vr0->equiv && !vr1->equiv)
6544 bitmap_clear (vr0->equiv);
6545 }
6546 else
6547 goto give_up;
6548 }
6549 else
6550 gcc_unreachable ();
6551
6552 return;
6553
6554 give_up:
6555 /* Failed to find an efficient meet. Before giving up and setting
6556 the result to VARYING, see if we can at least derive a useful
6557 anti-range. FIXME, all this nonsense about distinguishing
6558 anti-ranges from ranges is necessary because of the odd
6559 semantics of range_includes_zero_p and friends. */
6560 if (!symbolic_range_p (vr0)
6561 && ((vr0->type == VR_RANGE
6562 && range_includes_zero_p (vr0->min, vr0->max) == 0)
6563 || (vr0->type == VR_ANTI_RANGE
6564 && range_includes_zero_p (vr0->min, vr0->max) == 1))
6565 && !symbolic_range_p (vr1)
6566 && ((vr1->type == VR_RANGE
6567 && range_includes_zero_p (vr1->min, vr1->max) == 0)
6568 || (vr1->type == VR_ANTI_RANGE
6569 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
6570 {
6571 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6572
6573 /* Since this meet operation did not result from the meeting of
6574 two equivalent names, VR0 cannot have any equivalences. */
6575 if (vr0->equiv)
6576 bitmap_clear (vr0->equiv);
6577 }
6578 else
6579 set_value_range_to_varying (vr0);
6580 }
6581
6582
6583 /* Visit all arguments for PHI node PHI that flow through executable
6584 edges. If a valid value range can be derived from all the incoming
6585 value ranges, set a new range for the LHS of PHI. */
6586
6587 static enum ssa_prop_result
6588 vrp_visit_phi_node (gimple phi)
6589 {
6590 size_t i;
6591 tree lhs = PHI_RESULT (phi);
6592 value_range_t *lhs_vr = get_value_range (lhs);
6593 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6594 bool first = true;
6595 int edges, old_edges;
6596 struct loop *l;
6597
6598 if (dump_file && (dump_flags & TDF_DETAILS))
6599 {
6600 fprintf (dump_file, "\nVisiting PHI node: ");
6601 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6602 }
6603
6604 edges = 0;
6605 for (i = 0; i < gimple_phi_num_args (phi); i++)
6606 {
6607 edge e = gimple_phi_arg_edge (phi, i);
6608
6609 if (dump_file && (dump_flags & TDF_DETAILS))
6610 {
6611 fprintf (dump_file,
6612 "\n Argument #%d (%d -> %d %sexecutable)\n",
6613 (int) i, e->src->index, e->dest->index,
6614 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6615 }
6616
6617 if (e->flags & EDGE_EXECUTABLE)
6618 {
6619 tree arg = PHI_ARG_DEF (phi, i);
6620 value_range_t vr_arg;
6621
6622 ++edges;
6623
6624 if (TREE_CODE (arg) == SSA_NAME)
6625 {
6626 vr_arg = *(get_value_range (arg));
6627 /* Do not allow equivalences or symbolic ranges to leak in from
6628 backedges. That would create invalid equivalences. */
6629 if (e->flags & EDGE_DFS_BACK
6630 && (vr_arg.type == VR_RANGE
6631 || vr_arg.type == VR_ANTI_RANGE))
6632 {
6633 vr_arg.equiv = NULL;
6634 if (symbolic_range_p (&vr_arg))
6635 {
6636 vr_arg.type = VR_VARYING;
6637 vr_arg.min = NULL_TREE;
6638 vr_arg.max = NULL_TREE;
6639 }
6640 }
6641 }
6642 else
6643 {
6644 if (is_overflow_infinity (arg))
6645 {
6646 arg = copy_node (arg);
6647 TREE_OVERFLOW (arg) = 0;
6648 }
6649
6650 vr_arg.type = VR_RANGE;
6651 vr_arg.min = arg;
6652 vr_arg.max = arg;
6653 vr_arg.equiv = NULL;
6654 }
6655
6656 if (dump_file && (dump_flags & TDF_DETAILS))
6657 {
6658 fprintf (dump_file, "\t");
6659 print_generic_expr (dump_file, arg, dump_flags);
6660 fprintf (dump_file, "\n\tValue: ");
6661 dump_value_range (dump_file, &vr_arg);
6662 fprintf (dump_file, "\n");
6663 }
6664
6665 if (first)
6666 copy_value_range (&vr_result, &vr_arg);
6667 else
6668 vrp_meet (&vr_result, &vr_arg);
6669 first = false;
6670
6671 if (vr_result.type == VR_VARYING)
6672 break;
6673 }
6674 }
6675
6676 if (vr_result.type == VR_VARYING)
6677 goto varying;
6678 else if (vr_result.type == VR_UNDEFINED)
6679 goto update_range;
6680
6681 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6682 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6683
6684 /* To prevent infinite iterations in the algorithm, derive ranges
6685 when the new value is slightly bigger or smaller than the
6686 previous one. We don't do this if we have seen a new executable
6687 edge; this helps us avoid an overflow infinity for conditionals
6688 which are not in a loop. */
6689 if (edges > 0
6690 && gimple_phi_num_args (phi) > 1
6691 && edges == old_edges)
6692 {
6693 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6694 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6695
6696 /* For non VR_RANGE or for pointers fall back to varying if
6697 the range changed. */
6698 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
6699 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6700 && (cmp_min != 0 || cmp_max != 0))
6701 goto varying;
6702
6703 /* If the new minimum is smaller or larger than the previous
6704 one, go all the way to -INF. In the first case, to avoid
6705 iterating millions of times to reach -INF, and in the
6706 other case to avoid infinite bouncing between different
6707 minimums. */
6708 if (cmp_min > 0 || cmp_min < 0)
6709 {
6710 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6711 || !vrp_var_may_overflow (lhs, phi))
6712 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6713 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6714 vr_result.min =
6715 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6716 }
6717
6718 /* Similarly, if the new maximum is smaller or larger than
6719 the previous one, go all the way to +INF. */
6720 if (cmp_max < 0 || cmp_max > 0)
6721 {
6722 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6723 || !vrp_var_may_overflow (lhs, phi))
6724 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6725 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6726 vr_result.max =
6727 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6728 }
6729
6730 /* If we dropped either bound to +-INF then, if this is a loop
6731 PHI node, SCEV may know more about its value range. */
6732 if ((cmp_min > 0 || cmp_min < 0
6733 || cmp_max < 0 || cmp_max > 0)
6734 && current_loops
6735 && (l = loop_containing_stmt (phi))
6736 && l->header == gimple_bb (phi))
6737 adjust_range_with_scev (&vr_result, l, phi, lhs);
6738
6739 /* If we will end up with a (-INF, +INF) range, set it to
6740 VARYING. Same if the previous max value was invalid for
6741 the type and we end up with vr_result.min > vr_result.max. */
6742 if ((vrp_val_is_max (vr_result.max)
6743 && vrp_val_is_min (vr_result.min))
6744 || compare_values (vr_result.min,
6745 vr_result.max) > 0)
6746 goto varying;
6747 }
6748
6749 /* If the new range is different than the previous value, keep
6750 iterating. */
6751 update_range:
6752 if (update_value_range (lhs, &vr_result))
6753 {
6754 if (dump_file && (dump_flags & TDF_DETAILS))
6755 {
6756 fprintf (dump_file, "Found new range for ");
6757 print_generic_expr (dump_file, lhs, 0);
6758 fprintf (dump_file, ": ");
6759 dump_value_range (dump_file, &vr_result);
6760 fprintf (dump_file, "\n\n");
6761 }
6762
6763 return SSA_PROP_INTERESTING;
6764 }
6765
6766 /* Nothing changed, don't add outgoing edges. */
6767 return SSA_PROP_NOT_INTERESTING;
6768
6769 /* No match found. Set the LHS to VARYING. */
6770 varying:
6771 set_value_range_to_varying (lhs_vr);
6772 return SSA_PROP_VARYING;
6773 }
6774
6775 /* Simplify boolean operations if the source is known
6776 to be already a boolean. */
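/* For instance (illustrative), if b_2 and c_3 both have the boolean
   range [0, 1]:

     d_4 = b_2 != c_3    is rewritten as    d_4 = b_2 ^ c_3
     d_5 = b_2 != 0      is rewritten as    d_5 = b_2

   modulo a conversion when the type of the LHS differs from that of
   the operands.  */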
6777 static bool
6778 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6779 {
6780 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6781 tree lhs, op0, op1;
6782 bool need_conversion;
6783
6784 /* We handle only !=/== case here. */
6785 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
6786
6787 op0 = gimple_assign_rhs1 (stmt);
6788 if (!op_with_boolean_value_range_p (op0))
6789 return false;
6790
6791 op1 = gimple_assign_rhs2 (stmt);
6792 if (!op_with_boolean_value_range_p (op1))
6793 return false;
6794
6795 /* Reduce number of cases to handle to NE_EXPR. As there is no
6796 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
6797 if (rhs_code == EQ_EXPR)
6798 {
6799 if (TREE_CODE (op1) == INTEGER_CST)
6800 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
6801 else
6802 return false;
6803 }
6804
6805 lhs = gimple_assign_lhs (stmt);
6806 need_conversion
6807 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
6808
6809 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
6810 if (need_conversion
6811 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6812 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
6813 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
6814 return false;
6815
6816 /* For A != 0 we can substitute A itself. */
6817 if (integer_zerop (op1))
6818 gimple_assign_set_rhs_with_ops (gsi,
6819 need_conversion
6820 ? NOP_EXPR : TREE_CODE (op0),
6821 op0, NULL_TREE);
6822 /* For A != B we substitute A ^ B. Either with conversion. */
6823 else if (need_conversion)
6824 {
6825 gimple newop;
6826 tree tem = create_tmp_reg (TREE_TYPE (op0), NULL);
6827 newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
6828 tem = make_ssa_name (tem, newop);
6829 gimple_assign_set_lhs (newop, tem);
6830 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
6831 update_stmt (newop);
6832 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
6833 }
6834 /* Or without. */
6835 else
6836 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
6837 update_stmt (gsi_stmt (*gsi));
6838
6839 return true;
6840 }
6841
6842 /* Simplify a division or modulo operator to a right shift or
6843 bitwise and if the first operand is unsigned or is greater
6844 than zero and the second operand is an exact power of two. */
6845
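/* For example (illustrative), when x_2 is known to be non-negative
   and the divisor is the power of two 8:

     y_3 = x_2 / 8    becomes    y_3 = x_2 >> 3
     y_4 = x_2 % 8    becomes    y_4 = x_2 & 7  */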
6846 static bool
6847 simplify_div_or_mod_using_ranges (gimple stmt)
6848 {
6849 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6850 tree val = NULL;
6851 tree op0 = gimple_assign_rhs1 (stmt);
6852 tree op1 = gimple_assign_rhs2 (stmt);
6853 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6854
6855 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6856 {
6857 val = integer_one_node;
6858 }
6859 else
6860 {
6861 bool sop = false;
6862
6863 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6864
6865 if (val
6866 && sop
6867 && integer_onep (val)
6868 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6869 {
6870 location_t location;
6871
6872 if (!gimple_has_location (stmt))
6873 location = input_location;
6874 else
6875 location = gimple_location (stmt);
6876 warning_at (location, OPT_Wstrict_overflow,
6877 "assuming signed overflow does not occur when "
6878 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6879 }
6880 }
6881
6882 if (val && integer_onep (val))
6883 {
6884 tree t;
6885
6886 if (rhs_code == TRUNC_DIV_EXPR)
6887 {
6888 t = build_int_cst (integer_type_node, tree_log2 (op1));
6889 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6890 gimple_assign_set_rhs1 (stmt, op0);
6891 gimple_assign_set_rhs2 (stmt, t);
6892 }
6893 else
6894 {
6895 t = build_int_cst (TREE_TYPE (op1), 1);
6896 t = int_const_binop (MINUS_EXPR, op1, t);
6897 t = fold_convert (TREE_TYPE (op0), t);
6898
6899 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6900 gimple_assign_set_rhs1 (stmt, op0);
6901 gimple_assign_set_rhs2 (stmt, t);
6902 }
6903
6904 update_stmt (stmt);
6905 return true;
6906 }
6907
6908 return false;
6909 }
6910
6911 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6912 ABS_EXPR. If the operand is <= 0, then simplify the
6913 ABS_EXPR into a NEGATE_EXPR. */
6914
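/* For instance (illustrative), if x_5 has the range [1, 100] then
   ABS_EXPR <x_5> simplifies to x_5 itself, and if its range is
   [-100, -1] it simplifies to -x_5.  */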
6915 static bool
6916 simplify_abs_using_ranges (gimple stmt)
6917 {
6918 tree val = NULL;
6919 tree op = gimple_assign_rhs1 (stmt);
6920 tree type = TREE_TYPE (op);
6921 value_range_t *vr = get_value_range (op);
6922
6923 if (TYPE_UNSIGNED (type))
6924 {
6925 val = integer_zero_node;
6926 }
6927 else if (vr)
6928 {
6929 bool sop = false;
6930
6931 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6932 if (!val)
6933 {
6934 sop = false;
6935 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
6936 &sop);
6937
6938 if (val)
6939 {
6940 if (integer_zerop (val))
6941 val = integer_one_node;
6942 else if (integer_onep (val))
6943 val = integer_zero_node;
6944 }
6945 }
6946
6947 if (val
6948 && (integer_onep (val) || integer_zerop (val)))
6949 {
6950 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6951 {
6952 location_t location;
6953
6954 if (!gimple_has_location (stmt))
6955 location = input_location;
6956 else
6957 location = gimple_location (stmt);
6958 warning_at (location, OPT_Wstrict_overflow,
6959 "assuming signed overflow does not occur when "
6960 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
6961 }
6962
6963 gimple_assign_set_rhs1 (stmt, op);
6964 if (integer_onep (val))
6965 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
6966 else
6967 gimple_assign_set_rhs_code (stmt, SSA_NAME);
6968 update_stmt (stmt);
6969 return true;
6970 }
6971 }
6972
6973 return false;
6974 }
6975
6976 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
6977 If all the bits that are being cleared by & are already
6978 known to be zero from VR, or all the bits that are being
6979 set by | are already known to be one from VR, the bit
6980 operation is redundant. */
6981
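/* For example (illustrative), if x_7 is known to lie in [0, 3], so
   that only its two low bits may be nonzero, then

     y_8 = x_7 & 3    simplifies to    y_8 = x_7

   because the mask clears no bit that could possibly be set.  */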
6982 static bool
6983 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6984 {
6985 tree op0 = gimple_assign_rhs1 (stmt);
6986 tree op1 = gimple_assign_rhs2 (stmt);
6987 tree op = NULL_TREE;
6988 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6989 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6990 double_int may_be_nonzero0, may_be_nonzero1;
6991 double_int must_be_nonzero0, must_be_nonzero1;
6992 double_int mask;
6993
6994 if (TREE_CODE (op0) == SSA_NAME)
6995 vr0 = *(get_value_range (op0));
6996 else if (is_gimple_min_invariant (op0))
6997 set_value_range_to_value (&vr0, op0, NULL);
6998 else
6999 return false;
7000
7001 if (TREE_CODE (op1) == SSA_NAME)
7002 vr1 = *(get_value_range (op1));
7003 else if (is_gimple_min_invariant (op1))
7004 set_value_range_to_value (&vr1, op1, NULL);
7005 else
7006 return false;
7007
7008 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
7009 return false;
7010 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
7011 return false;
7012
7013 switch (gimple_assign_rhs_code (stmt))
7014 {
7015 case BIT_AND_EXPR:
7016 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7017 if (double_int_zero_p (mask))
7018 {
7019 op = op0;
7020 break;
7021 }
7022 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7023 if (double_int_zero_p (mask))
7024 {
7025 op = op1;
7026 break;
7027 }
7028 break;
7029 case BIT_IOR_EXPR:
7030 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7031 if (double_int_zero_p (mask))
7032 {
7033 op = op1;
7034 break;
7035 }
7036 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7037 if (double_int_zero_p (mask))
7038 {
7039 op = op0;
7040 break;
7041 }
7042 break;
7043 default:
7044 gcc_unreachable ();
7045 }
7046
7047 if (op == NULL_TREE)
7048 return false;
7049
7050 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
7051 update_stmt (gsi_stmt (*gsi));
7052 return true;
7053 }
7054
7055 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
7056 a known value range VR.
7057
7058 If there is one and only one value which will satisfy the
7059 conditional, then return that value. Else return NULL. */
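/* For instance (illustrative values), if the condition is x_1 <= 0 and
   the range of x_1 is [0, 10], the only value that can satisfy the
   condition is 0.  */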
7060
7061 static tree
7062 test_for_singularity (enum tree_code cond_code, tree op0,
7063 tree op1, value_range_t *vr)
7064 {
7065 tree min = NULL;
7066 tree max = NULL;
7067
7068 /* Extract minimum/maximum values which satisfy the
7069 conditional as it was written. */
7070 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
7071 {
7072 /* This should not be negative infinity; there is no overflow
7073 here. */
7074 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
7075
7076 max = op1;
7077 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
7078 {
7079 tree one = build_int_cst (TREE_TYPE (op0), 1);
7080 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
7081 if (EXPR_P (max))
7082 TREE_NO_WARNING (max) = 1;
7083 }
7084 }
7085 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
7086 {
7087 /* This should not be positive infinity; there is no overflow
7088 here. */
7089 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
7090
7091 min = op1;
7092 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
7093 {
7094 tree one = build_int_cst (TREE_TYPE (op0), 1);
7095 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
7096 if (EXPR_P (min))
7097 TREE_NO_WARNING (min) = 1;
7098 }
7099 }
7100
7101 /* Now refine the minimum and maximum values using any
7102 value range information we have for op0. */
7103 if (min && max)
7104 {
7105 if (compare_values (vr->min, min) == 1)
7106 min = vr->min;
7107 if (compare_values (vr->max, max) == -1)
7108 max = vr->max;
7109
7110 /* If the new min/max values have converged to a single value,
7111 then there is only one value which can satisfy the condition,
7112 return that value. */
7113 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7114 return min;
7115 }
7116 return NULL;
7117 }
7118
7119 /* Simplify a conditional using a relational operator to an equality
7120 test if the range information indicates only one value can satisfy
7121 the original conditional. */
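/* For instance (illustrative values), with x_1 known to be in [1, 10],
   if (x_1 < 2) can be rewritten as if (x_1 == 1), and if (x_1 > 1) as
   if (x_1 != 1).  */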
7122
7123 static bool
7124 simplify_cond_using_ranges (gimple stmt)
7125 {
7126 tree op0 = gimple_cond_lhs (stmt);
7127 tree op1 = gimple_cond_rhs (stmt);
7128 enum tree_code cond_code = gimple_cond_code (stmt);
7129
7130 if (cond_code != NE_EXPR
7131 && cond_code != EQ_EXPR
7132 && TREE_CODE (op0) == SSA_NAME
7133 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
7134 && is_gimple_min_invariant (op1))
7135 {
7136 value_range_t *vr = get_value_range (op0);
7137
7138 /* If we have range information for OP0, then we might be
7139 able to simplify this conditional. */
7140 if (vr->type == VR_RANGE)
7141 {
7142 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
7143
7144 if (new_tree)
7145 {
7146 if (dump_file)
7147 {
7148 fprintf (dump_file, "Simplified relational ");
7149 print_gimple_stmt (dump_file, stmt, 0, 0);
7150 fprintf (dump_file, " into ");
7151 }
7152
7153 gimple_cond_set_code (stmt, EQ_EXPR);
7154 gimple_cond_set_lhs (stmt, op0);
7155 gimple_cond_set_rhs (stmt, new_tree);
7156
7157 update_stmt (stmt);
7158
7159 if (dump_file)
7160 {
7161 print_gimple_stmt (dump_file, stmt, 0, 0);
7162 fprintf (dump_file, "\n");
7163 }
7164
7165 return true;
7166 }
7167
7168 /* Try again after inverting the condition. We only deal
7169 with integral types here, so no need to worry about
7170 issues with inverting FP comparisons. */
7171 cond_code = invert_tree_comparison (cond_code, false);
7172 new_tree = test_for_singularity (cond_code, op0, op1, vr);
7173
7174 if (new_tree)
7175 {
7176 if (dump_file)
7177 {
7178 fprintf (dump_file, "Simplified relational ");
7179 print_gimple_stmt (dump_file, stmt, 0, 0);
7180 fprintf (dump_file, " into ");
7181 }
7182
7183 gimple_cond_set_code (stmt, NE_EXPR);
7184 gimple_cond_set_lhs (stmt, op0);
7185 gimple_cond_set_rhs (stmt, new_tree);
7186
7187 update_stmt (stmt);
7188
7189 if (dump_file)
7190 {
7191 print_gimple_stmt (dump_file, stmt, 0, 0);
7192 fprintf (dump_file, "\n");
7193 }
7194
7195 return true;
7196 }
7197 }
7198 }
7199
7200 return false;
7201 }
7202
7203 /* Simplify a switch statement using the value range of the switch
7204 argument. */
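/* For instance (illustrative values), if the switch index is known to be
   in [3, 5], case labels outside that range can never be reached; their
   edges are queued for removal and the label vector for rewriting.  */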
7205
7206 static bool
7207 simplify_switch_using_ranges (gimple stmt)
7208 {
7209 tree op = gimple_switch_index (stmt);
7210 value_range_t *vr;
7211 bool take_default;
7212 edge e;
7213 edge_iterator ei;
7214 size_t i = 0, j = 0, n, n2;
7215 tree vec2;
7216 switch_update su;
7217
7218 if (TREE_CODE (op) == SSA_NAME)
7219 {
7220 vr = get_value_range (op);
7221
7222 /* We can only handle integer ranges. */
7223 if (vr->type != VR_RANGE
7224 || symbolic_range_p (vr))
7225 return false;
7226
7227 /* Find case label for min/max of the value range. */
7228 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7229 }
7230 else if (TREE_CODE (op) == INTEGER_CST)
7231 {
7232 take_default = !find_case_label_index (stmt, 1, op, &i);
7233 if (take_default)
7234 {
7235 i = 1;
7236 j = 0;
7237 }
7238 else
7239 {
7240 j = i;
7241 }
7242 }
7243 else
7244 return false;
7245
7246 n = gimple_switch_num_labels (stmt);
7247
7248 /* Bail out if this is just all edges taken. */
7249 if (i == 1
7250 && j == n - 1
7251 && take_default)
7252 return false;
7253
7254 /* Build a new vector of taken case labels. */
7255 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7256 n2 = 0;
7257
7258 /* Add the default edge, if necessary. */
7259 if (take_default)
7260 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7261
7262 for (; i <= j; ++i, ++n2)
7263 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7264
7265 /* Mark needed edges. */
7266 for (i = 0; i < n2; ++i)
7267 {
7268 e = find_edge (gimple_bb (stmt),
7269 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7270 e->aux = (void *)-1;
7271 }
7272
7273 /* Queue unneeded edges for later removal. */
7274 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7275 {
7276 if (e->aux == (void *)-1)
7277 {
7278 e->aux = NULL;
7279 continue;
7280 }
7281
7282 if (dump_file && (dump_flags & TDF_DETAILS))
7283 {
7284 fprintf (dump_file, "removing unreachable case label\n");
7285 }
7286 VEC_safe_push (edge, heap, to_remove_edges, e);
7287 e->flags &= ~EDGE_EXECUTABLE;
7288 }
7289
7290 /* And queue an update for the stmt. */
7291 su.stmt = stmt;
7292 su.vec = vec2;
7293 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7294 return false;
7295 }
7296
7297 /* Simplify an integral conversion from an SSA name in STMT. */
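/* For instance (illustrative names), a chain _2 = (short) c_1;
   _3 = (int) _2; can be shortened to _3 = (int) c_1 when the value range
   of c_1 guarantees that dropping the intermediate conversion does not
   change any value.  */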
7298
7299 static bool
7300 simplify_conversion_using_ranges (gimple stmt)
7301 {
7302 tree innerop, middleop, finaltype;
7303 gimple def_stmt;
7304 value_range_t *innervr;
7305 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
7306 unsigned inner_prec, middle_prec, final_prec;
7307 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
7308
7309 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
7310 if (!INTEGRAL_TYPE_P (finaltype))
7311 return false;
7312 middleop = gimple_assign_rhs1 (stmt);
7313 def_stmt = SSA_NAME_DEF_STMT (middleop);
7314 if (!is_gimple_assign (def_stmt)
7315 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
7316 return false;
7317 innerop = gimple_assign_rhs1 (def_stmt);
7318 if (TREE_CODE (innerop) != SSA_NAME)
7319 return false;
7320
7321 /* Get the value-range of the inner operand. */
7322 innervr = get_value_range (innerop);
7323 if (innervr->type != VR_RANGE
7324 || TREE_CODE (innervr->min) != INTEGER_CST
7325 || TREE_CODE (innervr->max) != INTEGER_CST)
7326 return false;
7327
7328 /* Simulate the conversion chain to check if the result is equal if
7329 the middle conversion is removed. */
7330 innermin = tree_to_double_int (innervr->min);
7331 innermax = tree_to_double_int (innervr->max);
7332
7333 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
7334 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
7335 final_prec = TYPE_PRECISION (finaltype);
7336
7337 /* If the first conversion is not injective, the second must not
7338 be widening. */
7339 if (double_int_cmp (double_int_sub (innermax, innermin),
7340 double_int_mask (middle_prec), true) > 0
7341 && middle_prec < final_prec)
7342 return false;
7343 /* We also want a medium value so that we can track the effect that
7344 narrowing conversions with sign change have. */
7345 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
7346 if (inner_unsigned_p)
7347 innermed = double_int_rshift (double_int_mask (inner_prec),
7348 1, inner_prec, false);
7349 else
7350 innermed = double_int_zero;
7351 if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0
7352 || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0)
7353 innermed = innermin;
7354
7355 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
7356 middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p);
7357 middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p);
7358 middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p);
7359
7360 /* Require that the final conversion applied to both the original
7361 and the intermediate range produces the same result. */
7362 final_unsigned_p = TYPE_UNSIGNED (finaltype);
7363 if (!double_int_equal_p (double_int_ext (middlemin,
7364 final_prec, final_unsigned_p),
7365 double_int_ext (innermin,
7366 final_prec, final_unsigned_p))
7367 || !double_int_equal_p (double_int_ext (middlemed,
7368 final_prec, final_unsigned_p),
7369 double_int_ext (innermed,
7370 final_prec, final_unsigned_p))
7371 || !double_int_equal_p (double_int_ext (middlemax,
7372 final_prec, final_unsigned_p),
7373 double_int_ext (innermax,
7374 final_prec, final_unsigned_p)))
7375 return false;
7376
7377 gimple_assign_set_rhs1 (stmt, innerop);
7378 update_stmt (stmt);
7379 return true;
7380 }
7381
7382 /* Return whether the value range *VR fits in an integer type specified
7383 by PRECISION and UNSIGNED_P. */
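/* For instance (illustrative values), the range [0, 200] of a 32-bit
   unsigned variable fits in an 8-bit unsigned type, while [0, 300]
   does not.  */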
7384
7385 static bool
7386 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
7387 {
7388 tree src_type;
7389 unsigned src_precision;
7390 double_int tem;
7391
7392 /* We can only handle integral and pointer types. */
7393 src_type = TREE_TYPE (vr->min);
7394 if (!INTEGRAL_TYPE_P (src_type)
7395 && !POINTER_TYPE_P (src_type))
7396 return false;
7397
7398 /* An extension is always fine, and so is an identity transform. */
7399 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
7400 if (src_precision < precision
7401 || (src_precision == precision
7402 && TYPE_UNSIGNED (src_type) == unsigned_p))
7403 return true;
7404
7405 /* Now we can only handle ranges with constant bounds. */
7406 if (vr->type != VR_RANGE
7407 || TREE_CODE (vr->min) != INTEGER_CST
7408 || TREE_CODE (vr->max) != INTEGER_CST)
7409 return false;
7410
7411 /* For precision-preserving sign-changes the MSB of the double-int
7412 has to be clear. */
7413 if (src_precision == precision
7414 && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
7415 return false;
7416
7417 /* Then we can perform the conversion on both ends and compare
7418 the result for equality. */
7419 tem = double_int_ext (tree_to_double_int (vr->min), precision, unsigned_p);
7420 if (!double_int_equal_p (tree_to_double_int (vr->min), tem))
7421 return false;
7422 tem = double_int_ext (tree_to_double_int (vr->max), precision, unsigned_p);
7423 if (!double_int_equal_p (tree_to_double_int (vr->max), tem))
7424 return false;
7425
7426 return true;
7427 }
7428
7429 /* Simplify a conversion from integral SSA name to float in STMT. */
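/* For instance (illustrative values), converting an unsigned int u_1
   with range [0, 1000] to double can be done through a signed integer
   by inserting a sign-change conversion before the float conversion,
   which targets typically support more directly.  */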
7430
7431 static bool
7432 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7433 {
7434 tree rhs1 = gimple_assign_rhs1 (stmt);
7435 value_range_t *vr = get_value_range (rhs1);
7436 enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
7437 enum machine_mode mode;
7438 tree tem;
7439 gimple conv;
7440
7441 /* We can only handle constant ranges. */
7442 if (vr->type != VR_RANGE
7443 || TREE_CODE (vr->min) != INTEGER_CST
7444 || TREE_CODE (vr->max) != INTEGER_CST)
7445 return false;
7446
7447 /* First check if we can use a signed type in place of an unsigned. */
7448 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
7449 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
7450 != CODE_FOR_nothing)
7451 && range_fits_type_p (vr, GET_MODE_PRECISION
7452 (TYPE_MODE (TREE_TYPE (rhs1))), 0))
7453 mode = TYPE_MODE (TREE_TYPE (rhs1));
7454 /* If we can do the conversion in the current input mode do nothing. */
7455 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
7456 TYPE_UNSIGNED (TREE_TYPE (rhs1))))
7457 return false;
7458 /* Otherwise search for a mode we can use, starting from the narrowest
7459 integer mode available. */
7460 else
7461 {
7462 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
7463 do
7464 {
7465 /* If we cannot do a signed conversion to float from mode
7466 or if the value-range does not fit in the signed type,
7467 try with a wider mode. */
7468 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
7469 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
7470 break;
7471
7472 mode = GET_MODE_WIDER_MODE (mode);
7473 /* But do not widen the input. Instead leave that to the
7474 optabs expansion code. */
7475 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
7476 return false;
7477 }
7478 while (mode != VOIDmode);
7479 if (mode == VOIDmode)
7480 return false;
7481 }
7482
7483 /* It works; insert a truncation or sign-change before the
7484 float conversion. */
7485 tem = create_tmp_var (build_nonstandard_integer_type
7486 (GET_MODE_PRECISION (mode), 0), NULL);
7487 conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
7488 tem = make_ssa_name (tem, conv);
7489 gimple_assign_set_lhs (conv, tem);
7490 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
7491 gimple_assign_set_rhs1 (stmt, tem);
7492 update_stmt (stmt);
7493
7494 return true;
7495 }
7496
7497 /* Simplify STMT using ranges if possible. */
7498
7499 static bool
7500 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7501 {
7502 gimple stmt = gsi_stmt (*gsi);
7503 if (is_gimple_assign (stmt))
7504 {
7505 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7506 tree rhs1 = gimple_assign_rhs1 (stmt);
7507
7508 switch (rhs_code)
7509 {
7510 case EQ_EXPR:
7511 case NE_EXPR:
7512 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
7513 if the RHS is zero or one, and the LHS is known to be boolean
7514 values. */
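	/* For instance (illustrative names), with b_1 known to be 0 or 1,
	   x_2 = b_1 != 0 can become x_2 = b_1 and x_2 = b_1 == 0 can
	   become x_2 = b_1 ^ 1.  */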
7515 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7516 return simplify_truth_ops_using_ranges (gsi, stmt);
7517 break;
7518
7519 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7520 and BIT_AND_EXPR respectively if the first operand is known to be
7521 nonnegative and the second operand is an exact power of two. */
7522 case TRUNC_DIV_EXPR:
7523 case TRUNC_MOD_EXPR:
7524 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
7525 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7526 return simplify_div_or_mod_using_ranges (stmt);
7527 break;
7528
7529 /* Transform ABS (X) into X or -X as appropriate. */
7530 case ABS_EXPR:
7531 if (TREE_CODE (rhs1) == SSA_NAME
7532 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7533 return simplify_abs_using_ranges (stmt);
7534 break;
7535
7536 case BIT_AND_EXPR:
7537 case BIT_IOR_EXPR:
7538 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
7539 if all the bits being cleared are already cleared or
7540 all the bits being set are already set. */
7541 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7542 return simplify_bit_ops_using_ranges (gsi, stmt);
7543 break;
7544
7545 CASE_CONVERT:
7546 if (TREE_CODE (rhs1) == SSA_NAME
7547 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7548 return simplify_conversion_using_ranges (stmt);
7549 break;
7550
7551 case FLOAT_EXPR:
7552 if (TREE_CODE (rhs1) == SSA_NAME
7553 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7554 return simplify_float_conversion_using_ranges (gsi, stmt);
7555 break;
7556
7557 default:
7558 break;
7559 }
7560 }
7561 else if (gimple_code (stmt) == GIMPLE_COND)
7562 return simplify_cond_using_ranges (stmt);
7563 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7564 return simplify_switch_using_ranges (stmt);
7565
7566 return false;
7567 }
7568
7569 /* If the statement pointed by SI has a predicate whose value can be
7570 computed using the value range information computed by VRP, compute
7571 its value and return true. Otherwise, return false. */
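/* For instance (illustrative values), a GIMPLE_COND such as
   if (x_1 > 10) where VRP has computed x_1 to be in [20, 30] folds to a
   constant-true condition.  */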
7572
7573 static bool
7574 fold_predicate_in (gimple_stmt_iterator *si)
7575 {
7576 bool assignment_p = false;
7577 tree val;
7578 gimple stmt = gsi_stmt (*si);
7579
7580 if (is_gimple_assign (stmt)
7581 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7582 {
7583 assignment_p = true;
7584 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7585 gimple_assign_rhs1 (stmt),
7586 gimple_assign_rhs2 (stmt),
7587 stmt);
7588 }
7589 else if (gimple_code (stmt) == GIMPLE_COND)
7590 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7591 gimple_cond_lhs (stmt),
7592 gimple_cond_rhs (stmt),
7593 stmt);
7594 else
7595 return false;
7596
7597 if (val)
7598 {
7599 if (assignment_p)
7600 val = fold_convert (gimple_expr_type (stmt), val);
7601
7602 if (dump_file)
7603 {
7604 fprintf (dump_file, "Folding predicate ");
7605 print_gimple_expr (dump_file, stmt, 0, 0);
7606 fprintf (dump_file, " to ");
7607 print_generic_expr (dump_file, val, 0);
7608 fprintf (dump_file, "\n");
7609 }
7610
7611 if (is_gimple_assign (stmt))
7612 gimple_assign_set_rhs_from_tree (si, val);
7613 else
7614 {
7615 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7616 if (integer_zerop (val))
7617 gimple_cond_make_false (stmt);
7618 else if (integer_onep (val))
7619 gimple_cond_make_true (stmt);
7620 else
7621 gcc_unreachable ();
7622 }
7623
7624 return true;
7625 }
7626
7627 return false;
7628 }
7629
7630 /* Callback for substitute_and_fold folding the stmt at *SI. */
7631
7632 static bool
7633 vrp_fold_stmt (gimple_stmt_iterator *si)
7634 {
7635 if (fold_predicate_in (si))
7636 return true;
7637
7638 return simplify_stmt_using_ranges (si);
7639 }
7640
7641 /* Stack of dest,src equivalency pairs that need to be restored after
7642 each attempt to thread a block's incoming edge to an outgoing edge.
7643
7644 A NULL entry is used to mark the end of pairs which need to be
7645 restored. */
7646 static VEC(tree,heap) *stack;
7647
7648 /* A trivial wrapper so that we can present the generic jump threading
7649 code with a simple API for simplifying statements. STMT is the
7650 statement we want to simplify, WITHIN_STMT provides the location
7651 for any overflow warnings. */
7652
7653 static tree
7654 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7655 {
7656 /* We only use VRP information to simplify conditionals. This is
7657 overly conservative, but it's unclear if doing more would be
7658 worth the compile time cost. */
7659 if (gimple_code (stmt) != GIMPLE_COND)
7660 return NULL;
7661
7662 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7663 gimple_cond_lhs (stmt),
7664 gimple_cond_rhs (stmt), within_stmt);
7665 }
7666
7667 /* Blocks which have more than one predecessor and more than
7668 one successor present jump threading opportunities, i.e.,
7669 when the block is reached from a specific predecessor, we
7670 may be able to determine which of the outgoing edges will
7671 be traversed. When this optimization applies, we are able
7672 to avoid conditionals at runtime and we may expose secondary
7673 optimization opportunities.
7674
7675 This routine is effectively a driver for the generic jump
7676 threading code. It basically just presents the generic code
7677 with edges that may be suitable for jump threading.
7678
7679 Unlike DOM, we do not iterate VRP if jump threading was successful.
7680 While iterating may expose new opportunities for VRP, it is expected
7681 those opportunities would be very limited and the compile time cost
7682 to expose those opportunities would be significant.
7683
7684 As jump threading opportunities are discovered, they are registered
7685 for later realization. */
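/* For instance (illustrative shape), if a block ends in if (x_1 > 10)
   and is reached from a predecessor edge along which VRP knows x_1 is in
   [0, 5], that incoming edge can be threaded directly to the false
   successor.  */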
7686
7687 static void
7688 identify_jump_threads (void)
7689 {
7690 basic_block bb;
7691 gimple dummy;
7692 int i;
7693 edge e;
7694
7695 /* Ugh. When substituting values earlier in this pass we can
7696 wipe the dominance information. So rebuild the dominator
7697 information as we need it within the jump threading code. */
7698 calculate_dominance_info (CDI_DOMINATORS);
7699
7700 /* We do not allow VRP information to be used for jump threading
7701 across a back edge in the CFG. Otherwise it becomes too
7702 difficult to avoid eliminating loop exit tests. Of course
7703 EDGE_DFS_BACK is not accurate at this time so we have to
7704 recompute it. */
7705 mark_dfs_back_edges ();
7706
7707 /* Do not thread across edges we are about to remove. Just marking
7708 them as EDGE_DFS_BACK will do. */
7709 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7710 e->flags |= EDGE_DFS_BACK;
7711
7712 /* Allocate our unwinder stack to unwind any temporary equivalences
7713 that might be recorded. */
7714 stack = VEC_alloc (tree, heap, 20);
7715
7716 /* To avoid lots of silly node creation, we create a single
7717 conditional and just modify it in-place when attempting to
7718 thread jumps. */
7719 dummy = gimple_build_cond (EQ_EXPR,
7720 integer_zero_node, integer_zero_node,
7721 NULL, NULL);
7722
7723 /* Walk through all the blocks finding those which present a
7724 potential jump threading opportunity. We could set this up
7725 as a dominator walker and record data during the walk, but
7726 I doubt it's worth the effort for the classes of jump
7727 threading opportunities we are trying to identify at this
7728 point in compilation. */
7729 FOR_EACH_BB (bb)
7730 {
7731 gimple last;
7732
7733 /* If the generic jump threading code does not find this block
7734 interesting, then there is nothing to do. */
7735 if (! potentially_threadable_block (bb))
7736 continue;
7737
7738 /* We only care about blocks ending in a COND_EXPR. While there
7739 may be some value in handling SWITCH_EXPR here, I doubt it's
7740 terribly important. */
7741 last = gsi_stmt (gsi_last_bb (bb));
7742
7743 /* We're basically looking for a switch or any kind of conditional with
7744 integral or pointer type arguments. Note the type of the second
7745 argument will be the same as the first argument, so no need to
7746 check it explicitly. */
7747 if (gimple_code (last) == GIMPLE_SWITCH
7748 || (gimple_code (last) == GIMPLE_COND
7749 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7750 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7751 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
7752 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7753 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
7754 {
7755 edge_iterator ei;
7756
7757 /* We've got a block with multiple predecessors and multiple
7758 successors which also ends in a suitable conditional or
7759 switch statement. For each predecessor, see if we can thread
7760 it to a specific successor. */
7761 FOR_EACH_EDGE (e, ei, bb->preds)
7762 {
7763 /* Do not thread across back edges or abnormal edges
7764 in the CFG. */
7765 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7766 continue;
7767
7768 thread_across_edge (dummy, e, true, &stack,
7769 simplify_stmt_for_jump_threading);
7770 }
7771 }
7772 }
7773
7774 /* We do not actually update the CFG or SSA graphs at this point as
7775 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7776 handle ASSERT_EXPRs gracefully. */
7777 }
7778
7779 /* We identified all the jump threading opportunities earlier, but could
7780 not transform the CFG at that time. This routine transforms the
7781 CFG and arranges for the dominator tree to be rebuilt if necessary.
7782
7783 Note the SSA graph update will occur during the normal TODO
7784 processing by the pass manager. */
7785 static void
7786 finalize_jump_threads (void)
7787 {
7788 thread_through_all_blocks (false);
7789 VEC_free (tree, heap, stack);
7790 }
7791
7792
7793 /* Traverse all the blocks folding conditionals with known ranges. */
7794
7795 static void
7796 vrp_finalize (void)
7797 {
7798 size_t i;
7799
7800 values_propagated = true;
7801
7802 if (dump_file)
7803 {
7804 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7805 dump_all_value_ranges (dump_file);
7806 fprintf (dump_file, "\n");
7807 }
7808
7809 substitute_and_fold (op_with_constant_singleton_value_range,
7810 vrp_fold_stmt, false);
7811
7812 if (warn_array_bounds)
7813 check_all_array_refs ();
7814
7815 /* We must identify jump threading opportunities before we release
7816 the datastructures built by VRP. */
7817 identify_jump_threads ();
7818
7819 /* Free allocated memory. */
7820 for (i = 0; i < num_vr_values; i++)
7821 if (vr_value[i])
7822 {
7823 BITMAP_FREE (vr_value[i]->equiv);
7824 free (vr_value[i]);
7825 }
7826
7827 free (vr_value);
7828 free (vr_phi_edge_counts);
7829
7830 /* So that we can distinguish between VRP data being available
7831 and not available. */
7832 vr_value = NULL;
7833 vr_phi_edge_counts = NULL;
7834 }
7835
7836
7837 /* Main entry point to VRP (Value Range Propagation). This pass is
7838 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7839 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7840 Programming Language Design and Implementation, pp. 67-78, 1995.
7841 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7842
7843 This is essentially an SSA-CCP pass modified to deal with ranges
7844 instead of constants.
7845
7846 While propagating ranges, we may find that two or more SSA names
7847 have equivalent, though distinct, ranges. For instance,
7848
7849 1 x_9 = p_3->a;
7850 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7851 3 if (p_4 == q_2)
7852 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7853 5 endif
7854 6 if (q_2)
7855
7856 In the code above, pointer p_5 has range [q_2, q_2], but from the
7857 code we can also determine that p_5 cannot be NULL and, if q_2 had
7858 a non-varying range, p_5's range should also be compatible with it.
7859
7860 These equivalences are created by two expressions: ASSERT_EXPR and
7861 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7862 result of another assertion, then we can use the fact that p_5 and
7863 p_4 are equivalent when evaluating p_5's range.
7864
7865 Together with value ranges, we also propagate these equivalences
7866 between names so that we can take advantage of information from
7867 multiple ranges when doing final replacement. Note that this
7868 equivalency relation is transitive but not symmetric.
7869
7870 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7871 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7872 in contexts where that assertion does not hold (e.g., in line 6).
7873
7874 TODO: the main difference between this pass and Patterson's is that
7875 we do not propagate edge probabilities. We only compute whether
7876 edges can be taken or not. That is, instead of having a spectrum
7877 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7878 DON'T KNOW. In the future, it may be worthwhile to propagate
7879 probabilities to aid branch prediction. */
7880
7881 static unsigned int
7882 execute_vrp (void)
7883 {
7884 int i;
7885 edge e;
7886 switch_update *su;
7887
7888 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7889 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7890 scev_initialize ();
7891
7892 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
7893 Inserting assertions may split edges which will invalidate
7894 EDGE_DFS_BACK. */
7895 insert_range_assertions ();
7896
7897 /* Estimate number of iterations - but do not use undefined behavior
7898 for this. We can't do this lazily as other functions may compute
7899 this using undefined behavior. */
7900 free_numbers_of_iterations_estimates ();
7901 estimate_numbers_of_iterations (false);
7902
7903 to_remove_edges = VEC_alloc (edge, heap, 10);
7904 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7905 threadedge_initialize_values ();
7906
7907 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
7908 mark_dfs_back_edges ();
7909
7910 vrp_initialize ();
7911 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7912 vrp_finalize ();
7913
7914 free_numbers_of_iterations_estimates ();
7915
7916 /* ASSERT_EXPRs must be removed before finalizing jump threads
7917 as finalizing jump threads calls the CFG cleanup code which
7918 does not properly handle ASSERT_EXPRs. */
7919 remove_range_assertions ();
7920
7921 /* If we exposed any new variables, go ahead and put them into
7922 SSA form now, before we handle jump threading. This simplifies
7923 interactions between rewriting of _DECL nodes into SSA form
7924 and rewriting SSA_NAME nodes into SSA form after block
7925 duplication and CFG manipulation. */
7926 update_ssa (TODO_update_ssa);
7927
7928 finalize_jump_threads ();
7929
7930 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7931 CFG in a broken state and requires a cfg_cleanup run. */
7932 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7933 remove_edge (e);
7934 /* Update SWITCH_EXPR case label vector. */
7935 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
7936 {
7937 size_t j;
7938 size_t n = TREE_VEC_LENGTH (su->vec);
7939 tree label;
7940 gimple_switch_set_num_labels (su->stmt, n);
7941 for (j = 0; j < n; j++)
7942 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7943 /* As we may have replaced the default label with a regular one
7944 make sure to make it a real default label again. This ensures
7945 optimal expansion. */
7946 label = gimple_switch_default_label (su->stmt);
7947 CASE_LOW (label) = NULL_TREE;
7948 CASE_HIGH (label) = NULL_TREE;
7949 }
7950
7951 if (VEC_length (edge, to_remove_edges) > 0)
7952 free_dominance_info (CDI_DOMINATORS);
7953
7954 VEC_free (edge, heap, to_remove_edges);
7955 VEC_free (switch_update, heap, to_update_switch_stmts);
7956 threadedge_finalize_values ();
7957
7958 scev_finalize ();
7959 loop_optimizer_finalize ();
7960 return 0;
7961 }
7962
7963 static bool
7964 gate_vrp (void)
7965 {
7966 return flag_tree_vrp != 0;
7967 }
7968
7969 struct gimple_opt_pass pass_vrp =
7970 {
7971 {
7972 GIMPLE_PASS,
7973 "vrp", /* name */
7974 gate_vrp, /* gate */
7975 execute_vrp, /* execute */
7976 NULL, /* sub */
7977 NULL, /* next */
7978 0, /* static_pass_number */
7979 TV_TREE_VRP, /* tv_id */
7980 PROP_ssa, /* properties_required */
7981 0, /* properties_provided */
7982 0, /* properties_destroyed */
7983 0, /* todo_flags_start */
7984 TODO_cleanup_cfg
7985 | TODO_update_ssa
7986 | TODO_verify_ssa
7987 | TODO_verify_flow
7988 | TODO_ggc_collect /* todo_flags_finish */
7989 }
7990 };
7991