1 /* Support routines for Value Range Propagation (VRP).
2    Copyright (C) 2005-2016 Free Software Foundation, Inc.
3    Contributed by Diego Novillo <dnovillo@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-ssa-loop.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "intl.h"
51 #include "cfgloop.h"
52 #include "tree-scalar-evolution.h"
53 #include "tree-ssa-propagate.h"
54 #include "tree-chrec.h"
55 #include "tree-ssa-threadupdate.h"
56 #include "tree-ssa-scopedtables.h"
57 #include "tree-ssa-threadedge.h"
58 #include "omp-low.h"
59 #include "target.h"
60 #include "case-cfn-macros.h"
61 
62 /* Range of values that can be associated with an SSA_NAME after VRP
63    has executed.  */
64 struct value_range
65 {
66   /* Lattice value represented by this range.  */
67   enum value_range_type type;
68 
69   /* Minimum and maximum values represented by this range.  These
70      values should be interpreted as follows:
71 
72 	- If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
73 	  be NULL.
74 
75 	- If TYPE == VR_RANGE then MIN holds the minimum value and
76 	  MAX holds the maximum value of the range [MIN, MAX].
77 
78 	  - If TYPE == VR_ANTI_RANGE the variable is known to NOT
79 	  take any values in the range [MIN, MAX].  */
80   tree min;
81   tree max;
82 
83   /* Set of SSA names whose value ranges are equivalent to this one.
84      This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.  */
85   bitmap equiv;
86 };
87 
88 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
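
/* For instance, an SSA name known to lie between 1 and 10 is represented
   by the lattice value { VR_RANGE, 1, 10, equiv }, while a pointer known
   to be non-NULL is encoded as the anti-range ~[0, 0], i.e.
   { VR_ANTI_RANGE, 0, 0, equiv } (see set_value_range_to_nonnull below).  */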
89 
90 /* Set of SSA names found live during the RPO traversal of the function
91    for still active basic-blocks.  */
92 static sbitmap *live;
93 
94 /* Return true if the SSA name NAME is live on the edge E.  */
95 
96 static bool
97 live_on_edge (edge e, tree name)
98 {
99   return (live[e->dest->index]
100 	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
101 }
102 
103 /* Local functions.  */
104 static int compare_values (tree val1, tree val2);
105 static int compare_values_warnv (tree val1, tree val2, bool *);
106 static void vrp_meet (value_range *, value_range *);
107 static void vrp_intersect_ranges (value_range *, value_range *);
108 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
109 						     tree, tree, bool, bool *,
110 						     bool *);
111 
112 /* Location information for ASSERT_EXPRs.  Each instance of this
113    structure describes an ASSERT_EXPR for an SSA name.  Since a single
114    SSA name may have more than one assertion associated with it, these
115    locations are kept in a linked list attached to the corresponding
116    SSA name.  */
117 struct assert_locus
118 {
119   /* Basic block where the assertion would be inserted.  */
120   basic_block bb;
121 
122   /* Some assertions need to be inserted on an edge (e.g., assertions
123      generated by COND_EXPRs).  In those cases, BB will be NULL.  */
124   edge e;
125 
126   /* Pointer to the statement that generated this assertion.  */
127   gimple_stmt_iterator si;
128 
129   /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
130   enum tree_code comp_code;
131 
132   /* Value being compared against.  */
133   tree val;
134 
135   /* Expression to compare.  */
136   tree expr;
137 
138   /* Next node in the linked list.  */
139   assert_locus *next;
140 };
141 
142 /* If bit I is present, it means that SSA name N_i has a list of
143    assertions that should be inserted in the IL.  */
144 static bitmap need_assert_for;
145 
146 /* Array of location lists indicating where to insert assertions.  ASSERTS_FOR[I]
147    holds a list of assert_locus nodes that describe where
148    ASSERT_EXPRs for SSA name N_I should be inserted.  */
149 static assert_locus **asserts_for;
150 
151 /* Value range array.  After propagation, VR_VALUE[I] holds the range
152    of values that SSA name N_I may take.  */
153 static unsigned num_vr_values;
154 static value_range **vr_value;
155 static bool values_propagated;
156 
157 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
158    number of executable edges we saw the last time we visited the
159    node.  */
160 static int *vr_phi_edge_counts;
161 
162 struct switch_update {
163   gswitch *stmt;
164   tree vec;
165 };
166 
167 static vec<edge> to_remove_edges;
168 static vec<switch_update> to_update_switch_stmts;
169 
170 
171 /* Return the maximum value for TYPE.  */
172 
173 static inline tree
174 vrp_val_max (const_tree type)
175 {
176   if (!INTEGRAL_TYPE_P (type))
177     return NULL_TREE;
178 
179   return TYPE_MAX_VALUE (type);
180 }
181 
182 /* Return the minimum value for TYPE.  */
183 
184 static inline tree
185 vrp_val_min (const_tree type)
186 {
187   if (!INTEGRAL_TYPE_P (type))
188     return NULL_TREE;
189 
190   return TYPE_MIN_VALUE (type);
191 }
192 
193 /* Return whether VAL is equal to the maximum value of its type.  This
194    will be true for a positive overflow infinity.  We can't do a
195    simple equality comparison with TYPE_MAX_VALUE because C typedefs
196    and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
197    to the integer constant with the same value in the type.  */
198 
199 static inline bool
200 vrp_val_is_max (const_tree val)
201 {
202   tree type_max = vrp_val_max (TREE_TYPE (val));
203   return (val == type_max
204 	  || (type_max != NULL_TREE
205 	      && operand_equal_p (val, type_max, 0)));
206 }
207 
208 /* Return whether VAL is equal to the minimum value of its type.  This
209    will be true for a negative overflow infinity.  */
210 
211 static inline bool
212 vrp_val_is_min (const_tree val)
213 {
214   tree type_min = vrp_val_min (TREE_TYPE (val));
215   return (val == type_min
216 	  || (type_min != NULL_TREE
217 	      && operand_equal_p (val, type_min, 0)));
218 }
219 
220 
221 /* Return whether TYPE should use an overflow infinity distinct from
222    TYPE_{MIN,MAX}_VALUE.  We use an overflow infinity value to
223    represent a signed overflow during VRP computations.  An infinity
224    is distinct from a half-range, which will go from some number to
225    TYPE_{MIN,MAX}_VALUE.  */
226 
227 static inline bool
228 needs_overflow_infinity (const_tree type)
229 {
230   return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
231 }
232 
233 /* Return whether TYPE can support our overflow infinity
234    representation: we use the TREE_OVERFLOW flag, which only exists
235    for constants.  If TYPE doesn't support this, we don't optimize
236    cases which would require signed overflow--we drop them to
237    VARYING.  */
238 
239 static inline bool
240 supports_overflow_infinity (const_tree type)
241 {
242   tree min = vrp_val_min (type), max = vrp_val_max (type);
243   gcc_checking_assert (needs_overflow_infinity (type));
244   return (min != NULL_TREE
245 	  && CONSTANT_CLASS_P (min)
246 	  && max != NULL_TREE
247 	  && CONSTANT_CLASS_P (max));
248 }
249 
250 /* VAL is the maximum or minimum value of a type.  Return a
251    corresponding overflow infinity.  */
252 
253 static inline tree
254 make_overflow_infinity (tree val)
255 {
256   gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
257   val = copy_node (val);
258   TREE_OVERFLOW (val) = 1;
259   return val;
260 }
261 
262 /* Return a negative overflow infinity for TYPE.  */
263 
264 static inline tree
265 negative_overflow_infinity (tree type)
266 {
267   gcc_checking_assert (supports_overflow_infinity (type));
268   return make_overflow_infinity (vrp_val_min (type));
269 }
270 
271 /* Return a positive overflow infinity for TYPE.  */
272 
273 static inline tree
274 positive_overflow_infinity (tree type)
275 {
276   gcc_checking_assert (supports_overflow_infinity (type));
277   return make_overflow_infinity (vrp_val_max (type));
278 }
279 
280 /* Return whether VAL is a negative overflow infinity.  */
281 
282 static inline bool
283 is_negative_overflow_infinity (const_tree val)
284 {
285   return (TREE_OVERFLOW_P (val)
286 	  && needs_overflow_infinity (TREE_TYPE (val))
287 	  && vrp_val_is_min (val));
288 }
289 
290 /* Return whether VAL is a positive overflow infinity.  */
291 
292 static inline bool
293 is_positive_overflow_infinity (const_tree val)
294 {
295   return (TREE_OVERFLOW_P (val)
296 	  && needs_overflow_infinity (TREE_TYPE (val))
297 	  && vrp_val_is_max (val));
298 }
299 
300 /* Return whether VAL is a positive or negative overflow infinity.  */
301 
302 static inline bool
303 is_overflow_infinity (const_tree val)
304 {
305   return (TREE_OVERFLOW_P (val)
306 	  && needs_overflow_infinity (TREE_TYPE (val))
307 	  && (vrp_val_is_min (val) || vrp_val_is_max (val)));
308 }
309 
310 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
311 
312 static inline bool
313 stmt_overflow_infinity (gimple *stmt)
314 {
315   if (is_gimple_assign (stmt)
316       && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
317       GIMPLE_SINGLE_RHS)
318     return is_overflow_infinity (gimple_assign_rhs1 (stmt));
319   return false;
320 }
321 
322 /* If VAL is now an overflow infinity, return VAL.  Otherwise, return
323    the same value with TREE_OVERFLOW clear.  This can be used to avoid
324    confusing a regular value with an overflow value.  */
325 
326 static inline tree
327 avoid_overflow_infinity (tree val)
328 {
329   if (!is_overflow_infinity (val))
330     return val;
331 
332   if (vrp_val_is_max (val))
333     return vrp_val_max (TREE_TYPE (val));
334   else
335     {
336       gcc_checking_assert (vrp_val_is_min (val));
337       return vrp_val_min (TREE_TYPE (val));
338     }
339 }
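
/* Concretely, for a 32-bit signed int, negative_overflow_infinity returns a
   copy of INT_MIN with TREE_OVERFLOW set and positive_overflow_infinity a
   copy of INT_MAX with TREE_OVERFLOW set.  Such a value still satisfies
   vrp_val_is_min/vrp_val_is_max, and avoid_overflow_infinity maps it back
   to the plain TYPE_MIN_VALUE or TYPE_MAX_VALUE.  */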
340 
341 
342 /* Set value range VR to VR_UNDEFINED.  */
343 
344 static inline void
345 set_value_range_to_undefined (value_range *vr)
346 {
347   vr->type = VR_UNDEFINED;
348   vr->min = vr->max = NULL_TREE;
349   if (vr->equiv)
350     bitmap_clear (vr->equiv);
351 }
352 
353 
354 /* Set value range VR to VR_VARYING.  */
355 
356 static inline void
357 set_value_range_to_varying (value_range *vr)
358 {
359   vr->type = VR_VARYING;
360   vr->min = vr->max = NULL_TREE;
361   if (vr->equiv)
362     bitmap_clear (vr->equiv);
363 }
364 
365 
366 /* Set value range VR to {T, MIN, MAX, EQUIV}.  */
367 
368 static void
369 set_value_range (value_range *vr, enum value_range_type t, tree min,
370 		 tree max, bitmap equiv)
371 {
372   /* Check the validity of the range.  */
373   if (flag_checking
374       && (t == VR_RANGE || t == VR_ANTI_RANGE))
375     {
376       int cmp;
377 
378       gcc_assert (min && max);
379 
380       gcc_assert ((!TREE_OVERFLOW_P (min) || is_overflow_infinity (min))
381 		  && (!TREE_OVERFLOW_P (max) || is_overflow_infinity (max)));
382 
383       if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
384 	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
385 
386       cmp = compare_values (min, max);
387       gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
388 
389       if (needs_overflow_infinity (TREE_TYPE (min)))
390 	gcc_assert (!is_overflow_infinity (min)
391 		    || !is_overflow_infinity (max));
392     }
393 
394   if (flag_checking
395       && (t == VR_UNDEFINED || t == VR_VARYING))
396     {
397       gcc_assert (min == NULL_TREE && max == NULL_TREE);
398       gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
399     }
400 
401   vr->type = t;
402   vr->min = min;
403   vr->max = max;
404 
405   /* Since updating the equivalence set involves deep copying the
406      bitmaps, only do it if absolutely necessary.  */
407   if (vr->equiv == NULL
408       && equiv != NULL)
409     vr->equiv = BITMAP_ALLOC (NULL);
410 
411   if (equiv != vr->equiv)
412     {
413       if (equiv && !bitmap_empty_p (equiv))
414 	bitmap_copy (vr->equiv, equiv);
415       else
416 	bitmap_clear (vr->equiv);
417     }
418 }
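
/* A minimal usage sketch (with a hypothetical 32-bit int range):

     value_range vr = VR_INITIALIZER;
     set_value_range (&vr, VR_RANGE,
		      build_int_cst (integer_type_node, 1),
		      build_int_cst (integer_type_node, 10), NULL);

   leaves VR as the range [1, 10]; since EQUIV is NULL and no equivalence
   set existed, vr.equiv stays NULL rather than being allocated.  */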
419 
420 
421 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
422    This means adjusting T, MIN and MAX representing the case of a
423    wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
424    as an anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
425    In corner cases where MAX+1 or MIN-1 wraps this will fall back
426    to varying.
427    This routine exists to ease canonicalization in the case where we
428    extract ranges from var + CST op limit.  */
429 
430 static void
431 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
432 				  tree min, tree max, bitmap equiv)
433 {
434   /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
435   if (t == VR_UNDEFINED)
436     {
437       set_value_range_to_undefined (vr);
438       return;
439     }
440   else if (t == VR_VARYING)
441     {
442       set_value_range_to_varying (vr);
443       return;
444     }
445 
446   /* Nothing to canonicalize for symbolic ranges.  */
447   if (TREE_CODE (min) != INTEGER_CST
448       || TREE_CODE (max) != INTEGER_CST)
449     {
450       set_value_range (vr, t, min, max, equiv);
451       return;
452     }
453 
454   /* MIN and MAX are in the wrong order; swapping them also requires
455      adjusting the VR type.  */
456   if (tree_int_cst_lt (max, min))
457     {
458       tree one, tmp;
459 
460       /* For one-bit precision, if max < min the swapped range covers
461 	 all values, so for VR_RANGE it is varying and for VR_ANTI_RANGE
462 	 it is the empty range; drop to varying in both cases.  */
463       if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
464 	{
465 	  set_value_range_to_varying (vr);
466 	  return;
467 	}
468 
469       one = build_int_cst (TREE_TYPE (min), 1);
470       tmp = int_const_binop (PLUS_EXPR, max, one);
471       max = int_const_binop (MINUS_EXPR, min, one);
472       min = tmp;
473 
474       /* There's one corner case: if we had [C+1, C] before, we now have
475 	 that again.  But this represents an empty value range, so drop
476 	 to varying in this case.  */
477       if (tree_int_cst_lt (max, min))
478 	{
479 	  set_value_range_to_varying (vr);
480 	  return;
481 	}
482 
483       t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
484     }
485 
486   /* Anti-ranges that can be represented as ranges should be so.  */
487   if (t == VR_ANTI_RANGE)
488     {
489       bool is_min = vrp_val_is_min (min);
490       bool is_max = vrp_val_is_max (max);
491 
492       if (is_min && is_max)
493 	{
494 	  /* We cannot deal with empty ranges, drop to varying.
495 	     ???  This could be VR_UNDEFINED instead.  */
496 	  set_value_range_to_varying (vr);
497 	  return;
498 	}
499       else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
500 	       && (is_min || is_max))
501 	{
502 	  /* Non-empty boolean ranges can always be represented
503 	     as a singleton range.  */
504 	  if (is_min)
505 	    min = max = vrp_val_max (TREE_TYPE (min));
506 	  else
507 	    min = max = vrp_val_min (TREE_TYPE (min));
508 	  t = VR_RANGE;
509 	}
510       else if (is_min
511 	       /* As a special exception preserve non-null ranges.  */
512 	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
513 		    && integer_zerop (max)))
514         {
515 	  tree one = build_int_cst (TREE_TYPE (max), 1);
516 	  min = int_const_binop (PLUS_EXPR, max, one);
517 	  max = vrp_val_max (TREE_TYPE (max));
518 	  t = VR_RANGE;
519         }
520       else if (is_max)
521         {
522 	  tree one = build_int_cst (TREE_TYPE (min), 1);
523 	  max = int_const_binop (MINUS_EXPR, min, one);
524 	  min = vrp_val_min (TREE_TYPE (min));
525 	  t = VR_RANGE;
526         }
527     }
528 
529   /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
530   if (needs_overflow_infinity (TREE_TYPE (min))
531       && is_overflow_infinity (min)
532       && is_overflow_infinity (max))
533     {
534       set_value_range_to_varying (vr);
535       return;
536     }
537 
538   set_value_range (vr, t, min, max, equiv);
539 }
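
/* For example, with an 8-bit unsigned type a requested VR_RANGE [250, 5]
   (MAX < MIN, i.e. a wrapping range) is canonicalized to the anti-range
   ~[6, 249], and a requested anti-range ~[0, 10] becomes the range
   [11, 255].  The non-null range ~[0, 0] of an unsigned type is kept as
   an anti-range by the special exception above.  */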
540 
541 /* Copy value range FROM into value range TO.  */
542 
543 static inline void
544 copy_value_range (value_range *to, value_range *from)
545 {
546   set_value_range (to, from->type, from->min, from->max, from->equiv);
547 }
548 
549 /* Set value range VR to a single value.  This function is only called
550    with values we get from statements, and exists to clear the
551    TREE_OVERFLOW flag so that we don't think we have an overflow
552    infinity when we shouldn't.  */
553 
554 static inline void
555 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
556 {
557   gcc_assert (is_gimple_min_invariant (val));
558   if (TREE_OVERFLOW_P (val))
559     val = drop_tree_overflow (val);
560   set_value_range (vr, VR_RANGE, val, val, equiv);
561 }
562 
563 /* Set value range VR to a non-negative range of type TYPE.
564    OVERFLOW_INFINITY indicates whether to use an overflow infinity
565    rather than TYPE_MAX_VALUE; this should be true if we determine
566    that the range is nonnegative based on the assumption that signed
567    overflow does not occur.  */
568 
569 static inline void
570 set_value_range_to_nonnegative (value_range *vr, tree type,
571 				bool overflow_infinity)
572 {
573   tree zero;
574 
575   if (overflow_infinity && !supports_overflow_infinity (type))
576     {
577       set_value_range_to_varying (vr);
578       return;
579     }
580 
581   zero = build_int_cst (type, 0);
582   set_value_range (vr, VR_RANGE, zero,
583 		   (overflow_infinity
584 		    ? positive_overflow_infinity (type)
585 		    : TYPE_MAX_VALUE (type)),
586 		   vr->equiv);
587 }
588 
589 /* Set value range VR to a non-NULL range of type TYPE.  */
590 
591 static inline void
592 set_value_range_to_nonnull (value_range *vr, tree type)
593 {
594   tree zero = build_int_cst (type, 0);
595   set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
596 }
597 
598 
599 /* Set value range VR to a NULL range of type TYPE.  */
600 
601 static inline void
602 set_value_range_to_null (value_range *vr, tree type)
603 {
604   set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
605 }
606 
607 
608 /* Set value range VR to a range of a truthvalue of type TYPE.  */
609 
610 static inline void
611 set_value_range_to_truthvalue (value_range *vr, tree type)
612 {
613   if (TYPE_PRECISION (type) == 1)
614     set_value_range_to_varying (vr);
615   else
616     set_value_range (vr, VR_RANGE,
617 		     build_int_cst (type, 0), build_int_cst (type, 1),
618 		     vr->equiv);
619 }
620 
621 
622 /* If abs (min) < abs (max), set VR to [-max, max]; if
623    abs (min) >= abs (max), set VR to [-min, min].  */
624 
625 static void
626 abs_extent_range (value_range *vr, tree min, tree max)
627 {
628   int cmp;
629 
630   gcc_assert (TREE_CODE (min) == INTEGER_CST);
631   gcc_assert (TREE_CODE (max) == INTEGER_CST);
632   gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
633   gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
634   min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
635   max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
636   if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
637     {
638       set_value_range_to_varying (vr);
639       return;
640     }
641   cmp = compare_values (min, max);
642   if (cmp == -1)
643     min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
644   else if (cmp == 0 || cmp == 1)
645     {
646       max = min;
647       min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
648     }
649   else
650     {
651       set_value_range_to_varying (vr);
652       return;
653     }
654   set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
655 }
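
/* E.g. for min = -5, max = 3 the absolute values are 5 and 3, so the
   resulting range is [-5, 5]; for min = -3, max = 7 it is [-7, 7].  If
   taking the absolute value overflows (min == INT_MIN for a signed type),
   the range drops to varying.  */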
656 
657 
658 /* Return value range information for VAR.
659 
660    If we have no value ranges recorded (i.e., VRP is not running), then
661    return NULL.  Otherwise create an empty range if none existed for VAR.  */
662 
663 static value_range *
664 get_value_range (const_tree var)
665 {
666   static const value_range vr_const_varying
667     = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
668   value_range *vr;
669   tree sym;
670   unsigned ver = SSA_NAME_VERSION (var);
671 
672   /* If we have no recorded ranges, then return NULL.  */
673   if (! vr_value)
674     return NULL;
675 
676   /* If we query the range for a new SSA name return an unmodifiable VARYING.
677      We should get here at most from the substitute-and-fold stage which
678      will never try to change values.  */
679   if (ver >= num_vr_values)
680     return CONST_CAST (value_range *, &vr_const_varying);
681 
682   vr = vr_value[ver];
683   if (vr)
684     return vr;
685 
686   /* After propagation finished do not allocate new value-ranges.  */
687   if (values_propagated)
688     return CONST_CAST (value_range *, &vr_const_varying);
689 
690   /* Create a default value range.  */
691   vr_value[ver] = vr = XCNEW (value_range);
692 
693   /* Defer allocating the equivalence set.  */
694   vr->equiv = NULL;
695 
696   /* If VAR is a default definition of a parameter, the variable can
697      take any value in VAR's type.  */
698   if (SSA_NAME_IS_DEFAULT_DEF (var))
699     {
700       sym = SSA_NAME_VAR (var);
701       if (TREE_CODE (sym) == PARM_DECL)
702 	{
703 	  /* Try to use the "nonnull" attribute to create ~[0, 0]
704 	     anti-ranges for pointers.  Note that this is only valid with
705 	     default definitions of PARM_DECLs.  */
706 	  if (POINTER_TYPE_P (TREE_TYPE (sym))
707 	      && nonnull_arg_p (sym))
708 	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
709 	  else
710 	    set_value_range_to_varying (vr);
711 	}
712       else if (TREE_CODE (sym) == RESULT_DECL
713 	       && DECL_BY_REFERENCE (sym))
714 	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
715     }
716 
717   return vr;
718 }
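
/* For instance, for the default definition of a parameter declared as
   int *p __attribute__ ((nonnull)), the first query creates the lattice
   entry ~[0, 0] for p.  Querying an SSA name whose version is beyond the
   lattice, or a not-yet-seen name once propagation has finished, returns
   the shared, unmodifiable VARYING entry instead.  */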
719 
720 /* Set value-ranges of all SSA names defined by STMT to varying.  */
721 
722 static void
723 set_defs_to_varying (gimple *stmt)
724 {
725   ssa_op_iter i;
726   tree def;
727   FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
728     {
729       value_range *vr = get_value_range (def);
730       /* Avoid writing to the vr_const_varying that get_value_range may return.  */
731       if (vr->type != VR_VARYING)
732 	set_value_range_to_varying (vr);
733     }
734 }
735 
736 
737 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */
738 
739 static inline bool
740 vrp_operand_equal_p (const_tree val1, const_tree val2)
741 {
742   if (val1 == val2)
743     return true;
744   if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
745     return false;
746   return is_overflow_infinity (val1) == is_overflow_infinity (val2);
747 }
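
/* The final test ensures that a constant and its overflow-infinity
   counterpart (the same constant but with TREE_OVERFLOW set) are never
   considered equal, even when operand_equal_p cannot tell them apart.  */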
748 
749 /* Return true, if the bitmaps B1 and B2 are equal.  */
750 
751 static inline bool
752 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
753 {
754   return (b1 == b2
755 	  || ((!b1 || bitmap_empty_p (b1))
756 	      && (!b2 || bitmap_empty_p (b2)))
757 	  || (b1 && b2
758 	      && bitmap_equal_p (b1, b2)));
759 }
760 
761 /* Update the value range and equivalence set for variable VAR to
762    NEW_VR.  Return true if NEW_VR is different from VAR's previous
763    value.
764 
765    NOTE: This function assumes that NEW_VR is a temporary value range
766    object created for the sole purpose of updating VAR's range.  The
767    storage used by the equivalence set from NEW_VR will be freed by
768    this function.  Do not call update_value_range when NEW_VR
769    is the range object associated with another SSA name.  */
770 
771 static inline bool
772 update_value_range (const_tree var, value_range *new_vr)
773 {
774   value_range *old_vr;
775   bool is_new;
776 
777   /* If there is a value-range on the SSA name from earlier analysis
778      factor that in.  */
779   if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
780     {
781       wide_int min, max;
782       value_range_type rtype = get_range_info (var, &min, &max);
783       if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
784 	{
785 	  value_range nr;
786 	  nr.type = rtype;
787 	  /* Range info on SSA names doesn't carry overflow information
788 	     so make sure to preserve the overflow bit on the lattice.  */
789 	  if (new_vr->type == VR_RANGE
790 	      && is_negative_overflow_infinity (new_vr->min)
791 	      && wi::eq_p (new_vr->min, min))
792 	    nr.min = new_vr->min;
793 	  else
794 	    nr.min = wide_int_to_tree (TREE_TYPE (var), min);
795 	  if (new_vr->type == VR_RANGE
796 	      && is_positive_overflow_infinity (new_vr->max)
797 	      && wi::eq_p (new_vr->max, max))
798 	    nr.max = new_vr->max;
799 	  else
800 	    nr.max = wide_int_to_tree (TREE_TYPE (var), max);
801 	  nr.equiv = NULL;
802 	  vrp_intersect_ranges (new_vr, &nr);
803 	}
804     }
805 
806   /* Update the value range, if necessary.  */
807   old_vr = get_value_range (var);
808   is_new = old_vr->type != new_vr->type
809 	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
810 	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
811 	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
812 
813   if (is_new)
814     {
815       /* Do not allow transitions up the lattice.  The following
816 	 is slightly more awkward than just new_vr->type < old_vr->type
817 	 because VR_RANGE and VR_ANTI_RANGE need to be considered
818 	 the same.  We may not have is_new when transitioning to
819 	 UNDEFINED.  If old_vr->type is VARYING, we shouldn't be
820 	 called.  */
821       if (new_vr->type == VR_UNDEFINED)
822 	{
823 	  BITMAP_FREE (new_vr->equiv);
824 	  set_value_range_to_varying (old_vr);
825 	  set_value_range_to_varying (new_vr);
826 	  return true;
827 	}
828       else
829 	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
830 			 new_vr->equiv);
831     }
832 
833   BITMAP_FREE (new_vr->equiv);
834 
835   return is_new;
836 }
837 
838 
839 /* Add VAR and VAR's equivalence set to EQUIV.  This is the central
840    point where equivalence processing can be turned on/off.  */
841 
842 static void
843 add_equivalence (bitmap *equiv, const_tree var)
844 {
845   unsigned ver = SSA_NAME_VERSION (var);
846   value_range *vr = vr_value[ver];
847 
848   if (*equiv == NULL)
849     *equiv = BITMAP_ALLOC (NULL);
850   bitmap_set_bit (*equiv, ver);
851   if (vr && vr->equiv)
852     bitmap_ior_into (*equiv, vr->equiv);
853 }
854 
855 
856 /* Return true if VR is ~[0, 0].  */
857 
858 static inline bool
859 range_is_nonnull (value_range *vr)
860 {
861   return vr->type == VR_ANTI_RANGE
862 	 && integer_zerop (vr->min)
863 	 && integer_zerop (vr->max);
864 }
865 
866 
867 /* Return true if VR is [0, 0].  */
868 
869 static inline bool
870 range_is_null (value_range *vr)
871 {
872   return vr->type == VR_RANGE
873 	 && integer_zerop (vr->min)
874 	 && integer_zerop (vr->max);
875 }
876 
877 /* Return true if the max and min of VR are INTEGER_CST.  It need not
878    be a singleton.  */
879 
880 static inline bool
881 range_int_cst_p (value_range *vr)
882 {
883   return (vr->type == VR_RANGE
884 	  && TREE_CODE (vr->max) == INTEGER_CST
885 	  && TREE_CODE (vr->min) == INTEGER_CST);
886 }
887 
888 /* Return true if VR is an INTEGER_CST singleton.  */
889 
890 static inline bool
891 range_int_cst_singleton_p (value_range *vr)
892 {
893   return (range_int_cst_p (vr)
894 	  && !is_overflow_infinity (vr->min)
895 	  && !is_overflow_infinity (vr->max)
896 	  && tree_int_cst_equal (vr->min, vr->max));
897 }
898 
899 /* Return true if value range VR involves at least one symbol.  */
900 
901 static inline bool
902 symbolic_range_p (value_range *vr)
903 {
904   return (!is_gimple_min_invariant (vr->min)
905           || !is_gimple_min_invariant (vr->max));
906 }
907 
908 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
909    otherwise.  We only handle additive operations and set NEG to true if the
910    symbol is negated and INV to the invariant part, if any.  */
911 
912 static tree
913 get_single_symbol (tree t, bool *neg, tree *inv)
914 {
915   bool neg_;
916   tree inv_;
917 
918   if (TREE_CODE (t) == PLUS_EXPR
919       || TREE_CODE (t) == POINTER_PLUS_EXPR
920       || TREE_CODE (t) == MINUS_EXPR)
921     {
922       if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
923 	{
924 	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
925 	  inv_ = TREE_OPERAND (t, 0);
926 	  t = TREE_OPERAND (t, 1);
927 	}
928       else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
929 	{
930 	  neg_ = false;
931 	  inv_ = TREE_OPERAND (t, 1);
932 	  t = TREE_OPERAND (t, 0);
933 	}
934       else
935         return NULL_TREE;
936     }
937   else
938     {
939       neg_ = false;
940       inv_ = NULL_TREE;
941     }
942 
943   if (TREE_CODE (t) == NEGATE_EXPR)
944     {
945       t = TREE_OPERAND (t, 0);
946       neg_ = !neg_;
947     }
948 
949   if (TREE_CODE (t) != SSA_NAME)
950     return NULL_TREE;
951 
952   *neg = neg_;
953   *inv = inv_;
954   return t;
955 }
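
/* For example, on x_3 + 16 this returns x_3 with *NEG = false and
   *INV = 16; on 10 - y_2 it returns y_2 with *NEG = true and *INV = 10;
   on x_3 * 2 it returns NULL_TREE, since only additive forms are
   handled.  */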
956 
957 /* The reverse operation: build a symbolic expression with TYPE
958    from symbol SYM, negated according to NEG, and invariant INV.  */
959 
960 static tree
961 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
962 {
963   const bool pointer_p = POINTER_TYPE_P (type);
964   tree t = sym;
965 
966   if (neg)
967     t = build1 (NEGATE_EXPR, type, t);
968 
969   if (integer_zerop (inv))
970     return t;
971 
972   return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
973 }
974 
975 /* Return true if value range VR involves exactly one symbol SYM.  */
976 
977 static bool
978 symbolic_range_based_on_p (value_range *vr, const_tree sym)
979 {
980   bool neg, min_has_symbol, max_has_symbol;
981   tree inv;
982 
983   if (is_gimple_min_invariant (vr->min))
984     min_has_symbol = false;
985   else if (get_single_symbol (vr->min, &neg, &inv) == sym)
986     min_has_symbol = true;
987   else
988     return false;
989 
990   if (is_gimple_min_invariant (vr->max))
991     max_has_symbol = false;
992   else if (get_single_symbol (vr->max, &neg, &inv) == sym)
993     max_has_symbol = true;
994   else
995     return false;
996 
997   return (min_has_symbol || max_has_symbol);
998 }
999 
1000 /* Return true if value range VR uses an overflow infinity.  */
1001 
1002 static inline bool
1003 overflow_infinity_range_p (value_range *vr)
1004 {
1005   return (vr->type == VR_RANGE
1006 	  && (is_overflow_infinity (vr->min)
1007 	      || is_overflow_infinity (vr->max)));
1008 }
1009 
1010 /* Return false if we cannot make a valid comparison based on VR;
1011    this will be the case if it uses an overflow infinity and overflow
1012    is not undefined (i.e., -fno-strict-overflow is in effect).
1013    Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
1014    uses an overflow infinity.  */
1015 
1016 static bool
1017 usable_range_p (value_range *vr, bool *strict_overflow_p)
1018 {
1019   gcc_assert (vr->type == VR_RANGE);
1020   if (is_overflow_infinity (vr->min))
1021     {
1022       *strict_overflow_p = true;
1023       if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
1024 	return false;
1025     }
1026   if (is_overflow_infinity (vr->max))
1027     {
1028       *strict_overflow_p = true;
1029       if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
1030 	return false;
1031     }
1032   return true;
1033 }
1034 
1035 /* Return true if the result of assignment STMT is known to be non-zero.
1036    If the return value is based on the assumption that signed overflow is
1037    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1038    *STRICT_OVERFLOW_P.  */
1039 
1040 static bool
1041 gimple_assign_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1042 {
1043   enum tree_code code = gimple_assign_rhs_code (stmt);
1044   switch (get_gimple_rhs_class (code))
1045     {
1046     case GIMPLE_UNARY_RHS:
1047       return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1048 					 gimple_expr_type (stmt),
1049 					 gimple_assign_rhs1 (stmt),
1050 					 strict_overflow_p);
1051     case GIMPLE_BINARY_RHS:
1052       return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1053 					  gimple_expr_type (stmt),
1054 					  gimple_assign_rhs1 (stmt),
1055 					  gimple_assign_rhs2 (stmt),
1056 					  strict_overflow_p);
1057     case GIMPLE_TERNARY_RHS:
1058       return false;
1059     case GIMPLE_SINGLE_RHS:
1060       return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1061 					  strict_overflow_p);
1062     case GIMPLE_INVALID_RHS:
1063       gcc_unreachable ();
1064     default:
1065       gcc_unreachable ();
1066     }
1067 }
1068 
1069 /* Return true if STMT is known to compute a non-zero value.
1070    If the return value is based on the assumption that signed overflow is
1071    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1072    *STRICT_OVERFLOW_P.  */
1073 
1074 static bool
1075 gimple_stmt_nonzero_warnv_p (gimple *stmt, bool *strict_overflow_p)
1076 {
1077   switch (gimple_code (stmt))
1078     {
1079     case GIMPLE_ASSIGN:
1080       return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1081     case GIMPLE_CALL:
1082       {
1083 	tree fndecl = gimple_call_fndecl (stmt);
1084 	if (!fndecl) return false;
1085 	if (flag_delete_null_pointer_checks && !flag_check_new
1086 	    && DECL_IS_OPERATOR_NEW (fndecl)
1087 	    && !TREE_NOTHROW (fndecl))
1088 	  return true;
1089 	/* References are always non-NULL.  */
1090 	if (flag_delete_null_pointer_checks
1091 	    && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
1092 	  return true;
1093 	if (flag_delete_null_pointer_checks &&
1094 	    lookup_attribute ("returns_nonnull",
1095 			      TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
1096 	  return true;
1097 	return gimple_alloca_call_p (stmt);
1098       }
1099     default:
1100       gcc_unreachable ();
1101     }
1102 }
1103 
1104 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1105    obtained so far.  */
1106 
1107 static bool
1108 vrp_stmt_computes_nonzero (gimple *stmt, bool *strict_overflow_p)
1109 {
1110   if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1111     return true;
1112 
1113   /* If we have an expression of the form &X->a, then the expression
1114      is nonnull if X is nonnull.  */
1115   if (is_gimple_assign (stmt)
1116       && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1117     {
1118       tree expr = gimple_assign_rhs1 (stmt);
1119       tree base = get_base_address (TREE_OPERAND (expr, 0));
1120 
1121       if (base != NULL_TREE
1122 	  && TREE_CODE (base) == MEM_REF
1123 	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1124 	{
1125 	  value_range *vr = get_value_range (TREE_OPERAND (base, 0));
1126 	  if (range_is_nonnull (vr))
1127 	    return true;
1128 	}
1129     }
1130 
1131   return false;
1132 }
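
/* So, for instance, p_2 = &q_1->field is recognized as non-zero as soon
   as the lattice records the non-null range ~[0, 0] for q_1, even when
   the generic predicates above cannot prove it.  */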
1133 
1134 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1135    a gimple invariant, or SSA_NAME +- CST.  */
1136 
1137 static bool
1138 valid_value_p (tree expr)
1139 {
1140   if (TREE_CODE (expr) == SSA_NAME)
1141     return true;
1142 
1143   if (TREE_CODE (expr) == PLUS_EXPR
1144       || TREE_CODE (expr) == MINUS_EXPR)
1145     return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1146 	    && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1147 
1148   return is_gimple_min_invariant (expr);
1149 }
1150 
1151 /* Return
1152    1 if VAL < VAL2
1153    0 if !(VAL < VAL2)
1154    -2 if those are incomparable.  */
1155 static inline int
1156 operand_less_p (tree val, tree val2)
1157 {
1158   /* LT is folded faster than GE and others.  Inline the common case.  */
1159   if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1160     {
1161       if (! is_positive_overflow_infinity (val2))
1162 	return tree_int_cst_lt (val, val2);
1163     }
1164   else
1165     {
1166       tree tcmp;
1167 
1168       fold_defer_overflow_warnings ();
1169 
1170       tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1171 
1172       fold_undefer_and_ignore_overflow_warnings ();
1173 
1174       if (!tcmp
1175 	  || TREE_CODE (tcmp) != INTEGER_CST)
1176 	return -2;
1177 
1178       if (!integer_zerop (tcmp))
1179 	return 1;
1180     }
1181 
1182   /* val >= val2, not considering overflow infinity.  */
1183   if (is_negative_overflow_infinity (val))
1184     return is_negative_overflow_infinity (val2) ? 0 : 1;
1185   else if (is_positive_overflow_infinity (val2))
1186     return is_positive_overflow_infinity (val) ? 0 : 1;
1187 
1188   return 0;
1189 }
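
/* E.g. operand_less_p (3, 5) returns 1 and operand_less_p (5, 3) returns 0,
   while a comparison that does not fold to a constant (say against a
   symbolic bound) returns -2.  A plain TYPE_MAX_VALUE is considered less
   than a positive overflow infinity of the same type.  */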
1190 
1191 /* Compare two values VAL1 and VAL2.  Return
1192 
1193    	-2 if VAL1 and VAL2 cannot be compared at compile-time,
1194    	-1 if VAL1 < VAL2,
1195    	 0 if VAL1 == VAL2,
1196 	+1 if VAL1 > VAL2, and
1197 	+2 if VAL1 != VAL2
1198 
1199    This is similar to tree_int_cst_compare but supports pointer values
1200    and values that cannot be compared at compile time.
1201 
1202    If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1203    true if the return value is only valid if we assume that signed
1204    overflow is undefined.  */
1205 
1206 static int
1207 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1208 {
1209   if (val1 == val2)
1210     return 0;
1211 
1212   /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1213      both integers.  */
1214   gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1215 	      == POINTER_TYPE_P (TREE_TYPE (val2)));
1216 
1217   /* Convert the two values into the same type.  This is needed because
1218      sizetype causes sign extension even for unsigned types.  */
1219   val2 = fold_convert (TREE_TYPE (val1), val2);
1220   STRIP_USELESS_TYPE_CONVERSION (val2);
1221 
1222   if ((TREE_CODE (val1) == SSA_NAME
1223        || (TREE_CODE (val1) == NEGATE_EXPR
1224 	   && TREE_CODE (TREE_OPERAND (val1, 0)) == SSA_NAME)
1225        || TREE_CODE (val1) == PLUS_EXPR
1226        || TREE_CODE (val1) == MINUS_EXPR)
1227       && (TREE_CODE (val2) == SSA_NAME
1228 	  || (TREE_CODE (val2) == NEGATE_EXPR
1229 	      && TREE_CODE (TREE_OPERAND (val2, 0)) == SSA_NAME)
1230 	  || TREE_CODE (val2) == PLUS_EXPR
1231 	  || TREE_CODE (val2) == MINUS_EXPR))
1232     {
1233       tree n1, c1, n2, c2;
1234       enum tree_code code1, code2;
1235 
1236       /* If VAL1 and VAL2 are of the form '[-]NAME [+-] CST' or 'NAME',
1237 	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
1238 	 same name, return -2.  */
1239       if (TREE_CODE (val1) == SSA_NAME || TREE_CODE (val1) == NEGATE_EXPR)
1240 	{
1241 	  code1 = SSA_NAME;
1242 	  n1 = val1;
1243 	  c1 = NULL_TREE;
1244 	}
1245       else
1246 	{
1247 	  code1 = TREE_CODE (val1);
1248 	  n1 = TREE_OPERAND (val1, 0);
1249 	  c1 = TREE_OPERAND (val1, 1);
1250 	  if (tree_int_cst_sgn (c1) == -1)
1251 	    {
1252 	      if (is_negative_overflow_infinity (c1))
1253 		return -2;
1254 	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1255 	      if (!c1)
1256 		return -2;
1257 	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1258 	    }
1259 	}
1260 
1261       if (TREE_CODE (val2) == SSA_NAME || TREE_CODE (val2) == NEGATE_EXPR)
1262 	{
1263 	  code2 = SSA_NAME;
1264 	  n2 = val2;
1265 	  c2 = NULL_TREE;
1266 	}
1267       else
1268 	{
1269 	  code2 = TREE_CODE (val2);
1270 	  n2 = TREE_OPERAND (val2, 0);
1271 	  c2 = TREE_OPERAND (val2, 1);
1272 	  if (tree_int_cst_sgn (c2) == -1)
1273 	    {
1274 	      if (is_negative_overflow_infinity (c2))
1275 		return -2;
1276 	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1277 	      if (!c2)
1278 		return -2;
1279 	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1280 	    }
1281 	}
1282 
1283       /* Both values must use the same name.  */
1284       if (TREE_CODE (n1) == NEGATE_EXPR && TREE_CODE (n2) == NEGATE_EXPR)
1285 	{
1286 	  n1 = TREE_OPERAND (n1, 0);
1287 	  n2 = TREE_OPERAND (n2, 0);
1288 	}
1289       if (n1 != n2)
1290 	return -2;
1291 
1292       if (code1 == SSA_NAME && code2 == SSA_NAME)
1293 	/* NAME == NAME  */
1294 	return 0;
1295 
1296       /* If overflow is defined we cannot simplify more.  */
1297       if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1298 	return -2;
1299 
1300       if (strict_overflow_p != NULL
1301 	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1302 	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1303 	*strict_overflow_p = true;
1304 
1305       if (code1 == SSA_NAME)
1306 	{
1307 	  if (code2 == PLUS_EXPR)
1308 	    /* NAME < NAME + CST  */
1309 	    return -1;
1310 	  else if (code2 == MINUS_EXPR)
1311 	    /* NAME > NAME - CST  */
1312 	    return 1;
1313 	}
1314       else if (code1 == PLUS_EXPR)
1315 	{
1316 	  if (code2 == SSA_NAME)
1317 	    /* NAME + CST > NAME  */
1318 	    return 1;
1319 	  else if (code2 == PLUS_EXPR)
1320 	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
1321 	    return compare_values_warnv (c1, c2, strict_overflow_p);
1322 	  else if (code2 == MINUS_EXPR)
1323 	    /* NAME + CST1 > NAME - CST2  */
1324 	    return 1;
1325 	}
1326       else if (code1 == MINUS_EXPR)
1327 	{
1328 	  if (code2 == SSA_NAME)
1329 	    /* NAME - CST < NAME  */
1330 	    return -1;
1331 	  else if (code2 == PLUS_EXPR)
1332 	    /* NAME - CST1 < NAME + CST2  */
1333 	    return -1;
1334 	  else if (code2 == MINUS_EXPR)
1335 	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
1336 	       C1 and C2 are swapped in the call to compare_values.  */
1337 	    return compare_values_warnv (c2, c1, strict_overflow_p);
1338 	}
1339 
1340       gcc_unreachable ();
1341     }
1342 
1343   /* We cannot compare non-constants.  */
1344   if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1345     return -2;
1346 
1347   if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1348     {
1349       /* We cannot compare overflowed values, except for overflow
1350 	 infinities.  */
1351       if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1352 	{
1353 	  if (strict_overflow_p != NULL)
1354 	    *strict_overflow_p = true;
1355 	  if (is_negative_overflow_infinity (val1))
1356 	    return is_negative_overflow_infinity (val2) ? 0 : -1;
1357 	  else if (is_negative_overflow_infinity (val2))
1358 	    return 1;
1359 	  else if (is_positive_overflow_infinity (val1))
1360 	    return is_positive_overflow_infinity (val2) ? 0 : 1;
1361 	  else if (is_positive_overflow_infinity (val2))
1362 	    return -1;
1363 	  return -2;
1364 	}
1365 
1366       return tree_int_cst_compare (val1, val2);
1367     }
1368   else
1369     {
1370       tree t;
1371 
1372       /* First see if VAL1 and VAL2 are not the same.  */
1373       if (val1 == val2 || operand_equal_p (val1, val2, 0))
1374 	return 0;
1375 
1376       /* If VAL1 is a lower address than VAL2, return -1.  */
1377       if (operand_less_p (val1, val2) == 1)
1378 	return -1;
1379 
1380       /* If VAL1 is a higher address than VAL2, return +1.  */
1381       if (operand_less_p (val2, val1) == 1)
1382 	return 1;
1383 
1384       /* If VAL1 is different than VAL2, return +2.
1385 	 For integer constants we either have already returned -1 or 1
1386 	 or they are equivalent.  We still might succeed in proving
1387 	 something about non-trivial operands.  */
1388       if (TREE_CODE (val1) != INTEGER_CST
1389 	  || TREE_CODE (val2) != INTEGER_CST)
1390 	{
1391           t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1392 	  if (t && integer_onep (t))
1393 	    return 2;
1394 	}
1395 
1396       return -2;
1397     }
1398 }
1399 
1400 /* Compare values like compare_values_warnv, but treat comparisons of
1401    nonconstants which rely on undefined overflow as incomparable.  */
1402 
1403 static int
1404 compare_values (tree val1, tree val2)
1405 {
1406   bool sop;
1407   int ret;
1408 
1409   sop = false;
1410   ret = compare_values_warnv (val1, val2, &sop);
1411   if (sop
1412       && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1413     ret = -2;
1414   return ret;
1415 }
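
/* For instance, with a signed n_1, compare_values_warnv (n_1 + 4, n_1 + 8,
   &sop) returns -1 and sets SOP because the result relies on the additions
   not overflowing; compare_values then discards such overflow-dependent
   answers for non-constant operands and returns -2 instead.  */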
1416 
1417 
1418 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1419           0 if VAL is not inside [MIN, MAX],
1420 	 -2 if we cannot tell either way.
1421 
1422    Benchmark compile/20001226-1.c compilation time after changing this
1423    function.  */
1424 
1425 static inline int
1426 value_inside_range (tree val, tree min, tree max)
1427 {
1428   int cmp1, cmp2;
1429 
1430   cmp1 = operand_less_p (val, min);
1431   if (cmp1 == -2)
1432     return -2;
1433   if (cmp1 == 1)
1434     return 0;
1435 
1436   cmp2 = operand_less_p (max, val);
1437   if (cmp2 == -2)
1438     return -2;
1439 
1440   return !cmp2;
1441 }
1442 
1443 
1444 /* Return true if value ranges VR0 and VR1 have a non-empty
1445    intersection.
1446 
1447    Benchmark compile/20001226-1.c compilation time after changing this
1448    function.
1449    */
1450 
1451 static inline bool
1452 value_ranges_intersect_p (value_range *vr0, value_range *vr1)
1453 {
1454   /* The value ranges do not intersect if the maximum of the first range is
1455      less than the minimum of the second range or vice versa.
1456      When those relations are unknown, we can't do any better.  */
1457   if (operand_less_p (vr0->max, vr1->min) != 0)
1458     return false;
1459   if (operand_less_p (vr1->max, vr0->min) != 0)
1460     return false;
1461   return true;
1462 }
1463 
1464 
1465 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1466    include the value zero, -2 if we cannot tell.  */
1467 
1468 static inline int
1469 range_includes_zero_p (tree min, tree max)
1470 {
1471   tree zero = build_int_cst (TREE_TYPE (min), 0);
1472   return value_inside_range (zero, min, max);
1473 }
1474 
1475 /* Return true if *VR is known to contain only nonnegative values.  */
1476 
1477 static inline bool
1478 value_range_nonnegative_p (value_range *vr)
1479 {
1480   /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1481      which would return a useful value should be encoded as a
1482      VR_RANGE.  */
1483   if (vr->type == VR_RANGE)
1484     {
1485       int result = compare_values (vr->min, integer_zero_node);
1486       return (result == 0 || result == 1);
1487     }
1488 
1489   return false;
1490 }
1491 
1492 /* If *VR has a value range that is a single constant value return that,
1493    otherwise return NULL_TREE.  */
1494 
1495 static tree
1496 value_range_constant_singleton (value_range *vr)
1497 {
1498   if (vr->type == VR_RANGE
1499       && vrp_operand_equal_p (vr->min, vr->max)
1500       && is_gimple_min_invariant (vr->min))
1501     return vr->min;
1502 
1503   return NULL_TREE;
1504 }
1505 
1506 /* If OP has a value range with a single constant value return that,
1507    otherwise return NULL_TREE.  This returns OP itself if OP is a
1508    constant.  */
1509 
1510 static tree
1511 op_with_constant_singleton_value_range (tree op)
1512 {
1513   if (is_gimple_min_invariant (op))
1514     return op;
1515 
1516   if (TREE_CODE (op) != SSA_NAME)
1517     return NULL_TREE;
1518 
1519   return value_range_constant_singleton (get_value_range (op));
1520 }
1521 
1522 /* Return true if op is in a boolean [0, 1] value-range.  */
1523 
1524 static bool
1525 op_with_boolean_value_range_p (tree op)
1526 {
1527   value_range *vr;
1528 
1529   if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1530     return true;
1531 
1532   if (integer_zerop (op)
1533       || integer_onep (op))
1534     return true;
1535 
1536   if (TREE_CODE (op) != SSA_NAME)
1537     return false;
1538 
1539   vr = get_value_range (op);
1540   return (vr->type == VR_RANGE
1541 	  && integer_zerop (vr->min)
1542 	  && integer_onep (vr->max));
1543 }
1544 
1545 /* Extract value range information from an ASSERT_EXPR EXPR and store
1546    it in *VR_P.  */
1547 
1548 static void
1549 extract_range_from_assert (value_range *vr_p, tree expr)
1550 {
1551   tree var, cond, limit, min, max, type;
1552   value_range *limit_vr;
1553   enum tree_code cond_code;
1554 
1555   var = ASSERT_EXPR_VAR (expr);
1556   cond = ASSERT_EXPR_COND (expr);
1557 
1558   gcc_assert (COMPARISON_CLASS_P (cond));
1559 
1560   /* Find VAR in the ASSERT_EXPR conditional.  */
1561   if (var == TREE_OPERAND (cond, 0)
1562       || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1563       || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1564     {
1565       /* If the predicate is of the form VAR COMP LIMIT, then we just
1566 	 take LIMIT from the RHS and use the same comparison code.  */
1567       cond_code = TREE_CODE (cond);
1568       limit = TREE_OPERAND (cond, 1);
1569       cond = TREE_OPERAND (cond, 0);
1570     }
1571   else
1572     {
1573       /* If the predicate is of the form LIMIT COMP VAR, then we need
1574 	 to flip around the comparison code to create the proper range
1575 	 for VAR.  */
1576       cond_code = swap_tree_comparison (TREE_CODE (cond));
1577       limit = TREE_OPERAND (cond, 0);
1578       cond = TREE_OPERAND (cond, 1);
1579     }
1580 
1581   limit = avoid_overflow_infinity (limit);
1582 
1583   type = TREE_TYPE (var);
1584   gcc_assert (limit != var);
1585 
1586   /* For pointer arithmetic, we only keep track of pointer equality
1587      and inequality.  */
1588   if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1589     {
1590       set_value_range_to_varying (vr_p);
1591       return;
1592     }
1593 
1594   /* If LIMIT is another SSA name and LIMIT has a range of its own,
1595      try to use LIMIT's range to avoid creating symbolic ranges
1596      unnecessarily. */
1597   limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1598 
1599   /* LIMIT's range is only interesting if it has any useful information.  */
1600   if (limit_vr
1601       && (limit_vr->type == VR_UNDEFINED
1602 	  || limit_vr->type == VR_VARYING
1603 	  || symbolic_range_p (limit_vr)))
1604     limit_vr = NULL;
1605 
1606   /* Initially, the new range has the same set of equivalences of
1607      VAR's range.  This will be revised before returning the final
1608      value.  Since assertions may be chained via mutually exclusive
1609      predicates, we will need to trim the set of equivalences before
1610      we are done.  */
1611   gcc_assert (vr_p->equiv == NULL);
1612   add_equivalence (&vr_p->equiv, var);
1613 
1614   /* Extract a new range based on the asserted comparison for VAR and
1615      LIMIT's value range.  Notice that if LIMIT has an anti-range, we
1616      will only use it for equality comparisons (EQ_EXPR).  For any
1617      other kind of assertion, we cannot derive a range from LIMIT's
1618      anti-range that can be used to describe the new range.  For
1619      instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
1620      then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
1621      no single range for x_2 that could describe LE_EXPR, so we might
1622      as well build the range [b_4, +INF] for it.
1623      One special case we handle is extracting a range from a
1624      range test encoded as (unsigned)var + CST <= limit.  */
1625   if (TREE_CODE (cond) == NOP_EXPR
1626       || TREE_CODE (cond) == PLUS_EXPR)
1627     {
1628       if (TREE_CODE (cond) == PLUS_EXPR)
1629         {
1630           min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1631 			     TREE_OPERAND (cond, 1));
1632           max = int_const_binop (PLUS_EXPR, limit, min);
1633 	  cond = TREE_OPERAND (cond, 0);
1634 	}
1635       else
1636 	{
1637 	  min = build_int_cst (TREE_TYPE (var), 0);
1638 	  max = limit;
1639 	}
1640 
1641       /* Make sure to not set TREE_OVERFLOW on the final type
1642 	 conversion.  We are willingly interpreting large positive
1643 	 unsigned values as negative signed values here.  */
1644       min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
1645       max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
1646 
1647       /* We can transform a max, min range to an anti-range or
1648          vice-versa.  Use set_and_canonicalize_value_range which does
1649 	 this for us.  */
1650       if (cond_code == LE_EXPR)
1651         set_and_canonicalize_value_range (vr_p, VR_RANGE,
1652 					  min, max, vr_p->equiv);
1653       else if (cond_code == GT_EXPR)
1654         set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1655 					  min, max, vr_p->equiv);
1656       else
1657 	gcc_unreachable ();
1658     }
1659   else if (cond_code == EQ_EXPR)
1660     {
1661       enum value_range_type range_type;
1662 
1663       if (limit_vr)
1664 	{
1665 	  range_type = limit_vr->type;
1666 	  min = limit_vr->min;
1667 	  max = limit_vr->max;
1668 	}
1669       else
1670 	{
1671 	  range_type = VR_RANGE;
1672 	  min = limit;
1673 	  max = limit;
1674 	}
1675 
1676       set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1677 
1678       /* When asserting the equality VAR == LIMIT and LIMIT is another
1679 	 SSA name, the new range will also inherit the equivalence set
1680 	 from LIMIT.  */
1681       if (TREE_CODE (limit) == SSA_NAME)
1682 	add_equivalence (&vr_p->equiv, limit);
1683     }
1684   else if (cond_code == NE_EXPR)
1685     {
1686       /* As described above, when LIMIT's range is an anti-range and
1687 	 this assertion is an inequality (NE_EXPR), then we cannot
1688 	 derive anything from the anti-range.  For instance, if
1689 	 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1690 	 not imply that VAR's range is [0, 0].  So, in the case of
1691 	 anti-ranges, we just assert the inequality using LIMIT and
1692 	 not its anti-range.
1693 
1694 	 If LIMIT_VR is a range, we can only use it to build a new
1695 	 anti-range if LIMIT_VR is a single-valued range.  For
1696 	 instance, if LIMIT_VR is [0, 1], the predicate
1697 	 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1698 	 Rather, it means that for value 0 VAR should be ~[0, 0]
1699 	 and for value 1, VAR should be ~[1, 1].  We cannot
1700 	 represent these ranges.
1701 
1702 	 The only situation in which we can build a valid
1703 	 anti-range is when LIMIT_VR is a single-valued range
1704 	 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
1705 	 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
1706       if (limit_vr
1707 	  && limit_vr->type == VR_RANGE
1708 	  && compare_values (limit_vr->min, limit_vr->max) == 0)
1709 	{
1710 	  min = limit_vr->min;
1711 	  max = limit_vr->max;
1712 	}
1713       else
1714 	{
1715 	  /* In any other case, we cannot use LIMIT's range to build a
1716 	     valid anti-range.  */
1717 	  min = max = limit;
1718 	}
1719 
1720       /* If MIN and MAX cover the whole range for their type, then
1721 	 just use the original LIMIT.  */
1722       if (INTEGRAL_TYPE_P (type)
1723 	  && vrp_val_is_min (min)
1724 	  && vrp_val_is_max (max))
1725 	min = max = limit;
1726 
1727       set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1728 					min, max, vr_p->equiv);
1729     }
1730   else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1731     {
1732       min = TYPE_MIN_VALUE (type);
1733 
1734       if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1735 	max = limit;
1736       else
1737 	{
1738 	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
1739 	     range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1740 	     LT_EXPR.  */
1741 	  max = limit_vr->max;
1742 	}
1743 
1744       /* If the maximum value forces us to be out of bounds, simply punt.
1745 	 It would be pointless to try and do anything more since this
1746 	 all should be optimized away above us.  */
1747       if ((cond_code == LT_EXPR
1748 	   && compare_values (max, min) == 0)
1749 	  || is_overflow_infinity (max))
1750 	set_value_range_to_varying (vr_p);
1751       else
1752 	{
1753 	  /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
1754 	  if (cond_code == LT_EXPR)
1755 	    {
1756 	      if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1757 		  && !TYPE_UNSIGNED (TREE_TYPE (max)))
1758 		max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1759 				   build_int_cst (TREE_TYPE (max), -1));
1760 	      else
1761 		max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1762 				   build_int_cst (TREE_TYPE (max), 1));
1763 	      if (EXPR_P (max))
1764 		TREE_NO_WARNING (max) = 1;
1765 	    }
1766 
1767 	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1768 	}
1769     }
1770   else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1771     {
1772       max = TYPE_MAX_VALUE (type);
1773 
1774       if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1775 	min = limit;
1776       else
1777 	{
1778 	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
1779 	     range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1780 	     GT_EXPR.  */
1781 	  min = limit_vr->min;
1782 	}
1783 
1784       /* If the minimum value forces us to be out of bounds, simply punt.
1785 	 It would be pointless to try and do anything more since this
1786 	 all should be optimized away above us.  */
1787       if ((cond_code == GT_EXPR
1788 	   && compare_values (min, max) == 0)
1789 	  || is_overflow_infinity (min))
1790 	set_value_range_to_varying (vr_p);
1791       else
1792 	{
1793 	  /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
1794 	  if (cond_code == GT_EXPR)
1795 	    {
1796 	      if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1797 		  && !TYPE_UNSIGNED (TREE_TYPE (min)))
1798 		min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1799 				   build_int_cst (TREE_TYPE (min), -1));
1800 	      else
1801 		min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1802 				   build_int_cst (TREE_TYPE (min), 1));
1803 	      if (EXPR_P (min))
1804 		TREE_NO_WARNING (min) = 1;
1805 	    }
1806 
1807 	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1808 	}
1809     }
1810   else
1811     gcc_unreachable ();
1812 
1813   /* Finally intersect the new range with what we already know about var.  */
1814   vrp_intersect_ranges (vr_p, get_value_range (var));
1815 }
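
/* Illustrative sketch, not part of VRP: the net effect of the
   LE_EXPR/GE_EXPR cases above on a plain constant range, after the
   final intersection with VAR's known range.  Asserting VAR <= LIMIT
   clamps the upper bound, asserting VAR >= LIMIT clamps the lower
   bound.  This hypothetical helper ignores symbolic bounds, overflow
   infinities and equivalence sets.  */

static void
sketch_clamp_range (bool is_le, int limit, int *lo, int *hi)
{
  if (is_le)
    {
      /* VAR <= LIMIT: the new range is [*LO, MIN (*HI, LIMIT)].  */
      if (limit < *hi)
	*hi = limit;
    }
  else
    {
      /* VAR >= LIMIT: the new range is [MAX (*LO, LIMIT), *HI].  */
      if (limit > *lo)
	*lo = limit;
    }
}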
1816 
1817 
1818 /* Extract range information from SSA name VAR and store it in VR.  If
1819    VAR has an interesting range, use it.  Otherwise, create the
1820    range [VAR, VAR] and return it.  This is useful in situations where
1821    we may have conditionals testing values of VARYING names.  For
1822    instance,
1823 
1824    	x_3 = y_5;
1825 	if (x_3 > y_5)
1826 	  ...
1827 
1828     Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1829     always false.  */
1830 
1831 static void
1832 extract_range_from_ssa_name (value_range *vr, tree var)
1833 {
1834   value_range *var_vr = get_value_range (var);
1835 
1836   if (var_vr->type != VR_VARYING)
1837     copy_value_range (vr, var_vr);
1838   else
1839     set_value_range (vr, VR_RANGE, var, var, NULL);
1840 
1841   add_equivalence (&vr->equiv, var);
1842 }
1843 
1844 
1845 /* Wrapper around int_const_binop.  If the operation overflows and we
1846    are not using wrapping arithmetic, then adjust the result to be
1847    -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
1848    NULL_TREE if we need to use an overflow infinity representation but
1849    the type does not support it.  */
1850 
1851 static tree
1852 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1853 {
1854   tree res;
1855 
1856   res = int_const_binop (code, val1, val2);
1857 
1858   /* If we are using unsigned arithmetic, operate symbolically
1859      on -INF and +INF as int_const_binop only handles signed overflow.  */
1860   if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1861     {
1862       int checkz = compare_values (res, val1);
1863       bool overflow = false;
1864 
1865       /* Ensure that res = val1 [+*] val2 >= val1
1866          or that res = val1 - val2 <= val1.  */
1867       if ((code == PLUS_EXPR
1868 	   && !(checkz == 1 || checkz == 0))
1869           || (code == MINUS_EXPR
1870 	      && !(checkz == 0 || checkz == -1)))
1871 	{
1872 	  overflow = true;
1873 	}
1874       /* Checking for multiplication overflow is done by dividing the
1875 	 output of the multiplication by the first input of the
1876 	 multiplication.  If the result of that division operation is
1877 	 not equal to the second input of the multiplication, then the
1878 	 multiplication overflowed.  */
1879       else if (code == MULT_EXPR && !integer_zerop (val1))
1880 	{
1881 	  tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1882 				      res,
1883 				      val1);
1884 	  int check = compare_values (tmp, val2);
1885 
1886 	  if (check != 0)
1887 	    overflow = true;
1888 	}
1889 
1890       if (overflow)
1891 	{
1892 	  res = copy_node (res);
1893 	  TREE_OVERFLOW (res) = 1;
1894 	}
1895 
1896     }
1897   else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1898     /* If the signed operation wraps then int_const_binop has done
1899        everything we want.  */
1900     ;
1901   /* Signed division of -1/0 overflows and by the time it gets here
1902      returns NULL_TREE.  */
1903   else if (!res)
1904     return NULL_TREE;
1905   else if ((TREE_OVERFLOW (res)
1906 	    && !TREE_OVERFLOW (val1)
1907 	    && !TREE_OVERFLOW (val2))
1908 	   || is_overflow_infinity (val1)
1909 	   || is_overflow_infinity (val2))
1910     {
1911       /* If the operation overflowed but neither VAL1 nor VAL2 are
1912 	 overflown, return -INF or +INF depending on the operation
1913 	 and the combination of signs of the operands.  */
1914       int sgn1 = tree_int_cst_sgn (val1);
1915       int sgn2 = tree_int_cst_sgn (val2);
1916 
1917       if (needs_overflow_infinity (TREE_TYPE (res))
1918 	  && !supports_overflow_infinity (TREE_TYPE (res)))
1919 	return NULL_TREE;
1920 
1921       /* We have to punt on adding infinities of different signs,
1922 	 since we can't tell what the sign of the result should be.
1923 	 Likewise for subtracting infinities of the same sign.  */
1924       if (((code == PLUS_EXPR && sgn1 != sgn2)
1925 	   || (code == MINUS_EXPR && sgn1 == sgn2))
1926 	  && is_overflow_infinity (val1)
1927 	  && is_overflow_infinity (val2))
1928 	return NULL_TREE;
1929 
1930       /* Don't try to handle division or shifting of infinities.  */
1931       if ((code == TRUNC_DIV_EXPR
1932 	   || code == FLOOR_DIV_EXPR
1933 	   || code == CEIL_DIV_EXPR
1934 	   || code == EXACT_DIV_EXPR
1935 	   || code == ROUND_DIV_EXPR
1936 	   || code == RSHIFT_EXPR)
1937 	  && (is_overflow_infinity (val1)
1938 	      || is_overflow_infinity (val2)))
1939 	return NULL_TREE;
1940 
1941       /* Notice that we only need to handle the restricted set of
1942 	 operations handled by extract_range_from_binary_expr.
1943 	 Among them, only multiplication, addition and subtraction
1944 	 can yield overflow without overflown operands because we
1945 	 are working with integral types only... except in the
1946 	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1947 	 for division too.  */
1948 
1949       /* For multiplication, the sign of the overflow is given
1950 	 by the comparison of the signs of the operands.  */
1951       if ((code == MULT_EXPR && sgn1 == sgn2)
1952           /* For addition, the operands must be of the same sign
1953 	     to yield an overflow.  Its sign is therefore that
1954 	     of one of the operands, for example the first.  For
1955 	     infinite operands X + -INF is negative, not positive.  */
1956 	  || (code == PLUS_EXPR
1957 	      && (sgn1 >= 0
1958 		  ? !is_negative_overflow_infinity (val2)
1959 		  : is_positive_overflow_infinity (val2)))
1960 	  /* For subtraction, non-infinite operands must be of
1961 	     different signs to yield an overflow.  Its sign is
1962 	     therefore that of the first operand or the opposite of
1963 	     that of the second operand.  A first operand of 0 counts
1964 	     as positive here, for the corner case 0 - (-INF), which
1965 	     overflows, but must yield +INF.  For infinite operands 0
1966 	     - INF is negative, not positive.  */
1967 	  || (code == MINUS_EXPR
1968 	      && (sgn1 >= 0
1969 		  ? !is_positive_overflow_infinity (val2)
1970 		  : is_negative_overflow_infinity (val2)))
1971 	  /* We only get in here with positive shift count, so the
1972 	     overflow direction is the same as the sign of val1.
1973 	     Actually rshift does not overflow at all, but we only
1974 	     handle the case of shifting overflowed -INF and +INF.  */
1975 	  || (code == RSHIFT_EXPR
1976 	      && sgn1 >= 0)
1977 	  /* For division, the only case is -INF / -1 = +INF.  */
1978 	  || code == TRUNC_DIV_EXPR
1979 	  || code == FLOOR_DIV_EXPR
1980 	  || code == CEIL_DIV_EXPR
1981 	  || code == EXACT_DIV_EXPR
1982 	  || code == ROUND_DIV_EXPR)
1983 	return (needs_overflow_infinity (TREE_TYPE (res))
1984 		? positive_overflow_infinity (TREE_TYPE (res))
1985 		: TYPE_MAX_VALUE (TREE_TYPE (res)));
1986       else
1987 	return (needs_overflow_infinity (TREE_TYPE (res))
1988 		? negative_overflow_infinity (TREE_TYPE (res))
1989 		: TYPE_MIN_VALUE (TREE_TYPE (res)));
1990     }
1991 
1992   return res;
1993 }
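
/* Illustrative sketch, not part of VRP: the unsigned overflow checks
   that vrp_int_const_binop performs symbolically above, restated on
   plain unsigned values.  Addition overflows iff the wrapped sum is
   smaller than an operand, subtraction iff the subtrahend is larger
   than the minuend, and multiplication iff dividing the wrapped
   product by one (nonzero) factor does not give back the other.
   The helper name is hypothetical.  */

static bool
sketch_unsigned_overflow_p (enum tree_code code,
			    unsigned HOST_WIDE_INT a,
			    unsigned HOST_WIDE_INT b)
{
  unsigned HOST_WIDE_INT res;

  switch (code)
    {
    case PLUS_EXPR:
      res = a + b;
      return res < a;
    case MINUS_EXPR:
      return b > a;
    case MULT_EXPR:
      res = a * b;
      return a != 0 && res / a != b;
    default:
      return false;
    }
}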
1994 
1995 
1996 /* For range VR compute two wide_int bitmasks.  In *MAY_BE_NONZERO,
1997    if some bit is unset, then for all numbers in the range that bit
1998    is 0; otherwise it might be 0 or 1.  In *MUST_BE_NONZERO, if some
1999    bit is set, then for all numbers in the range that bit is 1;
2000    otherwise it might be 0 or 1.  */
2001 
2002 static bool
2003 zero_nonzero_bits_from_vr (const tree expr_type,
2004 			   value_range *vr,
2005 			   wide_int *may_be_nonzero,
2006 			   wide_int *must_be_nonzero)
2007 {
2008   *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
2009   *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
2010   if (!range_int_cst_p (vr)
2011       || is_overflow_infinity (vr->min)
2012       || is_overflow_infinity (vr->max))
2013     return false;
2014 
2015   if (range_int_cst_singleton_p (vr))
2016     {
2017       *may_be_nonzero = vr->min;
2018       *must_be_nonzero = *may_be_nonzero;
2019     }
2020   else if (tree_int_cst_sgn (vr->min) >= 0
2021 	   || tree_int_cst_sgn (vr->max) < 0)
2022     {
2023       wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
2024       *may_be_nonzero = wi::bit_or (vr->min, vr->max);
2025       *must_be_nonzero = wi::bit_and (vr->min, vr->max);
2026       if (xor_mask != 0)
2027 	{
2028 	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
2029 				    may_be_nonzero->get_precision ());
2030 	  *may_be_nonzero = *may_be_nonzero | mask;
2031 	  *must_be_nonzero = must_be_nonzero->and_not (mask);
2032 	}
2033     }
2034 
2035   return true;
2036 }
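
/* Illustrative sketch, not part of VRP: the may-be-nonzero /
   must-be-nonzero computation above, restated for a plain unsigned
   range [LO, HI].  Bits strictly below the highest bit in which LO
   and HI differ can take either value somewhere in the range, so they
   are forced into "may" and out of "must".  Hypothetical helper, for
   clarity only.  */

static void
sketch_zero_nonzero_bits (unsigned int lo, unsigned int hi,
			  unsigned int *may_be_nonzero,
			  unsigned int *must_be_nonzero)
{
  *may_be_nonzero = lo | hi;
  *must_be_nonzero = lo & hi;
  if (lo != hi)
    {
      unsigned int xor_mask = lo ^ hi;
      unsigned int top = 0;
      while (xor_mask >>= 1)
	top++;
      /* Mask of all bits strictly below the highest differing bit.  */
      unsigned int varying = (1u << top) - 1;
      *may_be_nonzero |= varying;
      *must_be_nonzero &= ~varying;
    }
}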
2037 
2038 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2039    so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
2040    false otherwise.  If *AR can be represented with a single range
2041    *VR1 will be VR_UNDEFINED.  */
2042 
2043 static bool
2044 ranges_from_anti_range (value_range *ar,
2045 			value_range *vr0, value_range *vr1)
2046 {
2047   tree type = TREE_TYPE (ar->min);
2048 
2049   vr0->type = VR_UNDEFINED;
2050   vr1->type = VR_UNDEFINED;
2051 
2052   if (ar->type != VR_ANTI_RANGE
2053       || TREE_CODE (ar->min) != INTEGER_CST
2054       || TREE_CODE (ar->max) != INTEGER_CST
2055       || !vrp_val_min (type)
2056       || !vrp_val_max (type))
2057     return false;
2058 
2059   if (!vrp_val_is_min (ar->min))
2060     {
2061       vr0->type = VR_RANGE;
2062       vr0->min = vrp_val_min (type);
2063       vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
2064     }
2065   if (!vrp_val_is_max (ar->max))
2066     {
2067       vr1->type = VR_RANGE;
2068       vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
2069       vr1->max = vrp_val_max (type);
2070     }
2071   if (vr0->type == VR_UNDEFINED)
2072     {
2073       *vr0 = *vr1;
2074       vr1->type = VR_UNDEFINED;
2075     }
2076 
2077   return vr0->type != VR_UNDEFINED;
2078 }
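
/* Illustrative sketch, not part of VRP: what ranges_from_anti_range
   computes, shown for an unsigned char anti-range ~[LO, HI].  The
   values the anti-range allows are exactly [0, LO - 1] union
   [HI + 1, 255]; one of the pieces disappears when LO is the type
   minimum or HI is the type maximum.  Names below are hypothetical.  */

struct sketch_range
{
  bool valid;
  unsigned char lo, hi;
};

static bool
sketch_split_anti_range (unsigned char lo, unsigned char hi,
			 struct sketch_range *r0, struct sketch_range *r1)
{
  r0->valid = (lo != 0);
  if (r0->valid)
    {
      r0->lo = 0;
      r0->hi = lo - 1;
    }
  r1->valid = (hi != 255);
  if (r1->valid)
    {
      r1->lo = hi + 1;
      r1->hi = 255;
    }
  /* Mirror the function above: if the low piece is empty, promote the
     high piece into R0 so a representable result is always in R0.  */
  if (!r0->valid && r1->valid)
    {
      *r0 = *r1;
      r1->valid = false;
    }
  return r0->valid;
}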
2079 
2080 /* Helper to extract a value-range *VR for a multiplicative operation
2081    *VR0 CODE *VR1.  */
2082 
2083 static void
2084 extract_range_from_multiplicative_op_1 (value_range *vr,
2085 					enum tree_code code,
2086 					value_range *vr0, value_range *vr1)
2087 {
2088   enum value_range_type type;
2089   tree val[4];
2090   size_t i;
2091   tree min, max;
2092   bool sop;
2093   int cmp;
2094 
2095   /* Multiplications, divisions and shifts are a bit tricky to handle,
2096      depending on the mix of signs we have in the two ranges, we
2097      need to operate on different values to get the minimum and
2098      maximum values for the new range.  One approach is to figure
2099      out all the variations of range combinations and do the
2100      operations.
2101 
2102      However, this involves several calls to compare_values and it
2103      is pretty convoluted.  It's simpler to do the 4 operations
2104      (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2105      and then figure the smallest and largest values to form
2106      the new range.  */
2107   gcc_assert (code == MULT_EXPR
2108 	      || code == TRUNC_DIV_EXPR
2109 	      || code == FLOOR_DIV_EXPR
2110 	      || code == CEIL_DIV_EXPR
2111 	      || code == EXACT_DIV_EXPR
2112 	      || code == ROUND_DIV_EXPR
2113 	      || code == RSHIFT_EXPR
2114 	      || code == LSHIFT_EXPR);
2115   gcc_assert ((vr0->type == VR_RANGE
2116 	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2117 	      && vr0->type == vr1->type);
2118 
2119   type = vr0->type;
2120 
2121   /* Compute the 4 cross operations.  */
2122   sop = false;
2123   val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2124   if (val[0] == NULL_TREE)
2125     sop = true;
2126 
2127   if (vr1->max == vr1->min)
2128     val[1] = NULL_TREE;
2129   else
2130     {
2131       val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2132       if (val[1] == NULL_TREE)
2133 	sop = true;
2134     }
2135 
2136   if (vr0->max == vr0->min)
2137     val[2] = NULL_TREE;
2138   else
2139     {
2140       val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2141       if (val[2] == NULL_TREE)
2142 	sop = true;
2143     }
2144 
2145   if (vr0->min == vr0->max || vr1->min == vr1->max)
2146     val[3] = NULL_TREE;
2147   else
2148     {
2149       val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2150       if (val[3] == NULL_TREE)
2151 	sop = true;
2152     }
2153 
2154   if (sop)
2155     {
2156       set_value_range_to_varying (vr);
2157       return;
2158     }
2159 
2160   /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2161      of VAL[i].  */
2162   min = val[0];
2163   max = val[0];
2164   for (i = 1; i < 4; i++)
2165     {
2166       if (!is_gimple_min_invariant (min)
2167 	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2168 	  || !is_gimple_min_invariant (max)
2169 	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2170 	break;
2171 
2172       if (val[i])
2173 	{
2174 	  if (!is_gimple_min_invariant (val[i])
2175 	      || (TREE_OVERFLOW (val[i])
2176 		  && !is_overflow_infinity (val[i])))
2177 	    {
2178 	      /* If we found an overflowed value, set MIN and MAX
2179 		 to it so that we set the resulting range to
2180 		 VARYING.  */
2181 	      min = max = val[i];
2182 	      break;
2183 	    }
2184 
2185 	  if (compare_values (val[i], min) == -1)
2186 	    min = val[i];
2187 
2188 	  if (compare_values (val[i], max) == 1)
2189 	    max = val[i];
2190 	}
2191     }
2192 
2193   /* If either MIN or MAX overflowed, then set the resulting range to
2194      VARYING.  But we do accept an overflow infinity
2195      representation.  */
2196   if (min == NULL_TREE
2197       || !is_gimple_min_invariant (min)
2198       || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2199       || max == NULL_TREE
2200       || !is_gimple_min_invariant (max)
2201       || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2202     {
2203       set_value_range_to_varying (vr);
2204       return;
2205     }
2206 
2207   /* We punt if:
2208      1) [-INF, +INF]
2209      2) [-INF, +-INF(OVF)]
2210      3) [+-INF(OVF), +INF]
2211      4) [+-INF(OVF), +-INF(OVF)]
2212      We learn nothing when we have INF and INF(OVF) on both sides.
2213      Note that we do accept [-INF, -INF] and [+INF, +INF] without
2214      overflow.  */
2215   if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2216       && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2217     {
2218       set_value_range_to_varying (vr);
2219       return;
2220     }
2221 
2222   cmp = compare_values (min, max);
2223   if (cmp == -2 || cmp == 1)
2224     {
2225       /* If the new range has its limits swapped around (MIN > MAX),
2226 	 then the operation caused one of them to wrap around, mark
2227 	 the new range VARYING.  */
2228       set_value_range_to_varying (vr);
2229     }
2230   else
2231     set_value_range (vr, type, min, max, NULL);
2232 }
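
/* Illustrative sketch, not part of VRP: the "four cross operations"
   scheme above, applied to a signed interval multiplication using
   plain 64-bit arithmetic, so the overflow handling of the real code
   is deliberately ignored.  Hypothetical helper, for clarity only.  */

static void
sketch_mult_range (long long min0, long long max0,
		   long long min1, long long max1,
		   long long *lo, long long *hi)
{
  long long val[4];
  int i;

  val[0] = min0 * min1;
  val[1] = min0 * max1;
  val[2] = max0 * min1;
  val[3] = max0 * max1;

  /* The smallest and largest of the four endpoint products bound the
     result interval.  */
  *lo = *hi = val[0];
  for (i = 1; i < 4; i++)
    {
      if (val[i] < *lo)
	*lo = val[i];
      if (val[i] > *hi)
	*hi = val[i];
    }
}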
2233 
2234 /* Extract range information from a binary operation CODE based on
2235    the ranges of each of its operands *VR0 and *VR1 with resulting
2236    type EXPR_TYPE.  The resulting range is stored in *VR.  */
2237 
2238 static void
2239 extract_range_from_binary_expr_1 (value_range *vr,
2240 				  enum tree_code code, tree expr_type,
2241 				  value_range *vr0_, value_range *vr1_)
2242 {
2243   value_range vr0 = *vr0_, vr1 = *vr1_;
2244   value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2245   enum value_range_type type;
2246   tree min = NULL_TREE, max = NULL_TREE;
2247   int cmp;
2248 
2249   if (!INTEGRAL_TYPE_P (expr_type)
2250       && !POINTER_TYPE_P (expr_type))
2251     {
2252       set_value_range_to_varying (vr);
2253       return;
2254     }
2255 
2256   /* Not all binary expressions can be applied to ranges in a
2257      meaningful way.  Handle only arithmetic operations.  */
2258   if (code != PLUS_EXPR
2259       && code != MINUS_EXPR
2260       && code != POINTER_PLUS_EXPR
2261       && code != MULT_EXPR
2262       && code != TRUNC_DIV_EXPR
2263       && code != FLOOR_DIV_EXPR
2264       && code != CEIL_DIV_EXPR
2265       && code != EXACT_DIV_EXPR
2266       && code != ROUND_DIV_EXPR
2267       && code != TRUNC_MOD_EXPR
2268       && code != RSHIFT_EXPR
2269       && code != LSHIFT_EXPR
2270       && code != MIN_EXPR
2271       && code != MAX_EXPR
2272       && code != BIT_AND_EXPR
2273       && code != BIT_IOR_EXPR
2274       && code != BIT_XOR_EXPR)
2275     {
2276       set_value_range_to_varying (vr);
2277       return;
2278     }
2279 
2280   /* If both ranges are UNDEFINED, so is the result.  */
2281   if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2282     {
2283       set_value_range_to_undefined (vr);
2284       return;
2285     }
2286   /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2287      code.  At some point we may want to special-case operations that
2288      have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2289      operand.  */
2290   else if (vr0.type == VR_UNDEFINED)
2291     set_value_range_to_varying (&vr0);
2292   else if (vr1.type == VR_UNDEFINED)
2293     set_value_range_to_varying (&vr1);
2294 
2295   /* Now canonicalize anti-ranges to ranges when they are not symbolic
2296      and express ~[] op X as ([]' op X) U ([]'' op X).  */
2297   if (vr0.type == VR_ANTI_RANGE
2298       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2299     {
2300       extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2301       if (vrtem1.type != VR_UNDEFINED)
2302 	{
2303 	  value_range vrres = VR_INITIALIZER;
2304 	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2305 					    &vrtem1, vr1_);
2306 	  vrp_meet (vr, &vrres);
2307 	}
2308       return;
2309     }
2310   /* Likewise for X op ~[].  */
2311   if (vr1.type == VR_ANTI_RANGE
2312       && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2313     {
2314       extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2315       if (vrtem1.type != VR_UNDEFINED)
2316 	{
2317 	  value_range vrres = VR_INITIALIZER;
2318 	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2319 					    vr0_, &vrtem1);
2320 	  vrp_meet (vr, &vrres);
2321 	}
2322       return;
2323     }
2324 
2325   /* The type of the resulting value range defaults to VR0.TYPE.  */
2326   type = vr0.type;
2327 
2328   /* Refuse to operate on VARYING ranges, ranges of different kinds
2329      and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
2330      because we may be able to derive a useful range even if one of
2331      the operands is VR_VARYING or symbolic range.  Similarly for
2332      divisions, MIN/MAX and PLUS/MINUS.
2333 
2334      TODO, we may be able to derive anti-ranges in some cases.  */
2335   if (code != BIT_AND_EXPR
2336       && code != BIT_IOR_EXPR
2337       && code != TRUNC_DIV_EXPR
2338       && code != FLOOR_DIV_EXPR
2339       && code != CEIL_DIV_EXPR
2340       && code != EXACT_DIV_EXPR
2341       && code != ROUND_DIV_EXPR
2342       && code != TRUNC_MOD_EXPR
2343       && code != MIN_EXPR
2344       && code != MAX_EXPR
2345       && code != PLUS_EXPR
2346       && code != MINUS_EXPR
2347       && code != RSHIFT_EXPR
2348       && (vr0.type == VR_VARYING
2349 	  || vr1.type == VR_VARYING
2350 	  || vr0.type != vr1.type
2351 	  || symbolic_range_p (&vr0)
2352 	  || symbolic_range_p (&vr1)))
2353     {
2354       set_value_range_to_varying (vr);
2355       return;
2356     }
2357 
2358   /* Now evaluate the expression to determine the new range.  */
2359   if (POINTER_TYPE_P (expr_type))
2360     {
2361       if (code == MIN_EXPR || code == MAX_EXPR)
2362 	{
2363 	  /* For MIN/MAX expressions with pointers, we only care about
2364 	     nullness: if both are nonnull, then the result is nonnull;
2365 	     if both are null, then the result is null.  Otherwise the
2366 	     result is varying.  */
2367 	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2368 	    set_value_range_to_nonnull (vr, expr_type);
2369 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
2370 	    set_value_range_to_null (vr, expr_type);
2371 	  else
2372 	    set_value_range_to_varying (vr);
2373 	}
2374       else if (code == POINTER_PLUS_EXPR)
2375 	{
2376 	  /* For pointer types, we are really only interested in asserting
2377 	     whether the expression evaluates to non-NULL.  */
2378 	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2379 	    set_value_range_to_nonnull (vr, expr_type);
2380 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
2381 	    set_value_range_to_null (vr, expr_type);
2382 	  else
2383 	    set_value_range_to_varying (vr);
2384 	}
2385       else if (code == BIT_AND_EXPR)
2386 	{
2387 	  /* For pointer types, we are really only interested in asserting
2388 	     whether the expression evaluates to non-NULL.  */
2389 	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2390 	    set_value_range_to_nonnull (vr, expr_type);
2391 	  else if (range_is_null (&vr0) || range_is_null (&vr1))
2392 	    set_value_range_to_null (vr, expr_type);
2393 	  else
2394 	    set_value_range_to_varying (vr);
2395 	}
2396       else
2397 	set_value_range_to_varying (vr);
2398 
2399       return;
2400     }
2401 
2402   /* For integer ranges, apply the operation to each end of the
2403      range and see what we end up with.  */
2404   if (code == PLUS_EXPR || code == MINUS_EXPR)
2405     {
2406       const bool minus_p = (code == MINUS_EXPR);
2407       tree min_op0 = vr0.min;
2408       tree min_op1 = minus_p ? vr1.max : vr1.min;
2409       tree max_op0 = vr0.max;
2410       tree max_op1 = minus_p ? vr1.min : vr1.max;
2411       tree sym_min_op0 = NULL_TREE;
2412       tree sym_min_op1 = NULL_TREE;
2413       tree sym_max_op0 = NULL_TREE;
2414       tree sym_max_op1 = NULL_TREE;
2415       bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
2416 
2417       /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
2418 	 single-symbolic ranges, try to compute the precise resulting range,
2419 	 but only if we know that this resulting range will also be constant
2420 	 or single-symbolic.  */
2421       if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
2422 	  && (TREE_CODE (min_op0) == INTEGER_CST
2423 	      || (sym_min_op0
2424 		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
2425 	  && (TREE_CODE (min_op1) == INTEGER_CST
2426 	      || (sym_min_op1
2427 		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
2428 	  && (!(sym_min_op0 && sym_min_op1)
2429 	      || (sym_min_op0 == sym_min_op1
2430 		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
2431 	  && (TREE_CODE (max_op0) == INTEGER_CST
2432 	      || (sym_max_op0
2433 		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
2434 	  && (TREE_CODE (max_op1) == INTEGER_CST
2435 	      || (sym_max_op1
2436 		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
2437 	  && (!(sym_max_op0 && sym_max_op1)
2438 	      || (sym_max_op0 == sym_max_op1
2439 		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
2440 	{
2441 	  const signop sgn = TYPE_SIGN (expr_type);
2442 	  const unsigned int prec = TYPE_PRECISION (expr_type);
2443 	  wide_int type_min, type_max, wmin, wmax;
2444 	  int min_ovf = 0;
2445 	  int max_ovf = 0;
2446 
2447 	  /* Get the lower and upper bounds of the type.  */
2448 	  if (TYPE_OVERFLOW_WRAPS (expr_type))
2449 	    {
2450 	      type_min = wi::min_value (prec, sgn);
2451 	      type_max = wi::max_value (prec, sgn);
2452 	    }
2453 	  else
2454 	    {
2455 	      type_min = vrp_val_min (expr_type);
2456 	      type_max = vrp_val_max (expr_type);
2457 	    }
2458 
2459 	  /* Combine the lower bounds, if any.  */
2460 	  if (min_op0 && min_op1)
2461 	    {
2462 	      if (minus_p)
2463 		{
2464 		  wmin = wi::sub (min_op0, min_op1);
2465 
2466 		  /* Check for overflow.  */
2467 		  if (wi::cmp (0, min_op1, sgn)
2468 		      != wi::cmp (wmin, min_op0, sgn))
2469 		    min_ovf = wi::cmp (min_op0, min_op1, sgn);
2470 		}
2471 	      else
2472 		{
2473 		  wmin = wi::add (min_op0, min_op1);
2474 
2475 		  /* Check for overflow.  */
2476 		  if (wi::cmp (min_op1, 0, sgn)
2477 		      != wi::cmp (wmin, min_op0, sgn))
2478 		    min_ovf = wi::cmp (min_op0, wmin, sgn);
2479 		}
2480 	    }
2481 	  else if (min_op0)
2482 	    wmin = min_op0;
2483 	  else if (min_op1)
2484 	    {
2485 	      if (minus_p)
2486 		{
2487 		  wmin = wi::neg (min_op1);
2488 
2489 		  /* Check for overflow.  */
2490 		  if (sgn == SIGNED && wi::neg_p (min_op1) && wi::neg_p (wmin))
2491 		    min_ovf = 1;
2492 		  else if (sgn == UNSIGNED && wi::ne_p (min_op1, 0))
2493 		    min_ovf = -1;
2494 		}
2495 	      else
2496 		wmin = min_op1;
2497 	    }
2498 	  else
2499 	    wmin = wi::shwi (0, prec);
2500 
2501 	  /* Combine the upper bounds, if any.  */
2502 	  if (max_op0 && max_op1)
2503 	    {
2504 	      if (minus_p)
2505 		{
2506 		  wmax = wi::sub (max_op0, max_op1);
2507 
2508 		  /* Check for overflow.  */
2509 		  if (wi::cmp (0, max_op1, sgn)
2510 		      != wi::cmp (wmax, max_op0, sgn))
2511 		    max_ovf = wi::cmp (max_op0, max_op1, sgn);
2512 		}
2513 	      else
2514 		{
2515 		  wmax = wi::add (max_op0, max_op1);
2516 
2517 		  if (wi::cmp (max_op1, 0, sgn)
2518 		      != wi::cmp (wmax, max_op0, sgn))
2519 		    max_ovf = wi::cmp (max_op0, wmax, sgn);
2520 		}
2521 	    }
2522 	  else if (max_op0)
2523 	    wmax = max_op0;
2524 	  else if (max_op1)
2525 	    {
2526 	      if (minus_p)
2527 		{
2528 		  wmax = wi::neg (max_op1);
2529 
2530 		  /* Check for overflow.  */
2531 		  if (sgn == SIGNED && wi::neg_p (max_op1) && wi::neg_p (wmax))
2532 		    max_ovf = 1;
2533 		  else if (sgn == UNSIGNED && wi::ne_p (max_op1, 0))
2534 		    max_ovf = -1;
2535 		}
2536 	      else
2537 		wmax = max_op1;
2538 	    }
2539 	  else
2540 	    wmax = wi::shwi (0, prec);
2541 
2542 	  /* Check for type overflow.  */
2543 	  if (min_ovf == 0)
2544 	    {
2545 	      if (wi::cmp (wmin, type_min, sgn) == -1)
2546 		min_ovf = -1;
2547 	      else if (wi::cmp (wmin, type_max, sgn) == 1)
2548 		min_ovf = 1;
2549 	    }
2550 	  if (max_ovf == 0)
2551 	    {
2552 	      if (wi::cmp (wmax, type_min, sgn) == -1)
2553 		max_ovf = -1;
2554 	      else if (wi::cmp (wmax, type_max, sgn) == 1)
2555 		max_ovf = 1;
2556 	    }
2557 
2558 	  /* If we have overflow for the constant part and the resulting
2559 	     range will be symbolic, drop to VR_VARYING.  */
2560 	  if ((min_ovf && sym_min_op0 != sym_min_op1)
2561 	      || (max_ovf && sym_max_op0 != sym_max_op1))
2562 	    {
2563 	      set_value_range_to_varying (vr);
2564 	      return;
2565 	    }
2566 
2567 	  if (TYPE_OVERFLOW_WRAPS (expr_type))
2568 	    {
2569 	      /* If overflow wraps, truncate the values and adjust the
2570 		 range kind and bounds appropriately.  */
2571 	      wide_int tmin = wide_int::from (wmin, prec, sgn);
2572 	      wide_int tmax = wide_int::from (wmax, prec, sgn);
2573 	      if (min_ovf == max_ovf)
2574 		{
2575 		  /* No overflow or both overflow or underflow.  The
2576 		     range kind stays VR_RANGE.  */
2577 		  min = wide_int_to_tree (expr_type, tmin);
2578 		  max = wide_int_to_tree (expr_type, tmax);
2579 		}
2580 	      else if ((min_ovf == -1 && max_ovf == 0)
2581 		       || (max_ovf == 1 && min_ovf == 0))
2582 		{
2583 		  /* Min underflow or max overflow.  The range kind
2584 		     changes to VR_ANTI_RANGE.  */
2585 		  bool covers = false;
2586 		  wide_int tem = tmin;
2587 		  type = VR_ANTI_RANGE;
2588 		  tmin = tmax + 1;
2589 		  if (wi::cmp (tmin, tmax, sgn) < 0)
2590 		    covers = true;
2591 		  tmax = tem - 1;
2592 		  if (wi::cmp (tmax, tem, sgn) > 0)
2593 		    covers = true;
2594 		  /* If the anti-range would cover nothing, drop to varying.
2595 		     Likewise if the anti-range bounds are outside of the
2596 		     type's values.  */
2597 		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
2598 		    {
2599 		      set_value_range_to_varying (vr);
2600 		      return;
2601 		    }
2602 		  min = wide_int_to_tree (expr_type, tmin);
2603 		  max = wide_int_to_tree (expr_type, tmax);
2604 		}
2605 	      else
2606 		{
2607 		  /* Other underflow and/or overflow, drop to VR_VARYING.  */
2608 		  set_value_range_to_varying (vr);
2609 		  return;
2610 		}
2611 	    }
2612 	  else
2613 	    {
2614 	      /* If overflow does not wrap, saturate to the type's min/max
2615 	         value.  */
2616 	      if (min_ovf == -1)
2617 		{
2618 		  if (needs_overflow_infinity (expr_type)
2619 		      && supports_overflow_infinity (expr_type))
2620 		    min = negative_overflow_infinity (expr_type);
2621 		  else
2622 		    min = wide_int_to_tree (expr_type, type_min);
2623 		}
2624 	      else if (min_ovf == 1)
2625 		{
2626 		  if (needs_overflow_infinity (expr_type)
2627 		      && supports_overflow_infinity (expr_type))
2628 		    min = positive_overflow_infinity (expr_type);
2629 		  else
2630 		    min = wide_int_to_tree (expr_type, type_max);
2631 		}
2632 	      else
2633 		min = wide_int_to_tree (expr_type, wmin);
2634 
2635 	      if (max_ovf == -1)
2636 		{
2637 		  if (needs_overflow_infinity (expr_type)
2638 		      && supports_overflow_infinity (expr_type))
2639 		    max = negative_overflow_infinity (expr_type);
2640 		  else
2641 		    max = wide_int_to_tree (expr_type, type_min);
2642 		}
2643 	      else if (max_ovf == 1)
2644 		{
2645 		  if (needs_overflow_infinity (expr_type)
2646 		      && supports_overflow_infinity (expr_type))
2647 		    max = positive_overflow_infinity (expr_type);
2648 		  else
2649 		    max = wide_int_to_tree (expr_type, type_max);
2650 		}
2651 	      else
2652 		max = wide_int_to_tree (expr_type, wmax);
2653 	    }
2654 
2655 	  if (needs_overflow_infinity (expr_type)
2656 	      && supports_overflow_infinity (expr_type))
2657 	    {
2658 	      if ((min_op0 && is_negative_overflow_infinity (min_op0))
2659 		  || (min_op1
2660 		      && (minus_p
2661 			  ? is_positive_overflow_infinity (min_op1)
2662 			  : is_negative_overflow_infinity (min_op1))))
2663 		min = negative_overflow_infinity (expr_type);
2664 	      if ((max_op0 && is_positive_overflow_infinity (max_op0))
2665 		  || (max_op1
2666 		      && (minus_p
2667 			  ? is_negative_overflow_infinity (max_op1)
2668 			  : is_positive_overflow_infinity (max_op1))))
2669 		max = positive_overflow_infinity (expr_type);
2670 	    }
2671 
2672 	  /* If the result lower bound is constant, we're done;
2673 	     otherwise, build the symbolic lower bound.  */
2674 	  if (sym_min_op0 == sym_min_op1)
2675 	    ;
2676 	  else if (sym_min_op0)
2677 	    min = build_symbolic_expr (expr_type, sym_min_op0,
2678 				       neg_min_op0, min);
2679 	  else if (sym_min_op1)
2680 	    {
2681 	      /* We may not negate if that might introduce
2682 		 undefined overflow.  */
2683 	      if (! minus_p
2684 		  || neg_min_op1
2685 		  || TYPE_OVERFLOW_WRAPS (expr_type))
2686 		min = build_symbolic_expr (expr_type, sym_min_op1,
2687 					   neg_min_op1 ^ minus_p, min);
2688 	      else
2689 		min = NULL_TREE;
2690 	    }
2691 
2692 	  /* Likewise for the upper bound.  */
2693 	  if (sym_max_op0 == sym_max_op1)
2694 	    ;
2695 	  else if (sym_max_op0)
2696 	    max = build_symbolic_expr (expr_type, sym_max_op0,
2697 				       neg_max_op0, max);
2698 	  else if (sym_max_op1)
2699 	    {
2700 	      /* We may not negate if that might introduce
2701 		 undefined overflow.  */
2702 	      if (! minus_p
2703 		  || neg_max_op1
2704 		  || TYPE_OVERFLOW_WRAPS (expr_type))
2705 		max = build_symbolic_expr (expr_type, sym_max_op1,
2706 					   neg_max_op1 ^ minus_p, max);
2707 	      else
2708 		max = NULL_TREE;
2709 	    }
2710 	}
2711       else
2712 	{
2713 	  /* For other cases, for example if we have a PLUS_EXPR with two
2714 	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
2715 	     to compute a precise range for such a case.
2716 	     ???  General even mixed range kind operations can be expressed
2717 	     by for example transforming ~[3, 5] + [1, 2] to range-only
2718 	     operations and a union primitive:
2719 	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
2720 	           [-INF+1, 4]     U    [6, +INF(OVF)]
2721 	     though usually the union is not exactly representable with
2722 	     a single range or anti-range as the above is
2723 		 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2724 	     but one could use a scheme similar to equivalences for this. */
2725 	  set_value_range_to_varying (vr);
2726 	  return;
2727 	}
2728     }
2729   else if (code == MIN_EXPR
2730 	   || code == MAX_EXPR)
2731     {
2732       if (vr0.type == VR_RANGE
2733 	  && !symbolic_range_p (&vr0))
2734 	{
2735 	  type = VR_RANGE;
2736 	  if (vr1.type == VR_RANGE
2737 	      && !symbolic_range_p (&vr1))
2738 	    {
2739 	      /* For operations that make the resulting range directly
2740 		 proportional to the original ranges, apply the operation to
2741 		 the same end of each range.  */
2742 	      min = vrp_int_const_binop (code, vr0.min, vr1.min);
2743 	      max = vrp_int_const_binop (code, vr0.max, vr1.max);
2744 	    }
2745 	  else if (code == MIN_EXPR)
2746 	    {
2747 	      min = vrp_val_min (expr_type);
2748 	      max = vr0.max;
2749 	    }
2750 	  else if (code == MAX_EXPR)
2751 	    {
2752 	      min = vr0.min;
2753 	      max = vrp_val_max (expr_type);
2754 	    }
2755 	}
2756       else if (vr1.type == VR_RANGE
2757 	       && !symbolic_range_p (&vr1))
2758 	{
2759 	  type = VR_RANGE;
2760 	  if (code == MIN_EXPR)
2761 	    {
2762 	      min = vrp_val_min (expr_type);
2763 	      max = vr1.max;
2764 	    }
2765 	  else if (code == MAX_EXPR)
2766 	    {
2767 	      min = vr1.min;
2768 	      max = vrp_val_max (expr_type);
2769 	    }
2770 	}
2771       else
2772 	{
2773 	  set_value_range_to_varying (vr);
2774 	  return;
2775 	}
2776     }
2777   else if (code == MULT_EXPR)
2778     {
2779       /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2780 	 drop to varying.  This test requires 2*prec bits if both
2781 	 operands are signed and 2*prec + 2 bits if either is not.  */
2782 
2783       signop sign = TYPE_SIGN (expr_type);
2784       unsigned int prec = TYPE_PRECISION (expr_type);
2785 
2786       if (range_int_cst_p (&vr0)
2787 	  && range_int_cst_p (&vr1)
2788 	  && TYPE_OVERFLOW_WRAPS (expr_type))
2789 	{
2790 	  typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
2791 	  typedef generic_wide_int
2792              <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
2793 	  vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
2794 	  vrp_int size = sizem1 + 1;
2795 
2796 	  /* Extend the values using the sign of the result to PREC2.
2797 	     From here on out, everything is just signed math no matter
2798 	     what the input types were.  */
2799           vrp_int min0 = vrp_int_cst (vr0.min);
2800           vrp_int max0 = vrp_int_cst (vr0.max);
2801           vrp_int min1 = vrp_int_cst (vr1.min);
2802           vrp_int max1 = vrp_int_cst (vr1.max);
2803 	  /* Canonicalize the intervals.  */
2804 	  if (sign == UNSIGNED)
2805 	    {
2806 	      if (wi::ltu_p (size, min0 + max0))
2807 		{
2808 		  min0 -= size;
2809 		  max0 -= size;
2810 		}
2811 
2812 	      if (wi::ltu_p (size, min1 + max1))
2813 		{
2814 		  min1 -= size;
2815 		  max1 -= size;
2816 		}
2817 	    }
2818 
2819 	  vrp_int prod0 = min0 * min1;
2820 	  vrp_int prod1 = min0 * max1;
2821 	  vrp_int prod2 = max0 * min1;
2822 	  vrp_int prod3 = max0 * max1;
2823 
2824 	  /* Sort the 4 products so that min is in prod0 and max is in
2825 	     prod3.  */
2826 	  /* min0min1 > max0max1 */
2827 	  if (wi::gts_p (prod0, prod3))
2828 	    std::swap (prod0, prod3);
2829 
2830 	  /* min0max1 > max0min1 */
2831 	  if (wi::gts_p (prod1, prod2))
2832 	    std::swap (prod1, prod2);
2833 
2834 	  if (wi::gts_p (prod0, prod1))
2835 	    std::swap (prod0, prod1);
2836 
2837 	  if (wi::gts_p (prod2, prod3))
2838 	    std::swap (prod2, prod3);
2839 
2840 	  /* diff = max - min.  */
2841 	  prod2 = prod3 - prod0;
2842 	  if (wi::geu_p (prod2, sizem1))
2843 	    {
2844 	      /* the range covers all values.  */
2845 	      set_value_range_to_varying (vr);
2846 	      return;
2847 	    }
2848 
2849 	  /* The following should handle the wrapping and selecting
2850 	     VR_ANTI_RANGE for us.  */
2851 	  min = wide_int_to_tree (expr_type, prod0);
2852 	  max = wide_int_to_tree (expr_type, prod3);
2853 	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2854 	  return;
2855 	}
2856 
2857       /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2858 	 drop to VR_VARYING.  It would take more effort to compute a
2859 	 precise range for such a case.  For example, if we have
2860 	 op0 == 65536 and op1 == 65536 with their ranges both being
2861 	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2862 	 we cannot claim that the product is in ~[0,0].  Note that we
2863 	 are guaranteed to have vr0.type == vr1.type at this
2864 	 point.  */
2865       if (vr0.type == VR_ANTI_RANGE
2866 	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2867 	{
2868 	  set_value_range_to_varying (vr);
2869 	  return;
2870 	}
2871 
2872       extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2873       return;
2874     }
2875   else if (code == RSHIFT_EXPR
2876 	   || code == LSHIFT_EXPR)
2877     {
2878       /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2879 	 then drop to VR_VARYING.  Outside of this range we get undefined
2880 	 behavior from the shift operation.  We cannot even trust
2881 	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2882 	 shifts, and the operation at the tree level may be widened.  */
2883       if (range_int_cst_p (&vr1)
2884 	  && compare_tree_int (vr1.min, 0) >= 0
2885 	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2886 	{
2887 	  if (code == RSHIFT_EXPR)
2888 	    {
2889 	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
2890 		 useful ranges just from the shift count.  E.g.
2891 		 x >> 63 for signed 64-bit x is always [-1, 0].  */
2892 	      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2893 		{
2894 		  vr0.type = type = VR_RANGE;
2895 		  vr0.min = vrp_val_min (expr_type);
2896 		  vr0.max = vrp_val_max (expr_type);
2897 		}
2898 	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2899 	      return;
2900 	    }
2901 	  /* We can map lshifts by constants to MULT_EXPR handling.  */
2902 	  else if (code == LSHIFT_EXPR
2903 		   && range_int_cst_singleton_p (&vr1))
2904 	    {
2905 	      bool saved_flag_wrapv;
2906 	      value_range vr1p = VR_INITIALIZER;
2907 	      vr1p.type = VR_RANGE;
2908 	      vr1p.min = (wide_int_to_tree
2909 			  (expr_type,
2910 			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
2911 						TYPE_PRECISION (expr_type))));
2912 	      vr1p.max = vr1p.min;
2913 	      /* We have to use a wrapping multiply though as signed overflow
2914 		 on lshifts is implementation defined in C89.  */
2915 	      saved_flag_wrapv = flag_wrapv;
2916 	      flag_wrapv = 1;
2917 	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2918 						&vr0, &vr1p);
2919 	      flag_wrapv = saved_flag_wrapv;
2920 	      return;
2921 	    }
2922 	  else if (code == LSHIFT_EXPR
2923 		   && range_int_cst_p (&vr0))
2924 	    {
2925 	      int prec = TYPE_PRECISION (expr_type);
2926 	      int overflow_pos = prec;
2927 	      int bound_shift;
2928 	      wide_int low_bound, high_bound;
2929 	      bool uns = TYPE_UNSIGNED (expr_type);
2930 	      bool in_bounds = false;
2931 
2932 	      if (!uns)
2933 		overflow_pos -= 1;
2934 
2935 	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
2936 	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
2937 		 overflow.  However, for that to happen, vr1.max needs to be
2938 		 zero, which means vr1 is a singleton range of zero, which
2939 		 means it should be handled by the previous LSHIFT_EXPR
2940 		 if-clause.  */
2941 	      wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
2942 	      wide_int complement = ~(bound - 1);
2943 
2944 	      if (uns)
2945 		{
2946 		  low_bound = bound;
2947 		  high_bound = complement;
2948 		  if (wi::ltu_p (vr0.max, low_bound))
2949 		    {
2950 		      /* [5, 6] << [1, 2] == [10, 24].  */
2951 		      /* We're shifting out only zeroes, the value increases
2952 			 monotonically.  */
2953 		      in_bounds = true;
2954 		    }
2955 		  else if (wi::ltu_p (high_bound, vr0.min))
2956 		    {
2957 		      /* [0xffffff00, 0xffffffff] << [1, 2]
2958 		         == [0xfffffc00, 0xfffffffe].  */
2959 		      /* We're shifting out only ones, the value decreases
2960 			 monotonically.  */
2961 		      in_bounds = true;
2962 		    }
2963 		}
2964 	      else
2965 		{
2966 		  /* [-1, 1] << [1, 2] == [-4, 4].  */
2967 		  low_bound = complement;
2968 		  high_bound = bound;
2969 		  if (wi::lts_p (vr0.max, high_bound)
2970 		      && wi::lts_p (low_bound, vr0.min))
2971 		    {
2972 		      /* For non-negative numbers, we're shifting out only
2973 			 zeroes, the value increases monotonically.
2974 			 For negative numbers, we're shifting out only ones, the
2975 			 value decreases monotonically.  */
2976 		      in_bounds = true;
2977 		    }
2978 		}
2979 
2980 	      if (in_bounds)
2981 		{
2982 		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2983 		  return;
2984 		}
2985 	    }
2986 	}
2987       set_value_range_to_varying (vr);
2988       return;
2989     }
2990   else if (code == TRUNC_DIV_EXPR
2991 	   || code == FLOOR_DIV_EXPR
2992 	   || code == CEIL_DIV_EXPR
2993 	   || code == EXACT_DIV_EXPR
2994 	   || code == ROUND_DIV_EXPR)
2995     {
2996       if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2997 	{
2998 	  /* For division, if op1 has VR_RANGE but op0 does not, something
2999 	     can be deduced just from that range.  Say [min, max] / [4, max]
3000 	     gives [min / 4, max / 4] range.  */
3001 	  if (vr1.type == VR_RANGE
3002 	      && !symbolic_range_p (&vr1)
3003 	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
3004 	    {
3005 	      vr0.type = type = VR_RANGE;
3006 	      vr0.min = vrp_val_min (expr_type);
3007 	      vr0.max = vrp_val_max (expr_type);
3008 	    }
3009 	  else
3010 	    {
3011 	      set_value_range_to_varying (vr);
3012 	      return;
3013 	    }
3014 	}
3015 
3016       /* For divisions, if flag_non_call_exceptions is true, we must
3017 	 not eliminate a division by zero.  */
3018       if (cfun->can_throw_non_call_exceptions
3019 	  && (vr1.type != VR_RANGE
3020 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
3021 	{
3022 	  set_value_range_to_varying (vr);
3023 	  return;
3024 	}
3025 
3026       /* For divisions, if op0 is VR_RANGE, we can deduce a range
3027 	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
3028 	 include 0.  */
3029       if (vr0.type == VR_RANGE
3030 	  && (vr1.type != VR_RANGE
3031 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
3032 	{
3033 	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
3034 	  int cmp;
3035 
3036 	  min = NULL_TREE;
3037 	  max = NULL_TREE;
3038 	  if (TYPE_UNSIGNED (expr_type)
3039 	      || value_range_nonnegative_p (&vr1))
3040 	    {
3041 	      /* For unsigned division or when divisor is known
3042 		 to be non-negative, the range has to cover
3043 		 all numbers from 0 to max for positive max
3044 		 and all numbers from min to 0 for negative min.  */
3045 	      cmp = compare_values (vr0.max, zero);
3046 	      if (cmp == -1)
3047 		{
3048 		  /* When vr0.max < 0, vr1.min != 0 and value
3049 		     ranges for dividend and divisor are available.  */
3050 		  if (vr1.type == VR_RANGE
3051 		      && !symbolic_range_p (&vr0)
3052 		      && !symbolic_range_p (&vr1)
3053 		      && compare_values (vr1.min, zero) != 0)
3054 		    max = int_const_binop (code, vr0.max, vr1.min);
3055 		  else
3056 		    max = zero;
3057 		}
3058 	      else if (cmp == 0 || cmp == 1)
3059 		max = vr0.max;
3060 	      else
3061 		type = VR_VARYING;
3062 	      cmp = compare_values (vr0.min, zero);
3063 	      if (cmp == 1)
3064 		{
3065 		  /* For unsigned division when value ranges for dividend
3066 		     and divisor are available.  */
3067 		  if (vr1.type == VR_RANGE
3068 		      && !symbolic_range_p (&vr0)
3069 		      && !symbolic_range_p (&vr1)
3070 		      && compare_values (vr1.max, zero) != 0)
3071 		    min = int_const_binop (code, vr0.min, vr1.max);
3072 		  else
3073 		    min = zero;
3074 		}
3075 	      else if (cmp == 0 || cmp == -1)
3076 		min = vr0.min;
3077 	      else
3078 		type = VR_VARYING;
3079 	    }
3080 	  else
3081 	    {
3082 	      /* Otherwise the range is -max .. max or min .. -min
3083 		 depending on which bound is bigger in absolute value,
3084 		 as the division can change the sign.  */
3085 	      abs_extent_range (vr, vr0.min, vr0.max);
3086 	      return;
3087 	    }
3088 	  if (type == VR_VARYING)
3089 	    {
3090 	      set_value_range_to_varying (vr);
3091 	      return;
3092 	    }
3093 	}
3094       else if (!symbolic_range_p (&vr0) && !symbolic_range_p (&vr1))
3095 	{
3096 	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
3097 	  return;
3098 	}
3099     }
3100   else if (code == TRUNC_MOD_EXPR)
3101     {
3102       if (range_is_null (&vr1))
3103 	{
3104 	  set_value_range_to_undefined (vr);
3105 	  return;
3106 	}
3107       /* ABS (A % B) < ABS (B) and either
3108 	 0 <= A % B <= A or A <= A % B <= 0.  */
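      /* For instance (illustration only): with signed B known to be in
	 [3, 10] and nothing known about A, the bounds computed below
	 give A % B in [-9, 9]; if A is additionally known to be in
	 [0, 100], the result is clamped further to [0, 9].  */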
3109       type = VR_RANGE;
3110       signop sgn = TYPE_SIGN (expr_type);
3111       unsigned int prec = TYPE_PRECISION (expr_type);
3112       wide_int wmin, wmax, tmp;
3113       wide_int zero = wi::zero (prec);
3114       wide_int one = wi::one (prec);
3115       if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
3116 	{
3117 	  wmax = wi::sub (vr1.max, one);
3118 	  if (sgn == SIGNED)
3119 	    {
3120 	      tmp = wi::sub (wi::minus_one (prec), vr1.min);
3121 	      wmax = wi::smax (wmax, tmp);
3122 	    }
3123 	}
3124       else
3125 	{
3126 	  wmax = wi::max_value (prec, sgn);
3127 	  /* X % INT_MIN may be INT_MAX.  */
3128 	  if (sgn == UNSIGNED)
3129 	    wmax = wmax - one;
3130 	}
3131 
3132       if (sgn == UNSIGNED)
3133 	wmin = zero;
3134       else
3135 	{
3136 	  wmin = -wmax;
3137 	  if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
3138 	    {
3139 	      tmp = vr0.min;
3140 	      if (wi::gts_p (tmp, zero))
3141 		tmp = zero;
3142 	      wmin = wi::smax (wmin, tmp);
3143 	    }
3144 	}
3145 
3146       if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
3147 	{
3148 	  tmp = vr0.max;
3149 	  if (sgn == SIGNED && wi::neg_p (tmp))
3150 	    tmp = zero;
3151 	  wmax = wi::min (wmax, tmp, sgn);
3152 	}
3153 
3154       min = wide_int_to_tree (expr_type, wmin);
3155       max = wide_int_to_tree (expr_type, wmax);
3156     }
3157   else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
3158     {
3159       bool int_cst_range0, int_cst_range1;
3160       wide_int may_be_nonzero0, may_be_nonzero1;
3161       wide_int must_be_nonzero0, must_be_nonzero1;
3162 
3163       int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
3164 						  &may_be_nonzero0,
3165 						  &must_be_nonzero0);
3166       int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
3167 						  &may_be_nonzero1,
3168 						  &must_be_nonzero1);
3169 
3170       type = VR_RANGE;
3171       if (code == BIT_AND_EXPR)
3172 	{
3173 	  min = wide_int_to_tree (expr_type,
3174 				  must_be_nonzero0 & must_be_nonzero1);
3175 	  wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
3176 	  /* If both input ranges contain only negative values we can
3177 	     truncate the result range maximum to the minimum of the
3178 	     input range maxima.  */
3179 	  if (int_cst_range0 && int_cst_range1
3180 	      && tree_int_cst_sgn (vr0.max) < 0
3181 	      && tree_int_cst_sgn (vr1.max) < 0)
3182 	    {
3183 	      wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3184 	      wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3185 	    }
3186 	  /* If either input range contains only non-negative values
3187 	     we can truncate the result range maximum to the respective
3188 	     maximum of the input range.  */
3189 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3190 	    wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
3191 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3192 	    wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
3193 	  max = wide_int_to_tree (expr_type, wmax);
3194 	}
3195       else if (code == BIT_IOR_EXPR)
3196 	{
3197 	  max = wide_int_to_tree (expr_type,
3198 				  may_be_nonzero0 | may_be_nonzero1);
3199 	  wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
3200 	  /* If both input ranges contain only non-negative values we can
3201 	     truncate the minimum of the result range to the maximum
3202 	     of the input range minima.  */
3203 	  if (int_cst_range0 && int_cst_range1
3204 	      && tree_int_cst_sgn (vr0.min) >= 0
3205 	      && tree_int_cst_sgn (vr1.min) >= 0)
3206 	    {
3207 	      wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3208 	      wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3209 	    }
3210 	  /* If either input range contains only negative values
3211 	     we can truncate the minimum of the result range to the
3212 	     respective input range minimum.  */
3213 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3214 	    wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
3215 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3216 	    wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
3217 	  min = wide_int_to_tree (expr_type, wmin);
3218 	}
3219       else if (code == BIT_XOR_EXPR)
3220 	{
3221 	  wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
3222 				       | ~(may_be_nonzero0 | may_be_nonzero1));
3223 	  wide_int result_one_bits
3224 	    = (must_be_nonzero0.and_not (may_be_nonzero1)
3225 	       | must_be_nonzero1.and_not (may_be_nonzero0));
3226 	  max = wide_int_to_tree (expr_type, ~result_zero_bits);
3227 	  min = wide_int_to_tree (expr_type, result_one_bits);
3228 	  /* If the range has all positive or all negative values the
3229 	     result is better than VARYING.  */
3230 	  if (tree_int_cst_sgn (min) < 0
3231 	      || tree_int_cst_sgn (max) >= 0)
3232 	    ;
3233 	  else
3234 	    max = min = NULL_TREE;
3235 	}
3236     }
3237   else
3238     gcc_unreachable ();
3239 
3240   /* If either MIN or MAX overflowed, then set the resulting range to
3241      VARYING.  But we do accept an overflow infinity representation.  */
3242   if (min == NULL_TREE
3243       || (TREE_OVERFLOW_P (min) && !is_overflow_infinity (min))
3244       || max == NULL_TREE
3245       || (TREE_OVERFLOW_P (max) && !is_overflow_infinity (max)))
3246     {
3247       set_value_range_to_varying (vr);
3248       return;
3249     }
3250 
3251   /* We punt if:
3252      1) [-INF, +INF]
3253      2) [-INF, +-INF(OVF)]
3254      3) [+-INF(OVF), +INF]
3255      4) [+-INF(OVF), +-INF(OVF)]
3256      We learn nothing when we have INF and INF(OVF) on both sides.
3257      Note that we do accept [-INF, -INF] and [+INF, +INF] without
3258      overflow.  */
3259   if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3260       && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3261     {
3262       set_value_range_to_varying (vr);
3263       return;
3264     }
3265 
3266   cmp = compare_values (min, max);
3267   if (cmp == -2 || cmp == 1)
3268     {
3269       /* If the new range has its limits swapped around (MIN > MAX),
3270 	 then the operation caused one of them to wrap around, mark
3271 	 the new range VARYING.  */
3272       set_value_range_to_varying (vr);
3273     }
3274   else
3275     set_value_range (vr, type, min, max, NULL);
3276 }
3277 
3278 /* Extract range information from a binary expression OP0 CODE OP1 based on
3279    the ranges of each of its operands with resulting type EXPR_TYPE.
3280    The resulting range is stored in *VR.  */
3281 
3282 static void
3283 extract_range_from_binary_expr (value_range *vr,
3284 				enum tree_code code,
3285 				tree expr_type, tree op0, tree op1)
3286 {
3287   value_range vr0 = VR_INITIALIZER;
3288   value_range vr1 = VR_INITIALIZER;
3289 
3290   /* Get value ranges for each operand.  For constant operands, create
3291      a new value range with the operand to simplify processing.  */
3292   if (TREE_CODE (op0) == SSA_NAME)
3293     vr0 = *(get_value_range (op0));
3294   else if (is_gimple_min_invariant (op0))
3295     set_value_range_to_value (&vr0, op0, NULL);
3296   else
3297     set_value_range_to_varying (&vr0);
3298 
3299   if (TREE_CODE (op1) == SSA_NAME)
3300     vr1 = *(get_value_range (op1));
3301   else if (is_gimple_min_invariant (op1))
3302     set_value_range_to_value (&vr1, op1, NULL);
3303   else
3304     set_value_range_to_varying (&vr1);
3305 
3306   extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3307 
3308   /* Try harder for PLUS and MINUS if the range of one operand is symbolic
3309      and based on the other operand, for example if it was deduced from a
3310      symbolic comparison.  When a bound of the range of the first operand
3311      is invariant, we set the corresponding bound of the new range to INF
3312      in order to avoid recursing on the range of the second operand.  */
3313   if (vr->type == VR_VARYING
3314       && (code == PLUS_EXPR || code == MINUS_EXPR)
3315       && TREE_CODE (op1) == SSA_NAME
3316       && vr0.type == VR_RANGE
3317       && symbolic_range_based_on_p (&vr0, op1))
3318     {
3319       const bool minus_p = (code == MINUS_EXPR);
3320       value_range n_vr1 = VR_INITIALIZER;
3321 
3322       /* Try with VR0 and [-INF, OP1].  */
3323       if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
3324 	set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
3325 
3326       /* Try with VR0 and [OP1, +INF].  */
3327       else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
3328 	set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
3329 
3330       /* Try with VR0 and [OP1, OP1].  */
3331       else
3332 	set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
3333 
3334       extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
3335     }
3336 
3337   if (vr->type == VR_VARYING
3338       && (code == PLUS_EXPR || code == MINUS_EXPR)
3339       && TREE_CODE (op0) == SSA_NAME
3340       && vr1.type == VR_RANGE
3341       && symbolic_range_based_on_p (&vr1, op0))
3342     {
3343       const bool minus_p = (code == MINUS_EXPR);
3344       value_range n_vr0 = VR_INITIALIZER;
3345 
3346       /* Try with [-INF, OP0] and VR1.  */
3347       if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
3348 	set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
3349 
3350       /* Try with [OP0, +INF] and VR1.  */
3351       else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
3352 	set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
3353 
3354       /* Try with [OP0, OP0] and VR1.  */
3355       else
3356 	set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
3357 
3358       extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
3359     }
3360 }
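/* Editorial sketch (not in the original source; the SSA names are
   hypothetical): the retries above help cases like z_4 = y_2 - x_1 where
   the recorded range of y_2 is the symbolic range [x_1 + 1, +INF].  The
   first call of the worker yields VARYING, but retrying with the second
   operand widened to [-INF, x_1] lets the worker cancel the symbol and
   derive a positive range such as [1, +INF] for z_4.  */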
3361 
3362 /* Extract range information from a unary operation CODE based on
3363    the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3364    The resulting range is stored in *VR.  */
3365 
3366 static void
3367 extract_range_from_unary_expr_1 (value_range *vr,
3368 				 enum tree_code code, tree type,
3369 				 value_range *vr0_, tree op0_type)
3370 {
3371   value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3372 
3373   /* VRP only operates on integral and pointer types.  */
3374   if (!(INTEGRAL_TYPE_P (op0_type)
3375 	|| POINTER_TYPE_P (op0_type))
3376       || !(INTEGRAL_TYPE_P (type)
3377 	   || POINTER_TYPE_P (type)))
3378     {
3379       set_value_range_to_varying (vr);
3380       return;
3381     }
3382 
3383   /* If VR0 is UNDEFINED, so is the result.  */
3384   if (vr0.type == VR_UNDEFINED)
3385     {
3386       set_value_range_to_undefined (vr);
3387       return;
3388     }
3389 
3390   /* Handle operations that we express in terms of others.  */
3391   if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
3392     {
3393       /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
3394       copy_value_range (vr, &vr0);
3395       return;
3396     }
3397   else if (code == NEGATE_EXPR)
3398     {
3399       /* -X is simply 0 - X, so re-use existing code that also handles
3400          anti-ranges fine.  */
3401       value_range zero = VR_INITIALIZER;
3402       set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3403       extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3404       return;
3405     }
3406   else if (code == BIT_NOT_EXPR)
3407     {
3408       /* ~X is simply -1 - X, so re-use existing code that also handles
3409          anti-ranges fine.  */
3410       value_range minusone = VR_INITIALIZER;
3411       set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3412       extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3413 					type, &minusone, &vr0);
3414       return;
3415     }
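  /* Editorial example (not part of the original source): with the rewrites
     above, an operand range of [3, 7] yields [-7, -3] for -X via 0 - X,
     and an operand range of [0, 3] yields [-4, -1] for ~X via -1 - X,
     matching the exact results in a signed type.  */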
3416 
3417   /* Now canonicalize anti-ranges to ranges when they are not symbolic
3418      and express op ~[]  as (op []') U (op []'').  */
3419   if (vr0.type == VR_ANTI_RANGE
3420       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3421     {
3422       extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3423       if (vrtem1.type != VR_UNDEFINED)
3424 	{
3425 	  value_range vrres = VR_INITIALIZER;
3426 	  extract_range_from_unary_expr_1 (&vrres, code, type,
3427 					   &vrtem1, op0_type);
3428 	  vrp_meet (vr, &vrres);
3429 	}
3430       return;
3431     }
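  /* Editorial example (not in the original source): for vr0 = ~[0, 0] in a
     signed 32-bit type, ranges_from_anti_range splits the anti-range into
     [INT_MIN, -1] and [1, INT_MAX]; the unary operation is applied to each
     piece by the recursive calls above and the two partial results are
     combined with vrp_meet.  */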
3432 
3433   if (CONVERT_EXPR_CODE_P (code))
3434     {
3435       tree inner_type = op0_type;
3436       tree outer_type = type;
3437 
3438       /* If the expression evaluates to a pointer, we are only interested in
3439 	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
3440       if (POINTER_TYPE_P (type))
3441 	{
3442 	  if (range_is_nonnull (&vr0))
3443 	    set_value_range_to_nonnull (vr, type);
3444 	  else if (range_is_null (&vr0))
3445 	    set_value_range_to_null (vr, type);
3446 	  else
3447 	    set_value_range_to_varying (vr);
3448 	  return;
3449 	}
3450 
3451       /* If VR0 is varying and we increase the type precision, assume
3452 	 a full range for the following transformation.  */
3453       if (vr0.type == VR_VARYING
3454 	  && INTEGRAL_TYPE_P (inner_type)
3455 	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3456 	{
3457 	  vr0.type = VR_RANGE;
3458 	  vr0.min = TYPE_MIN_VALUE (inner_type);
3459 	  vr0.max = TYPE_MAX_VALUE (inner_type);
3460 	}
3461 
3462       /* If VR0 is a constant range or anti-range and the conversion is
3463 	 not truncating we can convert the min and max values and
3464 	 canonicalize the resulting range.  Otherwise we can do the
3465 	 conversion if the size of the range is less than what the
3466 	 precision of the target type can represent and the range is
3467 	 not an anti-range.  */
3468       if ((vr0.type == VR_RANGE
3469 	   || vr0.type == VR_ANTI_RANGE)
3470 	  && TREE_CODE (vr0.min) == INTEGER_CST
3471 	  && TREE_CODE (vr0.max) == INTEGER_CST
3472 	  && (!is_overflow_infinity (vr0.min)
3473 	      || (vr0.type == VR_RANGE
3474 		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3475 		  && needs_overflow_infinity (outer_type)
3476 		  && supports_overflow_infinity (outer_type)))
3477 	  && (!is_overflow_infinity (vr0.max)
3478 	      || (vr0.type == VR_RANGE
3479 		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3480 		  && needs_overflow_infinity (outer_type)
3481 		  && supports_overflow_infinity (outer_type)))
3482 	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3483 	      || (vr0.type == VR_RANGE
3484 		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
3485 		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3486 		         size_int (TYPE_PRECISION (outer_type)))))))
3487 	{
3488 	  tree new_min, new_max;
3489 	  if (is_overflow_infinity (vr0.min))
3490 	    new_min = negative_overflow_infinity (outer_type);
3491 	  else
3492 	    new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
3493 				      0, false);
3494 	  if (is_overflow_infinity (vr0.max))
3495 	    new_max = positive_overflow_infinity (outer_type);
3496 	  else
3497 	    new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
3498 				      0, false);
3499 	  set_and_canonicalize_value_range (vr, vr0.type,
3500 					    new_min, new_max, NULL);
3501 	  return;
3502 	}
3503 
3504       set_value_range_to_varying (vr);
3505       return;
3506     }
3507   else if (code == ABS_EXPR)
3508     {
3509       tree min, max;
3510       int cmp;
3511 
3512       /* Pass through vr0 in the easy cases.  */
3513       if (TYPE_UNSIGNED (type)
3514 	  || value_range_nonnegative_p (&vr0))
3515 	{
3516 	  copy_value_range (vr, &vr0);
3517 	  return;
3518 	}
3519 
3520       /* For the remaining varying or symbolic ranges we can't do anything
3521 	 useful.  */
3522       if (vr0.type == VR_VARYING
3523 	  || symbolic_range_p (&vr0))
3524 	{
3525 	  set_value_range_to_varying (vr);
3526 	  return;
3527 	}
3528 
3529       /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3530          useful range.  */
3531       if (!TYPE_OVERFLOW_UNDEFINED (type)
3532 	  && ((vr0.type == VR_RANGE
3533 	       && vrp_val_is_min (vr0.min))
3534 	      || (vr0.type == VR_ANTI_RANGE
3535 		  && !vrp_val_is_min (vr0.min))))
3536 	{
3537 	  set_value_range_to_varying (vr);
3538 	  return;
3539 	}
3540 
3541       /* ABS_EXPR may flip the range around, if the original range
3542 	 included negative values.  */
3543       if (is_overflow_infinity (vr0.min))
3544 	min = positive_overflow_infinity (type);
3545       else if (!vrp_val_is_min (vr0.min))
3546 	min = fold_unary_to_constant (code, type, vr0.min);
3547       else if (!needs_overflow_infinity (type))
3548 	min = TYPE_MAX_VALUE (type);
3549       else if (supports_overflow_infinity (type))
3550 	min = positive_overflow_infinity (type);
3551       else
3552 	{
3553 	  set_value_range_to_varying (vr);
3554 	  return;
3555 	}
3556 
3557       if (is_overflow_infinity (vr0.max))
3558 	max = positive_overflow_infinity (type);
3559       else if (!vrp_val_is_min (vr0.max))
3560 	max = fold_unary_to_constant (code, type, vr0.max);
3561       else if (!needs_overflow_infinity (type))
3562 	max = TYPE_MAX_VALUE (type);
3563       else if (supports_overflow_infinity (type)
3564 	       /* We shouldn't generate [+INF, +INF] as set_value_range
3565 		  doesn't like this and ICEs.  */
3566 	       && !is_positive_overflow_infinity (min))
3567 	max = positive_overflow_infinity (type);
3568       else
3569 	{
3570 	  set_value_range_to_varying (vr);
3571 	  return;
3572 	}
3573 
3574       cmp = compare_values (min, max);
3575 
3576 	  /* If a VR_ANTI_RANGE contains zero, then we have
3577 	 ~[-INF, min(MIN, MAX)].  */
3578       if (vr0.type == VR_ANTI_RANGE)
3579 	{
3580 	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3581 	    {
3582 	      /* Take the lower of the two values.  */
3583 	      if (cmp != 1)
3584 		max = min;
3585 
3586 	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3587 	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3588 		 flag_wrapv is set and the original anti-range doesn't include
3589 	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
3590 	      if (TYPE_OVERFLOW_WRAPS (type))
3591 		{
3592 		  tree type_min_value = TYPE_MIN_VALUE (type);
3593 
3594 		  min = (vr0.min != type_min_value
3595 			 ? int_const_binop (PLUS_EXPR, type_min_value,
3596 					    build_int_cst (TREE_TYPE (type_min_value), 1))
3597 			 : type_min_value);
3598 		}
3599 	      else
3600 		{
3601 		  if (overflow_infinity_range_p (&vr0))
3602 		    min = negative_overflow_infinity (type);
3603 		  else
3604 		    min = TYPE_MIN_VALUE (type);
3605 		}
3606 	    }
3607 	  else
3608 	    {
3609 	      /* All else has failed, so create the range [0, INF], even for
3610 	         flag_wrapv since TYPE_MIN_VALUE is in the original
3611 	         anti-range.  */
3612 	      vr0.type = VR_RANGE;
3613 	      min = build_int_cst (type, 0);
3614 	      if (needs_overflow_infinity (type))
3615 		{
3616 		  if (supports_overflow_infinity (type))
3617 		    max = positive_overflow_infinity (type);
3618 		  else
3619 		    {
3620 		      set_value_range_to_varying (vr);
3621 		      return;
3622 		    }
3623 		}
3624 	      else
3625 		max = TYPE_MAX_VALUE (type);
3626 	    }
3627 	}
3628 
3629       /* If the range contains zero then we know that the minimum value in the
3630          range will be zero.  */
3631       else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3632 	{
3633 	  if (cmp == 1)
3634 	    max = min;
3635 	  min = build_int_cst (type, 0);
3636 	}
3637       else
3638 	{
3639           /* If the range was reversed, swap MIN and MAX.  */
3640 	  if (cmp == 1)
3641 	    std::swap (min, max);
3642 	}
3643 
3644       cmp = compare_values (min, max);
3645       if (cmp == -2 || cmp == 1)
3646 	{
3647 	  /* If the new range has its limits swapped around (MIN > MAX),
3648 	     then the operation caused one of them to wrap around, mark
3649 	     the new range VARYING.  */
3650 	  set_value_range_to_varying (vr);
3651 	}
3652       else
3653 	set_value_range (vr, vr0.type, min, max, NULL);
3654       return;
3655     }
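  /* Editorial worked example (not in the original source): for a signed
     operand range of [-7, 5], the folded bounds are 7 and 5; since the
     range contains zero and MIN > MAX, MAX becomes 7 and MIN becomes 0,
     giving [0, 7], which is the exact image of ABS_EXPR over [-7, 5].  */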
3656 
3657   /* For unhandled operations fall back to varying.  */
3658   set_value_range_to_varying (vr);
3659   return;
3660 }
3661 
3662 
3663 /* Extract range information from a unary expression CODE OP0 based on
3664    the range of its operand with resulting type TYPE.
3665    The resulting range is stored in *VR.  */
3666 
3667 static void
3668 extract_range_from_unary_expr (value_range *vr, enum tree_code code,
3669 			       tree type, tree op0)
3670 {
3671   value_range vr0 = VR_INITIALIZER;
3672 
3673   /* Get value ranges for the operand.  For constant operands, create
3674      a new value range with the operand to simplify processing.  */
3675   if (TREE_CODE (op0) == SSA_NAME)
3676     vr0 = *(get_value_range (op0));
3677   else if (is_gimple_min_invariant (op0))
3678     set_value_range_to_value (&vr0, op0, NULL);
3679   else
3680     set_value_range_to_varying (&vr0);
3681 
3682   extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3683 }
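/* Editorial usage sketch (not in the original source; the SSA name x_1 and
   its range are hypothetical):

     value_range vr = VR_INITIALIZER;
     extract_range_from_unary_expr (&vr, NEGATE_EXPR, TREE_TYPE (x_1), x_1);

   With x_1 recorded as [3, 7], the 0 - X rewrite in the worker yields
   vr = [-7, -3].  */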
3684 
3685 
3686 /* Extract range information from a conditional expression STMT based on
3687    the ranges of each of its operands and the expression code.  */
3688 
3689 static void
3690 extract_range_from_cond_expr (value_range *vr, gassign *stmt)
3691 {
3692   tree op0, op1;
3693   value_range vr0 = VR_INITIALIZER;
3694   value_range vr1 = VR_INITIALIZER;
3695 
3696   /* Get value ranges for each operand.  For constant operands, create
3697      a new value range with the operand to simplify processing.  */
3698   op0 = gimple_assign_rhs2 (stmt);
3699   if (TREE_CODE (op0) == SSA_NAME)
3700     vr0 = *(get_value_range (op0));
3701   else if (is_gimple_min_invariant (op0))
3702     set_value_range_to_value (&vr0, op0, NULL);
3703   else
3704     set_value_range_to_varying (&vr0);
3705 
3706   op1 = gimple_assign_rhs3 (stmt);
3707   if (TREE_CODE (op1) == SSA_NAME)
3708     vr1 = *(get_value_range (op1));
3709   else if (is_gimple_min_invariant (op1))
3710     set_value_range_to_value (&vr1, op1, NULL);
3711   else
3712     set_value_range_to_varying (&vr1);
3713 
3714   /* The resulting value range is the union of the operand ranges.  */
3715   copy_value_range (vr, &vr0);
3716   vrp_meet (vr, &vr1);
3717 }
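/* Editorial example (not in the original source; SSA names are
   illustrative only): for z_5 = b_1 ? x_2 : y_3 with x_2 in [0, 5] and
   y_3 in [10, 20], the meet of the two operand ranges gives the
   conservative union [0, 20] for z_5.  */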
3718 
3719 
3720 /* Extract range information from a comparison expression EXPR based
3721    on the range of its operand and the expression code.  */
3722 
3723 static void
3724 extract_range_from_comparison (value_range *vr, enum tree_code code,
3725 			       tree type, tree op0, tree op1)
3726 {
3727   bool sop = false;
3728   tree val;
3729 
3730   val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3731   						 NULL);
3732 
3733   /* A disadvantage of using a special infinity as an overflow
3734      representation is that we lose the ability to record overflow
3735      when we don't have an infinity.  So we have to ignore a result
3736      which relies on overflow.  */
3737 
3738   if (val && !is_overflow_infinity (val) && !sop)
3739     {
3740       /* Since this expression was found on the RHS of an assignment,
3741 	 its type may be different from _Bool.  Convert VAL to EXPR's
3742 	 type.  */
3743       val = fold_convert (type, val);
3744       if (is_gimple_min_invariant (val))
3745 	set_value_range_to_value (vr, val, vr->equiv);
3746       else
3747 	set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3748     }
3749   else
3750     /* The result of a comparison is always true or false.  */
3751     set_value_range_to_truthvalue (vr, type);
3752 }
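/* Editorial example (not in the original source; SSA names are
   illustrative only): for b_4 = x_1 < 10 with x_1 in [0, 5], the
   conditional evaluates to boolean_true_node, so *VR becomes the
   singleton [1, 1] after conversion to the LHS type; when the outcome
   cannot be decided, the range falls back to the truth-value range
   [0, 1].  */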
3753 
3754 /* Helper function for simplify_internal_call_using_ranges and
3755    extract_range_basic.  Return true if OP0 SUBCODE OP1 for
3756    SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
3757    always overflow.  Set *OVF to true if it is known to always
3758    overflow.  */
3759 
3760 static bool
3761 check_for_binary_op_overflow (enum tree_code subcode, tree type,
3762 			      tree op0, tree op1, bool *ovf)
3763 {
3764   value_range vr0 = VR_INITIALIZER;
3765   value_range vr1 = VR_INITIALIZER;
3766   if (TREE_CODE (op0) == SSA_NAME)
3767     vr0 = *get_value_range (op0);
3768   else if (TREE_CODE (op0) == INTEGER_CST)
3769     set_value_range_to_value (&vr0, op0, NULL);
3770   else
3771     set_value_range_to_varying (&vr0);
3772 
3773   if (TREE_CODE (op1) == SSA_NAME)
3774     vr1 = *get_value_range (op1);
3775   else if (TREE_CODE (op1) == INTEGER_CST)
3776     set_value_range_to_value (&vr1, op1, NULL);
3777   else
3778     set_value_range_to_varying (&vr1);
3779 
3780   if (!range_int_cst_p (&vr0)
3781       || TREE_OVERFLOW (vr0.min)
3782       || TREE_OVERFLOW (vr0.max))
3783     {
3784       vr0.min = vrp_val_min (TREE_TYPE (op0));
3785       vr0.max = vrp_val_max (TREE_TYPE (op0));
3786     }
3787   if (!range_int_cst_p (&vr1)
3788       || TREE_OVERFLOW (vr1.min)
3789       || TREE_OVERFLOW (vr1.max))
3790     {
3791       vr1.min = vrp_val_min (TREE_TYPE (op1));
3792       vr1.max = vrp_val_max (TREE_TYPE (op1));
3793     }
3794   *ovf = arith_overflowed_p (subcode, type, vr0.min,
3795 			     subcode == MINUS_EXPR ? vr1.max : vr1.min);
3796   if (arith_overflowed_p (subcode, type, vr0.max,
3797 			  subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
3798     return false;
3799   if (subcode == MULT_EXPR)
3800     {
3801       if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
3802 	  || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
3803 	return false;
3804     }
3805   if (*ovf)
3806     {
3807       /* So far we found that there is an overflow on the boundaries.
3808 		 That doesn't prove that there is an overflow for all values
3809 		 in between the boundaries.  For that, compute the widest_int
3810 		 range of the result and check whether it is disjoint from
3811 		 the range of TYPE.  */
3812       widest_int wmin, wmax;
3813       widest_int w[4];
3814       int i;
3815       w[0] = wi::to_widest (vr0.min);
3816       w[1] = wi::to_widest (vr0.max);
3817       w[2] = wi::to_widest (vr1.min);
3818       w[3] = wi::to_widest (vr1.max);
3819       for (i = 0; i < 4; i++)
3820 	{
3821 	  widest_int wt;
3822 	  switch (subcode)
3823 	    {
3824 	    case PLUS_EXPR:
3825 	      wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
3826 	      break;
3827 	    case MINUS_EXPR:
3828 	      wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
3829 	      break;
3830 	    case MULT_EXPR:
3831 	      wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
3832 	      break;
3833 	    default:
3834 	      gcc_unreachable ();
3835 	    }
3836 	  if (i == 0)
3837 	    {
3838 	      wmin = wt;
3839 	      wmax = wt;
3840 	    }
3841 	  else
3842 	    {
3843 	      wmin = wi::smin (wmin, wt);
3844 	      wmax = wi::smax (wmax, wt);
3845 	    }
3846 	}
3847       /* The result of op0 CODE op1 is known to be in range
3848 	 [wmin, wmax].  */
3849       widest_int wtmin = wi::to_widest (vrp_val_min (type));
3850       widest_int wtmax = wi::to_widest (vrp_val_max (type));
3851       /* If all values in [wmin, wmax] are smaller than
3852 	 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
3853 	 the arithmetic operation will always overflow.  */
3854       if (wi::lts_p (wmax, wtmin) || wi::gts_p (wmin, wtmax))
3855 	return true;
3856       return false;
3857     }
3858   return true;
3859 }
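/* Editorial worked example (not in the original source): for SUBCODE
   PLUS_EXPR with TYPE signed char, op0 in [100, 120] and op1 in
   [100, 120], both boundary sums overflow, so *OVF is set to true; the
   widest_int result range [200, 240] lies entirely above [-128, 127],
   so the function returns true, i.e. the addition is known to always
   overflow.  */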
3860 
3861 /* Try to derive a nonnegative or nonzero range out of STMT relying
3862    primarily on generic routines in fold in conjunction with range data.
3863    Store the result in *VR */
3864 
3865 static void
3866 extract_range_basic (value_range *vr, gimple *stmt)
3867 {
3868   bool sop = false;
3869   tree type = gimple_expr_type (stmt);
3870 
3871   if (is_gimple_call (stmt))
3872     {
3873       tree arg;
3874       int mini, maxi, zerov = 0, prec;
3875       enum tree_code subcode = ERROR_MARK;
3876       combined_fn cfn = gimple_call_combined_fn (stmt);
3877 
3878       switch (cfn)
3879 	{
3880 	case CFN_BUILT_IN_CONSTANT_P:
3881 	  /* If the call is __builtin_constant_p and the argument is a
3882 	     function parameter resolve it to false.  This avoids bogus
3883 	     array bound warnings.
3884 	     ???  We could do this as early as inlining is finished.  */
3885 	  arg = gimple_call_arg (stmt, 0);
3886 	  if (TREE_CODE (arg) == SSA_NAME
3887 	      && SSA_NAME_IS_DEFAULT_DEF (arg)
3888 	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3889 	    {
3890 	      set_value_range_to_null (vr, type);
3891 	      return;
3892 	    }
3893 	  break;
3894 	  /* Both __builtin_ffs* and __builtin_popcount return
3895 	     [0, prec].  */
3896 	CASE_CFN_FFS:
3897 	CASE_CFN_POPCOUNT:
3898 	  arg = gimple_call_arg (stmt, 0);
3899 	  prec = TYPE_PRECISION (TREE_TYPE (arg));
3900 	  mini = 0;
3901 	  maxi = prec;
3902 	  if (TREE_CODE (arg) == SSA_NAME)
3903 	    {
3904 	      value_range *vr0 = get_value_range (arg);
3905 	      /* If arg is non-zero, then ffs or popcount
3906 		 are non-zero.  */
3907 	      if (((vr0->type == VR_RANGE
3908 		    && range_includes_zero_p (vr0->min, vr0->max) == 0)
3909 		   || (vr0->type == VR_ANTI_RANGE
3910 		       && range_includes_zero_p (vr0->min, vr0->max) == 1))
3911 		  && !is_overflow_infinity (vr0->min)
3912 		  && !is_overflow_infinity (vr0->max))
3913 		mini = 1;
3914 	      /* If some high bits are known to be zero,
3915 		 we can decrease the maximum.  */
3916 	      if (vr0->type == VR_RANGE
3917 		  && TREE_CODE (vr0->max) == INTEGER_CST
3918 		  && !operand_less_p (vr0->min,
3919 				      build_zero_cst (TREE_TYPE (vr0->min)))
3920 		  && !is_overflow_infinity (vr0->max))
3921 		maxi = tree_floor_log2 (vr0->max) + 1;
3922 	    }
3923 	  goto bitop_builtin;
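	  /* Editorial example (not in the original source): for
	     __builtin_popcount (x_1) with a 32-bit argument recorded as
	     [0, 100], MINI stays 0 (the range includes zero) and MAXI
	     becomes tree_floor_log2 (100) + 1 = 7, so the call is given
	     the range [0, 7].  */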
3924 	  /* __builtin_parity* returns [0, 1].  */
3925 	CASE_CFN_PARITY:
3926 	  mini = 0;
3927 	  maxi = 1;
3928 	  goto bitop_builtin;
3929 	  /* __builtin_c[lt]z* return [0, prec-1], except for
3930 	     when the argument is 0, but that is undefined behavior.
3931 	     On many targets where the CLZ RTL or optab value is defined
3932 	     for 0 the value is prec, so include that in the range
3933 	     by default.  */
3934 	CASE_CFN_CLZ:
3935 	  arg = gimple_call_arg (stmt, 0);
3936 	  prec = TYPE_PRECISION (TREE_TYPE (arg));
3937 	  mini = 0;
3938 	  maxi = prec;
3939 	  if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3940 	      != CODE_FOR_nothing
3941 	      && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3942 					    zerov)
3943 	      /* Handle only the single common value.  */
3944 	      && zerov != prec)
3945 	    /* Magic value to give up, unless vr0 proves
3946 	       arg is non-zero.  */
3947 	    mini = -2;
3948 	  if (TREE_CODE (arg) == SSA_NAME)
3949 	    {
3950 	      value_range *vr0 = get_value_range (arg);
3951 	      /* From clz of VR_RANGE minimum we can compute
3952 		 result maximum.  */
3953 	      if (vr0->type == VR_RANGE
3954 		  && TREE_CODE (vr0->min) == INTEGER_CST
3955 		  && !is_overflow_infinity (vr0->min))
3956 		{
3957 		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
3958 		  if (maxi != prec)
3959 		    mini = 0;
3960 		}
3961 	      else if (vr0->type == VR_ANTI_RANGE
3962 		       && integer_zerop (vr0->min)
3963 		       && !is_overflow_infinity (vr0->min))
3964 		{
3965 		  maxi = prec - 1;
3966 		  mini = 0;
3967 		}
3968 	      if (mini == -2)
3969 		break;
3970 	      /* From clz of VR_RANGE maximum we can compute
3971 		 result minimum.  */
3972 	      if (vr0->type == VR_RANGE
3973 		  && TREE_CODE (vr0->max) == INTEGER_CST
3974 		  && !is_overflow_infinity (vr0->max))
3975 		{
3976 		  mini = prec - 1 - tree_floor_log2 (vr0->max);
3977 		  if (mini == prec)
3978 		    break;
3979 		}
3980 	    }
3981 	  if (mini == -2)
3982 	    break;
3983 	  goto bitop_builtin;
3984 	  /* __builtin_ctz* return [0, prec-1], except for
3985 	     when the argument is 0, but that is undefined behavior.
3986 	     If there is a ctz optab for this mode and
3987 	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3988 	     otherwise just assume 0 won't be seen.  */
3989 	CASE_CFN_CTZ:
3990 	  arg = gimple_call_arg (stmt, 0);
3991 	  prec = TYPE_PRECISION (TREE_TYPE (arg));
3992 	  mini = 0;
3993 	  maxi = prec - 1;
3994 	  if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3995 	      != CODE_FOR_nothing
3996 	      && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3997 					    zerov))
3998 	    {
3999 	      /* Handle only the two common values.  */
4000 	      if (zerov == -1)
4001 		mini = -1;
4002 	      else if (zerov == prec)
4003 		maxi = prec;
4004 	      else
4005 		/* Magic value to give up, unless vr0 proves
4006 		   arg is non-zero.  */
4007 		mini = -2;
4008 	    }
4009 	  if (TREE_CODE (arg) == SSA_NAME)
4010 	    {
4011 	      value_range *vr0 = get_value_range (arg);
4012 	      /* If arg is non-zero, then use [0, prec - 1].  */
4013 	      if (((vr0->type == VR_RANGE
4014 		    && integer_nonzerop (vr0->min))
4015 		   || (vr0->type == VR_ANTI_RANGE
4016 		       && integer_zerop (vr0->min)))
4017 		  && !is_overflow_infinity (vr0->min))
4018 		{
4019 		  mini = 0;
4020 		  maxi = prec - 1;
4021 		}
4022 	      /* If some high bits are known to be zero,
4023 		 we can decrease the result maximum.  */
4024 	      if (vr0->type == VR_RANGE
4025 		  && TREE_CODE (vr0->max) == INTEGER_CST
4026 		  && !is_overflow_infinity (vr0->max))
4027 		{
4028 		  maxi = tree_floor_log2 (vr0->max);
4029 		  /* For vr0 [0, 0] give up.  */
4030 		  if (maxi == -1)
4031 		    break;
4032 		}
4033 	    }
4034 	  if (mini == -2)
4035 	    break;
4036 	  goto bitop_builtin;
4037 	  /* __builtin_clrsb* returns [0, prec-1].  */
4038 	CASE_CFN_CLRSB:
4039 	  arg = gimple_call_arg (stmt, 0);
4040 	  prec = TYPE_PRECISION (TREE_TYPE (arg));
4041 	  mini = 0;
4042 	  maxi = prec - 1;
4043 	  goto bitop_builtin;
4044 	bitop_builtin:
4045 	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
4046 			   build_int_cst (type, maxi), NULL);
4047 	  return;
4048 	case CFN_UBSAN_CHECK_ADD:
4049 	  subcode = PLUS_EXPR;
4050 	  break;
4051 	case CFN_UBSAN_CHECK_SUB:
4052 	  subcode = MINUS_EXPR;
4053 	  break;
4054 	case CFN_UBSAN_CHECK_MUL:
4055 	  subcode = MULT_EXPR;
4056 	  break;
4057 	case CFN_GOACC_DIM_SIZE:
4058 	case CFN_GOACC_DIM_POS:
4059 	  /* Optimizing these two internal functions helps the loop
4060 	     optimizer eliminate outer comparisons.  Size is [1,N]
4061 	     and pos is [0,N-1].  */
4062 	  {
4063 	    bool is_pos = cfn == CFN_GOACC_DIM_POS;
4064 	    int axis = get_oacc_ifn_dim_arg (stmt);
4065 	    int size = get_oacc_fn_dim_size (current_function_decl, axis);
4066 
4067 	    if (!size)
4068 	      /* If it's dynamic, the backend might know a hardware
4069 		 limitation.  */
4070 	      size = targetm.goacc.dim_limit (axis);
4071 
4072 	    tree type = TREE_TYPE (gimple_call_lhs (stmt));
4073 	    set_value_range (vr, VR_RANGE,
4074 			     build_int_cst (type, is_pos ? 0 : 1),
4075 			     size ? build_int_cst (type, size - is_pos)
4076 			          : vrp_val_max (type), NULL);
4077 	  }
4078 	  return;
4079 	default:
4080 	  break;
4081 	}
4082       if (subcode != ERROR_MARK)
4083 	{
4084 	  bool saved_flag_wrapv = flag_wrapv;
4085 	  /* Pretend the arithmetic is wrapping.  If there is
4086 	     any overflow, we'll complain, but will actually do
4087 	     a wrapping operation.  */
4088 	  flag_wrapv = 1;
4089 	  extract_range_from_binary_expr (vr, subcode, type,
4090 					  gimple_call_arg (stmt, 0),
4091 					  gimple_call_arg (stmt, 1));
4092 	  flag_wrapv = saved_flag_wrapv;
4093 
4094 	  /* If for both arguments vrp_valueize returned non-NULL,
4095 	     this should have been already folded and if not, it
4096 	     wasn't folded because of overflow.  Avoid removing the
4097 	     UBSAN_CHECK_* calls in that case.  */
4098 	  if (vr->type == VR_RANGE
4099 	      && (vr->min == vr->max
4100 		  || operand_equal_p (vr->min, vr->max, 0)))
4101 	    set_value_range_to_varying (vr);
4102 	  return;
4103 	}
4104     }
4105   /* Handle extraction of the two results (result of arithmetics and
4106      a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
4107      internal function.  */
4108   else if (is_gimple_assign (stmt)
4109 	   && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
4110 	       || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
4111 	   && INTEGRAL_TYPE_P (type))
4112     {
4113       enum tree_code code = gimple_assign_rhs_code (stmt);
4114       tree op = gimple_assign_rhs1 (stmt);
4115       if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
4116 	{
4117 	  gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
4118 	  if (is_gimple_call (g) && gimple_call_internal_p (g))
4119 	    {
4120 	      enum tree_code subcode = ERROR_MARK;
4121 	      switch (gimple_call_internal_fn (g))
4122 		{
4123 		case IFN_ADD_OVERFLOW:
4124 		  subcode = PLUS_EXPR;
4125 		  break;
4126 		case IFN_SUB_OVERFLOW:
4127 		  subcode = MINUS_EXPR;
4128 		  break;
4129 		case IFN_MUL_OVERFLOW:
4130 		  subcode = MULT_EXPR;
4131 		  break;
4132 		default:
4133 		  break;
4134 		}
4135 	      if (subcode != ERROR_MARK)
4136 		{
4137 		  tree op0 = gimple_call_arg (g, 0);
4138 		  tree op1 = gimple_call_arg (g, 1);
4139 		  if (code == IMAGPART_EXPR)
4140 		    {
4141 		      bool ovf = false;
4142 		      if (check_for_binary_op_overflow (subcode, type,
4143 							op0, op1, &ovf))
4144 			set_value_range_to_value (vr,
4145 						  build_int_cst (type, ovf),
4146 						  NULL);
4147 		      else
4148 			set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
4149 					 build_int_cst (type, 1), NULL);
4150 		    }
4151 		  else if (types_compatible_p (type, TREE_TYPE (op0))
4152 			   && types_compatible_p (type, TREE_TYPE (op1)))
4153 		    {
4154 		      bool saved_flag_wrapv = flag_wrapv;
4155 		      /* Pretend the arithmetic is wrapping.  If there is
4156 			 any overflow, IMAGPART_EXPR will be set.  */
4157 		      flag_wrapv = 1;
4158 		      extract_range_from_binary_expr (vr, subcode, type,
4159 						      op0, op1);
4160 		      flag_wrapv = saved_flag_wrapv;
4161 		    }
4162 		  else
4163 		    {
4164 		      value_range vr0 = VR_INITIALIZER;
4165 		      value_range vr1 = VR_INITIALIZER;
4166 		      bool saved_flag_wrapv = flag_wrapv;
4167 		      /* Pretend the arithmetic is wrapping.  If there is
4168 			 any overflow, IMAGPART_EXPR will be set.  */
4169 		      flag_wrapv = 1;
4170 		      extract_range_from_unary_expr (&vr0, NOP_EXPR,
4171 						     type, op0);
4172 		      extract_range_from_unary_expr (&vr1, NOP_EXPR,
4173 						     type, op1);
4174 		      extract_range_from_binary_expr_1 (vr, subcode, type,
4175 							&vr0, &vr1);
4176 		      flag_wrapv = saved_flag_wrapv;
4177 		    }
4178 		  return;
4179 		}
4180 	    }
4181 	}
4182     }
4183   if (INTEGRAL_TYPE_P (type)
4184       && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
4185     set_value_range_to_nonnegative (vr, type,
4186 				    sop || stmt_overflow_infinity (stmt));
4187   else if (vrp_stmt_computes_nonzero (stmt, &sop)
4188 	   && !sop)
4189     set_value_range_to_nonnull (vr, type);
4190   else
4191     set_value_range_to_varying (vr);
4192 }
4193 
4194 
4195 /* Try to compute a useful range out of assignment STMT and store it
4196    in *VR.  */
4197 
4198 static void
4199 extract_range_from_assignment (value_range *vr, gassign *stmt)
4200 {
4201   enum tree_code code = gimple_assign_rhs_code (stmt);
4202 
4203   if (code == ASSERT_EXPR)
4204     extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
4205   else if (code == SSA_NAME)
4206     extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
4207   else if (TREE_CODE_CLASS (code) == tcc_binary)
4208     extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
4209 				    gimple_expr_type (stmt),
4210 				    gimple_assign_rhs1 (stmt),
4211 				    gimple_assign_rhs2 (stmt));
4212   else if (TREE_CODE_CLASS (code) == tcc_unary)
4213     extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
4214 				   gimple_expr_type (stmt),
4215 				   gimple_assign_rhs1 (stmt));
4216   else if (code == COND_EXPR)
4217     extract_range_from_cond_expr (vr, stmt);
4218   else if (TREE_CODE_CLASS (code) == tcc_comparison)
4219     extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
4220 				   gimple_expr_type (stmt),
4221 				   gimple_assign_rhs1 (stmt),
4222 				   gimple_assign_rhs2 (stmt));
4223   else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
4224 	   && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
4225     set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
4226   else
4227     set_value_range_to_varying (vr);
4228 
4229   if (vr->type == VR_VARYING)
4230     extract_range_basic (vr, stmt);
4231 }
4232 
4233 /* Given a range VR, a LOOP and a variable VAR, determine whether it
4234    would be profitable to adjust VR using scalar evolution information
4235    for VAR.  If so, update VR with the new limits.  */
4236 
4237 static void
4238 adjust_range_with_scev (value_range *vr, struct loop *loop,
4239 			gimple *stmt, tree var)
4240 {
4241   tree init, step, chrec, tmin, tmax, min, max, type, tem;
4242   enum ev_direction dir;
4243 
4244   /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
4245      better opportunities than a regular range, but I'm not sure.  */
4246   if (vr->type == VR_ANTI_RANGE)
4247     return;
4248 
4249   chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
4250 
4251   /* Like in PR19590, scev can return a constant function.  */
4252   if (is_gimple_min_invariant (chrec))
4253     {
4254       set_value_range_to_value (vr, chrec, vr->equiv);
4255       return;
4256     }
4257 
4258   if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
4259     return;
4260 
4261   init = initial_condition_in_loop_num (chrec, loop->num);
4262   tem = op_with_constant_singleton_value_range (init);
4263   if (tem)
4264     init = tem;
4265   step = evolution_part_in_loop_num (chrec, loop->num);
4266   tem = op_with_constant_singleton_value_range (step);
4267   if (tem)
4268     step = tem;
4269 
4270   /* If STEP is symbolic, we can't know whether INIT will be the
4271      minimum or maximum value in the range.  Also, unless INIT is
4272      a simple expression, compare_values and possibly other functions
4273      in tree-vrp won't be able to handle it.  */
4274   if (step == NULL_TREE
4275       || !is_gimple_min_invariant (step)
4276       || !valid_value_p (init))
4277     return;
4278 
4279   dir = scev_direction (chrec);
4280   if (/* Do not adjust ranges if we do not know whether the iv increases
4281 	 or decreases,  ... */
4282       dir == EV_DIR_UNKNOWN
4283       /* ... or if it may wrap.  */
4284       || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
4285 				true))
4286     return;
4287 
4288   /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
4289      negative_overflow_infinity and positive_overflow_infinity,
4290      because we have concluded that the loop probably does not
4291      wrap.  */
4292 
4293   type = TREE_TYPE (var);
4294   if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
4295     tmin = lower_bound_in_type (type, type);
4296   else
4297     tmin = TYPE_MIN_VALUE (type);
4298   if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
4299     tmax = upper_bound_in_type (type, type);
4300   else
4301     tmax = TYPE_MAX_VALUE (type);
4302 
4303   /* Try to use estimated number of iterations for the loop to constrain the
4304      final value in the evolution.  */
4305   if (TREE_CODE (step) == INTEGER_CST
4306       && is_gimple_val (init)
4307       && (TREE_CODE (init) != SSA_NAME
4308 	  || get_value_range (init)->type == VR_RANGE))
4309     {
4310       widest_int nit;
4311 
4312       /* We are only entering here for loop header PHI nodes, so using
4313 	 the number of latch executions is the correct thing to use.  */
4314       if (max_loop_iterations (loop, &nit))
4315 	{
4316 	  value_range maxvr = VR_INITIALIZER;
4317 	  signop sgn = TYPE_SIGN (TREE_TYPE (step));
4318 	  bool overflow;
4319 
4320 	  widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
4321 				     &overflow);
4322 	  /* If the multiplication overflowed we can't do a meaningful
4323 	     adjustment.  Likewise if the result doesn't fit in the type
4324 	     of the induction variable.  For a signed type we have to
4325 	     check whether the result has the expected signedness which
4326 	     is that of the step as number of iterations is unsigned.  */
4327 	  if (!overflow
4328 	      && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
4329 	      && (sgn == UNSIGNED
4330 		  || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
4331 	    {
4332 	      tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
4333 	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
4334 					      TREE_TYPE (init), init, tem);
4335 	      /* Likewise if the addition did.  */
4336 	      if (maxvr.type == VR_RANGE)
4337 		{
4338 		  value_range initvr = VR_INITIALIZER;
4339 
4340 		  if (TREE_CODE (init) == SSA_NAME)
4341 		    initvr = *(get_value_range (init));
4342 		  else if (is_gimple_min_invariant (init))
4343 		    set_value_range_to_value (&initvr, init, NULL);
4344 		  else
4345 		    return;
4346 
4347 		  /* Check if init + nit * step overflows.  Though we checked
4348 		     scev {init, step}_loop doesn't wrap, it is not enough
4349 		     because the loop may exit immediately.  Overflow could
4350 		     happen in the plus expression in this case.  */
4351 		  if ((dir == EV_DIR_DECREASES
4352 		       && (is_negative_overflow_infinity (maxvr.min)
4353 			   || compare_values (maxvr.min, initvr.min) != -1))
4354 		      || (dir == EV_DIR_GROWS
4355 			  && (is_positive_overflow_infinity (maxvr.max)
4356 			      || compare_values (maxvr.max, initvr.max) != 1)))
4357 		    return;
4358 
4359 		  tmin = maxvr.min;
4360 		  tmax = maxvr.max;
4361 		}
4362 	    }
4363 	}
4364     }
4365 
4366   if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4367     {
4368       min = tmin;
4369       max = tmax;
4370 
4371       /* For VARYING or UNDEFINED ranges, just about anything we get
4372 	 from scalar evolutions should be better.  */
4373 
4374       if (dir == EV_DIR_DECREASES)
4375 	max = init;
4376       else
4377 	min = init;
4378     }
4379   else if (vr->type == VR_RANGE)
4380     {
4381       min = vr->min;
4382       max = vr->max;
4383 
4384       if (dir == EV_DIR_DECREASES)
4385 	{
4386 	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
4387 	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
4388 	  if (compare_values (init, max) == -1)
4389 	    max = init;
4390 
4391 	  /* According to the loop information, the variable does not
4392 	     overflow.  If we think it does, probably because of an
4393 	     overflow due to arithmetic on a different INF value,
4394 	     reset now.  */
4395 	  if (is_negative_overflow_infinity (min)
4396 	      || compare_values (min, tmin) == -1)
4397 	    min = tmin;
4398 
4399 	}
4400       else
4401 	{
4402 	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
4403 	  if (compare_values (init, min) == 1)
4404 	    min = init;
4405 
4406 	  if (is_positive_overflow_infinity (max)
4407 	      || compare_values (tmax, max) == -1)
4408 	    max = tmax;
4409 	}
4410     }
4411   else
4412     return;
4413 
4414   /* If we just created an invalid range with the minimum
4415      greater than the maximum, we fail conservatively.
4416      This should happen only in unreachable
4417      parts of code, or for invalid programs.  */
4418   if (compare_values (min, max) == 1
4419       || (is_negative_overflow_infinity (min)
4420 	  && is_positive_overflow_infinity (max)))
4421     return;
4422 
4423   /* Even for valid range info, sometimes overflow flag will leak in.
4424      As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
4425      drop them except for +-overflow_infinity which still need special
4426      handling in vrp pass.  */
4427   if (TREE_OVERFLOW_P (min)
4428       && ! is_negative_overflow_infinity (min))
4429     min = drop_tree_overflow (min);
4430   if (TREE_OVERFLOW_P (max)
4431       && ! is_positive_overflow_infinity (max))
4432     max = drop_tree_overflow (max);
4433 
4434   set_value_range (vr, VR_RANGE, min, max, vr->equiv);
4435 }
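/* Editorial worked example (not in the original source): for a loop
   header PHI i_1 with evolution {0, +, 1}_loop and a maximum of 9 latch
   iterations, STEP * NIT is 9, the retried addition gives MAXVR = [9, 9],
   and a previously VARYING range for i_1 is narrowed to [0, 9] since the
   induction variable grows from INIT = 0.  */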
4436 
4437 
4438 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4439 
4440    - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4441      all the values in the ranges.
4442 
4443    - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4444 
4445    - Return NULL_TREE if it is not always possible to determine the
4446      value of the comparison.
4447 
4448    Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4449    overflow infinity was used in the test.  */
4450 
4451 
4452 static tree
4453 compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
4454 		bool *strict_overflow_p)
4455 {
4456   /* VARYING or UNDEFINED ranges cannot be compared.  */
4457   if (vr0->type == VR_VARYING
4458       || vr0->type == VR_UNDEFINED
4459       || vr1->type == VR_VARYING
4460       || vr1->type == VR_UNDEFINED)
4461     return NULL_TREE;
4462 
4463   /* Anti-ranges need to be handled separately.  */
4464   if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4465     {
4466       /* If both are anti-ranges, then we cannot compute any
4467 	 comparison.  */
4468       if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4469 	return NULL_TREE;
4470 
4471       /* These comparisons are never statically computable.  */
4472       if (comp == GT_EXPR
4473 	  || comp == GE_EXPR
4474 	  || comp == LT_EXPR
4475 	  || comp == LE_EXPR)
4476 	return NULL_TREE;
4477 
4478       /* Equality can be computed only between a range and an
4479 	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
4480       if (vr0->type == VR_RANGE)
4481 	{
4482 	  /* To simplify processing, make VR0 the anti-range.  */
4483 	  value_range *tmp = vr0;
4484 	  vr0 = vr1;
4485 	  vr1 = tmp;
4486 	}
4487 
4488       gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4489 
4490       if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4491 	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4492 	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4493 
4494       return NULL_TREE;
4495     }
4496 
4497   if (!usable_range_p (vr0, strict_overflow_p)
4498       || !usable_range_p (vr1, strict_overflow_p))
4499     return NULL_TREE;
4500 
4501   /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
4502      operands around and change the comparison code.  */
4503   if (comp == GT_EXPR || comp == GE_EXPR)
4504     {
4505       comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4506       std::swap (vr0, vr1);
4507     }
4508 
4509   if (comp == EQ_EXPR)
4510     {
4511       /* Equality may only be computed if both ranges represent
4512 	 exactly one value.  */
4513       if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4514 	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4515 	{
4516 	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4517 					      strict_overflow_p);
4518 	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4519 					      strict_overflow_p);
4520 	  if (cmp_min == 0 && cmp_max == 0)
4521 	    return boolean_true_node;
4522 	  else if (cmp_min != -2 && cmp_max != -2)
4523 	    return boolean_false_node;
4524 	}
4525       /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] are disjoint, then V0 != V1.  */
4526       else if (compare_values_warnv (vr0->min, vr1->max,
4527 				     strict_overflow_p) == 1
4528 	       || compare_values_warnv (vr1->min, vr0->max,
4529 					strict_overflow_p) == 1)
4530 	return boolean_false_node;
4531 
4532       return NULL_TREE;
4533     }
4534   else if (comp == NE_EXPR)
4535     {
4536       int cmp1, cmp2;
4537 
4538       /* If VR0 is completely to the left or completely to the right
4539 	 of VR1, they are always different.  Notice that we need to
4540 	 make sure that both comparisons yield similar results to
4541 	 avoid comparing values that cannot be compared at
4542 	 compile-time.  */
4543       cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4544       cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4545       if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4546 	return boolean_true_node;
4547 
4548       /* If VR0 and VR1 represent a single value and are identical,
4549 	 return false.  */
4550       else if (compare_values_warnv (vr0->min, vr0->max,
4551 				     strict_overflow_p) == 0
4552 	       && compare_values_warnv (vr1->min, vr1->max,
4553 					strict_overflow_p) == 0
4554 	       && compare_values_warnv (vr0->min, vr1->min,
4555 					strict_overflow_p) == 0
4556 	       && compare_values_warnv (vr0->max, vr1->max,
4557 					strict_overflow_p) == 0)
4558 	return boolean_false_node;
4559 
4560       /* Otherwise, they may or may not be different.  */
4561       else
4562 	return NULL_TREE;
4563     }
4564   else if (comp == LT_EXPR || comp == LE_EXPR)
4565     {
4566       int tst;
4567 
4568       /* If VR0 is to the left of VR1, return true.  */
4569       tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4570       if ((comp == LT_EXPR && tst == -1)
4571 	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4572 	{
4573 	  if (overflow_infinity_range_p (vr0)
4574 	      || overflow_infinity_range_p (vr1))
4575 	    *strict_overflow_p = true;
4576 	  return boolean_true_node;
4577 	}
4578 
4579       /* If VR0 is to the right of VR1, return false.  */
4580       tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4581       if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4582 	  || (comp == LE_EXPR && tst == 1))
4583 	{
4584 	  if (overflow_infinity_range_p (vr0)
4585 	      || overflow_infinity_range_p (vr1))
4586 	    *strict_overflow_p = true;
4587 	  return boolean_false_node;
4588 	}
4589 
4590       /* Otherwise, we don't know.  */
4591       return NULL_TREE;
4592     }
4593 
4594   gcc_unreachable ();
4595 }
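/* Editorial example (not in the original source): comparing
   [1, 5] LT_EXPR [10, 20], the maximum of VR0 is below the minimum of
   VR1, so the function returns boolean_true_node; for overlapping ranges
   such as [1, 15] and [10, 20] it returns NULL_TREE because the outcome
   depends on the actual values.  */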
4596 
4597 
4598 /* Given a value range VR, a value VAL and a comparison code COMP, return
4599    BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4600    values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
4601    always returns false.  Return NULL_TREE if it is not always
4602    possible to determine the value of the comparison.  Also set
4603    *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4604    infinity was used in the test.  */
4605 
4606 static tree
4607 compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
4608 			  bool *strict_overflow_p)
4609 {
4610   if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4611     return NULL_TREE;
4612 
4613   /* Anti-ranges need to be handled separately.  */
4614   if (vr->type == VR_ANTI_RANGE)
4615     {
4616       /* For anti-ranges, the only predicates that we can compute at
4617 	 compile time are equality and inequality.  */
4618       if (comp == GT_EXPR
4619 	  || comp == GE_EXPR
4620 	  || comp == LT_EXPR
4621 	  || comp == LE_EXPR)
4622 	return NULL_TREE;
4623 
4624       /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
4625       if (value_inside_range (val, vr->min, vr->max) == 1)
4626 	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4627 
4628       return NULL_TREE;
4629     }
4630 
4631   if (!usable_range_p (vr, strict_overflow_p))
4632     return NULL_TREE;
4633 
4634   if (comp == EQ_EXPR)
4635     {
4636       /* EQ_EXPR may only be computed if VR represents exactly
4637 	 one value.  */
4638       if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4639 	{
4640 	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4641 	  if (cmp == 0)
4642 	    return boolean_true_node;
4643 	  else if (cmp == -1 || cmp == 1 || cmp == 2)
4644 	    return boolean_false_node;
4645 	}
4646       else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4647 	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4648 	return boolean_false_node;
4649 
4650       return NULL_TREE;
4651     }
4652   else if (comp == NE_EXPR)
4653     {
4654       /* If VAL is not inside VR, then they are always different.  */
4655       if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4656 	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4657 	return boolean_true_node;
4658 
4659       /* If VR represents exactly one value equal to VAL, then return
4660 	 false.  */
4661       if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4662 	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4663 	return boolean_false_node;
4664 
4665       /* Otherwise, they may or may not be different.  */
4666       return NULL_TREE;
4667     }
4668   else if (comp == LT_EXPR || comp == LE_EXPR)
4669     {
4670       int tst;
4671 
4672       /* If VR is to the left of VAL, return true.  */
4673       tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4674       if ((comp == LT_EXPR && tst == -1)
4675 	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4676 	{
4677 	  if (overflow_infinity_range_p (vr))
4678 	    *strict_overflow_p = true;
4679 	  return boolean_true_node;
4680 	}
4681 
4682       /* If VR is to the right of VAL, return false.  */
4683       tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4684       if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4685 	  || (comp == LE_EXPR && tst == 1))
4686 	{
4687 	  if (overflow_infinity_range_p (vr))
4688 	    *strict_overflow_p = true;
4689 	  return boolean_false_node;
4690 	}
4691 
4692       /* Otherwise, we don't know.  */
4693       return NULL_TREE;
4694     }
4695   else if (comp == GT_EXPR || comp == GE_EXPR)
4696     {
4697       int tst;
4698 
4699       /* If VR is to the right of VAL, return true.  */
4700       tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4701       if ((comp == GT_EXPR && tst == 1)
4702 	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4703 	{
4704 	  if (overflow_infinity_range_p (vr))
4705 	    *strict_overflow_p = true;
4706 	  return boolean_true_node;
4707 	}
4708 
4709       /* If VR is to the left of VAL, return false.  */
4710       tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4711       if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4712 	  || (comp == GE_EXPR && tst == -1))
4713 	{
4714 	  if (overflow_infinity_range_p (vr))
4715 	    *strict_overflow_p = true;
4716 	  return boolean_false_node;
4717 	}
4718 
4719       /* Otherwise, we don't know.  */
4720       return NULL_TREE;
4721     }
4722 
4723   gcc_unreachable ();
4724 }
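/* Editorial example (not in the original source): for VR = [3, 7] and
   COMP = GT_EXPR with VAL = 10, the second test sees VR->MAX = 7 below
   VAL and returns boolean_false_node; with VAL = 2 the first test sees
   VR->MIN = 3 above VAL and returns boolean_true_node.  */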
4725 
4726 
4727 /* Debugging dumps.  */
4728 
4729 void dump_value_range (FILE *, value_range *);
4730 void debug_value_range (value_range *);
4731 void dump_all_value_ranges (FILE *);
4732 void debug_all_value_ranges (void);
4733 void dump_vr_equiv (FILE *, bitmap);
4734 void debug_vr_equiv (bitmap);
4735 
4736 
4737 /* Dump value range VR to FILE.  */
4738 
4739 void
4740 dump_value_range (FILE *file, value_range *vr)
4741 {
4742   if (vr == NULL)
4743     fprintf (file, "[]");
4744   else if (vr->type == VR_UNDEFINED)
4745     fprintf (file, "UNDEFINED");
4746   else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4747     {
4748       tree type = TREE_TYPE (vr->min);
4749 
4750       fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4751 
4752       if (is_negative_overflow_infinity (vr->min))
4753 	fprintf (file, "-INF(OVF)");
4754       else if (INTEGRAL_TYPE_P (type)
4755 	       && !TYPE_UNSIGNED (type)
4756 	       && vrp_val_is_min (vr->min))
4757 	fprintf (file, "-INF");
4758       else
4759 	print_generic_expr (file, vr->min, 0);
4760 
4761       fprintf (file, ", ");
4762 
4763       if (is_positive_overflow_infinity (vr->max))
4764 	fprintf (file, "+INF(OVF)");
4765       else if (INTEGRAL_TYPE_P (type)
4766 	       && vrp_val_is_max (vr->max))
4767 	fprintf (file, "+INF");
4768       else
4769 	print_generic_expr (file, vr->max, 0);
4770 
4771       fprintf (file, "]");
4772 
4773       if (vr->equiv)
4774 	{
4775 	  bitmap_iterator bi;
4776 	  unsigned i, c = 0;
4777 
4778 	  fprintf (file, "  EQUIVALENCES: { ");
4779 
4780 	  EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4781 	    {
4782 	      print_generic_expr (file, ssa_name (i), 0);
4783 	      fprintf (file, " ");
4784 	      c++;
4785 	    }
4786 
4787 	  fprintf (file, "} (%u elements)", c);
4788 	}
4789     }
4790   else if (vr->type == VR_VARYING)
4791     fprintf (file, "VARYING");
4792   else
4793     fprintf (file, "INVALID RANGE");
4794 }
4795 
4796 
4797 /* Dump value range VR to stderr.  */
4798 
4799 DEBUG_FUNCTION void
4800 debug_value_range (value_range *vr)
4801 {
4802   dump_value_range (stderr, vr);
4803   fprintf (stderr, "\n");
4804 }
4805 
4806 
4807 /* Dump value ranges of all SSA_NAMEs to FILE.  */
4808 
4809 void
4810 dump_all_value_ranges (FILE *file)
4811 {
4812   size_t i;
4813 
4814   for (i = 0; i < num_vr_values; i++)
4815     {
4816       if (vr_value[i])
4817 	{
4818 	  print_generic_expr (file, ssa_name (i), 0);
4819 	  fprintf (file, ": ");
4820 	  dump_value_range (file, vr_value[i]);
4821 	  fprintf (file, "\n");
4822 	}
4823     }
4824 
4825   fprintf (file, "\n");
4826 }
4827 
4828 
4829 /* Dump all value ranges to stderr.  */
4830 
4831 DEBUG_FUNCTION void
4832 debug_all_value_ranges (void)
4833 {
4834   dump_all_value_ranges (stderr);
4835 }
4836 
4837 
4838 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4839    create a new SSA name N and return the assertion assignment
4840    'N = ASSERT_EXPR <V, V OP W>'.  */
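/* For instance (an illustrative sketch; x_3 and x_4 are hypothetical SSA
   names, not taken from these sources): for the condition of
   'if (x_3 > 10)' this produces the assignment

     x_4 = ASSERT_EXPR <x_3, x_3 > 10>

   and the old name x_3 and the new name x_4 are recorded so that uses of
   x_3 dominated by the assertion can later be rewritten to x_4 when the
   SSA web is updated.  */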
4841 
4842 static gimple *
4843 build_assert_expr_for (tree cond, tree v)
4844 {
4845   tree a;
4846   gassign *assertion;
4847 
4848   gcc_assert (TREE_CODE (v) == SSA_NAME
4849 	      && COMPARISON_CLASS_P (cond));
4850 
4851   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4852   assertion = gimple_build_assign (NULL_TREE, a);
4853 
4854   /* The new ASSERT_EXPR creates a new SSA name that replaces the
4855      operand of the ASSERT_EXPR.  Register the new name and the old one
4856      in the replacement table so that we can fix the SSA web
4857      after adding all the ASSERT_EXPRs.  */
4858   create_new_def_for (v, assertion, NULL);
4859 
4860   return assertion;
4861 }
4862 
4863 
4864 /* Return true if STMT is a predicate expression involving floating
4865    point values.  */
4866 
4867 static inline bool
4868 fp_predicate (gimple *stmt)
4869 {
4870   GIMPLE_CHECK (stmt, GIMPLE_COND);
4871 
4872   return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4873 }
4874 
4875 /* If the range of values taken by OP can be inferred after STMT executes,
4876    return the comparison code (COMP_CODE_P) and value (VAL_P) that
4877    describe the inferred range.  Return true if a range could be
4878    inferred.  */
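/* For instance (illustrative names): for a dereference such as '*p_1 = 0'
   we can typically infer that p_1 is non-NULL (when NULL dereferences can
   be assumed to trap), which is returned as the predicate p_1 != 0
   (COMP_CODE_P = NE_EXPR, VAL_P = 0).  */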
4879 
4880 static bool
4881 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
4882 {
4883   *val_p = NULL_TREE;
4884   *comp_code_p = ERROR_MARK;
4885 
4886   /* Do not attempt to infer anything in names that flow through
4887      abnormal edges.  */
4888   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4889     return false;
4890 
4891   /* Similarly, don't infer anything from statements that may throw
4892      exceptions. ??? Relax this requirement?  */
4893   if (stmt_could_throw_p (stmt))
4894     return false;
4895 
4896   /* If STMT is the last statement of a basic block with no normal
4897      successors, there is no point inferring anything about any of its
4898      operands.  We would not be able to find a proper insertion point
4899      for the assertion, anyway.  */
4900   if (stmt_ends_bb_p (stmt))
4901     {
4902       edge_iterator ei;
4903       edge e;
4904 
4905       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
4906 	if (!(e->flags & EDGE_ABNORMAL))
4907 	  break;
4908       if (e == NULL)
4909 	return false;
4910     }
4911 
4912   if (infer_nonnull_range (stmt, op))
4913     {
4914       *val_p = build_int_cst (TREE_TYPE (op), 0);
4915       *comp_code_p = NE_EXPR;
4916       return true;
4917     }
4918 
4919   return false;
4920 }
4921 
4922 
4923 void dump_asserts_for (FILE *, tree);
4924 void debug_asserts_for (tree);
4925 void dump_all_asserts (FILE *);
4926 void debug_all_asserts (void);
4927 
4928 /* Dump all the registered assertions for NAME to FILE.  */
4929 
4930 void
4931 dump_asserts_for (FILE *file, tree name)
4932 {
4933   assert_locus *loc;
4934 
4935   fprintf (file, "Assertions to be inserted for ");
4936   print_generic_expr (file, name, 0);
4937   fprintf (file, "\n");
4938 
4939   loc = asserts_for[SSA_NAME_VERSION (name)];
4940   while (loc)
4941     {
4942       fprintf (file, "\t");
4943       print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4944       fprintf (file, "\n\tBB #%d", loc->bb->index);
4945       if (loc->e)
4946 	{
4947 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4948 	           loc->e->dest->index);
4949 	  dump_edge_info (file, loc->e, dump_flags, 0);
4950 	}
4951       fprintf (file, "\n\tPREDICATE: ");
4952       print_generic_expr (file, name, 0);
4953       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
4954       print_generic_expr (file, loc->val, 0);
4955       fprintf (file, "\n\n");
4956       loc = loc->next;
4957     }
4958 
4959   fprintf (file, "\n");
4960 }
4961 
4962 
4963 /* Dump all the registered assertions for NAME to stderr.  */
4964 
4965 DEBUG_FUNCTION void
4966 debug_asserts_for (tree name)
4967 {
4968   dump_asserts_for (stderr, name);
4969 }
4970 
4971 
4972 /* Dump all the registered assertions for all the names to FILE.  */
4973 
4974 void
4975 dump_all_asserts (FILE *file)
4976 {
4977   unsigned i;
4978   bitmap_iterator bi;
4979 
4980   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4981   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4982     dump_asserts_for (file, ssa_name (i));
4983   fprintf (file, "\n");
4984 }
4985 
4986 
4987 /* Dump all the registered assertions for all the names to stderr.  */
4988 
4989 DEBUG_FUNCTION void
4990 debug_all_asserts (void)
4991 {
4992   dump_all_asserts (stderr);
4993 }
4994 
4995 
4996 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4997    'EXPR COMP_CODE VAL' at a location that dominates block BB or
4998    E->DEST, then register this location as a possible insertion point
4999    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
5000 
5001    BB, E and SI provide the exact insertion point for the new
5002    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
5003    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
5004    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
5005    must not be NULL.  */
5006 
5007 static void
5008 register_new_assert_for (tree name, tree expr,
5009 			 enum tree_code comp_code,
5010 			 tree val,
5011 			 basic_block bb,
5012 			 edge e,
5013 			 gimple_stmt_iterator si)
5014 {
5015   assert_locus *n, *loc, *last_loc;
5016   basic_block dest_bb;
5017 
5018   gcc_checking_assert (bb == NULL || e == NULL);
5019 
5020   if (e == NULL)
5021     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
5022 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
5023 
5024   /* Never build an assert comparing against an integer constant with
5025      TREE_OVERFLOW set.  This confuses our undefined overflow warning
5026      machinery.  */
5027   if (TREE_OVERFLOW_P (val))
5028     val = drop_tree_overflow (val);
5029 
5030   /* The new assertion A will be inserted at BB or E.  We need to
5031      determine if the new location is dominated by a previously
5032      registered location for A.  If we are doing an edge insertion,
5033      assume that A will be inserted at E->DEST.  Note that this is not
5034      necessarily true.
5035 
5036      If E is a critical edge, it will be split.  But even if E is
5037      split, the new block will dominate the same set of blocks that
5038      E->DEST dominates.
5039 
5040      The reverse, however, is not true: blocks dominated by E->DEST
5041      will not be dominated by the new block created to split E.  So,
5042      if the insertion location is on a critical edge, we will not use
5043      the new location to move another assertion previously registered
5044      at a block dominated by E->DEST.  */
5045   dest_bb = (bb) ? bb : e->dest;
5046 
5047   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
5048      VAL at a block dominating DEST_BB, then we don't need to insert a new
5049      one.  Similarly, if the same assertion already exists at a block
5050      dominated by DEST_BB and the new location is not on a critical
5051      edge, then update the existing location for the assertion (i.e.,
5052      move the assertion up in the dominance tree).
5053 
5054      Note, this is implemented as a simple linked list because there
5055      should not be more than a handful of assertions registered per
5056      name.  If this becomes a performance problem, a table hashed by
5057      COMP_CODE and VAL could be implemented.  */
5058   loc = asserts_for[SSA_NAME_VERSION (name)];
5059   last_loc = loc;
5060   while (loc)
5061     {
5062       if (loc->comp_code == comp_code
5063 	  && (loc->val == val
5064 	      || operand_equal_p (loc->val, val, 0))
5065 	  && (loc->expr == expr
5066 	      || operand_equal_p (loc->expr, expr, 0)))
5067 	{
5068 	  /* If E is not a critical edge and DEST_BB
5069 	     dominates the existing location for the assertion, move
5070 	     the assertion up in the dominance tree by updating its
5071 	     location information.  */
5072 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
5073 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
5074 	    {
5075 	      loc->bb = dest_bb;
5076 	      loc->e = e;
5077 	      loc->si = si;
5078 	      return;
5079 	    }
5080 	}
5081 
5082       /* Update the last node of the list and move to the next one.  */
5083       last_loc = loc;
5084       loc = loc->next;
5085     }
5086 
5087   /* If we didn't find an assertion already registered for
5088      NAME COMP_CODE VAL, add a new one at the end of the list of
5089      assertions associated with NAME.  */
5090   n = XNEW (struct assert_locus);
5091   n->bb = dest_bb;
5092   n->e = e;
5093   n->si = si;
5094   n->comp_code = comp_code;
5095   n->val = val;
5096   n->expr = expr;
5097   n->next = NULL;
5098 
5099   if (last_loc)
5100     last_loc->next = n;
5101   else
5102     asserts_for[SSA_NAME_VERSION (name)] = n;
5103 
5104   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
5105 }
5106 
5107 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
5108    Extract a suitable test code and value and store them into *CODE_P and
5109    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
5110 
5111    If no extraction was possible, return FALSE, otherwise return TRUE.
5112 
5113    If INVERT is true, then we invert the result stored into *CODE_P.  */
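/* For instance (illustrative names): given the condition '10 < x_2' and
   NAME == x_2, the predicate is normalized to x_2 > 10; with INVERT set
   (e.g. when following the else edge) it becomes x_2 <= 10.  */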
5114 
5115 static bool
5116 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
5117 					 tree cond_op0, tree cond_op1,
5118 					 bool invert, enum tree_code *code_p,
5119 					 tree *val_p)
5120 {
5121   enum tree_code comp_code;
5122   tree val;
5123 
5124   /* We have a comparison of the form NAME COMP VAL
5125      or VAL COMP NAME.  */
5126   if (name == cond_op1)
5127     {
5128       /* If the predicate is of the form VAL COMP NAME, flip
5129 	 COMP around because we need to register NAME as the
5130 	 first operand in the predicate.  */
5131       comp_code = swap_tree_comparison (cond_code);
5132       val = cond_op0;
5133     }
5134   else
5135     {
5136       /* The comparison is of the form NAME COMP VAL, so the
5137 	 comparison code remains unchanged.  */
5138       comp_code = cond_code;
5139       val = cond_op1;
5140     }
5141 
5142   /* Invert the comparison code as necessary.  */
5143   if (invert)
5144     comp_code = invert_tree_comparison (comp_code, 0);
5145 
5146   /* VRP only handles integral and pointer types.  */
5147   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
5148       && ! POINTER_TYPE_P (TREE_TYPE (val)))
5149     return false;
5150 
5151   /* Do not register always-false predicates.
5152      FIXME:  this works around a limitation in fold() when dealing with
5153      enumerations.  Given 'enum { N1, N2 } x;', fold will not
5154      fold 'if (x > N2)' to 'if (0)'.  */
5155   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
5156       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
5157     {
5158       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
5159       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
5160 
5161       if (comp_code == GT_EXPR
5162 	  && (!max
5163 	      || compare_values (val, max) == 0))
5164 	return false;
5165 
5166       if (comp_code == LT_EXPR
5167 	  && (!min
5168 	      || compare_values (val, min) == 0))
5169 	return false;
5170     }
5171   *code_p = comp_code;
5172   *val_p = val;
5173   return true;
5174 }
5175 
5176 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
5177    (otherwise return VAL).  VAL and MASK must be zero-extended for
5178    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
5179    (to transform signed values into unsigned) and at the end xor
5180    SGNBIT back.  */
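/* A concrete example (with PREC == 4 and SGNBIT == 0): for VAL == 5 (0101)
   and MASK == 1010, the smallest RES with RES > VAL and (RES & MASK) == RES
   is 8 (1000), so masked_increment returns 8.  If no such RES exists, VAL
   is returned unchanged.  */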
5181 
5182 static wide_int
5183 masked_increment (const wide_int &val_in, const wide_int &mask,
5184 		  const wide_int &sgnbit, unsigned int prec)
5185 {
5186   wide_int bit = wi::one (prec), res;
5187   unsigned int i;
5188 
5189   wide_int val = val_in ^ sgnbit;
5190   for (i = 0; i < prec; i++, bit += bit)
5191     {
5192       res = mask;
5193       if ((res & bit) == 0)
5194 	continue;
5195       res = bit - 1;
5196       res = (val + bit).and_not (res);
5197       res &= mask;
5198       if (wi::gtu_p (res, val))
5199 	return res ^ sgnbit;
5200     }
5201   return val ^ sgnbit;
5202 }
5203 
5204 /* Try to register an edge assertion for SSA name NAME on edge E for the
5205    condition COND_OP0 COND_CODE COND_OP1 contributing to the conditional
5206    jump pointed to by BSI.  Invert the condition if INVERT is true.  */
5207 
5208 static void
5209 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
5210 			    enum tree_code cond_code,
5211 			    tree cond_op0, tree cond_op1, bool invert)
5212 {
5213   tree val;
5214   enum tree_code comp_code;
5215 
5216   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5217 						cond_op0,
5218 						cond_op1,
5219 						invert, &comp_code, &val))
5220     return;
5221 
5222   /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
5223      reachable from E.  */
5224   if (live_on_edge (e, name)
5225       && !has_single_use (name))
5226     register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
5227 
5228   /* In the case of NAME <= CST and NAME being defined as
5229      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
5230      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
5231      This catches range and anti-range tests.  */
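  /* For instance (illustrative names): the range test
     '-10 <= x_1 && x_1 <= 5' is commonly folded into the single comparison
     '(unsigned) x_1 + 10 <= 15'; from NAME <= 15 with
     NAME = (unsigned) x_1 + 10 we can recover x_1 >= -10 and x_1 <= 5 for
     the asserts registered below.  */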
5232   if ((comp_code == LE_EXPR
5233        || comp_code == GT_EXPR)
5234       && TREE_CODE (val) == INTEGER_CST
5235       && TYPE_UNSIGNED (TREE_TYPE (val)))
5236     {
5237       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5238       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
5239 
5240       /* Extract CST2 from the (optional) addition.  */
5241       if (is_gimple_assign (def_stmt)
5242 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
5243 	{
5244 	  name2 = gimple_assign_rhs1 (def_stmt);
5245 	  cst2 = gimple_assign_rhs2 (def_stmt);
5246 	  if (TREE_CODE (name2) == SSA_NAME
5247 	      && TREE_CODE (cst2) == INTEGER_CST)
5248 	    def_stmt = SSA_NAME_DEF_STMT (name2);
5249 	}
5250 
5251       /* Extract NAME3 from the (optional) sign-changing cast.  */
5252       if (gimple_assign_cast_p (def_stmt))
5253 	{
5254 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
5255 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5256 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
5257 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
5258 	    name3 = gimple_assign_rhs1 (def_stmt);
5259 	}
5260 
5261       /* If name3 is used later, create an ASSERT_EXPR for it.  */
5262       if (name3 != NULL_TREE
5263       	  && TREE_CODE (name3) == SSA_NAME
5264 	  && (cst2 == NULL_TREE
5265 	      || TREE_CODE (cst2) == INTEGER_CST)
5266 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3))
5267 	  && live_on_edge (e, name3)
5268 	  && !has_single_use (name3))
5269 	{
5270 	  tree tmp;
5271 
5272 	  /* Build an expression for the range test.  */
5273 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
5274 	  if (cst2 != NULL_TREE)
5275 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5276 
5277 	  if (dump_file)
5278 	    {
5279 	      fprintf (dump_file, "Adding assert for ");
5280 	      print_generic_expr (dump_file, name3, 0);
5281 	      fprintf (dump_file, " from ");
5282 	      print_generic_expr (dump_file, tmp, 0);
5283 	      fprintf (dump_file, "\n");
5284 	    }
5285 
5286 	  register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
5287 	}
5288 
5289       /* If name2 is used later, create an ASSERT_EXPR for it.  */
5290       if (name2 != NULL_TREE
5291       	  && TREE_CODE (name2) == SSA_NAME
5292 	  && TREE_CODE (cst2) == INTEGER_CST
5293 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5294 	  && live_on_edge (e, name2)
5295 	  && !has_single_use (name2))
5296 	{
5297 	  tree tmp;
5298 
5299 	  /* Build an expression for the range test.  */
5300 	  tmp = name2;
5301 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
5302 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
5303 	  if (cst2 != NULL_TREE)
5304 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
5305 
5306 	  if (dump_file)
5307 	    {
5308 	      fprintf (dump_file, "Adding assert for ");
5309 	      print_generic_expr (dump_file, name2, 0);
5310 	      fprintf (dump_file, " from ");
5311 	      print_generic_expr (dump_file, tmp, 0);
5312 	      fprintf (dump_file, "\n");
5313 	    }
5314 
5315 	  register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
5316 	}
5317     }
5318 
5319   /* In the case of post-increment/decrement tests like if (i++) ... where
5320      the incremented/decremented value is also used on the edge, the extra
5321      name we want to assert for is not on the def chain of the name being
5322      compared; instead it is found among that name's use stmts.
5323      Similar cases happen for conversions that were simplified through
5324      fold_{sign_changed,widened}_comparison.  */
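  /* For instance (illustrative names): given the test 'i_1 != 0' and a
     dominating use 'i_2 = i_1 + 1' whose result is live on the edge, the
     loop below registers the derived assert i_2 != 1.  */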
5325   if ((comp_code == NE_EXPR
5326        || comp_code == EQ_EXPR)
5327       && TREE_CODE (val) == INTEGER_CST)
5328     {
5329       imm_use_iterator ui;
5330       gimple *use_stmt;
5331       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
5332 	{
5333 	  if (!is_gimple_assign (use_stmt))
5334 	    continue;
5335 
5336 	  /* Only consider use stmts whose block dominates the source of E.  */
5337 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
5338 	    continue;
5339 
5340 	  tree name2 = gimple_assign_lhs (use_stmt);
5341 	  if (TREE_CODE (name2) != SSA_NAME
5342 	      || !live_on_edge (e, name2))
5343 	    continue;
5344 
5345 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
5346 	  tree cst;
5347 	  if (code == PLUS_EXPR
5348 	      || code == MINUS_EXPR)
5349 	    {
5350 	      cst = gimple_assign_rhs2 (use_stmt);
5351 	      if (TREE_CODE (cst) != INTEGER_CST)
5352 		continue;
5353 	      cst = int_const_binop (code, val, cst);
5354 	    }
5355 	  else if (CONVERT_EXPR_CODE_P (code))
5356 	    {
5357 	      /* For truncating conversions we cannot record
5358 		 an inequality.  */
5359 	      if (comp_code == NE_EXPR
5360 		  && (TYPE_PRECISION (TREE_TYPE (name2))
5361 		      < TYPE_PRECISION (TREE_TYPE (name))))
5362 		continue;
5363 	      cst = fold_convert (TREE_TYPE (name2), val);
5364 	    }
5365 	  else
5366 	    continue;
5367 
5368 	  if (TREE_OVERFLOW_P (cst))
5369 	    cst = drop_tree_overflow (cst);
5370 	  register_new_assert_for (name2, name2, comp_code, cst,
5371 				   NULL, e, bsi);
5372 	}
5373     }
5374 
5375   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
5376       && TREE_CODE (val) == INTEGER_CST)
5377     {
5378       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5379       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
5380       tree val2 = NULL_TREE;
5381       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
5382       wide_int mask = wi::zero (prec);
5383       unsigned int nprec = prec;
5384       enum tree_code rhs_code = ERROR_MARK;
5385 
5386       if (is_gimple_assign (def_stmt))
5387 	rhs_code = gimple_assign_rhs_code (def_stmt);
5388 
5389       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
5390          assert that A != CST1 -+ CST2.  */
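      /* For instance (illustrative names): from x_2 = a_1 + 3 and the
	 test x_2 != 10 we can register the assert a_1 != 7.  */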
5391       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
5392 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
5393 	{
5394 	  tree op0 = gimple_assign_rhs1 (def_stmt);
5395 	  tree op1 = gimple_assign_rhs2 (def_stmt);
5396 	  if (TREE_CODE (op0) == SSA_NAME
5397 	      && TREE_CODE (op1) == INTEGER_CST
5398 	      && live_on_edge (e, op0)
5399 	      && !has_single_use (op0))
5400 	    {
5401 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
5402 					   ? MINUS_EXPR : PLUS_EXPR);
5403 	      op1 = int_const_binop (reverse_op, val, op1);
5404 	      if (TREE_OVERFLOW (op1))
5405 		op1 = drop_tree_overflow (op1);
5406 	      register_new_assert_for (op0, op0, comp_code, op1, NULL, e, bsi);
5407 	    }
5408 	}
5409 
5410       /* Add asserts for NAME cmp CST and NAME being defined
5411 	 as NAME = (int) NAME2.  */
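      /* An illustrative example (hypothetical names, 32-bit types): for
	 u_1 of type unsigned int, x_2 = (int) u_1 and the test x_2 < 0,
	 the code below registers the assert u_1 + 0x80000000 <= 0x7fffffff,
	 i.e. u_1 is in [0x80000000, 0xffffffff].  */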
5412       if (!TYPE_UNSIGNED (TREE_TYPE (val))
5413 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
5414 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
5415 	  && gimple_assign_cast_p (def_stmt))
5416 	{
5417 	  name2 = gimple_assign_rhs1 (def_stmt);
5418 	  if (CONVERT_EXPR_CODE_P (rhs_code)
5419 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5420 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
5421 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
5422 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
5423 		  || !tree_int_cst_equal (val,
5424 					  TYPE_MIN_VALUE (TREE_TYPE (val))))
5425 	      && live_on_edge (e, name2)
5426 	      && !has_single_use (name2))
5427 	    {
5428 	      tree tmp, cst;
5429 	      enum tree_code new_comp_code = comp_code;
5430 
5431 	      cst = fold_convert (TREE_TYPE (name2),
5432 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
5433 	      /* Build an expression for the range test.  */
5434 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
5435 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
5436 				 fold_convert (TREE_TYPE (name2), val));
5437 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5438 		{
5439 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
5440 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
5441 				     build_int_cst (TREE_TYPE (name2), 1));
5442 		}
5443 
5444 	      if (dump_file)
5445 		{
5446 		  fprintf (dump_file, "Adding assert for ");
5447 		  print_generic_expr (dump_file, name2, 0);
5448 		  fprintf (dump_file, " from ");
5449 		  print_generic_expr (dump_file, tmp, 0);
5450 		  fprintf (dump_file, "\n");
5451 		}
5452 
5453 	      register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5454 				       e, bsi);
5455 	    }
5456 	}
5457 
5458       /* Add asserts for NAME cmp CST and NAME being defined as
5459 	 NAME = NAME2 >> CST2.
5460 
5461 	 Extract CST2 from the right shift.  */
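      /* For instance (illustrative names, signed 32-bit types): from
	 y_2 = x_1 >> 4 and the test y_2 == 3, the code below registers
	 the assert (unsigned) x_1 - 48 <= 15, i.e. x_1 is in [48, 63].  */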
5462       if (rhs_code == RSHIFT_EXPR)
5463 	{
5464 	  name2 = gimple_assign_rhs1 (def_stmt);
5465 	  cst2 = gimple_assign_rhs2 (def_stmt);
5466 	  if (TREE_CODE (name2) == SSA_NAME
5467 	      && tree_fits_uhwi_p (cst2)
5468 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5469 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
5470 	      && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5471 	      && live_on_edge (e, name2)
5472 	      && !has_single_use (name2))
5473 	    {
5474 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
5475 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5476 	    }
5477 	}
5478       if (val2 != NULL_TREE
5479 	  && TREE_CODE (val2) == INTEGER_CST
5480 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5481 					    TREE_TYPE (val),
5482 					    val2, cst2), val))
5483 	{
5484 	  enum tree_code new_comp_code = comp_code;
5485 	  tree tmp, new_val;
5486 
5487 	  tmp = name2;
5488 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5489 	    {
5490 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5491 		{
5492 		  tree type = build_nonstandard_integer_type (prec, 1);
5493 		  tmp = build1 (NOP_EXPR, type, name2);
5494 		  val2 = fold_convert (type, val2);
5495 		}
5496 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5497 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
5498 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5499 	    }
5500 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5501 	    {
5502 	      wide_int minval
5503 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5504 	      new_val = val2;
5505 	      if (minval == new_val)
5506 		new_val = NULL_TREE;
5507 	    }
5508 	  else
5509 	    {
5510 	      wide_int maxval
5511 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
5512 	      mask |= val2;
5513 	      if (mask == maxval)
5514 		new_val = NULL_TREE;
5515 	      else
5516 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
5517 	    }
5518 
5519 	  if (new_val)
5520 	    {
5521 	      if (dump_file)
5522 		{
5523 		  fprintf (dump_file, "Adding assert for ");
5524 		  print_generic_expr (dump_file, name2, 0);
5525 		  fprintf (dump_file, " from ");
5526 		  print_generic_expr (dump_file, tmp, 0);
5527 		  fprintf (dump_file, "\n");
5528 		}
5529 
5530 	      register_new_assert_for (name2, tmp, new_comp_code, new_val,
5531 				       NULL, e, bsi);
5532 	    }
5533 	}
5534 
5535       /* Add asserts for NAME cmp CST and NAME being defined as
5536 	 NAME = NAME2 & CST2.
5537 
5538 	 Extract CST2 from the and.
5539 
5540 	 Also handle
5541 	 NAME = (unsigned) NAME2;
5542 	 casts where NAME's type is unsigned and has smaller precision
5543 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
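      /* For instance (illustrative names, signed 32-bit types): from
	 x_2 = x_1 & 0xf0 and the test x_2 == 0x30, the code below ends up
	 registering the assert (unsigned) x_1 - 0x30 <= 0xf, i.e. x_1 is
	 in [0x30, 0x3f].  */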
5544       names[0] = NULL_TREE;
5545       names[1] = NULL_TREE;
5546       cst2 = NULL_TREE;
5547       if (rhs_code == BIT_AND_EXPR
5548 	  || (CONVERT_EXPR_CODE_P (rhs_code)
5549 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
5550 	      && TYPE_UNSIGNED (TREE_TYPE (val))
5551 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5552 		 > prec))
5553 	{
5554 	  name2 = gimple_assign_rhs1 (def_stmt);
5555 	  if (rhs_code == BIT_AND_EXPR)
5556 	    cst2 = gimple_assign_rhs2 (def_stmt);
5557 	  else
5558 	    {
5559 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5560 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
5561 	    }
5562 	  if (TREE_CODE (name2) == SSA_NAME
5563 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5564 	      && TREE_CODE (cst2) == INTEGER_CST
5565 	      && !integer_zerop (cst2)
5566 	      && (nprec > 1
5567 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
5568 	    {
5569 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
5570 	      if (gimple_assign_cast_p (def_stmt2))
5571 		{
5572 		  names[1] = gimple_assign_rhs1 (def_stmt2);
5573 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5574 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5575 		      || (TYPE_PRECISION (TREE_TYPE (name2))
5576 			  != TYPE_PRECISION (TREE_TYPE (names[1])))
5577 		      || !live_on_edge (e, names[1])
5578 		      || has_single_use (names[1]))
5579 		    names[1] = NULL_TREE;
5580 		}
5581 	      if (live_on_edge (e, name2)
5582 		  && !has_single_use (name2))
5583 		names[0] = name2;
5584 	    }
5585 	}
5586       if (names[0] || names[1])
5587 	{
5588 	  wide_int minv, maxv, valv, cst2v;
5589 	  wide_int tem, sgnbit;
5590 	  bool valid_p = false, valn, cst2n;
5591 	  enum tree_code ccode = comp_code;
5592 
5593 	  valv = wide_int::from (val, nprec, UNSIGNED);
5594 	  cst2v = wide_int::from (cst2, nprec, UNSIGNED);
5595 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
5596 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
5597 	  /* If CST2 doesn't have most significant bit set,
5598 	     but VAL is negative, we have comparison like
5599 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
5600 	  if (!cst2n && valn)
5601 	    ccode = ERROR_MARK;
5602 	  if (cst2n)
5603 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5604 	  else
5605 	    sgnbit = wi::zero (nprec);
5606 	  minv = valv & cst2v;
5607 	  switch (ccode)
5608 	    {
5609 	    case EQ_EXPR:
5610 	      /* Minimum unsigned value for equality is VAL & CST2
5611 		 (should be equal to VAL, otherwise we probably should
5612 		 have folded the comparison into false) and
5613 		 maximum unsigned value is VAL | ~CST2.  */
5614 	      maxv = valv | ~cst2v;
5615 	      valid_p = true;
5616 	      break;
5617 
5618 	    case NE_EXPR:
5619 	      tem = valv | ~cst2v;
5620 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
5621 	      if (valv == 0)
5622 		{
5623 		  cst2n = false;
5624 		  sgnbit = wi::zero (nprec);
5625 		  goto gt_expr;
5626 		}
5627 	      /* If (VAL | ~CST2) is all ones, handle it as
5628 		 (X & CST2) < VAL.  */
5629 	      if (tem == -1)
5630 		{
5631 		  cst2n = false;
5632 		  valn = false;
5633 		  sgnbit = wi::zero (nprec);
5634 		  goto lt_expr;
5635 		}
5636 	      if (!cst2n && wi::neg_p (cst2v))
5637 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
5638 	      if (sgnbit != 0)
5639 		{
5640 		  if (valv == sgnbit)
5641 		    {
5642 		      cst2n = true;
5643 		      valn = true;
5644 		      goto gt_expr;
5645 		    }
5646 		  if (tem == wi::mask (nprec - 1, false, nprec))
5647 		    {
5648 		      cst2n = true;
5649 		      goto lt_expr;
5650 		    }
5651 		  if (!cst2n)
5652 		    sgnbit = wi::zero (nprec);
5653 		}
5654 	      break;
5655 
5656 	    case GE_EXPR:
5657 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5658 		 is VAL and maximum unsigned value is ~0.  For signed
5659 		 comparison, if CST2 doesn't have most significant bit
5660 		 set, handle it similarly.  If CST2 has MSB set,
5661 		 the minimum is the same, and maximum is ~0U/2.  */
5662 	      if (minv != valv)
5663 		{
5664 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5665 		     VAL.  */
5666 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
5667 		  if (minv == valv)
5668 		    break;
5669 		}
5670 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5671 	      valid_p = true;
5672 	      break;
5673 
5674 	    case GT_EXPR:
5675 	    gt_expr:
5676 	      /* Find the smallest MINV such that MINV > VAL
5677 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
5678 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
5679 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
5680 	      if (minv == valv)
5681 		break;
5682 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
5683 	      valid_p = true;
5684 	      break;
5685 
5686 	    case LE_EXPR:
5687 	      /* Minimum unsigned value for <= is 0 and maximum
5688 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5689 		 Otherwise, find smallest VAL2 where VAL2 > VAL
5690 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5691 		 as maximum.
5692 		 For signed comparison, if CST2 doesn't have most
5693 		 significant bit set, handle it similarly.  If CST2 has
5694 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
5695 	      if (minv == valv)
5696 		maxv = valv;
5697 	      else
5698 		{
5699 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5700 		  if (maxv == valv)
5701 		    break;
5702 		  maxv -= 1;
5703 		}
5704 	      maxv |= ~cst2v;
5705 	      minv = sgnbit;
5706 	      valid_p = true;
5707 	      break;
5708 
5709 	    case LT_EXPR:
5710 	    lt_expr:
5711 	      /* Minimum unsigned value for < is 0 and maximum
5712 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5713 		 Otherwise, find smallest VAL2 where VAL2 > VAL
5714 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5715 		 as maximum.
5716 		 For signed comparison, if CST2 doesn't have most
5717 		 significant bit set, handle it similarly.  If CST2 has
5718 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
5719 	      if (minv == valv)
5720 		{
5721 		  if (valv == sgnbit)
5722 		    break;
5723 		  maxv = valv;
5724 		}
5725 	      else
5726 		{
5727 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5728 		  if (maxv == valv)
5729 		    break;
5730 		}
5731 	      maxv -= 1;
5732 	      maxv |= ~cst2v;
5733 	      minv = sgnbit;
5734 	      valid_p = true;
5735 	      break;
5736 
5737 	    default:
5738 	      break;
5739 	    }
5740 	  if (valid_p
5741 	      && (maxv - minv) != -1)
5742 	    {
5743 	      tree tmp, new_val, type;
5744 	      int i;
5745 
5746 	      for (i = 0; i < 2; i++)
5747 		if (names[i])
5748 		  {
5749 		    wide_int maxv2 = maxv;
5750 		    tmp = names[i];
5751 		    type = TREE_TYPE (names[i]);
5752 		    if (!TYPE_UNSIGNED (type))
5753 		      {
5754 			type = build_nonstandard_integer_type (nprec, 1);
5755 			tmp = build1 (NOP_EXPR, type, names[i]);
5756 		      }
5757 		    if (minv != 0)
5758 		      {
5759 			tmp = build2 (PLUS_EXPR, type, tmp,
5760 				      wide_int_to_tree (type, -minv));
5761 			maxv2 = maxv - minv;
5762 		      }
5763 		    new_val = wide_int_to_tree (type, maxv2);
5764 
5765 		    if (dump_file)
5766 		      {
5767 			fprintf (dump_file, "Adding assert for ");
5768 			print_generic_expr (dump_file, names[i], 0);
5769 			fprintf (dump_file, " from ");
5770 			print_generic_expr (dump_file, tmp, 0);
5771 			fprintf (dump_file, "\n");
5772 		      }
5773 
5774 		    register_new_assert_for (names[i], tmp, LE_EXPR,
5775 					     new_val, NULL, e, bsi);
5776 		  }
5777 	    }
5778 	}
5779     }
5780 }
5781 
5782 /* OP is an operand of a truth value expression which is known to have
5783    a particular value.  Register any asserts for OP and for any
5784    operands in OP's defining statement.
5785 
5786    If CODE is EQ_EXPR, then we want to register that OP is zero (false);
5787    if CODE is NE_EXPR, then we want to register that OP is nonzero (true).  */
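/* For instance (illustrative names): if OP is defined by x_3 = a_1 < b_2
   and CODE is NE_EXPR (OP known to be true), we register an assert for
   x_3 itself (when it has more than one use) and then recurse into the
   comparison, registering asserts for a_1 and b_2 as if a_1 < b_2 were
   known to hold on the edge.  */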
5788 
5789 static void
5790 register_edge_assert_for_1 (tree op, enum tree_code code,
5791 			    edge e, gimple_stmt_iterator bsi)
5792 {
5793   gimple *op_def;
5794   tree val;
5795   enum tree_code rhs_code;
5796 
5797   /* We only care about SSA_NAMEs.  */
5798   if (TREE_CODE (op) != SSA_NAME)
5799     return;
5800 
5801   /* We know that OP will have a zero or nonzero value.  If OP is used
5802      more than once, go ahead and register an assert for OP.  */
5803   if (live_on_edge (e, op)
5804       && !has_single_use (op))
5805     {
5806       val = build_int_cst (TREE_TYPE (op), 0);
5807       register_new_assert_for (op, op, code, val, NULL, e, bsi);
5808     }
5809 
5810   /* Now look at how OP is set.  If it's set from a comparison,
5811      a truth operation or some bit operations, then we may be able
5812      to register information about the operands of that assignment.  */
5813   op_def = SSA_NAME_DEF_STMT (op);
5814   if (gimple_code (op_def) != GIMPLE_ASSIGN)
5815     return;
5816 
5817   rhs_code = gimple_assign_rhs_code (op_def);
5818 
5819   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5820     {
5821       bool invert = (code == EQ_EXPR ? true : false);
5822       tree op0 = gimple_assign_rhs1 (op_def);
5823       tree op1 = gimple_assign_rhs2 (op_def);
5824 
5825       if (TREE_CODE (op0) == SSA_NAME)
5826         register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, invert);
5827       if (TREE_CODE (op1) == SSA_NAME)
5828         register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, invert);
5829     }
5830   else if ((code == NE_EXPR
5831 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5832 	   || (code == EQ_EXPR
5833 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5834     {
5835       /* Recurse on each operand.  */
5836       tree op0 = gimple_assign_rhs1 (op_def);
5837       tree op1 = gimple_assign_rhs2 (op_def);
5838       if (TREE_CODE (op0) == SSA_NAME
5839 	  && has_single_use (op0))
5840 	register_edge_assert_for_1 (op0, code, e, bsi);
5841       if (TREE_CODE (op1) == SSA_NAME
5842 	  && has_single_use (op1))
5843 	register_edge_assert_for_1 (op1, code, e, bsi);
5844     }
5845   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5846 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5847     {
5848       /* Recurse, flipping CODE.  */
5849       code = invert_tree_comparison (code, false);
5850       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5851     }
5852   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5853     {
5854       /* Recurse through the copy.  */
5855       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, bsi);
5856     }
5857   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5858     {
5859       /* Recurse through the type conversion, unless it is a narrowing
5860 	 conversion or conversion from non-integral type.  */
5861       tree rhs = gimple_assign_rhs1 (op_def);
5862       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5863 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
5864 	      <= TYPE_PRECISION (TREE_TYPE (op))))
5865 	register_edge_assert_for_1 (rhs, code, e, bsi);
5866     }
5867 }
5868 
5869 /* Try to register an edge assertion for SSA name NAME on edge E for
5870    the condition COND contributing to the conditional jump pointed to by
5871    SI.  */
5872 
5873 static void
5874 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5875 			  enum tree_code cond_code, tree cond_op0,
5876 			  tree cond_op1)
5877 {
5878   tree val;
5879   enum tree_code comp_code;
5880   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5881 
5882   /* Do not attempt to infer anything in names that flow through
5883      abnormal edges.  */
5884   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5885     return;
5886 
5887   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5888 						cond_op0, cond_op1,
5889 						is_else_edge,
5890 						&comp_code, &val))
5891     return;
5892 
5893   /* Register ASSERT_EXPRs for name.  */
5894   register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5895 			      cond_op1, is_else_edge);
5896 
5897 
5898   /* If COND is effectively an equality test of an SSA_NAME against
5899      the value zero or one, then we may be able to assert values
5900      for SSA_NAMEs which flow into COND.  */
5901 
5902   /* In the case of NAME == 1 or NAME != 0, for the BIT_AND_EXPR defining
5903      statement of NAME we can assert that both operands of the BIT_AND_EXPR
5904      have a nonzero value.  */
5905   if (((comp_code == EQ_EXPR && integer_onep (val))
5906        || (comp_code == NE_EXPR && integer_zerop (val))))
5907     {
5908       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5909 
5910       if (is_gimple_assign (def_stmt)
5911 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5912 	{
5913 	  tree op0 = gimple_assign_rhs1 (def_stmt);
5914 	  tree op1 = gimple_assign_rhs2 (def_stmt);
5915 	  register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5916 	  register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5917 	}
5918     }
5919 
5920   /* In the case of NAME == 0 or NAME != 1, for the BIT_IOR_EXPR defining
5921      statement of NAME we can assert that both operands of the BIT_IOR_EXPR
5922      have a zero value.  */
5923   if (((comp_code == EQ_EXPR && integer_zerop (val))
5924        || (comp_code == NE_EXPR && integer_onep (val))))
5925     {
5926       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
5927 
5928       /* For BIT_IOR_EXPR, both operands are necessarily zero only
5929 	 if NAME == 0, or if the type precision is one.  */
5930       if (is_gimple_assign (def_stmt)
5931 	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5932 	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5933 	          || comp_code == EQ_EXPR)))
5934 	{
5935 	  tree op0 = gimple_assign_rhs1 (def_stmt);
5936 	  tree op1 = gimple_assign_rhs2 (def_stmt);
5937 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5938 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5939 	}
5940     }
5941 }
5942 
5943 
5944 /* Determine whether the outgoing edges of BB should receive an
5945    ASSERT_EXPR for each of the operands of BB's LAST statement.
5946    The last statement of BB must be a COND_EXPR.
5947 
5948    If any of the sub-graphs rooted at BB have an interesting use of
5949    the predicate operands, an assert location node is added to the
5950    list of assertions for the corresponding operands.  */
5951 
5952 static void
5953 find_conditional_asserts (basic_block bb, gcond *last)
5954 {
5955   gimple_stmt_iterator bsi;
5956   tree op;
5957   edge_iterator ei;
5958   edge e;
5959   ssa_op_iter iter;
5960 
5961   bsi = gsi_for_stmt (last);
5962 
5963   /* Look for uses of the operands in each of the sub-graphs
5964      rooted at BB.  We need to check each of the outgoing edges
5965      separately, so that we know what kind of ASSERT_EXPR to
5966      insert.  */
5967   FOR_EACH_EDGE (e, ei, bb->succs)
5968     {
5969       if (e->dest == bb)
5970 	continue;
5971 
5972       /* Register the necessary assertions for each operand in the
5973 	 conditional predicate.  */
5974       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5975 	register_edge_assert_for (op, e, bsi,
5976 				  gimple_cond_code (last),
5977 				  gimple_cond_lhs (last),
5978 				  gimple_cond_rhs (last));
5979     }
5980 }
5981 
5982 struct case_info
5983 {
5984   tree expr;
5985   basic_block bb;
5986 };
5987 
5988 /* Compare two case labels, sorting first by the destination bb index
5989    and then by the case value.  */
5990 
5991 static int
5992 compare_case_labels (const void *p1, const void *p2)
5993 {
5994   const struct case_info *ci1 = (const struct case_info *) p1;
5995   const struct case_info *ci2 = (const struct case_info *) p2;
5996   int idx1 = ci1->bb->index;
5997   int idx2 = ci2->bb->index;
5998 
5999   if (idx1 < idx2)
6000     return -1;
6001   else if (idx1 == idx2)
6002     {
6003       /* Make sure the default label is first in a group.  */
6004       if (!CASE_LOW (ci1->expr))
6005 	return -1;
6006       else if (!CASE_LOW (ci2->expr))
6007 	return 1;
6008       else
6009 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
6010 				     CASE_LOW (ci2->expr));
6011     }
6012   else
6013     return 1;
6014 }
6015 
6016 /* Determine whether the outgoing edges of BB should receive an
6017    ASSERT_EXPR for each of the operands of BB's LAST statement.
6018    The last statement of BB must be a SWITCH_EXPR.
6019 
6020    If any of the sub-graphs rooted at BB have an interesting use of
6021    the predicate operands, an assert location node is added to the
6022    list of assertions for the corresponding operands.  */
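/* For instance (an illustrative sketch): if case labels 3 and 5 ... 7 both
   branch to the same destination block, the single combined range [3, 7]
   is used and the asserts OP >= 3 and OP <= 7 are registered on that
   edge.  */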
6023 
6024 static void
6025 find_switch_asserts (basic_block bb, gswitch *last)
6026 {
6027   gimple_stmt_iterator bsi;
6028   tree op;
6029   edge e;
6030   struct case_info *ci;
6031   size_t n = gimple_switch_num_labels (last);
6032 #if GCC_VERSION >= 4000
6033   unsigned int idx;
6034 #else
6035   /* Work around GCC 3.4 bug (PR 37086).  */
6036   volatile unsigned int idx;
6037 #endif
6038 
6039   bsi = gsi_for_stmt (last);
6040   op = gimple_switch_index (last);
6041   if (TREE_CODE (op) != SSA_NAME)
6042     return;
6043 
6044   /* Build a vector of case labels sorted by destination label.  */
6045   ci = XNEWVEC (struct case_info, n);
6046   for (idx = 0; idx < n; ++idx)
6047     {
6048       ci[idx].expr = gimple_switch_label (last, idx);
6049       ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
6050     }
6051   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
6052 
6053   for (idx = 0; idx < n; ++idx)
6054     {
6055       tree min, max;
6056       tree cl = ci[idx].expr;
6057       basic_block cbb = ci[idx].bb;
6058 
6059       min = CASE_LOW (cl);
6060       max = CASE_HIGH (cl);
6061 
6062       /* If there are multiple case labels with the same destination
6063 	 we need to combine them to a single value range for the edge.  */
6064       if (idx + 1 < n && cbb == ci[idx + 1].bb)
6065 	{
6066 	  /* Skip labels until the last of the group.  */
6067 	  do {
6068 	    ++idx;
6069 	  } while (idx < n && cbb == ci[idx].bb);
6070 	  --idx;
6071 
6072 	  /* Pick up the maximum of the case label range.  */
6073 	  if (CASE_HIGH (ci[idx].expr))
6074 	    max = CASE_HIGH (ci[idx].expr);
6075 	  else
6076 	    max = CASE_LOW (ci[idx].expr);
6077 	}
6078 
6079       /* Nothing to do if the range includes the default label until we
6080 	 can register anti-ranges.  */
6081       if (min == NULL_TREE)
6082 	continue;
6083 
6084       /* Find the edge to register the assert expr on.  */
6085       e = find_edge (bb, cbb);
6086 
6087       /* Register the necessary assertions for the operand in the
6088 	 SWITCH_EXPR.  */
6089       register_edge_assert_for (op, e, bsi,
6090 				max ? GE_EXPR : EQ_EXPR,
6091 				op, fold_convert (TREE_TYPE (op), min));
6092       if (max)
6093 	register_edge_assert_for (op, e, bsi, LE_EXPR, op,
6094 				  fold_convert (TREE_TYPE (op), max));
6095     }
6096 
6097   XDELETEVEC (ci);
6098 }
6099 
6100 
6101 /* Traverse all the statements in block BB looking for statements that
6102    may generate useful assertions for the SSA names in their operands.
6103    If a statement produces a useful assertion A for name N_i, then the
6104    list of assertions already generated for N_i is scanned to
6105    determine if A is actually needed.
6106 
6107    If N_i already had the assertion A at a location dominating the
6108    current location, then nothing needs to be done.  Otherwise, the
6109    new location for A is recorded instead.
6110 
6111    1- For every statement S in BB, all the variables used by S are
6112       added to bitmap FOUND_IN_SUBGRAPH.
6113 
6114    2- If statement S uses an operand N in a way that exposes a known
6115       value range for N, then if N was not already generated by an
6116       ASSERT_EXPR, create a new assert location for N.  For instance,
6117       if N is a pointer and the statement dereferences it, we can
6118       assume that N is not NULL.
6119 
6120    3- COND_EXPRs are a special case of #2.  We can derive range
6121       information from the predicate but need to insert different
6122       ASSERT_EXPRs for each of the sub-graphs rooted at the
6123       conditional block.  If the last statement of BB is a conditional
6124       expression of the form 'X op Y', then
6125 
6126       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
6127 
6128       b) If the conditional is the only entry point to the sub-graph
6129 	 corresponding to the THEN_CLAUSE, recurse into it.  On
6130 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
6131 	 an ASSERT_EXPR is added for the corresponding variable.
6132 
6133       c) Repeat step (b) on the ELSE_CLAUSE.
6134 
6135       d) Mark X and Y in FOUND_IN_SUBGRAPH.
6136 
6137       For instance,
6138 
6139 	    if (a == 9)
6140 	      b = a;
6141 	    else
6142 	      b = c + 1;
6143 
6144       In this case, an assertion on the THEN clause is useful to
6145       determine that 'a' is always 9 on that edge.  However, an assertion
6146       on the ELSE clause would be unnecessary.
6147 
6148    4- If BB does not end in a conditional expression, then we recurse
6149       into BB's dominator children.
6150 
6151    At the end of the recursive traversal, every SSA name will have a
6152    list of locations where ASSERT_EXPRs should be added.  When a new
6153    location for name N is found, it is registered by calling
6154    register_new_assert_for.  That function keeps track of all the
6155    registered assertions to prevent adding unnecessary assertions.
6156    For instance, if a pointer P_4 is dereferenced more than once in a
6157    dominator tree, only the location dominating all the dereferences of
6158    P_4 will receive an ASSERT_EXPR.  */
6159 
6160 static void
6161 find_assert_locations_1 (basic_block bb, sbitmap live)
6162 {
6163   gimple *last;
6164 
6165   last = last_stmt (bb);
6166 
6167   /* If BB's last statement is a conditional statement involving integer
6168      operands, determine if we need to add ASSERT_EXPRs.  */
6169   if (last
6170       && gimple_code (last) == GIMPLE_COND
6171       && !fp_predicate (last)
6172       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6173     find_conditional_asserts (bb, as_a <gcond *> (last));
6174 
6175   /* If BB's last statement is a switch statement involving integer
6176      operands, determine if we need to add ASSERT_EXPRs.  */
6177   if (last
6178       && gimple_code (last) == GIMPLE_SWITCH
6179       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
6180     find_switch_asserts (bb, as_a <gswitch *> (last));
6181 
6182   /* Traverse all the statements in BB marking used names and looking
6183      for statements that may infer assertions for their used operands.  */
6184   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
6185        gsi_prev (&si))
6186     {
6187       gimple *stmt;
6188       tree op;
6189       ssa_op_iter i;
6190 
6191       stmt = gsi_stmt (si);
6192 
6193       if (is_gimple_debug (stmt))
6194 	continue;
6195 
6196       /* See if we can derive an assertion for any of STMT's operands.  */
6197       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6198 	{
6199 	  tree value;
6200 	  enum tree_code comp_code;
6201 
6202 	  /* If op is not live beyond this stmt, do not bother to insert
6203 	     asserts for it.  */
6204 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
6205 	    continue;
6206 
6207 	  /* If OP is used in such a way that we can infer a value
6208 	     range for it, and we don't find a previous assertion for
6209 	     it, create a new assertion location node for OP.  */
6210 	  if (infer_value_range (stmt, op, &comp_code, &value))
6211 	    {
6212 	      /* If we are able to infer a nonzero value range for OP,
6213 		 then walk backwards through the use-def chain to see if OP
6214 		 was set via a typecast.
6215 
6216 		 If so, then we can also infer a nonzero value range
6217 		 for the operand of the NOP_EXPR.  */
6218 	      if (comp_code == NE_EXPR && integer_zerop (value))
6219 		{
6220 		  tree t = op;
6221 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
6222 
6223 		  while (is_gimple_assign (def_stmt)
6224 			 && CONVERT_EXPR_CODE_P
6225 			     (gimple_assign_rhs_code (def_stmt))
6226 			 && TREE_CODE
6227 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
6228 			 && POINTER_TYPE_P
6229 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
6230 		    {
6231 		      t = gimple_assign_rhs1 (def_stmt);
6232 		      def_stmt = SSA_NAME_DEF_STMT (t);
6233 
6234 		      /* Note we want to register the assert for the
6235 			 operand of the NOP_EXPR after SI, not after the
6236 			 conversion.  */
6237 		      if (! has_single_use (t))
6238 			register_new_assert_for (t, t, comp_code, value,
6239 						 bb, NULL, si);
6240 		    }
6241 		}
6242 
6243 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
6244 	    }
6245 	}
6246 
6247       /* Update live.  */
6248       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6249 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
6250       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
6251 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
6252     }
6253 
6254   /* Traverse all PHI nodes in BB, updating live.  */
6255   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
6256        gsi_next (&si))
6257     {
6258       use_operand_p arg_p;
6259       ssa_op_iter i;
6260       gphi *phi = si.phi ();
6261       tree res = gimple_phi_result (phi);
6262 
6263       if (virtual_operand_p (res))
6264 	continue;
6265 
6266       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
6267 	{
6268 	  tree arg = USE_FROM_PTR (arg_p);
6269 	  if (TREE_CODE (arg) == SSA_NAME)
6270 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
6271 	}
6272 
6273       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
6274     }
6275 }
6276 
6277 /* Do an RPO walk over the function computing SSA name liveness
6278    on-the-fly and deciding on assert expressions to insert.  */
6279 
6280 static void
6281 find_assert_locations (void)
6282 {
6283   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6284   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
6285   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6286   int rpo_cnt, i;
6287 
6288   live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
6289   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
6290   for (i = 0; i < rpo_cnt; ++i)
6291     bb_rpo[rpo[i]] = i;
6292 
6293   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
6294      the order in which we compute liveness and insert asserts, we would
6295      otherwise fail to insert asserts into the loop latch.  */
6296   loop_p loop;
6297   FOR_EACH_LOOP (loop, 0)
6298     {
6299       i = loop->latch->index;
6300       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
6301       for (gphi_iterator gsi = gsi_start_phis (loop->header);
6302 	   !gsi_end_p (gsi); gsi_next (&gsi))
6303 	{
6304 	  gphi *phi = gsi.phi ();
6305 	  if (virtual_operand_p (gimple_phi_result (phi)))
6306 	    continue;
6307 	  tree arg = gimple_phi_arg_def (phi, j);
6308 	  if (TREE_CODE (arg) == SSA_NAME)
6309 	    {
6310 	      if (live[i] == NULL)
6311 		{
6312 		  live[i] = sbitmap_alloc (num_ssa_names);
6313 		  bitmap_clear (live[i]);
6314 		}
6315 	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
6316 	    }
6317 	}
6318     }
6319 
6320   for (i = rpo_cnt - 1; i >= 0; --i)
6321     {
6322       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
6323       edge e;
6324       edge_iterator ei;
6325 
6326       if (!live[rpo[i]])
6327 	{
6328 	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
6329 	  bitmap_clear (live[rpo[i]]);
6330 	}
6331 
6332       /* Process BB and update the live information with uses in
6333          this block.  */
6334       find_assert_locations_1 (bb, live[rpo[i]]);
6335 
6336       /* Merge liveness into the predecessor blocks and free it.  */
6337       if (!bitmap_empty_p (live[rpo[i]]))
6338 	{
6339 	  int pred_rpo = i;
6340 	  FOR_EACH_EDGE (e, ei, bb->preds)
6341 	    {
6342 	      int pred = e->src->index;
6343 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
6344 		continue;
6345 
6346 	      if (!live[pred])
6347 		{
6348 		  live[pred] = sbitmap_alloc (num_ssa_names);
6349 		  bitmap_clear (live[pred]);
6350 		}
6351 	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);
6352 
6353 	      if (bb_rpo[pred] < pred_rpo)
6354 		pred_rpo = bb_rpo[pred];
6355 	    }
6356 
6357 	  /* Record the RPO number of the last visited block that needs
6358 	     live information from this block.  */
6359 	  last_rpo[rpo[i]] = pred_rpo;
6360 	}
6361       else
6362 	{
6363 	  sbitmap_free (live[rpo[i]]);
6364 	  live[rpo[i]] = NULL;
6365 	}
6366 
6367       /* We can free all successors' live bitmaps if all their
6368          predecessors have been visited already.  */
6369       FOR_EACH_EDGE (e, ei, bb->succs)
6370 	if (last_rpo[e->dest->index] == i
6371 	    && live[e->dest->index])
6372 	  {
6373 	    sbitmap_free (live[e->dest->index]);
6374 	    live[e->dest->index] = NULL;
6375 	  }
6376     }
6377 
6378   XDELETEVEC (rpo);
6379   XDELETEVEC (bb_rpo);
6380   XDELETEVEC (last_rpo);
6381   for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
6382     if (live[i])
6383       sbitmap_free (live[i]);
6384   XDELETEVEC (live);
6385 }
6386 
6387 /* Create an ASSERT_EXPR for NAME and insert it in the location
6388    indicated by LOC.  Return true if we made any edge insertions.  */
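/* For instance, if LOC records the assertion x_3 > 4 to hold on the true
   edge of a GIMPLE_COND, a new name (say x_4) is created with the
   definition x_4 = ASSERT_EXPR <x_3, x_3 > 4> and that statement is
   inserted on the edge.  */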
6389 
6390 static bool
6391 process_assert_insertions_for (tree name, assert_locus *loc)
6392 {
6393   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
6394   gimple *stmt;
6395   tree cond;
6396   gimple *assert_stmt;
6397   edge_iterator ei;
6398   edge e;
6399 
6400   /* If we have X <=> X do not insert an assert expr for that.  */
6401   if (loc->expr == loc->val)
6402     return false;
6403 
6404   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6405   assert_stmt = build_assert_expr_for (cond, name);
6406   if (loc->e)
6407     {
6408       /* We have been asked to insert the assertion on an edge.  This
6409 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
6410       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6411 			   || (gimple_code (gsi_stmt (loc->si))
6412 			       == GIMPLE_SWITCH));
6413 
6414       gsi_insert_on_edge (loc->e, assert_stmt);
6415       return true;
6416     }
6417 
6418   /* Otherwise, we can insert right after LOC->SI, provided the
6419      statement is not the last statement in the block.  */
6420   stmt = gsi_stmt (loc->si);
6421   if (!stmt_ends_bb_p (stmt))
6422     {
6423       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6424       return false;
6425     }
6426 
6427   /* If STMT must be the last statement in BB, we can only insert new
6428      assertions on the non-abnormal edge out of BB.  Note that since
6429      STMT is not control flow, there may only be one non-abnormal edge
6430      out of BB.  */
6431   FOR_EACH_EDGE (e, ei, loc->bb->succs)
6432     if (!(e->flags & EDGE_ABNORMAL))
6433       {
6434 	gsi_insert_on_edge (e, assert_stmt);
6435 	return true;
6436       }
6437 
6438   gcc_unreachable ();
6439 }
6440 
6441 
6442 /* Process all the insertions registered for every name N_i in
6443    NEED_ASSERT_FOR.  The list of assertions to be inserted for each
6444    name is found in ASSERTS_FOR[i].  */
6445 
6446 static void
6447 process_assert_insertions (void)
6448 {
6449   unsigned i;
6450   bitmap_iterator bi;
6451   bool update_edges_p = false;
6452   int num_asserts = 0;
6453 
6454   if (dump_file && (dump_flags & TDF_DETAILS))
6455     dump_all_asserts (dump_file);
6456 
6457   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6458     {
6459       assert_locus *loc = asserts_for[i];
6460       gcc_assert (loc);
6461 
6462       while (loc)
6463 	{
6464 	  assert_locus *next = loc->next;
6465 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6466 	  free (loc);
6467 	  loc = next;
6468 	  num_asserts++;
6469 	}
6470     }
6471 
6472   if (update_edges_p)
6473     gsi_commit_edge_inserts ();
6474 
6475   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6476 			    num_asserts);
6477 }
6478 
6479 
6480 /* Traverse the flowgraph looking for conditional jumps to insert range
6481    expressions.  These range expressions are meant to provide information
6482    to optimizations that need to reason in terms of value ranges.  They
6483    will not be expanded into RTL.  For instance, given:
6484 
6485    x = ...
6486    y = ...
6487    if (x < y)
6488      y = x - 2;
6489    else
6490      x = y + 3;
6491 
6492    this pass will transform the code into:
6493 
6494    x = ...
6495    y = ...
6496    if (x < y)
6497     {
6498       x = ASSERT_EXPR <x, x < y>
6499       y = x - 2
6500     }
6501    else
6502     {
6503       y = ASSERT_EXPR <y, x >= y>
6504       x = y + 3
6505     }
6506 
6507    The idea is that once copy and constant propagation have run, other
6508    optimizations will be able to determine what ranges of values can 'x'
6509    take in different paths of the code, simply by checking the reaching
6510    definition of 'x'.  */
6511 
6512 static void
6513 insert_range_assertions (void)
6514 {
6515   need_assert_for = BITMAP_ALLOC (NULL);
6516   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
6517 
6518   calculate_dominance_info (CDI_DOMINATORS);
6519 
6520   find_assert_locations ();
6521   if (!bitmap_empty_p (need_assert_for))
6522     {
6523       process_assert_insertions ();
6524       update_ssa (TODO_update_ssa_no_phi);
6525     }
6526 
6527   if (dump_file && (dump_flags & TDF_DETAILS))
6528     {
6529       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6530       dump_function_to_file (current_function_decl, dump_file, dump_flags);
6531     }
6532 
6533   free (asserts_for);
6534   BITMAP_FREE (need_assert_for);
6535 }
6536 
6537 /* Check one ARRAY_REF in REF, located at LOCATION.  Ignore flexible
6538    arrays and "struct hack" idioms.  If VRP can determine that the
6539    array subscript is a constant, check whether it is outside the valid
6540    range.  If the array subscript is a RANGE, warn if it does not
6541    overlap the valid range.
6542    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
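/* For example, for "int a[10]" a constant subscript of 10 is diagnosed
   here, whereas &a[10] is not: taking the address one past the end of an
   array is valid, which is why IGNORE_OFF_BY_ONE is passed as true for
   ARRAY_REFs found underneath an ADDR_EXPR.  */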
6543 
6544 static void
6545 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6546 {
6547   value_range *vr = NULL;
6548   tree low_sub, up_sub;
6549   tree low_bound, up_bound, up_bound_p1;
6550 
6551   if (TREE_NO_WARNING (ref))
6552     return;
6553 
6554   low_sub = up_sub = TREE_OPERAND (ref, 1);
6555   up_bound = array_ref_up_bound (ref);
6556 
6557   /* Cannot check flexible arrays.  */
6558   if (!up_bound
6559       || TREE_CODE (up_bound) != INTEGER_CST)
6560     return;
6561 
6562   /* Accesses to trailing arrays via pointers may access storage
6563      beyond the type's array bounds.  */
6564   if (warn_array_bounds < 2
6565       && array_at_struct_end_p (ref))
6566     return;
6567 
6568   low_bound = array_ref_low_bound (ref);
6569   up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
6570 				 build_int_cst (TREE_TYPE (up_bound), 1));
6571 
6572   /* Empty array.  */
6573   if (tree_int_cst_equal (low_bound, up_bound_p1))
6574     {
6575       warning_at (location, OPT_Warray_bounds,
6576 		  "array subscript is above array bounds");
6577       TREE_NO_WARNING (ref) = 1;
6578     }
6579 
6580   if (TREE_CODE (low_sub) == SSA_NAME)
6581     {
6582       vr = get_value_range (low_sub);
6583       if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6584         {
6585           low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6586           up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6587         }
6588     }
6589 
6590   if (vr && vr->type == VR_ANTI_RANGE)
6591     {
6592       if (TREE_CODE (up_sub) == INTEGER_CST
6593           && (ignore_off_by_one
6594 	      ? tree_int_cst_lt (up_bound, up_sub)
6595 	      : tree_int_cst_le (up_bound, up_sub))
6596           && TREE_CODE (low_sub) == INTEGER_CST
6597           && tree_int_cst_le (low_sub, low_bound))
6598         {
6599           warning_at (location, OPT_Warray_bounds,
6600 		      "array subscript is outside array bounds");
6601           TREE_NO_WARNING (ref) = 1;
6602         }
6603     }
6604   else if (TREE_CODE (up_sub) == INTEGER_CST
6605 	   && (ignore_off_by_one
6606 	       ? !tree_int_cst_le (up_sub, up_bound_p1)
6607 	       : !tree_int_cst_le (up_sub, up_bound)))
6608     {
6609       if (dump_file && (dump_flags & TDF_DETAILS))
6610 	{
6611 	  fprintf (dump_file, "Array bound warning for ");
6612 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6613 	  fprintf (dump_file, "\n");
6614 	}
6615       warning_at (location, OPT_Warray_bounds,
6616 		  "array subscript is above array bounds");
6617       TREE_NO_WARNING (ref) = 1;
6618     }
6619   else if (TREE_CODE (low_sub) == INTEGER_CST
6620            && tree_int_cst_lt (low_sub, low_bound))
6621     {
6622       if (dump_file && (dump_flags & TDF_DETAILS))
6623 	{
6624 	  fprintf (dump_file, "Array bound warning for ");
6625 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6626 	  fprintf (dump_file, "\n");
6627 	}
6628       warning_at (location, OPT_Warray_bounds,
6629 		  "array subscript is below array bounds");
6630       TREE_NO_WARNING (ref) = 1;
6631     }
6632 }
6633 
6634 /* Check whether the expression T, located at LOCATION, computes the
6635    address of an ARRAY_REF, and call check_array_ref on it.  */
6636 
6637 static void
6638 search_for_addr_array (tree t, location_t location)
6639 {
6640   /* Check each ARRAY_REF in the reference chain.  */
6641   do
6642     {
6643       if (TREE_CODE (t) == ARRAY_REF)
6644 	check_array_ref (location, t, true /*ignore_off_by_one*/);
6645 
6646       t = TREE_OPERAND (t, 0);
6647     }
6648   while (handled_component_p (t));
6649 
6650   if (TREE_CODE (t) == MEM_REF
6651       && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6652       && !TREE_NO_WARNING (t))
6653     {
6654       tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6655       tree low_bound, up_bound, el_sz;
6656       offset_int idx;
6657       if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6658 	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6659 	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
6660 	return;
6661 
6662       low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6663       up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6664       el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6665       if (!low_bound
6666 	  || TREE_CODE (low_bound) != INTEGER_CST
6667 	  || !up_bound
6668 	  || TREE_CODE (up_bound) != INTEGER_CST
6669 	  || !el_sz
6670 	  || TREE_CODE (el_sz) != INTEGER_CST)
6671 	return;
6672 
6673       idx = mem_ref_offset (t);
6674       idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
6675       if (wi::lts_p (idx, 0))
6676 	{
6677 	  if (dump_file && (dump_flags & TDF_DETAILS))
6678 	    {
6679 	      fprintf (dump_file, "Array bound warning for ");
6680 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6681 	      fprintf (dump_file, "\n");
6682 	    }
6683 	  warning_at (location, OPT_Warray_bounds,
6684 		      "array subscript is below array bounds");
6685 	  TREE_NO_WARNING (t) = 1;
6686 	}
6687       else if (wi::gts_p (idx, (wi::to_offset (up_bound)
6688 				- wi::to_offset (low_bound) + 1)))
6689 	{
6690 	  if (dump_file && (dump_flags & TDF_DETAILS))
6691 	    {
6692 	      fprintf (dump_file, "Array bound warning for ");
6693 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6694 	      fprintf (dump_file, "\n");
6695 	    }
6696 	  warning_at (location, OPT_Warray_bounds,
6697 		      "array subscript is above array bounds");
6698 	  TREE_NO_WARNING (t) = 1;
6699 	}
6700     }
6701 }
6702 
6703 /* walk_tree() callback that checks if *TP is
6704    an ARRAY_REF inside an ADDR_EXPR (in which an array
6705    subscript one outside the valid range is allowed). Call
6706    check_array_ref for each ARRAY_REF found. The location is
6707    passed in DATA.  */
6708 
6709 static tree
6710 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6711 {
6712   tree t = *tp;
6713   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6714   location_t location;
6715 
6716   if (EXPR_HAS_LOCATION (t))
6717     location = EXPR_LOCATION (t);
6718   else
6719     {
6720       location_t *locp = (location_t *) wi->info;
6721       location = *locp;
6722     }
6723 
6724   *walk_subtree = TRUE;
6725 
6726   if (TREE_CODE (t) == ARRAY_REF)
6727     check_array_ref (location, t, false /*ignore_off_by_one*/);
6728 
6729   else if (TREE_CODE (t) == ADDR_EXPR)
6730     {
6731       search_for_addr_array (t, location);
6732       *walk_subtree = FALSE;
6733     }
6734 
6735   return NULL_TREE;
6736 }
6737 
6738 /* Walk over all statements of all reachable BBs and call check_array_bounds
6739    on them.  */
6740 
6741 static void
6742 check_all_array_refs (void)
6743 {
6744   basic_block bb;
6745   gimple_stmt_iterator si;
6746 
6747   FOR_EACH_BB_FN (bb, cfun)
6748     {
6749       edge_iterator ei;
6750       edge e;
6751       bool executable = false;
6752 
6753       /* Skip blocks that were found to be unreachable.  */
6754       FOR_EACH_EDGE (e, ei, bb->preds)
6755 	executable |= !!(e->flags & EDGE_EXECUTABLE);
6756       if (!executable)
6757 	continue;
6758 
6759       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6760 	{
6761 	  gimple *stmt = gsi_stmt (si);
6762 	  struct walk_stmt_info wi;
6763 	  if (!gimple_has_location (stmt)
6764 	      || is_gimple_debug (stmt))
6765 	    continue;
6766 
6767 	  memset (&wi, 0, sizeof (wi));
6768 
6769 	  location_t loc = gimple_location (stmt);
6770 	  wi.info = &loc;
6771 
6772 	  walk_gimple_op (gsi_stmt (si),
6773 			  check_array_bounds,
6774 			  &wi);
6775 	}
6776     }
6777 }
6778 
6779 /* Return true if all imm uses of VAR are either in STMT, or
6780    feed (optionally through a chain of single imm uses) GIMPLE_COND
6781    in basic block COND_BB.  */
6782 
6783 static bool
6784 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
6785 {
6786   use_operand_p use_p, use2_p;
6787   imm_use_iterator iter;
6788 
6789   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
6790     if (USE_STMT (use_p) != stmt)
6791       {
6792 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
6793 	if (is_gimple_debug (use_stmt))
6794 	  continue;
6795 	while (is_gimple_assign (use_stmt)
6796 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
6797 	       && single_imm_use (gimple_assign_lhs (use_stmt),
6798 				  &use2_p, &use_stmt2))
6799 	  use_stmt = use_stmt2;
6800 	if (gimple_code (use_stmt) != GIMPLE_COND
6801 	    || gimple_bb (use_stmt) != cond_bb)
6802 	  return false;
6803       }
6804   return true;
6805 }
6806 
6807 /* Handle
6808    _4 = x_3 & 31;
6809    if (_4 != 0)
6810      goto <bb 6>;
6811    else
6812      goto <bb 7>;
6813    <bb 6>:
6814    __builtin_unreachable ();
6815    <bb 7>:
6816    x_5 = ASSERT_EXPR <x_3, ...>;
6817    If x_3 has no other immediate uses (checked by the caller) and
6818    VAR is the x_3 from the ASSERT_EXPR, we can clear the low 5 bits
6819    in VAR's non-zero bits bitmask.  */
6820 
6821 static void
6822 maybe_set_nonzero_bits (basic_block bb, tree var)
6823 {
6824   edge e = single_pred_edge (bb);
6825   basic_block cond_bb = e->src;
6826   gimple *stmt = last_stmt (cond_bb);
6827   tree cst;
6828 
6829   if (stmt == NULL
6830       || gimple_code (stmt) != GIMPLE_COND
6831       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
6832 				     ? EQ_EXPR : NE_EXPR)
6833       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
6834       || !integer_zerop (gimple_cond_rhs (stmt)))
6835     return;
6836 
6837   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
6838   if (!is_gimple_assign (stmt)
6839       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
6840       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
6841     return;
6842   if (gimple_assign_rhs1 (stmt) != var)
6843     {
6844       gimple *stmt2;
6845 
6846       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
6847 	return;
6848       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
6849       if (!gimple_assign_cast_p (stmt2)
6850 	  || gimple_assign_rhs1 (stmt2) != var
6851 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
6852 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
6853 			      != TYPE_PRECISION (TREE_TYPE (var))))
6854 	return;
6855     }
6856   cst = gimple_assign_rhs2 (stmt);
6857   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
6858 }
6859 
6860 /* Convert range assertion expressions into the implied copies and
6861    copy propagate away the copies.  Doing the trivial copy propagation
6862    here avoids the need to run the full copy propagation pass after
6863    VRP.
6864 
6865    FIXME, this will eventually lead to copy propagation removing the
6866    names that had useful range information attached to them.  For
6867    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6868    then N_i will have the range [3, +INF].
6869 
6870    However, by converting the assertion into the implied copy
6871    operation N_i = N_j, we will then copy-propagate N_j into the uses
6872    of N_i and lose the range information.  We may want to hold on to
6873    ASSERT_EXPRs a little while longer as the ranges could be used in
6874    things like jump threading.
6875 
6876    The problem with keeping ASSERT_EXPRs around is that passes after
6877    VRP need to handle them appropriately.
6878 
6879    Another approach would be to make the range information a first
6880    class property of the SSA_NAME so that it can be queried from
6881    any pass.  This is made somewhat more complex by the need for
6882    multiple ranges to be associated with one SSA_NAME.  */
6883 
6884 static void
6885 remove_range_assertions (void)
6886 {
6887   basic_block bb;
6888   gimple_stmt_iterator si;
6889   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
6890      a basic block preceded by GIMPLE_COND branching to it and
6891      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
6892   int is_unreachable;
6893 
6894   /* Note that the GSI iterator bump happens at the bottom of the
6895      loop and no bump is necessary if we're removing the statement
6896      referenced by the current GSI.  */
6897   FOR_EACH_BB_FN (bb, cfun)
6898     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
6899       {
6900 	gimple *stmt = gsi_stmt (si);
6901 	gimple *use_stmt;
6902 
6903 	if (is_gimple_assign (stmt)
6904 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6905 	  {
6906 	    tree lhs = gimple_assign_lhs (stmt);
6907 	    tree rhs = gimple_assign_rhs1 (stmt);
6908 	    tree var;
6909 	    use_operand_p use_p;
6910 	    imm_use_iterator iter;
6911 
6912 	    var = ASSERT_EXPR_VAR (rhs);
6913 	    gcc_assert (TREE_CODE (var) == SSA_NAME);
6914 
6915 	    if (!POINTER_TYPE_P (TREE_TYPE (lhs))
6916 		&& SSA_NAME_RANGE_INFO (lhs))
6917 	      {
6918 		if (is_unreachable == -1)
6919 		  {
6920 		    is_unreachable = 0;
6921 		    if (single_pred_p (bb)
6922 			&& assert_unreachable_fallthru_edge_p
6923 						    (single_pred_edge (bb)))
6924 		      is_unreachable = 1;
6925 		  }
6926 		/* Handle
6927 		   if (x_7 >= 10 && x_7 < 20)
6928 		     __builtin_unreachable ();
6929 		   x_8 = ASSERT_EXPR <x_7, ...>;
6930 		   if the only uses of x_7 are in the ASSERT_EXPR and
6931 		   in the condition.  In that case, we can copy the
6932 		   range info from x_8 computed in this pass also
6933 		   for x_7.  */
6934 		if (is_unreachable
6935 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
6936 							  single_pred (bb)))
6937 		  {
6938 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
6939 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
6940 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
6941 		    maybe_set_nonzero_bits (bb, var);
6942 		  }
6943 	      }
6944 
6945 	    /* Propagate the RHS into every use of the LHS.  */
6946 	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
6947 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
6948 		SET_USE (use_p, var);
6949 
6950 	    /* And finally, remove the copy, it is not needed.  */
6951 	    gsi_remove (&si, true);
6952 	    release_defs (stmt);
6953 	  }
6954 	else
6955 	  {
6956 	    if (!is_gimple_debug (gsi_stmt (si)))
6957 	      is_unreachable = 0;
6958 	    gsi_next (&si);
6959 	  }
6960       }
6961 }
6962 
6963 
6964 /* Return true if STMT is interesting for VRP.  */
6965 
6966 static bool
6967 stmt_interesting_for_vrp (gimple *stmt)
6968 {
6969   if (gimple_code (stmt) == GIMPLE_PHI)
6970     {
6971       tree res = gimple_phi_result (stmt);
6972       return (!virtual_operand_p (res)
6973 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6974 		  || POINTER_TYPE_P (TREE_TYPE (res))));
6975     }
6976   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6977     {
6978       tree lhs = gimple_get_lhs (stmt);
6979 
6980       /* In general, assignments with virtual operands are not useful
6981 	 for deriving ranges, with the obvious exception of calls to
6982 	 builtin functions.  */
6983       if (lhs && TREE_CODE (lhs) == SSA_NAME
6984 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6985 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
6986 	  && (is_gimple_call (stmt)
6987 	      || !gimple_vuse (stmt)))
6988 	return true;
6989       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
6990 	switch (gimple_call_internal_fn (stmt))
6991 	  {
6992 	  case IFN_ADD_OVERFLOW:
6993 	  case IFN_SUB_OVERFLOW:
6994 	  case IFN_MUL_OVERFLOW:
6995 	    /* These internal calls return _Complex integer type,
6996 	       but are interesting to VRP nevertheless.  */
6997 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
6998 	      return true;
6999 	    break;
7000 	  default:
7001 	    break;
7002 	  }
7003     }
7004   else if (gimple_code (stmt) == GIMPLE_COND
7005 	   || gimple_code (stmt) == GIMPLE_SWITCH)
7006     return true;
7007 
7008   return false;
7009 }
7010 
7011 
7012 /* Initialize local data structures for VRP.  */
7013 
7014 static void
7015 vrp_initialize (void)
7016 {
7017   basic_block bb;
7018 
7019   values_propagated = false;
7020   num_vr_values = num_ssa_names;
7021   vr_value = XCNEWVEC (value_range *, num_vr_values);
7022   vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
7023 
7024   FOR_EACH_BB_FN (bb, cfun)
7025     {
7026       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
7027 	   gsi_next (&si))
7028 	{
7029 	  gphi *phi = si.phi ();
7030 	  if (!stmt_interesting_for_vrp (phi))
7031 	    {
7032 	      tree lhs = PHI_RESULT (phi);
7033 	      set_value_range_to_varying (get_value_range (lhs));
7034 	      prop_set_simulate_again (phi, false);
7035 	    }
7036 	  else
7037 	    prop_set_simulate_again (phi, true);
7038 	}
7039 
7040       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
7041 	   gsi_next (&si))
7042         {
7043 	  gimple *stmt = gsi_stmt (si);
7044 
7045 	  /* If the statement is a control statement, make sure it is
7046 	     simulated at least once.  Failing to do so means its
7047 	     outgoing edges will never get added.  */
7048 	  if (stmt_ends_bb_p (stmt))
7049 	    prop_set_simulate_again (stmt, true);
7050 	  else if (!stmt_interesting_for_vrp (stmt))
7051 	    {
7052 	      set_defs_to_varying (stmt);
7053 	      prop_set_simulate_again (stmt, false);
7054 	    }
7055 	  else
7056 	    prop_set_simulate_again (stmt, true);
7057 	}
7058     }
7059 }
7060 
7061 /* Return the singleton value from NAME's value-range if it has one, else NAME.  */
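/* For example, if NAME has the range [42, 42], 42 is returned; for any
   wider range, NAME itself is returned.  */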
7062 
7063 static inline tree
7064 vrp_valueize (tree name)
7065 {
7066   if (TREE_CODE (name) == SSA_NAME)
7067     {
7068       value_range *vr = get_value_range (name);
7069       if (vr->type == VR_RANGE
7070 	  && vrp_operand_equal_p (vr->min, vr->max))
7071 	return vr->min;
7072     }
7073   return name;
7074 }
7075 
7076 /* Return the singleton value-range for NAME if that is a constant
7077    but signal to not follow SSA edges.  */
7078 
7079 static inline tree
7080 vrp_valueize_1 (tree name)
7081 {
7082   if (TREE_CODE (name) == SSA_NAME)
7083     {
7084       /* If the definition may be simulated again we cannot follow
7085          this SSA edge as the SSA propagator does not necessarily
7086 	 re-visit the use.  */
7087       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
7088       if (!gimple_nop_p (def_stmt)
7089 	  && prop_simulate_again_p (def_stmt))
7090 	return NULL_TREE;
7091       value_range *vr = get_value_range (name);
7092       if (range_int_cst_singleton_p (vr))
7093 	return vr->min;
7094     }
7095   return name;
7096 }
7097 
7098 /* Visit assignment STMT.  If it produces an interesting range, record
7099    the SSA name in *OUTPUT_P.  */
7100 
7101 static enum ssa_prop_result
7102 vrp_visit_assignment_or_call (gimple *stmt, tree *output_p)
7103 {
7104   tree lhs;
7105   enum gimple_code code = gimple_code (stmt);
7106   lhs = gimple_get_lhs (stmt);
7107 
7108   /* We only keep track of ranges in integral and pointer types.  */
7109   if (TREE_CODE (lhs) == SSA_NAME
7110       && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
7111 	   /* It is valid to have NULL MIN/MAX values on a type.  See
7112 	      build_range_type.  */
7113 	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
7114 	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
7115 	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
7116     {
7117       value_range new_vr = VR_INITIALIZER;
7118 
7119       /* Try folding the statement to a constant first.  */
7120       tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
7121 						 vrp_valueize_1);
7122       if (tem && is_gimple_min_invariant (tem))
7123 	set_value_range_to_value (&new_vr, tem, NULL);
7124       /* Then dispatch to value-range extracting functions.  */
7125       else if (code == GIMPLE_CALL)
7126 	extract_range_basic (&new_vr, stmt);
7127       else
7128 	extract_range_from_assignment (&new_vr, as_a <gassign *> (stmt));
7129 
7130       if (update_value_range (lhs, &new_vr))
7131 	{
7132 	  *output_p = lhs;
7133 
7134 	  if (dump_file && (dump_flags & TDF_DETAILS))
7135 	    {
7136 	      fprintf (dump_file, "Found new range for ");
7137 	      print_generic_expr (dump_file, lhs, 0);
7138 	      fprintf (dump_file, ": ");
7139 	      dump_value_range (dump_file, &new_vr);
7140 	      fprintf (dump_file, "\n");
7141 	    }
7142 
7143 	  if (new_vr.type == VR_VARYING)
7144 	    return SSA_PROP_VARYING;
7145 
7146 	  return SSA_PROP_INTERESTING;
7147 	}
7148 
7149       return SSA_PROP_NOT_INTERESTING;
7150     }
7151   else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
7152     switch (gimple_call_internal_fn (stmt))
7153       {
7154       case IFN_ADD_OVERFLOW:
7155       case IFN_SUB_OVERFLOW:
7156       case IFN_MUL_OVERFLOW:
7157 	/* These internal calls return _Complex integer type,
7158 	   which VRP does not track, but the immediate uses
7159 	   thereof might be interesting.  */
7160 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
7161 	  {
7162 	    imm_use_iterator iter;
7163 	    use_operand_p use_p;
7164 	    enum ssa_prop_result res = SSA_PROP_VARYING;
7165 
7166 	    set_value_range_to_varying (get_value_range (lhs));
7167 
7168 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
7169 	      {
7170 		gimple *use_stmt = USE_STMT (use_p);
7171 		if (!is_gimple_assign (use_stmt))
7172 		  continue;
7173 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
7174 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
7175 		  continue;
7176 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
7177 		tree use_lhs = gimple_assign_lhs (use_stmt);
7178 		if (TREE_CODE (rhs1) != rhs_code
7179 		    || TREE_OPERAND (rhs1, 0) != lhs
7180 		    || TREE_CODE (use_lhs) != SSA_NAME
7181 		    || !stmt_interesting_for_vrp (use_stmt)
7182 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
7183 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
7184 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
7185 		  continue;
7186 
7187 		/* If there is a change in the value range for any of the
7188 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
7189 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
7190 		   or IMAGPART_EXPR immediate uses, but none of them have
7191 		   a change in their value ranges, return
7192 		   SSA_PROP_NOT_INTERESTING.  If there are no
7193 		   {REAL,IMAG}PART_EXPR uses at all,
7194 		   return SSA_PROP_VARYING.  */
7195 		value_range new_vr = VR_INITIALIZER;
7196 		extract_range_basic (&new_vr, use_stmt);
7197 		value_range *old_vr = get_value_range (use_lhs);
7198 		if (old_vr->type != new_vr.type
7199 		    || !vrp_operand_equal_p (old_vr->min, new_vr.min)
7200 		    || !vrp_operand_equal_p (old_vr->max, new_vr.max)
7201 		    || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
7202 		  res = SSA_PROP_INTERESTING;
7203 		else
7204 		  res = SSA_PROP_NOT_INTERESTING;
7205 		BITMAP_FREE (new_vr.equiv);
7206 		if (res == SSA_PROP_INTERESTING)
7207 		  {
7208 		    *output_p = lhs;
7209 		    return res;
7210 		  }
7211 	      }
7212 
7213 	    return res;
7214 	  }
7215 	break;
7216       default:
7217 	break;
7218       }
7219 
7220   /* Every other statement produces no useful ranges.  */
7221   set_defs_to_varying (stmt);
7222 
7223   return SSA_PROP_VARYING;
7224 }
7225 
7226 /* Helper that returns the value range of the SSA_NAME with version I,
7227    or, if that range is varying or undefined, a symbolic range
7228    containing just the SSA_NAME itself.  */
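/* E.g. if x_5 currently has a VARYING range, the range used for the
   comparison is the symbolic singleton [x_5, x_5], which can still be
   compared against another name whose range mentions x_5.  */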
7229 
7230 static inline value_range
7231 get_vr_for_comparison (int i)
7232 {
7233   value_range vr = *get_value_range (ssa_name (i));
7234 
7235   /* If name N_i does not have a valid range, use N_i as its own
7236      range.  This allows us to compare against names that may
7237      have N_i in their ranges.  */
7238   if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
7239     {
7240       vr.type = VR_RANGE;
7241       vr.min = ssa_name (i);
7242       vr.max = ssa_name (i);
7243     }
7244 
7245   return vr;
7246 }
7247 
7248 /* Compare all the value ranges for names equivalent to VAR with VAL
7249    using comparison code COMP.  Return the same value returned by
7250    compare_range_with_value, including the setting of
7251    *STRICT_OVERFLOW_P.  */
7252 
7253 static tree
7254 compare_name_with_value (enum tree_code comp, tree var, tree val,
7255 			 bool *strict_overflow_p, bool use_equiv_p)
7256 {
7257   bitmap_iterator bi;
7258   unsigned i;
7259   bitmap e;
7260   tree retval, t;
7261   int used_strict_overflow;
7262   bool sop;
7263   value_range equiv_vr;
7264 
7265   /* Get the set of equivalences for VAR.  */
7266   e = get_value_range (var)->equiv;
7267 
7268   /* Start at -1.  Set it to 0 if we do a comparison without relying
7269      on overflow, or 1 if all comparisons rely on overflow.  */
7270   used_strict_overflow = -1;
7271 
7272   /* Compare vars' value range with val.  */
7273   equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
7274   sop = false;
7275   retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
7276   if (retval)
7277     used_strict_overflow = sop ? 1 : 0;
7278 
7279   /* If the equiv set is empty we have done all work we need to do.  */
7280   if (e == NULL)
7281     {
7282       if (retval
7283 	  && used_strict_overflow > 0)
7284 	*strict_overflow_p = true;
7285       return retval;
7286     }
7287 
7288   EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
7289     {
7290       if (! use_equiv_p
7291 	  && ! SSA_NAME_IS_DEFAULT_DEF (ssa_name (i))
7292 	  && prop_simulate_again_p (SSA_NAME_DEF_STMT (ssa_name (i))))
7293 	continue;
7294 
7295       equiv_vr = get_vr_for_comparison (i);
7296       sop = false;
7297       t = compare_range_with_value (comp, &equiv_vr, val, &sop);
7298       if (t)
7299 	{
7300 	  /* If we get different answers from different members
7301 	     of the equivalence set this check must be in a dead
7302 	     code region.  Folding it to a trap representation
7303 	     would be correct here.  For now just return don't-know.  */
7304 	  if (retval != NULL
7305 	      && t != retval)
7306 	    {
7307 	      retval = NULL_TREE;
7308 	      break;
7309 	    }
7310 	  retval = t;
7311 
7312 	  if (!sop)
7313 	    used_strict_overflow = 0;
7314 	  else if (used_strict_overflow < 0)
7315 	    used_strict_overflow = 1;
7316 	}
7317     }
7318 
7319   if (retval
7320       && used_strict_overflow > 0)
7321     *strict_overflow_p = true;
7322 
7323   return retval;
7324 }
7325 
7326 
7327 /* Given a comparison code COMP and names N1 and N2, compare all the
7328    ranges equivalent to N1 against all the ranges equivalent to N2
7329    to determine the value of N1 COMP N2.  Return the same value
7330    returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
7331    whether we relied on an overflow infinity in the comparison.  */
7332 
7333 
7334 static tree
7335 compare_names (enum tree_code comp, tree n1, tree n2,
7336 	       bool *strict_overflow_p)
7337 {
7338   tree t, retval;
7339   bitmap e1, e2;
7340   bitmap_iterator bi1, bi2;
7341   unsigned i1, i2;
7342   int used_strict_overflow;
7343   static bitmap_obstack *s_obstack = NULL;
7344   static bitmap s_e1 = NULL, s_e2 = NULL;
7345 
7346   /* Compare the ranges of every name equivalent to N1 against the
7347      ranges of every name equivalent to N2.  */
7348   e1 = get_value_range (n1)->equiv;
7349   e2 = get_value_range (n2)->equiv;
7350 
7351   /* Use the fake bitmaps if e1 or e2 are not available.  */
7352   if (s_obstack == NULL)
7353     {
7354       s_obstack = XNEW (bitmap_obstack);
7355       bitmap_obstack_initialize (s_obstack);
7356       s_e1 = BITMAP_ALLOC (s_obstack);
7357       s_e2 = BITMAP_ALLOC (s_obstack);
7358     }
7359   if (e1 == NULL)
7360     e1 = s_e1;
7361   if (e2 == NULL)
7362     e2 = s_e2;
7363 
7364   /* Add N1 and N2 to their own set of equivalences to avoid
7365      duplicating the body of the loop just to check N1 and N2
7366      ranges.  */
7367   bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
7368   bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
7369 
7370   /* If the equivalence sets have a common intersection, then the two
7371      names can be compared without checking their ranges.  */
7372   if (bitmap_intersect_p (e1, e2))
7373     {
7374       bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7375       bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7376 
7377       return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
7378 	     ? boolean_true_node
7379 	     : boolean_false_node;
7380     }
7381 
7382   /* Start at -1.  Set it to 0 if we do a comparison without relying
7383      on overflow, or 1 if all comparisons rely on overflow.  */
7384   used_strict_overflow = -1;
7385 
7386   /* Otherwise, compare all the equivalent ranges.  N1 and N2 were
7387      already added to their own equivalence sets above, so the loops
7388      below cover their ranges as well.  */
7389   EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
7390     {
7391       value_range vr1 = get_vr_for_comparison (i1);
7392 
7393       t = retval = NULL_TREE;
7394       EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
7395 	{
7396 	  bool sop = false;
7397 
7398 	  value_range vr2 = get_vr_for_comparison (i2);
7399 
7400 	  t = compare_ranges (comp, &vr1, &vr2, &sop);
7401 	  if (t)
7402 	    {
7403 	      /* If we get different answers from different members
7404 		 of the equivalence set this check must be in a dead
7405 		 code region.  Folding it to a trap representation
7406 		 would be correct here.  For now just return don't-know.  */
7407 	      if (retval != NULL
7408 		  && t != retval)
7409 		{
7410 		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7411 		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7412 		  return NULL_TREE;
7413 		}
7414 	      retval = t;
7415 
7416 	      if (!sop)
7417 		used_strict_overflow = 0;
7418 	      else if (used_strict_overflow < 0)
7419 		used_strict_overflow = 1;
7420 	    }
7421 	}
7422 
7423       if (retval)
7424 	{
7425 	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7426 	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7427 	  if (used_strict_overflow > 0)
7428 	    *strict_overflow_p = true;
7429 	  return retval;
7430 	}
7431     }
7432 
7433   /* None of the equivalent ranges are useful in computing this
7434      comparison.  */
7435   bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
7436   bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
7437   return NULL_TREE;
7438 }
7439 
7440 /* Helper function for vrp_evaluate_conditional_warnv & other
7441    optimizers.  */
7442 
7443 static tree
7444 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
7445 						      tree op0, tree op1,
7446 						      bool * strict_overflow_p)
7447 {
7448   value_range *vr0, *vr1;
7449 
7450   vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
7451   vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
7452 
7453   tree res = NULL_TREE;
7454   if (vr0 && vr1)
7455     res = compare_ranges (code, vr0, vr1, strict_overflow_p);
7456   if (!res && vr0)
7457     res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
7458   if (!res && vr1)
7459     res = (compare_range_with_value
7460 	    (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
7461   return res;
7462 }
7463 
7464 /* Helper function for vrp_evaluate_conditional_warnv. */
7465 
7466 static tree
7467 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
7468 					 tree op1, bool use_equiv_p,
7469 					 bool *strict_overflow_p, bool *only_ranges)
7470 {
7471   tree ret;
7472   if (only_ranges)
7473     *only_ranges = true;
7474 
7475   /* We only deal with integral and pointer types.  */
7476   if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
7477       && !POINTER_TYPE_P (TREE_TYPE (op0)))
7478     return NULL_TREE;
7479 
7480   if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
7481 	       (code, op0, op1, strict_overflow_p)))
7482     return ret;
7483   if (only_ranges)
7484     *only_ranges = false;
7485   /* Do not use compare_names during propagation, it's quadratic.  */
7486   if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
7487       && use_equiv_p)
7488     return compare_names (code, op0, op1, strict_overflow_p);
7489   else if (TREE_CODE (op0) == SSA_NAME)
7490     return compare_name_with_value (code, op0, op1,
7491 				    strict_overflow_p, use_equiv_p);
7492   else if (TREE_CODE (op1) == SSA_NAME)
7493     return compare_name_with_value (swap_tree_comparison (code), op1, op0,
7494 				    strict_overflow_p, use_equiv_p);
7495   return NULL_TREE;
7496 }
7497 
7498 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
7499    information.  Return NULL if the conditional cannot be evaluated.
7500    The ranges of all the names equivalent with the operands in COND
7501    will be used when trying to compute the value.  If the result is
7502    based on undefined signed overflow, issue a warning if
7503    appropriate.  */
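/* For instance, if OP0 is an unsigned variable whose only known range is
   the full range of its type, a test like OP0 >= 0 folds to true, and
   -Wtype-limits reports it as always true due to the limited range of
   the data type.  */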
7504 
7505 static tree
7506 vrp_evaluate_conditional (tree_code code, tree op0, tree op1, gimple *stmt)
7507 {
7508   bool sop;
7509   tree ret;
7510   bool only_ranges;
7511 
7512   /* Some passes and foldings leak constants with overflow flag set
7513      into the IL.  Avoid doing wrong things with these and bail out.  */
7514   if ((TREE_CODE (op0) == INTEGER_CST
7515        && TREE_OVERFLOW (op0))
7516       || (TREE_CODE (op1) == INTEGER_CST
7517 	  && TREE_OVERFLOW (op1)))
7518     return NULL_TREE;
7519 
7520   sop = false;
7521   ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
7522   						 &only_ranges);
7523 
7524   if (ret && sop)
7525     {
7526       enum warn_strict_overflow_code wc;
7527       const char* warnmsg;
7528 
7529       if (is_gimple_min_invariant (ret))
7530 	{
7531 	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
7532 	  warnmsg = G_("assuming signed overflow does not occur when "
7533 		       "simplifying conditional to constant");
7534 	}
7535       else
7536 	{
7537 	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
7538 	  warnmsg = G_("assuming signed overflow does not occur when "
7539 		       "simplifying conditional");
7540 	}
7541 
7542       if (issue_strict_overflow_warning (wc))
7543 	{
7544 	  location_t location;
7545 
7546 	  if (!gimple_has_location (stmt))
7547 	    location = input_location;
7548 	  else
7549 	    location = gimple_location (stmt);
7550 	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
7551 	}
7552     }
7553 
7554   if (warn_type_limits
7555       && ret && only_ranges
7556       && TREE_CODE_CLASS (code) == tcc_comparison
7557       && TREE_CODE (op0) == SSA_NAME)
7558     {
7559       /* If the comparison is being folded and the operand on the LHS
7560 	 is being compared against a constant value that is outside of
7561 	 the natural range of OP0's type, then the predicate will
7562 	 always fold regardless of the value of OP0.  If -Wtype-limits
7563 	 was specified, emit a warning.  */
7564       tree type = TREE_TYPE (op0);
7565       value_range *vr0 = get_value_range (op0);
7566 
7567       if (vr0->type == VR_RANGE
7568 	  && INTEGRAL_TYPE_P (type)
7569 	  && vrp_val_is_min (vr0->min)
7570 	  && vrp_val_is_max (vr0->max)
7571 	  && is_gimple_min_invariant (op1))
7572 	{
7573 	  location_t location;
7574 
7575 	  if (!gimple_has_location (stmt))
7576 	    location = input_location;
7577 	  else
7578 	    location = gimple_location (stmt);
7579 
7580 	  warning_at (location, OPT_Wtype_limits,
7581 		      integer_zerop (ret)
7582 		      ? G_("comparison always false "
7583                            "due to limited range of data type")
7584 		      : G_("comparison always true "
7585                            "due to limited range of data type"));
7586 	}
7587     }
7588 
7589   return ret;
7590 }
7591 
7592 
7593 /* Visit conditional statement STMT.  If we can determine which edge
7594    will be taken out of STMT's basic block, record it in
7595    *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
7596    SSA_PROP_VARYING.  */
7597 
7598 static enum ssa_prop_result
7599 vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
7600 {
7601   tree val;
7602   bool sop;
7603 
7604   *taken_edge_p = NULL;
7605 
7606   if (dump_file && (dump_flags & TDF_DETAILS))
7607     {
7608       tree use;
7609       ssa_op_iter i;
7610 
7611       fprintf (dump_file, "\nVisiting conditional with predicate: ");
7612       print_gimple_stmt (dump_file, stmt, 0, 0);
7613       fprintf (dump_file, "\nWith known ranges\n");
7614 
7615       FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7616 	{
7617 	  fprintf (dump_file, "\t");
7618 	  print_generic_expr (dump_file, use, 0);
7619 	  fprintf (dump_file, ": ");
7620 	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
7621 	}
7622 
7623       fprintf (dump_file, "\n");
7624     }
7625 
7626   /* Compute the value of the predicate COND by checking the known
7627      ranges of each of its operands.
7628 
7629      Note that we cannot evaluate all the equivalent ranges here
7630      because those ranges may not yet be final and with the current
7631      propagation strategy, we cannot determine when the value ranges
7632      of the names in the equivalence set have changed.
7633 
7634      For instance, given the following code fragment
7635 
7636         i_5 = PHI <8, i_13>
7637 	...
7638      	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7639 	if (i_14 == 1)
7640 	  ...
7641 
7642      Assume that on the first visit to i_14, i_5 has the temporary
7643      range [8, 8] because the second argument to the PHI function is
7644      not yet executable.  We derive the range ~[0, 0] for i_14 and the
7645      equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
7646      the first time, since i_14 is equivalent to the range [8, 8], we
7647      determine that the predicate is always false.
7648 
7649      On the next round of propagation, i_13 is determined to be
7650      VARYING, which causes i_5 to drop down to VARYING.  So, another
7651      visit to i_14 is scheduled.  In this second visit, we compute the
7652      exact same range and equivalence set for i_14, namely ~[0, 0] and
7653      { i_5 }.  But we did not have the previous range for i_5
7654      registered, so vrp_visit_assignment thinks that the range for
7655      i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
7656      is not visited again, which stops propagation from visiting
7657      statements in the THEN clause of that if().
7658 
7659      To properly fix this we would need to keep the previous range
7660      value for the names in the equivalence set.  This way we would've
7661      discovered that from one visit to the other i_5 changed from
7662      range [8, 8] to VR_VARYING.
7663 
7664      However, fixing this apparent limitation may not be worth the
7665      additional checking.  Testing on several code bases (GCC, DLV,
7666      MICO, TRAMP3D and SPEC2000) showed that doing this results in
7667      4 more predicates folded in SPEC.  */
7668   sop = false;
7669 
7670   val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7671 						 gimple_cond_lhs (stmt),
7672 						 gimple_cond_rhs (stmt),
7673 						 false, &sop, NULL);
7674   if (val)
7675     {
7676       if (!sop)
7677 	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
7678       else
7679 	{
7680 	  if (dump_file && (dump_flags & TDF_DETAILS))
7681 	    fprintf (dump_file,
7682 		     "\nIgnoring predicate evaluation because "
7683 		     "it assumes that signed overflow is undefined");
7684 	  val = NULL_TREE;
7685 	}
7686     }
7687 
7688   if (dump_file && (dump_flags & TDF_DETAILS))
7689     {
7690       fprintf (dump_file, "\nPredicate evaluates to: ");
7691       if (val == NULL_TREE)
7692 	fprintf (dump_file, "DON'T KNOW\n");
7693       else
7694 	print_generic_stmt (dump_file, val, 0);
7695     }
7696 
7697   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
7698 }
7699 
7700 /* Search the case label vector of switch statement STMT for the index *IDX
7701    of the CASE_LABEL that includes the value VAL.  The search is restricted
7702    to the range [START_IDX, n - 1] where n is the number of labels.
7703 
7704    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7705    returned.
7706 
7707    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7708    it is placed in IDX and false is returned.
7709 
7710    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
7711    returned. */
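/* For example, if the labels from START_IDX onwards cover the values
   3, 7 and 12, a query for VAL == 7 returns true with *IDX at that label,
   whereas VAL == 8 returns false with *IDX at the label for 12.  */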
7712 
7713 static bool
7714 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
7715 {
7716   size_t n = gimple_switch_num_labels (stmt);
7717   size_t low, high;
7718 
7719   /* Find case label for minimum of the value range or the next one.
7720      At each iteration we are searching in [low, high - 1]. */
7721 
7722   for (low = start_idx, high = n; high != low; )
7723     {
7724       tree t;
7725       int cmp;
7726       /* Note that i != high, so we never ask for n. */
7727       size_t i = (high + low) / 2;
7728       t = gimple_switch_label (stmt, i);
7729 
7730       /* Cache the result of comparing CASE_LOW and val.  */
7731       cmp = tree_int_cst_compare (CASE_LOW (t), val);
7732 
7733       if (cmp == 0)
7734 	{
7735 	  /* Ranges cannot be empty. */
7736 	  *idx = i;
7737 	  return true;
7738 	}
7739       else if (cmp > 0)
7740         high = i;
7741       else
7742 	{
7743 	  low = i + 1;
7744 	  if (CASE_HIGH (t) != NULL
7745 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7746 	    {
7747 	      *idx = i;
7748 	      return true;
7749 	    }
7750         }
7751     }
7752 
7753   *idx = high;
7754   return false;
7755 }
7756 
7757 /* Search the case label vector of STMT for the range of CASE_LABELs that is used
7758    for values between MIN and MAX. The first index is placed in MIN_IDX. The
7759    last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
7760    then MAX_IDX < MIN_IDX.
7761    Returns true if the default label is not needed. */
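/* For example, with case labels for 3, 5 and 7 and the range [4, 6], both
   *MIN_IDX and *MAX_IDX end up at the label for 5, and false is returned
   because the values 4 and 6 still need the default label.  */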
7762 
7763 static bool
7764 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
7765 		       size_t *max_idx)
7766 {
7767   size_t i, j;
7768   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
7769   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
7770 
7771   if (i == j
7772       && min_take_default
7773       && max_take_default)
7774     {
7775       /* Only the default case label reached.
7776          Return an empty range. */
7777       *min_idx = 1;
7778       *max_idx = 0;
7779       return false;
7780     }
7781   else
7782     {
7783       bool take_default = min_take_default || max_take_default;
7784       tree low, high;
7785       size_t k;
7786 
7787       if (max_take_default)
7788 	j--;
7789 
7790       /* If the case label range is continuous, we do not need
7791 	 the default case label.  Verify that.  */
7792       high = CASE_LOW (gimple_switch_label (stmt, i));
7793       if (CASE_HIGH (gimple_switch_label (stmt, i)))
7794 	high = CASE_HIGH (gimple_switch_label (stmt, i));
7795       for (k = i + 1; k <= j; ++k)
7796 	{
7797 	  low = CASE_LOW (gimple_switch_label (stmt, k));
7798 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
7799 	    {
7800 	      take_default = true;
7801 	      break;
7802 	    }
7803 	  high = low;
7804 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
7805 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
7806 	}
7807 
7808       *min_idx = i;
7809       *max_idx = j;
7810       return !take_default;
7811     }
7812 }
7813 
7814 /* Search the case label vector of STMT for the ranges of CASE_LABELs that are
7815    used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
7816    MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
7817    Returns true if the default label is not needed.  */
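/* E.g. for the anti-range ~[4, 6] the interesting labels are those whose
   values lie below 4 plus those above 6, so two index ranges are returned
   and the default label is always kept (the function returns false for
   anti-ranges).  */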
7818 
7819 static bool
7820 find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
7821 			size_t *max_idx1, size_t *min_idx2,
7822 			size_t *max_idx2)
7823 {
7824   size_t i, j, k, l;
7825   unsigned int n = gimple_switch_num_labels (stmt);
7826   bool take_default;
7827   tree case_low, case_high;
7828   tree min = vr->min, max = vr->max;
7829 
7830   gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
7831 
7832   take_default = !find_case_label_range (stmt, min, max, &i, &j);
7833 
7834   /* Set second range to empty.  */
7835   *min_idx2 = 1;
7836   *max_idx2 = 0;
7837 
7838   if (vr->type == VR_RANGE)
7839     {
7840       *min_idx1 = i;
7841       *max_idx1 = j;
7842       return !take_default;
7843     }
7844 
7845   /* Set first range to all case labels.  */
7846   *min_idx1 = 1;
7847   *max_idx1 = n - 1;
7848 
7849   if (i > j)
7850     return false;
7851 
7852   /* Make sure all the values of case labels [i , j] are contained in
7853      range [MIN, MAX].  */
7854   case_low = CASE_LOW (gimple_switch_label (stmt, i));
7855   case_high = CASE_HIGH (gimple_switch_label (stmt, j));
7856   if (tree_int_cst_compare (case_low, min) < 0)
7857     i += 1;
7858   if (case_high != NULL_TREE
7859       && tree_int_cst_compare (max, case_high) < 0)
7860     j -= 1;
7861 
7862   if (i > j)
7863     return false;
7864 
7865   /* If the range spans case labels [i, j], the corresponding anti-range spans
7866      the labels [1, i - 1] and [j + 1, n - 1].  */
7867   k = j + 1;
7868   l = n - 1;
7869   if (k > l)
7870     {
7871       k = 1;
7872       l = 0;
7873     }
7874 
7875   j = i - 1;
7876   i = 1;
7877   if (i > j)
7878     {
7879       i = k;
7880       j = l;
7881       k = 1;
7882       l = 0;
7883     }
7884 
7885   *min_idx1 = i;
7886   *max_idx1 = j;
7887   *min_idx2 = k;
7888   *max_idx2 = l;
7889   return false;
7890 }
7891 
7892 /* Visit switch statement STMT.  If we can determine which edge
7893    will be taken out of STMT's basic block, record it in
7894    *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
7895    SSA_PROP_VARYING.  */
7896 
7897 static enum ssa_prop_result
7898 vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
7899 {
7900   tree op, val;
7901   value_range *vr;
7902   size_t i = 0, j = 0, k, l;
7903   bool take_default;
7904 
7905   *taken_edge_p = NULL;
7906   op = gimple_switch_index (stmt);
7907   if (TREE_CODE (op) != SSA_NAME)
7908     return SSA_PROP_VARYING;
7909 
7910   vr = get_value_range (op);
7911   if (dump_file && (dump_flags & TDF_DETAILS))
7912     {
7913       fprintf (dump_file, "\nVisiting switch expression with operand ");
7914       print_generic_expr (dump_file, op, 0);
7915       fprintf (dump_file, " with known range ");
7916       dump_value_range (dump_file, vr);
7917       fprintf (dump_file, "\n");
7918     }
7919 
7920   if ((vr->type != VR_RANGE
7921        && vr->type != VR_ANTI_RANGE)
7922       || symbolic_range_p (vr))
7923     return SSA_PROP_VARYING;
7924 
7925   /* Find the single edge that is taken from the switch expression.  */
7926   take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
7927 
7928   /* Check if the range spans no CASE_LABEL.  If so, we only reach the
7929      default label.  */
7930   if (j < i)
7931     {
7932       gcc_assert (take_default);
7933       val = gimple_switch_default_label (stmt);
7934     }
7935   else
7936     {
7937       /* Check if labels with index i to j and maybe the default label
7938 	 are all reaching the same label.  */
7939 
7940       val = gimple_switch_label (stmt, i);
7941       if (take_default
7942 	  && CASE_LABEL (gimple_switch_default_label (stmt))
7943 	  != CASE_LABEL (val))
7944 	{
7945 	  if (dump_file && (dump_flags & TDF_DETAILS))
7946 	    fprintf (dump_file, "  not a single destination for this "
7947 		     "range\n");
7948           return SSA_PROP_VARYING;
7949 	}
7950       for (++i; i <= j; ++i)
7951         {
7952           if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7953 	    {
7954 	      if (dump_file && (dump_flags & TDF_DETAILS))
7955 		fprintf (dump_file, "  not a single destination for this "
7956 			 "range\n");
7957 	      return SSA_PROP_VARYING;
7958 	    }
7959         }
7960       for (; k <= l; ++k)
7961         {
7962           if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7963 	    {
7964 	      if (dump_file && (dump_flags & TDF_DETAILS))
7965 		fprintf (dump_file, "  not a single destination for this "
7966 			 "range\n");
7967 	      return SSA_PROP_VARYING;
7968 	    }
7969         }
7970     }
7971 
7972   *taken_edge_p = find_edge (gimple_bb (stmt),
7973 			     label_to_block (CASE_LABEL (val)));
7974 
7975   if (dump_file && (dump_flags & TDF_DETAILS))
7976     {
7977       fprintf (dump_file, "  will take edge to ");
7978       print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7979     }
7980 
7981   return SSA_PROP_INTERESTING;
7982 }
7983 
7984 
7985 /* Evaluate statement STMT.  If the statement produces a useful range,
7986    return SSA_PROP_INTERESTING and record the SSA name with the
7987    interesting range into *OUTPUT_P.
7988 
7989    If STMT is a conditional branch and we can determine its truth
7990    value, the taken edge is recorded in *TAKEN_EDGE_P.
7991 
7992    If STMT produces a varying value, return SSA_PROP_VARYING.  */
7993 
7994 static enum ssa_prop_result
7995 vrp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
7996 {
7997   if (dump_file && (dump_flags & TDF_DETAILS))
7998     {
7999       fprintf (dump_file, "\nVisiting statement:\n");
8000       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
8001     }
8002 
8003   if (!stmt_interesting_for_vrp (stmt))
8004     gcc_assert (stmt_ends_bb_p (stmt));
8005   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
8006     return vrp_visit_assignment_or_call (stmt, output_p);
8007   else if (gimple_code (stmt) == GIMPLE_COND)
8008     return vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
8009   else if (gimple_code (stmt) == GIMPLE_SWITCH)
8010     return vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
8011 
8012   /* All other statements produce nothing of interest for VRP, so mark
8013      their outputs varying and prevent further simulation.  */
8014   set_defs_to_varying (stmt);
8015 
8016   return SSA_PROP_VARYING;
8017 }
8018 
8019 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8020    { VR1TYPE, VR1MIN, VR1MAX } and store the result
8021    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
8022    possible such range.  The resulting range is not canonicalized.  */
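/* For illustration, assuming a 32-bit signed integer type:

     [0, 10]        union  [20, 30]        -> VR_RANGE [0, 30]
     [INT_MIN, 10]  union  [20, INT_MAX]   -> VR_ANTI_RANGE ~[11, 19]
     [0, 10]        union  ~[0, 10]        -> VR_VARYING (give up)  */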
8023 
8024 static void
8025 union_ranges (enum value_range_type *vr0type,
8026 	      tree *vr0min, tree *vr0max,
8027 	      enum value_range_type vr1type,
8028 	      tree vr1min, tree vr1max)
8029 {
8030   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8031   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
8032 
8033   /* [] is vr0, () is vr1 in the following classification comments.  */
8034   if (mineq && maxeq)
8035     {
8036       /* [(  )] */
8037       if (*vr0type == vr1type)
8038 	/* Nothing to do for equal ranges.  */
8039 	;
8040       else if ((*vr0type == VR_RANGE
8041 		&& vr1type == VR_ANTI_RANGE)
8042 	       || (*vr0type == VR_ANTI_RANGE
8043 		   && vr1type == VR_RANGE))
8044 	{
8045 	  /* For anti-range with range union the result is varying.  */
8046 	  goto give_up;
8047 	}
8048       else
8049 	gcc_unreachable ();
8050     }
8051   else if (operand_less_p (*vr0max, vr1min) == 1
8052 	   || operand_less_p (vr1max, *vr0min) == 1)
8053     {
8054       /* [ ] ( ) or ( ) [ ]
8055 	 If the ranges have an empty intersection, the result of the union
8056 	 operation is the anti-range, or, if both are anti-ranges,
8057 	 it covers all.  */
8058       if (*vr0type == VR_ANTI_RANGE
8059 	  && vr1type == VR_ANTI_RANGE)
8060 	goto give_up;
8061       else if (*vr0type == VR_ANTI_RANGE
8062 	       && vr1type == VR_RANGE)
8063 	;
8064       else if (*vr0type == VR_RANGE
8065 	       && vr1type == VR_ANTI_RANGE)
8066 	{
8067 	  *vr0type = vr1type;
8068 	  *vr0min = vr1min;
8069 	  *vr0max = vr1max;
8070 	}
8071       else if (*vr0type == VR_RANGE
8072 	       && vr1type == VR_RANGE)
8073 	{
8074 	  /* The result is the convex hull of both ranges.  */
8075 	  if (operand_less_p (*vr0max, vr1min) == 1)
8076 	    {
8077 	      /* If the result can be an anti-range, create one.  */
8078 	      if (TREE_CODE (*vr0max) == INTEGER_CST
8079 		  && TREE_CODE (vr1min) == INTEGER_CST
8080 		  && vrp_val_is_min (*vr0min)
8081 		  && vrp_val_is_max (vr1max))
8082 		{
8083 		  tree min = int_const_binop (PLUS_EXPR,
8084 					      *vr0max,
8085 					      build_int_cst (TREE_TYPE (*vr0max), 1));
8086 		  tree max = int_const_binop (MINUS_EXPR,
8087 					      vr1min,
8088 					      build_int_cst (TREE_TYPE (vr1min), 1));
8089 		  if (!operand_less_p (max, min))
8090 		    {
8091 		      *vr0type = VR_ANTI_RANGE;
8092 		      *vr0min = min;
8093 		      *vr0max = max;
8094 		    }
8095 		  else
8096 		    *vr0max = vr1max;
8097 		}
8098 	      else
8099 		*vr0max = vr1max;
8100 	    }
8101 	  else
8102 	    {
8103 	      /* If the result can be an anti-range, create one.  */
8104 	      if (TREE_CODE (vr1max) == INTEGER_CST
8105 		  && TREE_CODE (*vr0min) == INTEGER_CST
8106 		  && vrp_val_is_min (vr1min)
8107 		  && vrp_val_is_max (*vr0max))
8108 		{
8109 		  tree min = int_const_binop (PLUS_EXPR,
8110 					      vr1max,
8111 					      build_int_cst (TREE_TYPE (vr1max), 1));
8112 		  tree max = int_const_binop (MINUS_EXPR,
8113 					      *vr0min,
8114 					      build_int_cst (TREE_TYPE (*vr0min), 1));
8115 		  if (!operand_less_p (max, min))
8116 		    {
8117 		      *vr0type = VR_ANTI_RANGE;
8118 		      *vr0min = min;
8119 		      *vr0max = max;
8120 		    }
8121 		  else
8122 		    *vr0min = vr1min;
8123 		}
8124 	      else
8125 		*vr0min = vr1min;
8126 	    }
8127 	}
8128       else
8129 	gcc_unreachable ();
8130     }
8131   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8132 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8133     {
8134       /* [ (  ) ] or [(  ) ] or [ (  )] */
8135       if (*vr0type == VR_RANGE
8136 	  && vr1type == VR_RANGE)
8137 	;
8138       else if (*vr0type == VR_ANTI_RANGE
8139 	       && vr1type == VR_ANTI_RANGE)
8140 	{
8141 	  *vr0type = vr1type;
8142 	  *vr0min = vr1min;
8143 	  *vr0max = vr1max;
8144 	}
8145       else if (*vr0type == VR_ANTI_RANGE
8146 	       && vr1type == VR_RANGE)
8147 	{
8148 	  /* Arbitrarily choose the right or left gap.  */
8149 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
8150 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8151 				       build_int_cst (TREE_TYPE (vr1min), 1));
8152 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
8153 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8154 				       build_int_cst (TREE_TYPE (vr1max), 1));
8155 	  else
8156 	    goto give_up;
8157 	}
8158       else if (*vr0type == VR_RANGE
8159 	       && vr1type == VR_ANTI_RANGE)
8160 	/* The result covers everything.  */
8161 	goto give_up;
8162       else
8163 	gcc_unreachable ();
8164     }
8165   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8166 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8167     {
8168       /* ( [  ] ) or ([  ] ) or ( [  ]) */
8169       if (*vr0type == VR_RANGE
8170 	  && vr1type == VR_RANGE)
8171 	{
8172 	  *vr0type = vr1type;
8173 	  *vr0min = vr1min;
8174 	  *vr0max = vr1max;
8175 	}
8176       else if (*vr0type == VR_ANTI_RANGE
8177 	       && vr1type == VR_ANTI_RANGE)
8178 	;
8179       else if (*vr0type == VR_RANGE
8180 	       && vr1type == VR_ANTI_RANGE)
8181 	{
8182 	  *vr0type = VR_ANTI_RANGE;
8183 	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
8184 	    {
8185 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8186 					 build_int_cst (TREE_TYPE (*vr0min), 1));
8187 	      *vr0min = vr1min;
8188 	    }
8189 	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
8190 	    {
8191 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8192 					 build_int_cst (TREE_TYPE (*vr0max), 1));
8193 	      *vr0max = vr1max;
8194 	    }
8195 	  else
8196 	    goto give_up;
8197 	}
8198       else if (*vr0type == VR_ANTI_RANGE
8199 	       && vr1type == VR_RANGE)
8200 	/* The result covers everything.  */
8201 	goto give_up;
8202       else
8203 	gcc_unreachable ();
8204     }
8205   else if ((operand_less_p (vr1min, *vr0max) == 1
8206 	    || operand_equal_p (vr1min, *vr0max, 0))
8207 	   && operand_less_p (*vr0min, vr1min) == 1
8208 	   && operand_less_p (*vr0max, vr1max) == 1)
8209     {
8210       /* [  (  ]  ) or [   ](   ) */
8211       if (*vr0type == VR_RANGE
8212 	  && vr1type == VR_RANGE)
8213 	*vr0max = vr1max;
8214       else if (*vr0type == VR_ANTI_RANGE
8215 	       && vr1type == VR_ANTI_RANGE)
8216 	*vr0min = vr1min;
8217       else if (*vr0type == VR_ANTI_RANGE
8218 	       && vr1type == VR_RANGE)
8219 	{
8220 	  if (TREE_CODE (vr1min) == INTEGER_CST)
8221 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8222 				       build_int_cst (TREE_TYPE (vr1min), 1));
8223 	  else
8224 	    goto give_up;
8225 	}
8226       else if (*vr0type == VR_RANGE
8227 	       && vr1type == VR_ANTI_RANGE)
8228 	{
8229 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
8230 	    {
8231 	      *vr0type = vr1type;
8232 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8233 					 build_int_cst (TREE_TYPE (*vr0max), 1));
8234 	      *vr0max = vr1max;
8235 	    }
8236 	  else
8237 	    goto give_up;
8238 	}
8239       else
8240 	gcc_unreachable ();
8241     }
8242   else if ((operand_less_p (*vr0min, vr1max) == 1
8243 	    || operand_equal_p (*vr0min, vr1max, 0))
8244 	   && operand_less_p (vr1min, *vr0min) == 1
8245 	   && operand_less_p (vr1max, *vr0max) == 1)
8246     {
8247       /* (  [  )  ] or (   )[   ] */
8248       if (*vr0type == VR_RANGE
8249 	  && vr1type == VR_RANGE)
8250 	*vr0min = vr1min;
8251       else if (*vr0type == VR_ANTI_RANGE
8252 	       && vr1type == VR_ANTI_RANGE)
8253 	*vr0max = vr1max;
8254       else if (*vr0type == VR_ANTI_RANGE
8255 	       && vr1type == VR_RANGE)
8256 	{
8257 	  if (TREE_CODE (vr1max) == INTEGER_CST)
8258 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8259 				       build_int_cst (TREE_TYPE (vr1max), 1));
8260 	  else
8261 	    goto give_up;
8262 	}
8263       else if (*vr0type == VR_RANGE
8264 	       && vr1type == VR_ANTI_RANGE)
8265 	{
8266 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
8267 	    {
8268 	      *vr0type = vr1type;
8269 	      *vr0min = vr1min;
8270 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8271 					 build_int_cst (TREE_TYPE (*vr0min), 1));
8272 	    }
8273 	  else
8274 	    goto give_up;
8275 	}
8276       else
8277 	gcc_unreachable ();
8278     }
8279   else
8280     goto give_up;
8281 
8282   return;
8283 
8284 give_up:
8285   *vr0type = VR_VARYING;
8286   *vr0min = NULL_TREE;
8287   *vr0max = NULL_TREE;
8288 }
8289 
8290 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
8291    { VR1TYPE, VR1MIN, VR1MAX } and store the result
8292    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
8293    possible such range.  The resulting range is not canonicalized.  */
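/* For illustration, assuming a 32-bit signed integer type:

     [0, 100]  intersect  [50, 200]   -> VR_RANGE [50, 100]
     [0, 10]   intersect  [20, 30]    -> VR_UNDEFINED (empty)
     [0, 10]   intersect  ~[0, 10]    -> VR_UNDEFINED (empty)  */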
8294 
8295 static void
8296 intersect_ranges (enum value_range_type *vr0type,
8297 		  tree *vr0min, tree *vr0max,
8298 		  enum value_range_type vr1type,
8299 		  tree vr1min, tree vr1max)
8300 {
8301   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
8302   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
8303 
8304   /* [] is vr0, () is vr1 in the following classification comments.  */
8305   if (mineq && maxeq)
8306     {
8307       /* [(  )] */
8308       if (*vr0type == vr1type)
8309 	/* Nothing to do for equal ranges.  */
8310 	;
8311       else if ((*vr0type == VR_RANGE
8312 		&& vr1type == VR_ANTI_RANGE)
8313 	       || (*vr0type == VR_ANTI_RANGE
8314 		   && vr1type == VR_RANGE))
8315 	{
8316 	  /* For anti-range with range intersection the result is empty.  */
8317 	  *vr0type = VR_UNDEFINED;
8318 	  *vr0min = NULL_TREE;
8319 	  *vr0max = NULL_TREE;
8320 	}
8321       else
8322 	gcc_unreachable ();
8323     }
8324   else if (operand_less_p (*vr0max, vr1min) == 1
8325 	   || operand_less_p (vr1max, *vr0min) == 1)
8326     {
8327       /* [ ] ( ) or ( ) [ ]
8328 	 If the ranges have an empty intersection, the result of the
8329 	 intersect operation is the range when intersecting an
8330 	 anti-range with a range, or empty when intersecting two ranges.  */
8331       if (*vr0type == VR_RANGE
8332 	  && vr1type == VR_ANTI_RANGE)
8333 	;
8334       else if (*vr0type == VR_ANTI_RANGE
8335 	       && vr1type == VR_RANGE)
8336 	{
8337 	  *vr0type = vr1type;
8338 	  *vr0min = vr1min;
8339 	  *vr0max = vr1max;
8340 	}
8341       else if (*vr0type == VR_RANGE
8342 	       && vr1type == VR_RANGE)
8343 	{
8344 	  *vr0type = VR_UNDEFINED;
8345 	  *vr0min = NULL_TREE;
8346 	  *vr0max = NULL_TREE;
8347 	}
8348       else if (*vr0type == VR_ANTI_RANGE
8349 	       && vr1type == VR_ANTI_RANGE)
8350 	{
8351 	  /* If the anti-ranges are adjacent to each other merge them.  */
8352 	  if (TREE_CODE (*vr0max) == INTEGER_CST
8353 	      && TREE_CODE (vr1min) == INTEGER_CST
8354 	      && operand_less_p (*vr0max, vr1min) == 1
8355 	      && integer_onep (int_const_binop (MINUS_EXPR,
8356 						vr1min, *vr0max)))
8357 	    *vr0max = vr1max;
8358 	  else if (TREE_CODE (vr1max) == INTEGER_CST
8359 		   && TREE_CODE (*vr0min) == INTEGER_CST
8360 		   && operand_less_p (vr1max, *vr0min) == 1
8361 		   && integer_onep (int_const_binop (MINUS_EXPR,
8362 						     *vr0min, vr1max)))
8363 	    *vr0min = vr1min;
8364 	  /* Else arbitrarily take VR0.  */
8365 	}
8366     }
8367   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
8368 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
8369     {
8370       /* [ (  ) ] or [(  ) ] or [ (  )] */
8371       if (*vr0type == VR_RANGE
8372 	  && vr1type == VR_RANGE)
8373 	{
8374 	  /* If both are ranges the result is the inner one.  */
8375 	  *vr0type = vr1type;
8376 	  *vr0min = vr1min;
8377 	  *vr0max = vr1max;
8378 	}
8379       else if (*vr0type == VR_RANGE
8380 	       && vr1type == VR_ANTI_RANGE)
8381 	{
8382 	  /* Choose the right gap if the left one is empty.  */
8383 	  if (mineq)
8384 	    {
8385 	      if (TREE_CODE (vr1max) == INTEGER_CST)
8386 		*vr0min = int_const_binop (PLUS_EXPR, vr1max,
8387 					   build_int_cst (TREE_TYPE (vr1max), 1));
8388 	      else
8389 		*vr0min = vr1max;
8390 	    }
8391 	  /* Choose the left gap if the right one is empty.  */
8392 	  else if (maxeq)
8393 	    {
8394 	      if (TREE_CODE (vr1min) == INTEGER_CST)
8395 		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
8396 					   build_int_cst (TREE_TYPE (vr1min), 1));
8397 	      else
8398 		*vr0max = vr1min;
8399 	    }
8400 	  /* Choose the anti-range if the range is effectively varying.  */
8401 	  else if (vrp_val_is_min (*vr0min)
8402 		   && vrp_val_is_max (*vr0max))
8403 	    {
8404 	      *vr0type = vr1type;
8405 	      *vr0min = vr1min;
8406 	      *vr0max = vr1max;
8407 	    }
8408 	  /* Else choose the range.  */
8409 	}
8410       else if (*vr0type == VR_ANTI_RANGE
8411 	       && vr1type == VR_ANTI_RANGE)
8412 	/* If both are anti-ranges the result is the outer one.  */
8413 	;
8414       else if (*vr0type == VR_ANTI_RANGE
8415 	       && vr1type == VR_RANGE)
8416 	{
8417 	  /* The intersection is empty.  */
8418 	  *vr0type = VR_UNDEFINED;
8419 	  *vr0min = NULL_TREE;
8420 	  *vr0max = NULL_TREE;
8421 	}
8422       else
8423 	gcc_unreachable ();
8424     }
8425   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
8426 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
8427     {
8428       /* ( [  ] ) or ([  ] ) or ( [  ]) */
8429       if (*vr0type == VR_RANGE
8430 	  && vr1type == VR_RANGE)
8431 	/* Choose the inner range.  */
8432 	;
8433       else if (*vr0type == VR_ANTI_RANGE
8434 	       && vr1type == VR_RANGE)
8435 	{
8436 	  /* Choose the right gap if the left is empty.  */
8437 	  if (mineq)
8438 	    {
8439 	      *vr0type = VR_RANGE;
8440 	      if (TREE_CODE (*vr0max) == INTEGER_CST)
8441 		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8442 					   build_int_cst (TREE_TYPE (*vr0max), 1));
8443 	      else
8444 		*vr0min = *vr0max;
8445 	      *vr0max = vr1max;
8446 	    }
8447 	  /* Choose the left gap if the right is empty.  */
8448 	  else if (maxeq)
8449 	    {
8450 	      *vr0type = VR_RANGE;
8451 	      if (TREE_CODE (*vr0min) == INTEGER_CST)
8452 		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8453 					   build_int_cst (TREE_TYPE (*vr0min), 1));
8454 	      else
8455 		*vr0max = *vr0min;
8456 	      *vr0min = vr1min;
8457 	    }
8458 	  /* Choose the anti-range if the range is effectively varying.  */
8459 	  else if (vrp_val_is_min (vr1min)
8460 		   && vrp_val_is_max (vr1max))
8461 	    ;
8462 	  /* Else choose the range.  */
8463 	  else
8464 	    {
8465 	      *vr0type = vr1type;
8466 	      *vr0min = vr1min;
8467 	      *vr0max = vr1max;
8468 	    }
8469 	}
8470       else if (*vr0type == VR_ANTI_RANGE
8471 	       && vr1type == VR_ANTI_RANGE)
8472 	{
8473 	  /* If both are anti-ranges the result is the outer one.  */
8474 	  *vr0type = vr1type;
8475 	  *vr0min = vr1min;
8476 	  *vr0max = vr1max;
8477 	}
8478       else if (vr1type == VR_ANTI_RANGE
8479 	       && *vr0type == VR_RANGE)
8480 	{
8481 	  /* The intersection is empty.  */
8482 	  *vr0type = VR_UNDEFINED;
8483 	  *vr0min = NULL_TREE;
8484 	  *vr0max = NULL_TREE;
8485 	}
8486       else
8487 	gcc_unreachable ();
8488     }
8489   else if ((operand_less_p (vr1min, *vr0max) == 1
8490 	    || operand_equal_p (vr1min, *vr0max, 0))
8491 	   && operand_less_p (*vr0min, vr1min) == 1)
8492     {
8493       /* [  (  ]  ) or [  ](  ) */
8494       if (*vr0type == VR_ANTI_RANGE
8495 	  && vr1type == VR_ANTI_RANGE)
8496 	*vr0max = vr1max;
8497       else if (*vr0type == VR_RANGE
8498 	       && vr1type == VR_RANGE)
8499 	*vr0min = vr1min;
8500       else if (*vr0type == VR_RANGE
8501 	       && vr1type == VR_ANTI_RANGE)
8502 	{
8503 	  if (TREE_CODE (vr1min) == INTEGER_CST)
8504 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
8505 				       build_int_cst (TREE_TYPE (vr1min), 1));
8506 	  else
8507 	    *vr0max = vr1min;
8508 	}
8509       else if (*vr0type == VR_ANTI_RANGE
8510 	       && vr1type == VR_RANGE)
8511 	{
8512 	  *vr0type = VR_RANGE;
8513 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
8514 	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
8515 				       build_int_cst (TREE_TYPE (*vr0max), 1));
8516 	  else
8517 	    *vr0min = *vr0max;
8518 	  *vr0max = vr1max;
8519 	}
8520       else
8521 	gcc_unreachable ();
8522     }
8523   else if ((operand_less_p (*vr0min, vr1max) == 1
8524 	    || operand_equal_p (*vr0min, vr1max, 0))
8525 	   && operand_less_p (vr1min, *vr0min) == 1)
8526     {
8527       /* (  [  )  ] or (  )[  ] */
8528       if (*vr0type == VR_ANTI_RANGE
8529 	  && vr1type == VR_ANTI_RANGE)
8530 	*vr0min = vr1min;
8531       else if (*vr0type == VR_RANGE
8532 	       && vr1type == VR_RANGE)
8533 	*vr0max = vr1max;
8534       else if (*vr0type == VR_RANGE
8535 	       && vr1type == VR_ANTI_RANGE)
8536 	{
8537 	  if (TREE_CODE (vr1max) == INTEGER_CST)
8538 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
8539 				       build_int_cst (TREE_TYPE (vr1max), 1));
8540 	  else
8541 	    *vr0min = vr1max;
8542 	}
8543       else if (*vr0type == VR_ANTI_RANGE
8544 	       && vr1type == VR_RANGE)
8545 	{
8546 	  *vr0type = VR_RANGE;
8547 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
8548 	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
8549 				       build_int_cst (TREE_TYPE (*vr0min), 1));
8550 	  else
8551 	    *vr0max = *vr0min;
8552 	  *vr0min = vr1min;
8553 	}
8554       else
8555 	gcc_unreachable ();
8556     }
8557 
8558   /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
8559      result for the intersection.  That's always a conservative
8560      correct estimate.  */
8561 
8562   return;
8563 }
8564 
8565 
8566 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8567    in *VR0.  This may not be the smallest possible such range.  */
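/* For instance, intersecting [0, 100] with VR_VARYING leaves [0, 100]
   untouched, while intersecting [0, 100] with [50, 200] narrows the
   result to [50, 100].  */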
8568 
8569 static void
8570 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
8571 {
8572   value_range saved;
8573 
8574   /* If either range is VR_VARYING the other one wins.  */
8575   if (vr1->type == VR_VARYING)
8576     return;
8577   if (vr0->type == VR_VARYING)
8578     {
8579       copy_value_range (vr0, vr1);
8580       return;
8581     }
8582 
8583   /* When either range is VR_UNDEFINED the resulting range is
8584      VR_UNDEFINED, too.  */
8585   if (vr0->type == VR_UNDEFINED)
8586     return;
8587   if (vr1->type == VR_UNDEFINED)
8588     {
8589       set_value_range_to_undefined (vr0);
8590       return;
8591     }
8592 
8593   /* Save the original vr0 so we can return it as conservative intersection
8594      result when our worker turns things to varying.  */
8595   saved = *vr0;
8596   intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8597 		    vr1->type, vr1->min, vr1->max);
8598   /* Make sure to canonicalize the result though as the inversion of a
8599      VR_RANGE can still be a VR_RANGE.  */
8600   set_and_canonicalize_value_range (vr0, vr0->type,
8601 				    vr0->min, vr0->max, vr0->equiv);
8602   /* If that failed, use the saved original VR0.  */
8603   if (vr0->type == VR_VARYING)
8604     {
8605       *vr0 = saved;
8606       return;
8607     }
8608   /* If the result is VR_UNDEFINED there is no need to mess with
8609      the equivalencies.  */
8610   if (vr0->type == VR_UNDEFINED)
8611     return;
8612 
8613   /* The resulting set of equivalences for range intersection is the union of
8614      the two sets.  */
8615   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8616     bitmap_ior_into (vr0->equiv, vr1->equiv);
8617   else if (vr1->equiv && !vr0->equiv)
8618     {
8619       vr0->equiv = BITMAP_ALLOC (NULL);
8620       bitmap_copy (vr0->equiv, vr1->equiv);
8621     }
8622 }
8623 
8624 static void
8625 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
8626 {
8627   if (dump_file && (dump_flags & TDF_DETAILS))
8628     {
8629       fprintf (dump_file, "Intersecting\n  ");
8630       dump_value_range (dump_file, vr0);
8631       fprintf (dump_file, "\nand\n  ");
8632       dump_value_range (dump_file, vr1);
8633       fprintf (dump_file, "\n");
8634     }
8635   vrp_intersect_ranges_1 (vr0, vr1);
8636   if (dump_file && (dump_flags & TDF_DETAILS))
8637     {
8638       fprintf (dump_file, "to\n  ");
8639       dump_value_range (dump_file, vr0);
8640       fprintf (dump_file, "\n");
8641     }
8642 }
8643 
8644 /* Meet operation for value ranges.  Given two value ranges VR0 and
8645    VR1, store in VR0 a range that contains both VR0 and VR1.  This
8646    may not be the smallest possible such range.  */
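/* For instance, meeting [1, 10] with [20, 30] yields [1, 30]; meeting
   any range with VR_UNDEFINED yields the other range, and meeting with
   VR_VARYING yields VR_VARYING.  */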
8647 
8648 static void
8649 vrp_meet_1 (value_range *vr0, value_range *vr1)
8650 {
8651   value_range saved;
8652 
8653   if (vr0->type == VR_UNDEFINED)
8654     {
8655       set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
8656       return;
8657     }
8658 
8659   if (vr1->type == VR_UNDEFINED)
8660     {
8661       /* VR0 already has the resulting range.  */
8662       return;
8663     }
8664 
8665   if (vr0->type == VR_VARYING)
8666     {
8667       /* Nothing to do.  VR0 already has the resulting range.  */
8668       return;
8669     }
8670 
8671   if (vr1->type == VR_VARYING)
8672     {
8673       set_value_range_to_varying (vr0);
8674       return;
8675     }
8676 
8677   saved = *vr0;
8678   union_ranges (&vr0->type, &vr0->min, &vr0->max,
8679 		vr1->type, vr1->min, vr1->max);
8680   if (vr0->type == VR_VARYING)
8681     {
8682       /* Failed to find an efficient meet.  Before giving up and setting
8683 	 the result to VARYING, see if we can at least derive a useful
8684 	 anti-range.  FIXME, all this nonsense about distinguishing
8685 	 anti-ranges from ranges is necessary because of the odd
8686 	 semantics of range_includes_zero_p and friends.  */
8687       if (((saved.type == VR_RANGE
8688 	    && range_includes_zero_p (saved.min, saved.max) == 0)
8689 	   || (saved.type == VR_ANTI_RANGE
8690 	       && range_includes_zero_p (saved.min, saved.max) == 1))
8691 	  && ((vr1->type == VR_RANGE
8692 	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
8693 	      || (vr1->type == VR_ANTI_RANGE
8694 		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
8695 	{
8696 	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
8697 
8698 	  /* Since this meet operation did not result from the meeting of
8699 	     two equivalent names, VR0 cannot have any equivalences.  */
8700 	  if (vr0->equiv)
8701 	    bitmap_clear (vr0->equiv);
8702 	  return;
8703 	}
8704 
8705       set_value_range_to_varying (vr0);
8706       return;
8707     }
8708   set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
8709 				    vr0->equiv);
8710   if (vr0->type == VR_VARYING)
8711     return;
8712 
8713   /* The resulting set of equivalences is always the intersection of
8714      the two sets.  */
8715   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8716     bitmap_and_into (vr0->equiv, vr1->equiv);
8717   else if (vr0->equiv && !vr1->equiv)
8718     bitmap_clear (vr0->equiv);
8719 }
8720 
8721 static void
8722 vrp_meet (value_range *vr0, value_range *vr1)
8723 {
8724   if (dump_file && (dump_flags & TDF_DETAILS))
8725     {
8726       fprintf (dump_file, "Meeting\n  ");
8727       dump_value_range (dump_file, vr0);
8728       fprintf (dump_file, "\nand\n  ");
8729       dump_value_range (dump_file, vr1);
8730       fprintf (dump_file, "\n");
8731     }
8732   vrp_meet_1 (vr0, vr1);
8733   if (dump_file && (dump_flags & TDF_DETAILS))
8734     {
8735       fprintf (dump_file, "to\n  ");
8736       dump_value_range (dump_file, vr0);
8737       fprintf (dump_file, "\n");
8738     }
8739 }
8740 
8741 
8742 /* Visit all arguments for PHI node PHI that flow through executable
8743    edges.  If a valid value range can be derived from all the incoming
8744    value ranges, set a new range for the LHS of PHI.  */
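/* For instance, for x_3 = PHI <x_1(2), x_2(3)> with both edges
   executable, x_1 in [0, 5] and x_2 in [10, 20], the LHS x_3 is given
   the met range [0, 20].  */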
8745 
8746 static enum ssa_prop_result
8747 vrp_visit_phi_node (gphi *phi)
8748 {
8749   size_t i;
8750   tree lhs = PHI_RESULT (phi);
8751   value_range *lhs_vr = get_value_range (lhs);
8752   value_range vr_result = VR_INITIALIZER;
8753   bool first = true;
8754   int edges, old_edges;
8755   struct loop *l;
8756 
8757   if (dump_file && (dump_flags & TDF_DETAILS))
8758     {
8759       fprintf (dump_file, "\nVisiting PHI node: ");
8760       print_gimple_stmt (dump_file, phi, 0, dump_flags);
8761     }
8762 
8763   edges = 0;
8764   for (i = 0; i < gimple_phi_num_args (phi); i++)
8765     {
8766       edge e = gimple_phi_arg_edge (phi, i);
8767 
8768       if (dump_file && (dump_flags & TDF_DETAILS))
8769 	{
8770 	  fprintf (dump_file,
8771 	      "    Argument #%d (%d -> %d %sexecutable)\n",
8772 	      (int) i, e->src->index, e->dest->index,
8773 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8774 	}
8775 
8776       if (e->flags & EDGE_EXECUTABLE)
8777 	{
8778 	  tree arg = PHI_ARG_DEF (phi, i);
8779 	  value_range vr_arg;
8780 
8781 	  ++edges;
8782 
8783 	  if (TREE_CODE (arg) == SSA_NAME)
8784 	    {
8785 	      vr_arg = *(get_value_range (arg));
8786 	      /* Do not allow equivalences or symbolic ranges to leak in from
8787 		 backedges.  That creates invalid equivalencies.
8788 		 See PR53465 and PR54767.  */
8789 	      if (e->flags & EDGE_DFS_BACK)
8790 		{
8791 		  if (vr_arg.type == VR_RANGE
8792 		      || vr_arg.type == VR_ANTI_RANGE)
8793 		    {
8794 		      vr_arg.equiv = NULL;
8795 		      if (symbolic_range_p (&vr_arg))
8796 			{
8797 			  vr_arg.type = VR_VARYING;
8798 			  vr_arg.min = NULL_TREE;
8799 			  vr_arg.max = NULL_TREE;
8800 			}
8801 		    }
8802 		}
8803 	      else
8804 		{
8805 		  /* If the non-backedge argument's range is VR_VARYING then
8806 		     we can still try recording a simple equivalence.  */
8807 		  if (vr_arg.type == VR_VARYING)
8808 		    {
8809 		      vr_arg.type = VR_RANGE;
8810 		      vr_arg.min = arg;
8811 		      vr_arg.max = arg;
8812 		      vr_arg.equiv = NULL;
8813 		    }
8814 		}
8815 	    }
8816 	  else
8817 	    {
8818 	      if (TREE_OVERFLOW_P (arg))
8819 		arg = drop_tree_overflow (arg);
8820 
8821 	      vr_arg.type = VR_RANGE;
8822 	      vr_arg.min = arg;
8823 	      vr_arg.max = arg;
8824 	      vr_arg.equiv = NULL;
8825 	    }
8826 
8827 	  if (dump_file && (dump_flags & TDF_DETAILS))
8828 	    {
8829 	      fprintf (dump_file, "\t");
8830 	      print_generic_expr (dump_file, arg, dump_flags);
8831 	      fprintf (dump_file, ": ");
8832 	      dump_value_range (dump_file, &vr_arg);
8833 	      fprintf (dump_file, "\n");
8834 	    }
8835 
8836 	  if (first)
8837 	    copy_value_range (&vr_result, &vr_arg);
8838 	  else
8839 	    vrp_meet (&vr_result, &vr_arg);
8840 	  first = false;
8841 
8842 	  if (vr_result.type == VR_VARYING)
8843 	    break;
8844 	}
8845     }
8846 
8847   if (vr_result.type == VR_VARYING)
8848     goto varying;
8849   else if (vr_result.type == VR_UNDEFINED)
8850     goto update_range;
8851 
8852   old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
8853   vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
8854 
8855   /* To prevent infinite iterations in the algorithm, derive ranges
8856      when the new value is slightly bigger or smaller than the
8857      previous one.  We don't do this if we have seen a new executable
8858      edge; this helps us avoid an overflow infinity for conditionals
8859      which are not in a loop.  If the old value-range was VR_UNDEFINED
8860      use the updated range and iterate one more time.  */
8861   if (edges > 0
8862       && gimple_phi_num_args (phi) > 1
8863       && edges == old_edges
8864       && lhs_vr->type != VR_UNDEFINED)
8865     {
8866       /* Compare old and new ranges, fall back to varying if the
8867          values are not comparable.  */
8868       int cmp_min = compare_values (lhs_vr->min, vr_result.min);
8869       if (cmp_min == -2)
8870 	goto varying;
8871       int cmp_max = compare_values (lhs_vr->max, vr_result.max);
8872       if (cmp_max == -2)
8873 	goto varying;
8874 
8875       /* For a non-VR_RANGE result or for pointers fall back to varying if
8876 	 the range changed.  */
8877       if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
8878 	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
8879 	  && (cmp_min != 0 || cmp_max != 0))
8880 	goto varying;
8881 
8882       /* If the new minimum is larger than the previous one
8883 	 retain the old value.  If the new minimum value is smaller
8884 	 than the previous one and not -INF go all the way to -INF + 1.
8885 	 In the first case, to avoid infinite bouncing between different
8886 	 minimums, and in the other case to avoid iterating millions of
8887 	 times to reach -INF.  Going to -INF + 1 also lets the following
8888 	 iteration compute whether there will be any overflow, at the
8889 	 expense of one additional iteration.  */
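      /* For instance, if the previous result range was [0, 1] and this
	 iteration computed [0, 2] for a 32-bit int, the maximum is pushed
	 straight to INT_MAX - 1 instead of growing by one per iteration.  */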
8890       if (cmp_min < 0)
8891 	vr_result.min = lhs_vr->min;
8892       else if (cmp_min > 0
8893 	       && !vrp_val_is_min (vr_result.min))
8894 	vr_result.min
8895 	  = int_const_binop (PLUS_EXPR,
8896 			     vrp_val_min (TREE_TYPE (vr_result.min)),
8897 			     build_int_cst (TREE_TYPE (vr_result.min), 1));
8898 
8899       /* Similarly for the maximum value.  */
8900       if (cmp_max > 0)
8901 	vr_result.max = lhs_vr->max;
8902       else if (cmp_max < 0
8903 	       && !vrp_val_is_max (vr_result.max))
8904 	vr_result.max
8905 	  = int_const_binop (MINUS_EXPR,
8906 			     vrp_val_max (TREE_TYPE (vr_result.min)),
8907 			     build_int_cst (TREE_TYPE (vr_result.min), 1));
8908 
8909       /* If we dropped either bound to +-INF then if this is a loop
8910 	 PHI node SCEV may know more about its value-range.  */
8911       if (cmp_min > 0 || cmp_min < 0
8912 	   || cmp_max < 0 || cmp_max > 0)
8913 	goto scev_check;
8914 
8915       goto infinite_check;
8916     }
8917 
8918   /* If the new range is different than the previous value, keep
8919      iterating.  */
8920 update_range:
8921   if (update_value_range (lhs, &vr_result))
8922     {
8923       if (dump_file && (dump_flags & TDF_DETAILS))
8924 	{
8925 	  fprintf (dump_file, "Found new range for ");
8926 	  print_generic_expr (dump_file, lhs, 0);
8927 	  fprintf (dump_file, ": ");
8928 	  dump_value_range (dump_file, &vr_result);
8929 	  fprintf (dump_file, "\n");
8930 	}
8931 
8932       if (vr_result.type == VR_VARYING)
8933 	return SSA_PROP_VARYING;
8934 
8935       return SSA_PROP_INTERESTING;
8936     }
8937 
8938   /* Nothing changed, don't add outgoing edges.  */
8939   return SSA_PROP_NOT_INTERESTING;
8940 
8941 varying:
8942   set_value_range_to_varying (&vr_result);
8943 
8944 scev_check:
8945   /* If this is a loop PHI node SCEV may know more about its value-range.
8946      scev_check can be reached from two paths, one is a fall through from above
8947      "varying" label, the other is direct goto from code block which tries to
8948      avoid infinite simulation.  */
8949   if ((l = loop_containing_stmt (phi))
8950       && l->header == gimple_bb (phi))
8951     adjust_range_with_scev (&vr_result, l, phi, lhs);
8952 
8953 infinite_check:
8954   /* If we will end up with a (-INF, +INF) range, set it to
8955      VARYING.  Same if the previous max value was invalid for
8956      the type and we end up with vr_result.min > vr_result.max.  */
8957   if ((vr_result.type == VR_RANGE || vr_result.type == VR_ANTI_RANGE)
8958       && !((vrp_val_is_max (vr_result.max) && vrp_val_is_min (vr_result.min))
8959 	   || compare_values (vr_result.min, vr_result.max) > 0))
8960     goto update_range;
8961 
8962   /* No match found.  Set the LHS to VARYING.  */
8963   set_value_range_to_varying (lhs_vr);
8964   return SSA_PROP_VARYING;
8965 }
8966 
8967 /* Simplify boolean operations if the source is known
8968    to be already a boolean.  */
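/* For instance, if a_1 is known to be in [0, 1] and the types match,
     b_2 = a_1 != 0   becomes   b_2 = a_1
     b_2 = a_1 == 0   becomes   b_2 = a_1 ^ 1  */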
8969 static bool
8970 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
8971 {
8972   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8973   tree lhs, op0, op1;
8974   bool need_conversion;
8975 
8976   /* We handle only !=/== case here.  */
8977   gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
8978 
8979   op0 = gimple_assign_rhs1 (stmt);
8980   if (!op_with_boolean_value_range_p (op0))
8981     return false;
8982 
8983   op1 = gimple_assign_rhs2 (stmt);
8984   if (!op_with_boolean_value_range_p (op1))
8985     return false;
8986 
8987   /* Reduce number of cases to handle to NE_EXPR.  As there is no
8988      BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
8989   if (rhs_code == EQ_EXPR)
8990     {
8991       if (TREE_CODE (op1) == INTEGER_CST)
8992 	op1 = int_const_binop (BIT_XOR_EXPR, op1,
8993 			       build_int_cst (TREE_TYPE (op1), 1));
8994       else
8995 	return false;
8996     }
8997 
8998   lhs = gimple_assign_lhs (stmt);
8999   need_conversion
9000     = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
9001 
9002   /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
9003   if (need_conversion
9004       && !TYPE_UNSIGNED (TREE_TYPE (op0))
9005       && TYPE_PRECISION (TREE_TYPE (op0)) == 1
9006       && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
9007     return false;
9008 
9009   /* For A != 0 we can substitute A itself.  */
9010   if (integer_zerop (op1))
9011     gimple_assign_set_rhs_with_ops (gsi,
9012 				    need_conversion
9013 				    ? NOP_EXPR : TREE_CODE (op0), op0);
9014   /* For A != B we substitute A ^ B.  Either with conversion.  */
9015   else if (need_conversion)
9016     {
9017       tree tem = make_ssa_name (TREE_TYPE (op0));
9018       gassign *newop
9019 	= gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
9020       gsi_insert_before (gsi, newop, GSI_SAME_STMT);
9021       gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
9022     }
9023   /* Or without.  */
9024   else
9025     gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
9026   update_stmt (gsi_stmt (*gsi));
9027 
9028   return true;
9029 }
9030 
9031 /* Simplify a division or modulo operator to a right shift or
9032    bitwise and if the first operand is unsigned or is greater
9033    than zero and the second operand is an exact power of two.
9034    For TRUNC_MOD_EXPR op0 % op1 with constant op1, optimize it
9035    into just op0 if op0's range is known to be a subset of
9036    [-op1 + 1, op1 - 1] for signed and [0, op1 - 1] for unsigned
9037    modulo.  */
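/* For instance, for unsigned int x_1:
     x_1 / 8   becomes   x_1 >> 3
     x_1 % 8   becomes   x_1 & 7
   and if x_1 is already known to be in [0, 7], x_1 % 8 becomes x_1.  */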
9038 
9039 static bool
9040 simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9041 {
9042   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9043   tree val = NULL;
9044   tree op0 = gimple_assign_rhs1 (stmt);
9045   tree op1 = gimple_assign_rhs2 (stmt);
9046   value_range *vr = get_value_range (op0);
9047 
9048   if (rhs_code == TRUNC_MOD_EXPR
9049       && TREE_CODE (op1) == INTEGER_CST
9050       && tree_int_cst_sgn (op1) == 1
9051       && range_int_cst_p (vr)
9052       && tree_int_cst_lt (vr->max, op1))
9053     {
9054       if (TYPE_UNSIGNED (TREE_TYPE (op0))
9055 	  || tree_int_cst_sgn (vr->min) >= 0
9056 	  || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1), op1),
9057 			      vr->min))
9058 	{
9059 	  /* If op0 already has the range op0 % op1 has,
9060 	     then TRUNC_MOD_EXPR won't change anything.  */
9061 	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
9062 	  gimple_assign_set_rhs_from_tree (&gsi, op0);
9063 	  update_stmt (stmt);
9064 	  return true;
9065 	}
9066     }
9067 
9068   if (!integer_pow2p (op1))
9069     {
9070       /* X % -Y can only be optimized into X % Y if either
9071 	 X is not INT_MIN or Y is not -1.  Fold it now, as after
9072 	 remove_range_assertions the range info might not be available
9073 	 anymore.  */
9074       if (rhs_code == TRUNC_MOD_EXPR
9075 	  && fold_stmt (gsi, follow_single_use_edges))
9076 	return true;
9077       return false;
9078     }
9079 
9080   if (TYPE_UNSIGNED (TREE_TYPE (op0)))
9081     val = integer_one_node;
9082   else
9083     {
9084       bool sop = false;
9085 
9086       val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
9087 
9088       if (val
9089 	  && sop
9090 	  && integer_onep (val)
9091 	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9092 	{
9093 	  location_t location;
9094 
9095 	  if (!gimple_has_location (stmt))
9096 	    location = input_location;
9097 	  else
9098 	    location = gimple_location (stmt);
9099 	  warning_at (location, OPT_Wstrict_overflow,
9100 		      "assuming signed overflow does not occur when "
9101 		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
9102 	}
9103     }
9104 
9105   if (val && integer_onep (val))
9106     {
9107       tree t;
9108 
9109       if (rhs_code == TRUNC_DIV_EXPR)
9110 	{
9111 	  t = build_int_cst (integer_type_node, tree_log2 (op1));
9112 	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
9113 	  gimple_assign_set_rhs1 (stmt, op0);
9114 	  gimple_assign_set_rhs2 (stmt, t);
9115 	}
9116       else
9117 	{
9118 	  t = build_int_cst (TREE_TYPE (op1), 1);
9119 	  t = int_const_binop (MINUS_EXPR, op1, t);
9120 	  t = fold_convert (TREE_TYPE (op0), t);
9121 
9122 	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
9123 	  gimple_assign_set_rhs1 (stmt, op0);
9124 	  gimple_assign_set_rhs2 (stmt, t);
9125 	}
9126 
9127       update_stmt (stmt);
9128       return true;
9129     }
9130 
9131   return false;
9132 }
9133 
9134 /* Simplify a min or max if the ranges of the two operands are
9135    disjoint.   Return true if we do simplify.  */
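/* For instance, MIN_EXPR <x_1, y_2> with x_1 in [0, 10] and y_2 in
   [20, 30] simplifies to x_1, and the corresponding MAX_EXPR simplifies
   to y_2.  */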
9136 
9137 static bool
9138 simplify_min_or_max_using_ranges (gimple *stmt)
9139 {
9140   tree op0 = gimple_assign_rhs1 (stmt);
9141   tree op1 = gimple_assign_rhs2 (stmt);
9142   bool sop = false;
9143   tree val;
9144 
9145   val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9146 	 (LE_EXPR, op0, op1, &sop));
9147   if (!val)
9148     {
9149       sop = false;
9150       val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
9151 	     (LT_EXPR, op0, op1, &sop));
9152     }
9153 
9154   if (val)
9155     {
9156       if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9157 	{
9158 	  location_t location;
9159 
9160 	  if (!gimple_has_location (stmt))
9161 	    location = input_location;
9162 	  else
9163 	    location = gimple_location (stmt);
9164 	  warning_at (location, OPT_Wstrict_overflow,
9165 		      "assuming signed overflow does not occur when "
9166 		      "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
9167 	}
9168 
9169       /* VAL == TRUE -> OP0 < or <= OP1
9170 	 VAL == FALSE -> OP0 > or >= OP1.  */
9171       tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
9172 		  == integer_zerop (val)) ? op0 : op1;
9173       gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
9174       gimple_assign_set_rhs_from_tree (&gsi, res);
9175       update_stmt (stmt);
9176       return true;
9177     }
9178 
9179   return false;
9180 }
9181 
9182 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
9183    ABS_EXPR.  If the operand is <= 0, then simplify the
9184    ABS_EXPR into a NEGATE_EXPR.  */
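/* For instance, ABS_EXPR <x_1> with x_1 in [1, 100] becomes plain x_1,
   while with x_1 in [-100, -1] it becomes -x_1.  */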
9185 
9186 static bool
9187 simplify_abs_using_ranges (gimple *stmt)
9188 {
9189   tree op = gimple_assign_rhs1 (stmt);
9190   value_range *vr = get_value_range (op);
9191 
9192   if (vr)
9193     {
9194       tree val = NULL;
9195       bool sop = false;
9196 
9197       val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
9198       if (!val)
9199 	{
9200 	  /* The range is neither <= 0 nor > 0.  Now see if it is
9201 	     either < 0 or >= 0.  */
9202 	  sop = false;
9203 	  val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
9204 					  &sop);
9205 	}
9206 
9207       if (val)
9208 	{
9209 	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
9210 	    {
9211 	      location_t location;
9212 
9213 	      if (!gimple_has_location (stmt))
9214 		location = input_location;
9215 	      else
9216 		location = gimple_location (stmt);
9217 	      warning_at (location, OPT_Wstrict_overflow,
9218 			  "assuming signed overflow does not occur when "
9219 			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
9220 	    }
9221 
9222 	  gimple_assign_set_rhs1 (stmt, op);
9223 	  if (integer_zerop (val))
9224 	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
9225 	  else
9226 	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
9227 	  update_stmt (stmt);
9228 	  return true;
9229 	}
9230     }
9231 
9232   return false;
9233 }
9234 
9235 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
9236    If all the bits that are being cleared by & are already
9237    known to be zero from VR, or all the bits that are being
9238    set by | are already known to be one from VR, the bit
9239    operation is redundant.  */
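/* For instance, if x_1 is known to be in [0, 15] (only the low four bits
   may be nonzero), x_1 & 0xff simplifies to x_1; if x_1 is known to be
   in [16, 31] (bit 4 is always set), x_1 | 16 simplifies to x_1.  */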
9240 
9241 static bool
9242 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9243 {
9244   tree op0 = gimple_assign_rhs1 (stmt);
9245   tree op1 = gimple_assign_rhs2 (stmt);
9246   tree op = NULL_TREE;
9247   value_range vr0 = VR_INITIALIZER;
9248   value_range vr1 = VR_INITIALIZER;
9249   wide_int may_be_nonzero0, may_be_nonzero1;
9250   wide_int must_be_nonzero0, must_be_nonzero1;
9251   wide_int mask;
9252 
9253   if (TREE_CODE (op0) == SSA_NAME)
9254     vr0 = *(get_value_range (op0));
9255   else if (is_gimple_min_invariant (op0))
9256     set_value_range_to_value (&vr0, op0, NULL);
9257   else
9258     return false;
9259 
9260   if (TREE_CODE (op1) == SSA_NAME)
9261     vr1 = *(get_value_range (op1));
9262   else if (is_gimple_min_invariant (op1))
9263     set_value_range_to_value (&vr1, op1, NULL);
9264   else
9265     return false;
9266 
9267   if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
9268 				  &must_be_nonzero0))
9269     return false;
9270   if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
9271 				  &must_be_nonzero1))
9272     return false;
9273 
9274   switch (gimple_assign_rhs_code (stmt))
9275     {
9276     case BIT_AND_EXPR:
9277       mask = may_be_nonzero0.and_not (must_be_nonzero1);
9278       if (mask == 0)
9279 	{
9280 	  op = op0;
9281 	  break;
9282 	}
9283       mask = may_be_nonzero1.and_not (must_be_nonzero0);
9284       if (mask == 0)
9285 	{
9286 	  op = op1;
9287 	  break;
9288 	}
9289       break;
9290     case BIT_IOR_EXPR:
9291       mask = may_be_nonzero0.and_not (must_be_nonzero1);
9292       if (mask == 0)
9293 	{
9294 	  op = op1;
9295 	  break;
9296 	}
9297       mask = may_be_nonzero1.and_not (must_be_nonzero0);
9298       if (mask == 0)
9299 	{
9300 	  op = op0;
9301 	  break;
9302 	}
9303       break;
9304     default:
9305       gcc_unreachable ();
9306     }
9307 
9308   if (op == NULL_TREE)
9309     return false;
9310 
9311   gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
9312   update_stmt (gsi_stmt (*gsi));
9313   return true;
9314 }
9315 
9316 /* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
9317    a known value range VR.
9318 
9319    If there is one and only one value which will satisfy the
9320    conditional, then return that value.  Else return NULL.
9321 
9322    If signed overflow must be undefined for the value to satisfy
9323    the conditional, then set *STRICT_OVERFLOW_P to true.  */
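/* For instance, with x_1 known to be in [0, 9], the comparison x_1 > 8
   is satisfied only by the value 9, so 9 is returned and the caller can
   rewrite the test as x_1 == 9.  */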
9324 
9325 static tree
9326 test_for_singularity (enum tree_code cond_code, tree op0,
9327 		      tree op1, value_range *vr,
9328 		      bool *strict_overflow_p)
9329 {
9330   tree min = NULL;
9331   tree max = NULL;
9332 
9333   /* Extract minimum/maximum values which satisfy the conditional as it was
9334      written.  */
9335   if (cond_code == LE_EXPR || cond_code == LT_EXPR)
9336     {
9337       /* This should not be negative infinity; there is no overflow
9338 	 here.  */
9339       min = TYPE_MIN_VALUE (TREE_TYPE (op0));
9340 
9341       max = op1;
9342       if (cond_code == LT_EXPR && !is_overflow_infinity (max))
9343 	{
9344 	  tree one = build_int_cst (TREE_TYPE (op0), 1);
9345 	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
9346 	  if (EXPR_P (max))
9347 	    TREE_NO_WARNING (max) = 1;
9348 	}
9349     }
9350   else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
9351     {
9352       /* This should not be positive infinity; there is no overflow
9353 	 here.  */
9354       max = TYPE_MAX_VALUE (TREE_TYPE (op0));
9355 
9356       min = op1;
9357       if (cond_code == GT_EXPR && !is_overflow_infinity (min))
9358 	{
9359 	  tree one = build_int_cst (TREE_TYPE (op0), 1);
9360 	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
9361 	  if (EXPR_P (min))
9362 	    TREE_NO_WARNING (min) = 1;
9363 	}
9364     }
9365 
9366   /* Now refine the minimum and maximum values using any
9367      value range information we have for op0.  */
9368   if (min && max)
9369     {
9370       if (compare_values (vr->min, min) == 1)
9371 	min = vr->min;
9372       if (compare_values (vr->max, max) == -1)
9373 	max = vr->max;
9374 
9375       /* If the new min/max values have converged to a single value,
9376 	 then there is only one value which can satisfy the condition,
9377 	 return that value.  */
9378       if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
9379 	{
9380 	  if ((cond_code == LE_EXPR || cond_code == LT_EXPR)
9381 	      && is_overflow_infinity (vr->max))
9382 	    *strict_overflow_p = true;
9383 	  if ((cond_code == GE_EXPR || cond_code == GT_EXPR)
9384 	      && is_overflow_infinity (vr->min))
9385 	    *strict_overflow_p = true;
9386 
9387 	  return min;
9388 	}
9389     }
9390   return NULL;
9391 }
9392 
9393 /* Return whether the value range *VR fits in an integer type specified
9394    by DEST_PRECISION and DEST_SGN.  */
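/* For instance, an unsigned int range [0, 200] fits an 8-bit unsigned
   type, but not an 8-bit signed one, where 200 would wrap to -56.  */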
9395 
9396 static bool
9397 range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
9398 {
9399   tree src_type;
9400   unsigned src_precision;
9401   widest_int tem;
9402   signop src_sgn;
9403 
9404   /* We can only handle integral and pointer types.  */
9405   src_type = TREE_TYPE (vr->min);
9406   if (!INTEGRAL_TYPE_P (src_type)
9407       && !POINTER_TYPE_P (src_type))
9408     return false;
9409 
9410   /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED;
9411      an identity transform (same precision and sign) is fine as well.  */
9412   src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
9413   src_sgn = TYPE_SIGN (src_type);
9414   if ((src_precision < dest_precision
9415        && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
9416       || (src_precision == dest_precision && src_sgn == dest_sgn))
9417     return true;
9418 
9419   /* Now we can only handle ranges with constant bounds.  */
9420   if (vr->type != VR_RANGE
9421       || TREE_CODE (vr->min) != INTEGER_CST
9422       || TREE_CODE (vr->max) != INTEGER_CST)
9423     return false;
9424 
9425   /* For sign changes, the MSB of the wide_int has to be clear.
9426      An unsigned value with its MSB set cannot be represented by
9427      a signed wide_int, while a negative value cannot be represented
9428      by an unsigned wide_int.  */
9429   if (src_sgn != dest_sgn
9430       && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
9431     return false;
9432 
9433   /* Then we can perform the conversion on both ends and compare
9434      the result for equality.  */
9435   tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
9436   if (tem != wi::to_widest (vr->min))
9437     return false;
9438   tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
9439   if (tem != wi::to_widest (vr->max))
9440     return false;
9441 
9442   return true;
9443 }
9444 
9445 /* Simplify a conditional using a relational operator to an equality
9446    test if the range information indicates only one value can satisfy
9447    the original conditional.  */
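/* For instance, with x_1 known to be in [0, 9], the test x_1 < 1 is
   rewritten as x_1 == 0 and x_1 >= 1 as x_1 != 0.  */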
9448 
9449 static bool
9450 simplify_cond_using_ranges (gcond *stmt)
9451 {
9452   tree op0 = gimple_cond_lhs (stmt);
9453   tree op1 = gimple_cond_rhs (stmt);
9454   enum tree_code cond_code = gimple_cond_code (stmt);
9455 
9456   if (cond_code != NE_EXPR
9457       && cond_code != EQ_EXPR
9458       && TREE_CODE (op0) == SSA_NAME
9459       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
9460       && is_gimple_min_invariant (op1))
9461     {
9462       value_range *vr = get_value_range (op0);
9463 
9464       /* If we have range information for OP0, then we might be
9465 	 able to simplify this conditional. */
9466       if (vr->type == VR_RANGE)
9467 	{
9468 	  enum warn_strict_overflow_code wc = WARN_STRICT_OVERFLOW_COMPARISON;
9469 	  bool sop = false;
9470 	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr, &sop);
9471 
9472 	  if (new_tree
9473 	      && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9474 	    {
9475 	      if (dump_file)
9476 		{
9477 		  fprintf (dump_file, "Simplified relational ");
9478 		  print_gimple_stmt (dump_file, stmt, 0, 0);
9479 		  fprintf (dump_file, " into ");
9480 		}
9481 
9482 	      gimple_cond_set_code (stmt, EQ_EXPR);
9483 	      gimple_cond_set_lhs (stmt, op0);
9484 	      gimple_cond_set_rhs (stmt, new_tree);
9485 
9486 	      update_stmt (stmt);
9487 
9488 	      if (dump_file)
9489 		{
9490 		  print_gimple_stmt (dump_file, stmt, 0, 0);
9491 		  fprintf (dump_file, "\n");
9492 		}
9493 
9494 	      if (sop && issue_strict_overflow_warning (wc))
9495 	        {
9496 	          location_t location = input_location;
9497 	          if (gimple_has_location (stmt))
9498 		    location = gimple_location (stmt);
9499 
9500 	          warning_at (location, OPT_Wstrict_overflow,
9501 			      "assuming signed overflow does not occur when "
9502 			      "simplifying conditional");
9503 	        }
9504 
9505 	      return true;
9506 	    }
9507 
9508 	  /* Try again after inverting the condition.  We only deal
9509 	     with integral types here, so no need to worry about
9510 	     issues with inverting FP comparisons.  */
9511 	  sop = false;
9512 	  new_tree = test_for_singularity
9513 		       (invert_tree_comparison (cond_code, false),
9514 			op0, op1, vr, &sop);
9515 
9516 	  if (new_tree
9517 	      && (!sop || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))))
9518 	    {
9519 	      if (dump_file)
9520 		{
9521 		  fprintf (dump_file, "Simplified relational ");
9522 		  print_gimple_stmt (dump_file, stmt, 0, 0);
9523 		  fprintf (dump_file, " into ");
9524 		}
9525 
9526 	      gimple_cond_set_code (stmt, NE_EXPR);
9527 	      gimple_cond_set_lhs (stmt, op0);
9528 	      gimple_cond_set_rhs (stmt, new_tree);
9529 
9530 	      update_stmt (stmt);
9531 
9532 	      if (dump_file)
9533 		{
9534 		  print_gimple_stmt (dump_file, stmt, 0, 0);
9535 		  fprintf (dump_file, "\n");
9536 		}
9537 
9538 	      if (sop && issue_strict_overflow_warning (wc))
9539 	        {
9540 	          location_t location = input_location;
9541 	          if (gimple_has_location (stmt))
9542 		    location = gimple_location (stmt);
9543 
9544 	          warning_at (location, OPT_Wstrict_overflow,
9545 			      "assuming signed overflow does not occur when "
9546 			      "simplifying conditional");
9547 	        }
9548 
9549 	      return true;
9550 	    }
9551 	}
9552     }
9553 
9554   /* If we have a comparison of an SSA_NAME (OP0) against a constant,
9555      see if OP0 was set by a type conversion where the source of
9556      the conversion is another SSA_NAME with a range that fits
9557      into the range of OP0's type.
9558 
9559      If so, the conversion is redundant as the earlier SSA_NAME can be
9560      used for the comparison directly if we just massage the constant in the
9561      comparison.  */
9562   if (TREE_CODE (op0) == SSA_NAME
9563       && TREE_CODE (op1) == INTEGER_CST)
9564     {
9565       gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
9566       tree innerop;
9567 
9568       if (!is_gimple_assign (def_stmt)
9569 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9570 	return false;
9571 
9572       innerop = gimple_assign_rhs1 (def_stmt);
9573 
9574       if (TREE_CODE (innerop) == SSA_NAME
9575 	  && !POINTER_TYPE_P (TREE_TYPE (innerop))
9576 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
9577 	  && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
9578 	{
9579 	  value_range *vr = get_value_range (innerop);
9580 
9581 	  if (range_int_cst_p (vr)
9582 	      && range_fits_type_p (vr,
9583 				    TYPE_PRECISION (TREE_TYPE (op0)),
9584 				    TYPE_SIGN (TREE_TYPE (op0)))
9585 	      && int_fits_type_p (op1, TREE_TYPE (innerop))
9586 	      /* The range must not have overflowed, or if it did overflow
9587 		 we must not be wrapping/trapping overflow and optimizing
9588 		 with strict overflow semantics.  */
9589 	      && ((!is_negative_overflow_infinity (vr->min)
9590 	           && !is_positive_overflow_infinity (vr->max))
9591 		  || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
9592 	    {
9593 	      /* If the range overflowed and the user has asked for warnings
9594 		 when strict overflow semantics were used to optimize code,
9595 		 issue an appropriate warning.  */
9596 	      if (cond_code != EQ_EXPR && cond_code != NE_EXPR
9597 		  && (is_negative_overflow_infinity (vr->min)
9598 		      || is_positive_overflow_infinity (vr->max))
9599 		  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
9600 		{
9601 		  location_t location;
9602 
9603 		  if (!gimple_has_location (stmt))
9604 		    location = input_location;
9605 		  else
9606 		    location = gimple_location (stmt);
9607 		  warning_at (location, OPT_Wstrict_overflow,
9608 			      "assuming signed overflow does not occur when "
9609 			      "simplifying conditional");
9610 		}
9611 
9612 	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
9613 	      gimple_cond_set_lhs (stmt, innerop);
9614 	      gimple_cond_set_rhs (stmt, newconst);
9615 	      return true;
9616 	    }
9617 	}
9618     }
9619 
9620   return false;
9621 }
9622 
9623 /* Simplify a switch statement using the value range of the switch
9624    argument.  */
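/* For instance, if the switch index is known to be in [3, 4], case
   labels outside that range can never be taken: their outgoing edges are
   queued for removal and the case vector is rebuilt to hold only the
   reachable labels (plus the default label, if still reachable).  */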
9625 
9626 static bool
9627 simplify_switch_using_ranges (gswitch *stmt)
9628 {
9629   tree op = gimple_switch_index (stmt);
9630   value_range *vr;
9631   bool take_default;
9632   edge e;
9633   edge_iterator ei;
9634   size_t i = 0, j = 0, n, n2;
9635   tree vec2;
9636   switch_update su;
9637   size_t k = 1, l = 0;
9638 
9639   if (TREE_CODE (op) == SSA_NAME)
9640     {
9641       vr = get_value_range (op);
9642 
9643       /* We can only handle integer ranges.  */
9644       if ((vr->type != VR_RANGE
9645 	   && vr->type != VR_ANTI_RANGE)
9646 	  || symbolic_range_p (vr))
9647 	return false;
9648 
9649       /* Find case label for min/max of the value range.  */
9650       take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
9651     }
9652   else if (TREE_CODE (op) == INTEGER_CST)
9653     {
9654       take_default = !find_case_label_index (stmt, 1, op, &i);
9655       if (take_default)
9656 	{
9657 	  i = 1;
9658 	  j = 0;
9659 	}
9660       else
9661 	{
9662 	  j = i;
9663 	}
9664     }
9665   else
9666     return false;
9667 
9668   n = gimple_switch_num_labels (stmt);
9669 
9670   /* Bail out if this is just all edges taken.  */
9671   if (i == 1
9672       && j == n - 1
9673       && take_default)
9674     return false;
9675 
9676   /* Build a new vector of taken case labels.  */
9677   vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
9678   n2 = 0;
9679 
9680   /* Add the default edge, if necessary.  */
9681   if (take_default)
9682     TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
9683 
9684   for (; i <= j; ++i, ++n2)
9685     TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
9686 
9687   for (; k <= l; ++k, ++n2)
9688     TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
9689 
9690   /* Mark needed edges.  */
9691   for (i = 0; i < n2; ++i)
9692     {
9693       e = find_edge (gimple_bb (stmt),
9694 		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
9695       e->aux = (void *)-1;
9696     }
9697 
9698   /* Queue not needed edges for later removal.  */
9699   FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
9700     {
9701       if (e->aux == (void *)-1)
9702 	{
9703 	  e->aux = NULL;
9704 	  continue;
9705 	}
9706 
9707       if (dump_file && (dump_flags & TDF_DETAILS))
9708 	{
9709 	  fprintf (dump_file, "removing unreachable case label\n");
9710 	}
9711       to_remove_edges.safe_push (e);
9712       e->flags &= ~EDGE_EXECUTABLE;
9713     }
9714 
9715   /* And queue an update for the stmt.  */
9716   su.stmt = stmt;
9717   su.vec = vec2;
9718   to_update_switch_stmts.safe_push (su);
9719   return false;
9720 }
9721 
9722 /* Simplify an integral conversion from an SSA name in STMT.  */
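/* A hedged sketch of the transformation (the SSA names and types below
   are illustrative only).  Given the conversion chain

     _2 = (int) l_1;		<- inner conversion (def_stmt)
     _3 = (short int) _2;	<- STMT, the final conversion

   where l_1 is a long with value range [0, 100], truncating l_1
   directly to short int yields the same values over the whole range,
   so STMT is rewritten to

     _3 = (short int) l_1;

   and the intermediate conversion becomes dead code.  */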
9723 
9724 static bool
9725 simplify_conversion_using_ranges (gimple *stmt)
9726 {
9727   tree innerop, middleop, finaltype;
9728   gimple *def_stmt;
9729   value_range *innervr;
9730   signop inner_sgn, middle_sgn, final_sgn;
9731   unsigned inner_prec, middle_prec, final_prec;
9732   widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
9733 
9734   finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
9735   if (!INTEGRAL_TYPE_P (finaltype))
9736     return false;
9737   middleop = gimple_assign_rhs1 (stmt);
9738   def_stmt = SSA_NAME_DEF_STMT (middleop);
9739   if (!is_gimple_assign (def_stmt)
9740       || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
9741     return false;
9742   innerop = gimple_assign_rhs1 (def_stmt);
9743   if (TREE_CODE (innerop) != SSA_NAME
9744       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
9745     return false;
9746 
9747   /* Get the value-range of the inner operand.  */
9748   innervr = get_value_range (innerop);
9749   if (innervr->type != VR_RANGE
9750       || TREE_CODE (innervr->min) != INTEGER_CST
9751       || TREE_CODE (innervr->max) != INTEGER_CST)
9752     return false;
9753 
9754   /* Simulate the conversion chain to check whether the result stays
9755      the same when the middle conversion is removed.  */
9756   innermin = wi::to_widest (innervr->min);
9757   innermax = wi::to_widest (innervr->max);
9758 
9759   inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
9760   middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
9761   final_prec = TYPE_PRECISION (finaltype);
9762 
9763   /* If the first conversion is not injective, the second must not
9764      be widening.  */
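  /* E.g. (illustrative): for a long -> char -> long chain, if the
     inner range spans more than 2^8 values the truncation to char
     folds distinct inputs together, and the widening back to long
     cannot recover them, so dropping the middle conversion would not
     be safe.  */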
9765   if (wi::gtu_p (innermax - innermin,
9766 		 wi::mask <widest_int> (middle_prec, false))
9767       && middle_prec < final_prec)
9768     return false;
9769   /* We also want a medium value so that we can track the effect that
9770      narrowing conversions with sign change have.  */
9771   inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
9772   if (inner_sgn == UNSIGNED)
9773     innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
9774   else
9775     innermed = 0;
9776   if (wi::cmp (innermin, innermed, inner_sgn) >= 0
9777       || wi::cmp (innermed, innermax, inner_sgn) >= 0)
9778     innermed = innermin;
9779 
9780   middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
9781   middlemin = wi::ext (innermin, middle_prec, middle_sgn);
9782   middlemed = wi::ext (innermed, middle_prec, middle_sgn);
9783   middlemax = wi::ext (innermax, middle_prec, middle_sgn);
9784 
9785   /* Require that the final conversion applied to both the original
9786      and the intermediate range produces the same result.  */
9787   final_sgn = TYPE_SIGN (finaltype);
9788   if (wi::ext (middlemin, final_prec, final_sgn)
9789 	 != wi::ext (innermin, final_prec, final_sgn)
9790       || wi::ext (middlemed, final_prec, final_sgn)
9791 	 != wi::ext (innermed, final_prec, final_sgn)
9792       || wi::ext (middlemax, final_prec, final_sgn)
9793 	 != wi::ext (innermax, final_prec, final_sgn))
9794     return false;
9795 
9796   gimple_assign_set_rhs1 (stmt, innerop);
9797   update_stmt (stmt);
9798   return true;
9799 }
9800 
9801 /* Simplify a conversion from integral SSA name to float in STMT.  */
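/* An illustrative sketch (the names and types are made up): if u_1 is
   an unsigned int known to be in [0, 1000], then

     f_2 = (float) u_1;

   can be rewritten as

     _3 = (signed type of the same width) u_1;
     f_2 = (float) _3;

   because the whole range fits the signed type, and targets commonly
   provide a direct signed int-to-float conversion while the unsigned
   one may need extra code or a library call.  */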
9802 
9803 static bool
9804 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
9805 					gimple *stmt)
9806 {
9807   tree rhs1 = gimple_assign_rhs1 (stmt);
9808   value_range *vr = get_value_range (rhs1);
9809   machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
9810   machine_mode mode;
9811   tree tem;
9812   gassign *conv;
9813 
9814   /* We can only handle constant ranges.  */
9815   if (vr->type != VR_RANGE
9816       || TREE_CODE (vr->min) != INTEGER_CST
9817       || TREE_CODE (vr->max) != INTEGER_CST)
9818     return false;
9819 
9820   /* First check if we can use a signed type in place of an unsigned.  */
9821   if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
9822       && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
9823 	  != CODE_FOR_nothing)
9824       && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
9825     mode = TYPE_MODE (TREE_TYPE (rhs1));
9826   /* If we can do the conversion in the current input mode do nothing.  */
9827   else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
9828 			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
9829     return false;
9830   /* Otherwise search for a mode we can use, starting from the narrowest
9831      integer mode available.  */
9832   else
9833     {
9834       mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
9835       do
9836 	{
9837 	  /* If we cannot do a signed conversion to float from mode
9838 	     or if the value-range does not fit in the signed type
9839 	     try with a wider mode.  */
9840 	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
9841 	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
9842 	    break;
9843 
9844 	  mode = GET_MODE_WIDER_MODE (mode);
9845 	  /* But do not widen the input.  Instead leave that to the
9846 	     optabs expansion code.  */
9847 	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
9848 	    return false;
9849 	}
9850       while (mode != VOIDmode);
9851       if (mode == VOIDmode)
9852 	return false;
9853     }
9854 
9855   /* It works, insert a truncation or sign-change before the
9856      float conversion.  */
9857   tem = make_ssa_name (build_nonstandard_integer_type
9858 			  (GET_MODE_PRECISION (mode), 0));
9859   conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
9860   gsi_insert_before (gsi, conv, GSI_SAME_STMT);
9861   gimple_assign_set_rhs1 (stmt, tem);
9862   update_stmt (stmt);
9863 
9864   return true;
9865 }
9866 
9867 /* Simplify an internal fn call using ranges if possible.  */
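/* A hedged sketch (names illustrative): for

     _5 = UBSAN_CHECK_ADD (x_1, y_2);

   if the value ranges of x_1 and y_2 prove the addition cannot
   overflow, the checking call is replaced by a plain

     _5 = x_1 + y_2;

   For the ADD/SUB/MUL_OVERFLOW internal calls, whose result is a
   complex value/overflow-flag pair, the arithmetic is emitted (in an
   unsigned type when needed) and the flag becomes a compile-time
   constant.  */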
9868 
9869 static bool
9870 simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
9871 {
9872   enum tree_code subcode;
9873   bool is_ubsan = false;
9874   bool ovf = false;
9875   switch (gimple_call_internal_fn (stmt))
9876     {
9877     case IFN_UBSAN_CHECK_ADD:
9878       subcode = PLUS_EXPR;
9879       is_ubsan = true;
9880       break;
9881     case IFN_UBSAN_CHECK_SUB:
9882       subcode = MINUS_EXPR;
9883       is_ubsan = true;
9884       break;
9885     case IFN_UBSAN_CHECK_MUL:
9886       subcode = MULT_EXPR;
9887       is_ubsan = true;
9888       break;
9889     case IFN_ADD_OVERFLOW:
9890       subcode = PLUS_EXPR;
9891       break;
9892     case IFN_SUB_OVERFLOW:
9893       subcode = MINUS_EXPR;
9894       break;
9895     case IFN_MUL_OVERFLOW:
9896       subcode = MULT_EXPR;
9897       break;
9898     default:
9899       return false;
9900     }
9901 
9902   tree op0 = gimple_call_arg (stmt, 0);
9903   tree op1 = gimple_call_arg (stmt, 1);
9904   tree type;
9905   if (is_ubsan)
9906     type = TREE_TYPE (op0);
9907   else if (gimple_call_lhs (stmt) == NULL_TREE)
9908     return false;
9909   else
9910     type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
9911   if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
9912       || (is_ubsan && ovf))
9913     return false;
9914 
9915   gimple *g;
9916   location_t loc = gimple_location (stmt);
9917   if (is_ubsan)
9918     g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
9919   else
9920     {
9921       int prec = TYPE_PRECISION (type);
9922       tree utype = type;
9923       if (ovf
9924 	  || !useless_type_conversion_p (type, TREE_TYPE (op0))
9925 	  || !useless_type_conversion_p (type, TREE_TYPE (op1)))
9926 	utype = build_nonstandard_integer_type (prec, 1);
9927       if (TREE_CODE (op0) == INTEGER_CST)
9928 	op0 = fold_convert (utype, op0);
9929       else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
9930 	{
9931 	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
9932 	  gimple_set_location (g, loc);
9933 	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
9934 	  op0 = gimple_assign_lhs (g);
9935 	}
9936       if (TREE_CODE (op1) == INTEGER_CST)
9937 	op1 = fold_convert (utype, op1);
9938       else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
9939 	{
9940 	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
9941 	  gimple_set_location (g, loc);
9942 	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
9943 	  op1 = gimple_assign_lhs (g);
9944 	}
9945       g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
9946       gimple_set_location (g, loc);
9947       gsi_insert_before (gsi, g, GSI_SAME_STMT);
9948       if (utype != type)
9949 	{
9950 	  g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
9951 				   gimple_assign_lhs (g));
9952 	  gimple_set_location (g, loc);
9953 	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
9954 	}
9955       g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
9956 			       gimple_assign_lhs (g),
9957 			       build_int_cst (type, ovf));
9958     }
9959   gimple_set_location (g, loc);
9960   gsi_replace (gsi, g, false);
9961   return true;
9962 }
9963 
9964 /* Simplify STMT using ranges if possible.  */
9965 
9966 static bool
9967 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9968 {
9969   gimple *stmt = gsi_stmt (*gsi);
9970   if (is_gimple_assign (stmt))
9971     {
9972       enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9973       tree rhs1 = gimple_assign_rhs1 (stmt);
9974 
9975       switch (rhs_code)
9976 	{
9977 	case EQ_EXPR:
9978 	case NE_EXPR:
9979           /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9980 	     if the RHS is zero or one, and the LHS is known to be a
9981 	     boolean value.  */
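	  /* E.g. (illustrative), with b_1 known to be in [0, 1]:
	       _2 = b_1 == 0	becomes   _2 = b_1 ^ 1
	       _2 = b_1 != 0	becomes   _2 = b_1  */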
9982 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9983 	    return simplify_truth_ops_using_ranges (gsi, stmt);
9984 	  break;
9985 
9986       /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9987 	 and BIT_AND_EXPR respectively if the first operand is greater
9988 	 than zero and the second operand is an exact power of two.
9989 	 Also optimize TRUNC_MOD_EXPR away if the second operand is
9990 	 constant and the first operand already has the right value
9991 	 range.  */
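	/* E.g. (illustrative), with x_1 known to be in [0, 63]:
	     x_1 / 16	becomes   x_1 >> 4
	     x_1 % 16	becomes   x_1 & 15
	   and x_1 % 64 is replaced by x_1 itself, since the value
	   already lies within [0, 63].  */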
9992 	case TRUNC_DIV_EXPR:
9993 	case TRUNC_MOD_EXPR:
9994 	  if (TREE_CODE (rhs1) == SSA_NAME
9995 	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9996 	    return simplify_div_or_mod_using_ranges (gsi, stmt);
9997 	  break;
9998 
9999       /* Transform ABS (X) into X or -X as appropriate.  */
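	/* E.g. (illustrative): with x_1 in [3, 12], ABS (x_1) becomes
	   x_1; with x_1 in [-12, -3] it becomes -x_1.  */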
10000 	case ABS_EXPR:
10001 	  if (TREE_CODE (rhs1) == SSA_NAME
10002 	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10003 	    return simplify_abs_using_ranges (stmt);
10004 	  break;
10005 
10006 	case BIT_AND_EXPR:
10007 	case BIT_IOR_EXPR:
10008 	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
10009 	     if all the bits being cleared are already cleared or
10010 	     all the bits being set are already set.  */
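	  /* E.g. (illustrative), with x_1 known to be in [0, 15]:
	       _2 = x_1 & 255	becomes   _2 = x_1
	     since every bit the mask would clear is already zero.  */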
10011 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10012 	    return simplify_bit_ops_using_ranges (gsi, stmt);
10013 	  break;
10014 
10015 	CASE_CONVERT:
10016 	  if (TREE_CODE (rhs1) == SSA_NAME
10017 	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10018 	    return simplify_conversion_using_ranges (stmt);
10019 	  break;
10020 
10021 	case FLOAT_EXPR:
10022 	  if (TREE_CODE (rhs1) == SSA_NAME
10023 	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
10024 	    return simplify_float_conversion_using_ranges (gsi, stmt);
10025 	  break;
10026 
10027 	case MIN_EXPR:
10028 	case MAX_EXPR:
10029 	  return simplify_min_or_max_using_ranges (stmt);
10030 	  break;
10031 
10032 	default:
10033 	  break;
10034 	}
10035     }
10036   else if (gimple_code (stmt) == GIMPLE_COND)
10037     return simplify_cond_using_ranges (as_a <gcond *> (stmt));
10038   else if (gimple_code (stmt) == GIMPLE_SWITCH)
10039     return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
10040   else if (is_gimple_call (stmt)
10041 	   && gimple_call_internal_p (stmt))
10042     return simplify_internal_call_using_ranges (gsi, stmt);
10043 
10044   return false;
10045 }
10046 
10047 /* If the statement pointed to by SI has a predicate whose value can be
10048    computed using the value range information computed by VRP, compute
10049    its value and return true.  Otherwise, return false.  */
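/* A small illustrative example (names made up): if x_2 is known to be
   in [0, 9], the predicate in

     if (x_2 > 20)

   evaluates to false, so the GIMPLE_COND is rewritten into a
   trivially false condition; a comparison on the RHS of an assignment
   is likewise replaced by the computed constant.  */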
10050 
10051 static bool
10052 fold_predicate_in (gimple_stmt_iterator *si)
10053 {
10054   bool assignment_p = false;
10055   tree val;
10056   gimple *stmt = gsi_stmt (*si);
10057 
10058   if (is_gimple_assign (stmt)
10059       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10060     {
10061       assignment_p = true;
10062       val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
10063 				      gimple_assign_rhs1 (stmt),
10064 				      gimple_assign_rhs2 (stmt),
10065 				      stmt);
10066     }
10067   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10068     val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10069 				    gimple_cond_lhs (cond_stmt),
10070 				    gimple_cond_rhs (cond_stmt),
10071 				    stmt);
10072   else
10073     return false;
10074 
10075   if (val)
10076     {
10077       if (assignment_p)
10078         val = fold_convert (gimple_expr_type (stmt), val);
10079 
10080       if (dump_file)
10081 	{
10082 	  fprintf (dump_file, "Folding predicate ");
10083 	  print_gimple_expr (dump_file, stmt, 0, 0);
10084 	  fprintf (dump_file, " to ");
10085 	  print_generic_expr (dump_file, val, 0);
10086 	  fprintf (dump_file, "\n");
10087 	}
10088 
10089       if (is_gimple_assign (stmt))
10090 	gimple_assign_set_rhs_from_tree (si, val);
10091       else
10092 	{
10093 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
10094 	  gcond *cond_stmt = as_a <gcond *> (stmt);
10095 	  if (integer_zerop (val))
10096 	    gimple_cond_make_false (cond_stmt);
10097 	  else if (integer_onep (val))
10098 	    gimple_cond_make_true (cond_stmt);
10099 	  else
10100 	    gcc_unreachable ();
10101 	}
10102 
10103       return true;
10104     }
10105 
10106   return false;
10107 }
10108 
10109 /* Callback for substitute_and_fold folding the stmt at *SI.  */
10110 
10111 static bool
10112 vrp_fold_stmt (gimple_stmt_iterator *si)
10113 {
10114   if (fold_predicate_in (si))
10115     return true;
10116 
10117   return simplify_stmt_using_ranges (si);
10118 }
10119 
10120 /* Unwindable const/copy equivalences.  */
10121 const_and_copies *equiv_stack;
10122 
10123 /* A trivial wrapper so that we can present the generic jump threading
10124    code with a simple API for simplifying statements.  STMT is the
10125    statement we want to simplify, WITHIN_STMT provides the location
10126    for any overflow warnings.  */
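/* An illustrative example (names made up): while threading through a
   block, if the incoming edge established i_1 in [1, 4], a condition
   "i_1 == 0" inside that block evaluates to false and the thread can
   continue through the corresponding successor.  For an assignment
   whose computed range collapses to a single constant, that constant
   is returned instead.  */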
10127 
10128 static tree
10129 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
10130     class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED)
10131 {
10132   if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
10133     return vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
10134 				     gimple_cond_lhs (cond_stmt),
10135 				     gimple_cond_rhs (cond_stmt),
10136 				     within_stmt);
10137 
10138   if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
10139     {
10140       value_range new_vr = VR_INITIALIZER;
10141       tree lhs = gimple_assign_lhs (assign_stmt);
10142 
10143       if (TREE_CODE (lhs) == SSA_NAME
10144 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
10145 	      || POINTER_TYPE_P (TREE_TYPE (lhs))))
10146 	{
10147 	  extract_range_from_assignment (&new_vr, assign_stmt);
10148 	  if (range_int_cst_singleton_p (&new_vr))
10149 	    return new_vr.min;
10150 	}
10151     }
10152 
10153   return NULL_TREE;
10154 }
10155 
10156 /* Blocks which have more than one predecessor and more than
10157    one successor present jump threading opportunities, i.e.,
10158    when the block is reached from a specific predecessor, we
10159    may be able to determine which of the outgoing edges will
10160    be traversed.  When this optimization applies, we are able
10161    to avoid conditionals at runtime and we may expose secondary
10162    optimization opportunities.
10163 
10164    This routine is effectively a driver for the generic jump
10165    threading code.  It basically just presents the generic code
10166    with edges that may be suitable for jump threading.
10167 
10168    Unlike DOM, we do not iterate VRP if jump threading was successful.
10169    While iterating may expose new opportunities for VRP, it is expected
10170    those opportunities would be very limited and the compile time cost
10171    to expose those opportunities would be significant.
10172 
10173    As jump threading opportunities are discovered, they are registered
10174    for later realization.  */
10175 
10176 static void
10177 identify_jump_threads (void)
10178 {
10179   basic_block bb;
10180   gcond *dummy;
10181   int i;
10182   edge e;
10183 
10184   /* Ugh.  When substituting values earlier in this pass we can
10185      wipe the dominance information.  So rebuild the dominator
10186      information as we need it within the jump threading code.  */
10187   calculate_dominance_info (CDI_DOMINATORS);
10188 
10189   /* We do not allow VRP information to be used for jump threading
10190      across a back edge in the CFG.  Otherwise it becomes too
10191      difficult to avoid eliminating loop exit tests.  Of course
10192      EDGE_DFS_BACK is not accurate at this time so we have to
10193      recompute it.  */
10194   mark_dfs_back_edges ();
10195 
10196   /* Do not thread across edges we are about to remove.  Just marking
10197      them as EDGE_IGNORE will do.  */
10198   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10199     e->flags |= EDGE_IGNORE;
10200 
10201   /* Allocate our unwinder stack to unwind any temporary equivalences
10202      that might be recorded.  */
10203   equiv_stack = new const_and_copies ();
10204 
10205   /* To avoid lots of silly node creation, we create a single
10206      conditional and just modify it in-place when attempting to
10207      thread jumps.  */
10208   dummy = gimple_build_cond (EQ_EXPR,
10209 			     integer_zero_node, integer_zero_node,
10210 			     NULL, NULL);
10211 
10212   /* Walk through all the blocks finding those which present a
10213      potential jump threading opportunity.  We could set this up
10214      as a dominator walker and record data during the walk, but
10215      I doubt it's worth the effort for the classes of jump
10216      threading opportunities we are trying to identify at this
10217      point in compilation.  */
10218   FOR_EACH_BB_FN (bb, cfun)
10219     {
10220       gimple *last;
10221 
10222       /* If the generic jump threading code does not find this block
10223 	 interesting, then there is nothing to do.  */
10224       if (! potentially_threadable_block (bb))
10225 	continue;
10226 
10227       last = last_stmt (bb);
10228 
10229       /* We're basically looking for a switch or any kind of conditional with
10230 	 integral or pointer type arguments.  Note the type of the second
10231 	 argument will be the same as the first argument, so no need to
10232 	 check it explicitly.
10233 
10234 	 We also handle the case where there are no statements in the
10235 	 block.  This comes up with forwarder blocks that are not
10236 	 optimized away because they lead to a loop header.  But we do
10237 	 want to thread through them as we can sometimes thread to the
10238 	 loop exit which is obviously profitable.  */
10239       if (!last
10240 	  || gimple_code (last) == GIMPLE_SWITCH
10241 	  || (gimple_code (last) == GIMPLE_COND
10242       	      && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
10243 	      && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
10244 		  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
10245 	      && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
10246 		  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
10247 	{
10248 	  edge_iterator ei;
10249 
10250 	  /* We've got a block with multiple predecessors and multiple
10251 	     successors which also ends in a suitable conditional or
10252 	     switch statement.  For each predecessor, see if we can thread
10253 	     it to a specific successor.  */
10254 	  FOR_EACH_EDGE (e, ei, bb->preds)
10255 	    {
10256 	      /* Do not thread across edges marked to be ignored, or across
10257 		 abnormal edges in the CFG.  */
10258 	      if (e->flags & (EDGE_IGNORE | EDGE_COMPLEX))
10259 		continue;
10260 
10261 	      thread_across_edge (dummy, e, true, equiv_stack, NULL,
10262 				  simplify_stmt_for_jump_threading);
10263 	    }
10264 	}
10265     }
10266 
10267   /* Clear EDGE_IGNORE.  */
10268   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10269     e->flags &= ~EDGE_IGNORE;
10270 
10271   /* We do not actually update the CFG or SSA graphs at this point as
10272      ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
10273      handle ASSERT_EXPRs gracefully.  */
10274 }
10275 
10276 /* We identified all the jump threading opportunities earlier, but could
10277    not transform the CFG at that time.  This routine transforms the
10278    CFG and arranges for the dominator tree to be rebuilt if necessary.
10279 
10280    Note the SSA graph update will occur during the normal TODO
10281    processing by the pass manager.  */
10282 static void
10283 finalize_jump_threads (void)
10284 {
10285   thread_through_all_blocks (false);
10286   delete equiv_stack;
10287 }
10288 
10289 
10290 /* Traverse all the blocks folding conditionals with known ranges.  */
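/* (Illustrative note: besides driving the folding, this also exports
   the computed integral ranges onto the SSA names via set_range_info
   so that later passes can reuse them, identifies jump threading
   opportunities while the VRP data is still alive, and then frees
   that data.)  */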
10291 
10292 static void
10293 vrp_finalize (bool warn_array_bounds_p)
10294 {
10295   size_t i;
10296 
10297   values_propagated = true;
10298 
10299   if (dump_file)
10300     {
10301       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
10302       dump_all_value_ranges (dump_file);
10303       fprintf (dump_file, "\n");
10304     }
10305 
10306   /* Set value range info on non-pointer SSA_NAMEs.  */
10307   for (i = 0; i < num_vr_values; i++)
10308     if (vr_value[i])
10309       {
10310 	tree name = ssa_name (i);
10311 
10312 	if (!name
10313 	    || POINTER_TYPE_P (TREE_TYPE (name))
10314 	    || (vr_value[i]->type == VR_VARYING)
10315 	    || (vr_value[i]->type == VR_UNDEFINED))
10316 	  continue;
10317 
10318 	if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
10319 	    && (TREE_CODE (vr_value[i]->max) == INTEGER_CST)
10320 	    && (vr_value[i]->type == VR_RANGE
10321 		|| vr_value[i]->type == VR_ANTI_RANGE))
10322 	  set_range_info (name, vr_value[i]->type, vr_value[i]->min,
10323 			  vr_value[i]->max);
10324       }
10325 
10326   substitute_and_fold (op_with_constant_singleton_value_range,
10327 		       vrp_fold_stmt, false);
10328 
10329   if (warn_array_bounds && warn_array_bounds_p)
10330     check_all_array_refs ();
10331 
10332   /* We must identify jump threading opportunities before we release
10333      the datastructures built by VRP.  */
10334   identify_jump_threads ();
10335 
10336   /* Free allocated memory.  */
10337   for (i = 0; i < num_vr_values; i++)
10338     if (vr_value[i])
10339       {
10340 	BITMAP_FREE (vr_value[i]->equiv);
10341 	free (vr_value[i]);
10342       }
10343 
10344   free (vr_value);
10345   free (vr_phi_edge_counts);
10346 
10347   /* So that we can distinguish between VRP data being available
10348      and not available.  */
10349   vr_value = NULL;
10350   vr_phi_edge_counts = NULL;
10351 }
10352 
10353 
10354 /* Main entry point to VRP (Value Range Propagation).  This pass is
10355    loosely based on J. R. C. Patterson, ``Accurate Static Branch
10356    Prediction by Value Range Propagation,'' in SIGPLAN Conference on
10357    Programming Language Design and Implementation, pp. 67-78, 1995.
10358    Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
10359 
10360    This is essentially an SSA-CCP pass modified to deal with ranges
10361    instead of constants.
10362 
10363    While propagating ranges, we may find that two or more SSA name
10364    have equivalent, though distinct ranges.  For instance,
10365 
10366      1	x_9 = p_3->a;
10367      2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
10368      3	if (p_4 == q_2)
10369      4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
10370      5	endif
10371      6	if (q_2)
10372 
10373    In the code above, pointer p_5 has range [q_2, q_2], but from the
10374    code we can also determine that p_5 cannot be NULL and, if q_2 had
10375    a non-varying range, p_5's range should also be compatible with it.
10376 
10377    These equivalences are created by two expressions: ASSERT_EXPR and
10378    copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
10379    result of another assertion, then we can use the fact that p_5 and
10380    p_4 are equivalent when evaluating p_5's range.
10381 
10382    Together with value ranges, we also propagate these equivalences
10383    between names so that we can take advantage of information from
10384    multiple ranges when doing final replacement.  Note that this
10385    equivalency relation is transitive but not symmetric.
10386 
10387    In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
10388    cannot assert that q_2 is equivalent to p_5 because q_2 may be used
10389    in contexts where that assertion does not hold (e.g., in line 6).
10390 
10391    TODO, the main difference between this pass and Patterson's is that
10392    we do not propagate edge probabilities.  We only compute whether
10393    edges can be taken or not.  That is, instead of having a spectrum
10394    of jump probabilities between 0 and 1, we only deal with 0, 1 and
10395    DON'T KNOW.  In the future, it may be worthwhile to propagate
10396    probabilities to aid branch prediction.  */
10397 
10398 static unsigned int
10399 execute_vrp (bool warn_array_bounds_p)
10400 {
10401   int i;
10402   edge e;
10403   switch_update *su;
10404 
10405   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
10406   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
10407   scev_initialize ();
10408 
10409   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
10410      Inserting assertions may split edges which will invalidate
10411      EDGE_DFS_BACK.  */
10412   insert_range_assertions ();
10413 
10414   to_remove_edges.create (10);
10415   to_update_switch_stmts.create (5);
10416   threadedge_initialize_values ();
10417 
10418   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
10419   mark_dfs_back_edges ();
10420 
10421   vrp_initialize ();
10422   ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
10423   vrp_finalize (warn_array_bounds_p);
10424 
10425   free_numbers_of_iterations_estimates (cfun);
10426 
10427   /* ASSERT_EXPRs must be removed before finalizing jump threads
10428      as finalizing jump threads calls the CFG cleanup code which
10429      does not properly handle ASSERT_EXPRs.  */
10430   remove_range_assertions ();
10431 
10432   /* If we exposed any new variables, go ahead and put them into
10433      SSA form now, before we handle jump threading.  This simplifies
10434      interactions between rewriting of _DECL nodes into SSA form
10435      and rewriting SSA_NAME nodes into SSA form after block
10436      duplication and CFG manipulation.  */
10437   update_ssa (TODO_update_ssa);
10438 
10439   finalize_jump_threads ();
10440 
10441   /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
10442      CFG in a broken state and requires a cfg_cleanup run.  */
10443   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
10444     remove_edge (e);
10445   /* Update SWITCH_EXPR case label vector.  */
10446   FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
10447     {
10448       size_t j;
10449       size_t n = TREE_VEC_LENGTH (su->vec);
10450       tree label;
10451       gimple_switch_set_num_labels (su->stmt, n);
10452       for (j = 0; j < n; j++)
10453 	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
10454       /* As we may have replaced the default label with a regular one
10455 	 make sure to make it a real default label again.  This ensures
10456 	 optimal expansion.  */
10457       label = gimple_switch_label (su->stmt, 0);
10458       CASE_LOW (label) = NULL_TREE;
10459       CASE_HIGH (label) = NULL_TREE;
10460     }
10461 
10462   if (to_remove_edges.length () > 0)
10463     {
10464       free_dominance_info (CDI_DOMINATORS);
10465       loops_state_set (LOOPS_NEED_FIXUP);
10466     }
10467 
10468   to_remove_edges.release ();
10469   to_update_switch_stmts.release ();
10470   threadedge_finalize_values ();
10471 
10472   scev_finalize ();
10473   loop_optimizer_finalize ();
10474   return 0;
10475 }
10476 
10477 namespace {
10478 
10479 const pass_data pass_data_vrp =
10480 {
10481   GIMPLE_PASS, /* type */
10482   "vrp", /* name */
10483   OPTGROUP_NONE, /* optinfo_flags */
10484   TV_TREE_VRP, /* tv_id */
10485   PROP_ssa, /* properties_required */
10486   0, /* properties_provided */
10487   0, /* properties_destroyed */
10488   0, /* todo_flags_start */
10489   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
10490 };
10491 
10492 class pass_vrp : public gimple_opt_pass
10493 {
10494 public:
10495   pass_vrp (gcc::context *ctxt)
10496     : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
10497   {}
10498 
10499   /* opt_pass methods: */
10500   opt_pass * clone () { return new pass_vrp (m_ctxt); }
10501   void set_pass_param (unsigned int n, bool param)
10502     {
10503       gcc_assert (n == 0);
10504       warn_array_bounds_p = param;
10505     }
10506   virtual bool gate (function *) { return flag_tree_vrp != 0; }
10507   virtual unsigned int execute (function *)
10508     { return execute_vrp (warn_array_bounds_p); }
10509 
10510  private:
10511   bool warn_array_bounds_p;
10512 }; // class pass_vrp
10513 
10514 } // anon namespace
10515 
10516 gimple_opt_pass *
10517 make_pass_vrp (gcc::context *ctxt)
10518 {
10519   return new pass_vrp (ctxt);
10520 }
10521