/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of location lists indicating where to insert assertions.
   ASSERTS_FOR[I] holds a list of ASSERT_LOCUS_T nodes that describe
   where ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

vec<edge> to_remove_edges;
vec<switch_update> to_update_switch_stmts;


/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
	  || (type_max != NULL_TREE
	      && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
	  || (type_min != NULL_TREE
	      && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */
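/* A worked example (an illustrative sketch, not from the original
   sources): for an unsigned 8-bit VR_RANGE [3, 41] with NONZERO_BITS
   0x30, the values in the range whose set bits all lie within 0x30
   are 0x10 and 0x20, so the bounds tighten to the VR_RANGE
   [0x10, 0x20].  */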

enum value_range_type
intersect_range_with_nonzero_bits (enum value_range_type vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}

/* Set value range VR to VR_UNDEFINED.  */

static inline void
set_value_range_to_undefined (value_range *vr)
{
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to VR_VARYING.  */

void
set_value_range_to_varying (value_range *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

void
set_value_range (value_range *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
  /* Check the validity of the range.  */
  if (flag_checking
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
    {
      int cmp;

      gcc_assert (min && max);

      gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
    }

  if (flag_checking
      && (t == VR_UNDEFINED || t == VR_VARYING))
    {
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
    }

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}


/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */
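/* A worked example (an illustrative sketch, not from the original
   sources): for an unsigned 8-bit type, the wrapping range [250, 10],
   which stands for [250, 255] U [0, 10], is canonicalized to the
   anti-range ~[11, 249].  */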

void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* MIN and MAX are in the wrong order; to swap them we also need to
     adjust the VR type.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one-bit precision, if max < min the swapped range covers
	 all values: for VR_RANGE it is varying and for VR_ANTI_RANGE
	 it is empty, so drop to varying in both cases.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ???  This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
        {
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
        }
      else if (is_max)
        {
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
        }
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set_value_range (vr, t, min, max, equiv);
}

/* Copy value range FROM into value range TO.  */

void
copy_value_range (value_range *to, value_range *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag.  */

void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
set_value_range_to_nonnull (value_range *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}


/* Set value range VR to a NULL range of type TYPE.  */

void
set_value_range_to_null (value_range *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}


/* If abs (min) < abs (max), set VR to [-max, max]; if
   abs (min) >= abs (max), set VR to [-min, min].  */
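/* A worked example (an illustrative sketch, not from the original
   sources): for MIN = -5 and MAX = 3 we have abs (MIN) >= abs (MAX),
   so VR becomes [-5, 5]; 5 is the largest absolute value any member
   of [-5, 3] can have.  */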

static void
abs_extent_range (value_range *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}

/* Return true if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
	  || ((!b1 || bitmap_empty_p (b1))
	      && (!b2 || bitmap_empty_p (b2)))
	  || (b1 && b2
	      && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is ~[0, 0].  */

bool
range_is_nonnull (value_range *vr)
{
  return vr->type == VR_ANTI_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}


/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (value_range *vr)
{
  return vr->type == VR_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be
   a singleton.  */

bool
range_int_cst_p (value_range *vr)
{
  return (vr->type == VR_RANGE
	  && TREE_CODE (vr->max) == INTEGER_CST
	  && TREE_CODE (vr->min) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (value_range *vr)
{
  return (range_int_cst_p (vr)
	  && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

bool
symbolic_range_p (value_range *vr)
{
  return (!is_gimple_min_invariant (vr->min)
          || !is_gimple_min_invariant (vr->max));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */
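/* A worked example (an illustrative sketch, not from the original
   sources): for T = x_1 + 5 this returns x_1 with *NEG = false and
   *INV = 5; for T = 3 - x_1 it returns x_1 with *NEG = true and
   *INV = 3.  */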

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

   	-2 if VAL1 and VAL2 cannot be compared at compile-time,
   	-1 if VAL1 < VAL2,
   	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */
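/* A worked example (an illustrative sketch, not from the original
   sources): comparing x_1 + 1 with x_1 + 3 when signed overflow is
   undefined returns -1, because both values use the same symbol and
   the invariants compare as 1 < 3; *STRICT_OVERFLOW_P is ordinarily
   set to true in this case.  */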

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}


/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
          0 if VAL is not inside [MIN, MAX],
	 -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}


/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.
   */

static inline bool
value_ranges_intersect_p (value_range *vr0, value_range *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}


/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
   include the value zero, -2 if we cannot tell.  */

int
range_includes_zero_p (tree min, tree max)
{
  tree zero = build_int_cst (TREE_TYPE (min), 0);
  return value_inside_range (zero, min, max);
}

/* Return true if *VR is known to only contain nonnegative values.  */

static inline bool
value_range_nonnegative_p (value_range *vr)
{
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);
    }

  return false;
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

tree
value_range_constant_singleton (value_range *vr)
{
  if (vr->type == VR_RANGE
      && vrp_operand_equal_p (vr->min, vr->max)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}

/* Wrapper around int_const_binop.  Return true if we can compute the
   result; i.e. if the operation doesn't overflow or if the overflow is
   undefined.  In the latter case (if the operation overflows and
   overflow is undefined), then adjust the result to be -INF or +INF
   depending on CODE, VAL1 and VAL2.  Return the value in *RES.

   Return false for division by zero, for which the result is
   indeterminate.  */
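/* A worked example (an illustrative sketch, not from the original
   sources): for a signed 8-bit type with undefined overflow,
   MULT_EXPR of 100 and 2 overflows; both operands are positive, so
   *RES is saturated to +INF (127) and the function returns true.  */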

static bool
vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
{
  bool overflow = false;
  signop sign = TYPE_SIGN (TREE_TYPE (val1));

  switch (code)
    {
    case RSHIFT_EXPR:
    case LSHIFT_EXPR:
      {
	wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
	if (wi::neg_p (wval2))
	  {
	    wval2 = -wval2;
	    if (code == RSHIFT_EXPR)
	      code = LSHIFT_EXPR;
	    else
	      code = RSHIFT_EXPR;
	  }

	if (code == RSHIFT_EXPR)
	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  *res = wi::rshift (wi::to_wide (val1), wval2, sign);
	else
	  *res = wi::lshift (wi::to_wide (val1), wval2);
	break;
      }

    case MULT_EXPR:
      *res = wi::mul (wi::to_wide (val1),
		      wi::to_wide (val2), sign, &overflow);
      break;

    case TRUNC_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (val2 == 0)
	return false;
      else
	*res = wi::div_trunc (wi::to_wide (val1),
			      wi::to_wide (val2), sign, &overflow);
      break;

    case FLOOR_DIV_EXPR:
      if (val2 == 0)
	return false;
      *res = wi::div_floor (wi::to_wide (val1),
			    wi::to_wide (val2), sign, &overflow);
      break;

    case CEIL_DIV_EXPR:
      if (val2 == 0)
	return false;
      *res = wi::div_ceil (wi::to_wide (val1),
			   wi::to_wide (val2), sign, &overflow);
      break;

    case ROUND_DIV_EXPR:
      if (val2 == 0)
	return false;
      *res = wi::div_round (wi::to_wide (val1),
			    wi::to_wide (val2), sign, &overflow);
      break;

    default:
      gcc_unreachable ();
    }

  if (overflow
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
    {
      /* If the operation overflowed return -INF or +INF depending
	 on the operation and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  */
	  || (code == PLUS_EXPR && sgn1 >= 0)
	  /* For subtraction, operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  */
	  || (code == MINUS_EXPR && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	*res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)),
			      TYPE_SIGN (TREE_TYPE (val1)));
      else
	*res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)),
			      TYPE_SIGN (TREE_TYPE (val1)));
      return true;
    }

  return !overflow;
}


/* For range VR compute two wide_int bitmasks.  If a bit in
   *MAY_BE_NONZERO is unset, it means the bit is 0 for all numbers in
   the range; otherwise it might be 0 or 1.  If a bit in
   *MUST_BE_NONZERO is set, it means the bit is 1 for all numbers in
   the range; otherwise it might be 0 or 1.  */
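/* A worked example (an illustrative sketch, not from the original
   sources): for the VR_RANGE [4, 7], every value has bit 2 set and
   no bits above it, so *MUST_BE_NONZERO is 0x4 and *MAY_BE_NONZERO
   is 0x7.  */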

bool
zero_nonzero_bits_from_vr (const tree expr_type,
			   value_range *vr,
			   wide_int *may_be_nonzero,
			   wide_int *must_be_nonzero)
{
  *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
  *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
  if (!range_int_cst_p (vr))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      *may_be_nonzero = wi::to_wide (vr->min);
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
	   || tree_int_cst_sgn (vr->max) < 0)
    {
      wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
      *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
      *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
      if (xor_mask != 0)
	{
	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
				    may_be_nonzero->get_precision ());
	  *may_be_nonzero = *may_be_nonzero | mask;
	  *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
	}
    }

  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */
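/* A worked example (an illustrative sketch, not from the original
   sources): for a 32-bit int, the anti-range ~[3, 5] splits into
   *VR0 = [INT_MIN, 2] and *VR1 = [6, INT_MAX].  */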

static bool
ranges_from_anti_range (value_range *ar,
			value_range *vr0, value_range *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
    }
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
      vr1->max = vrp_val_max (type);
    }
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}

/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  */

static void
extract_range_from_multiplicative_op_1 (value_range *vr,
					enum tree_code code,
					value_range *vr0, value_range *vr1)
{
  enum value_range_type rtype;
  wide_int val, min, max;
  tree type;

  /* Multiplications, divisions and shifts are a bit tricky to handle:
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form
     the new range.  */
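  /* For example (an illustrative sketch, not from the original
     sources): [1, 2] * [3, 4] evaluates the four cross products
     1*3, 1*4, 2*3 and 2*4 = {3, 4, 6, 8}, so the resulting range
     is [3, 8].  */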
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert (vr0->type == VR_RANGE
	      && vr0->type == vr1->type);

  rtype = vr0->type;
  type = TREE_TYPE (vr0->min);
  signop sgn = TYPE_SIGN (type);

  /* Compute the 4 cross operations and their minimum and maximum value.  */
  if (!vrp_int_const_binop (code, vr0->min, vr1->min, &val))
    {
      set_value_range_to_varying (vr);
      return;
    }
  min = max = val;

  if (vr1->max != vr1->min)
    {
      if (!vrp_int_const_binop (code, vr0->min, vr1->max, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  if (vr0->max != vr0->min)
    {
      if (!vrp_int_const_binop (code, vr0->max, vr1->min, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  if (vr0->min != vr0->max && vr1->min != vr1->max)
    {
      if (!vrp_int_const_binop (code, vr0->max, vr1->max, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  /* If the new range has its limits swapped around (MIN > MAX),
     then the operation caused one of them to wrap around, mark
     the new range VARYING.  */
  if (wi::gt_p (min, max, sgn))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt for [-INF, +INF].
     We learn nothing when we have INF on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF].  */
  if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn))
      && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, rtype,
		   wide_int_to_tree (type, min),
		   wide_int_to_tree (type, max), NULL);
}

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

void
extract_range_from_binary_expr_1 (value_range *vr,
				  enum tree_code code, tree expr_type,
				  value_range *vr0_, value_range *vr1_)
{
  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_meet.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR
      && vr0.type == VR_ANTI_RANGE
      && vr0.min == vr0.max
      && integer_zerop (vr0.min))
    {
      set_value_range_to_nonnull (vr, expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or a symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness; if both are non-null, then the result is nonnull.
	     If both are null, then the result is null.  Otherwise they
	     are varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      const bool minus_p = (code == MINUS_EXPR);
      tree min_op0 = vr0.min;
      tree min_op1 = minus_p ? vr1.max : vr1.min;
      tree max_op0 = vr0.max;
      tree max_op1 = minus_p ? vr1.min : vr1.max;
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
	 single-symbolic ranges, try to compute the precise resulting range,
	 but only if we know that this resulting range will also be constant
	 or single-symbolic.  */
      if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
	  && (TREE_CODE (min_op0) == INTEGER_CST
	      || (sym_min_op0
		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
	  && (TREE_CODE (min_op1) == INTEGER_CST
	      || (sym_min_op1
		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
	  && (!(sym_min_op0 && sym_min_op1)
	      || (sym_min_op0 == sym_min_op1
		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
	  && (TREE_CODE (max_op0) == INTEGER_CST
	      || (sym_max_op0
		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
	  && (TREE_CODE (max_op1) == INTEGER_CST
	      || (sym_max_op1
		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
	  && (!(sym_max_op0 && sym_max_op1)
	      || (sym_max_op0 == sym_max_op1
		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
	{
	  const signop sgn = TYPE_SIGN (expr_type);
	  const unsigned int prec = TYPE_PRECISION (expr_type);
	  wide_int type_min, type_max, wmin, wmax;
	  int min_ovf = 0;
	  int max_ovf = 0;

	  /* Get the lower and upper bounds of the type.  */
	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      type_min = wi::min_value (prec, sgn);
	      type_max = wi::max_value (prec, sgn);
	    }
	  else
	    {
	      type_min = wi::to_wide (vrp_val_min (expr_type));
	      type_max = wi::to_wide (vrp_val_max (expr_type));
	    }

	  /* Combine the lower bounds, if any.  */
	  if (min_op0 && min_op1)
	    {
	      if (minus_p)
		{
		  wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (0, wi::to_wide (min_op1), sgn)
		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
		    min_ovf = wi::cmp (wi::to_wide (min_op0),
				       wi::to_wide (min_op1), sgn);
		}
	      else
		{
		  wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (wi::to_wide (min_op1), 0, sgn)
		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
		    min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn);
		}
	    }
	  else if (min_op0)
	    wmin = wi::to_wide (min_op0);
	  else if (min_op1)
	    {
	      if (minus_p)
		{
		  wmin = -wi::to_wide (min_op1);

		  /* Check for overflow.  */
		  if (sgn == SIGNED
		      && wi::neg_p (wi::to_wide (min_op1))
		      && wi::neg_p (wmin))
		    min_ovf = 1;
		  else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0)
		    min_ovf = -1;
		}
	      else
		wmin = wi::to_wide (min_op1);
	    }
	  else
	    wmin = wi::shwi (0, prec);

	  /* Combine the upper bounds, if any.  */
	  if (max_op0 && max_op1)
	    {
	      if (minus_p)
		{
		  wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1);

		  /* Check for overflow.  */
		  if (wi::cmp (0, wi::to_wide (max_op1), sgn)
		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
		    max_ovf = wi::cmp (wi::to_wide (max_op0),
				       wi::to_wide (max_op1), sgn);
		}
	      else
		{
		  wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1);

		  if (wi::cmp (wi::to_wide (max_op1), 0, sgn)
		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
		    max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn);
		}
	    }
	  else if (max_op0)
	    wmax = wi::to_wide (max_op0);
	  else if (max_op1)
	    {
	      if (minus_p)
		{
		  wmax = -wi::to_wide (max_op1);

		  /* Check for overflow.  */
		  if (sgn == SIGNED
		      && wi::neg_p (wi::to_wide (max_op1))
		      && wi::neg_p (wmax))
		    max_ovf = 1;
		  else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0)
		    max_ovf = -1;
		}
	      else
		wmax = wi::to_wide (max_op1);
	    }
	  else
	    wmax = wi::shwi (0, prec);

	  /* Check for type overflow.  */
	  if (min_ovf == 0)
	    {
	      if (wi::cmp (wmin, type_min, sgn) == -1)
		min_ovf = -1;
	      else if (wi::cmp (wmin, type_max, sgn) == 1)
		min_ovf = 1;
	    }
	  if (max_ovf == 0)
	    {
	      if (wi::cmp (wmax, type_min, sgn) == -1)
		max_ovf = -1;
	      else if (wi::cmp (wmax, type_max, sgn) == 1)
		max_ovf = 1;
	    }

	  /* If we have overflow for the constant part and the resulting
	     range will be symbolic, drop to VR_VARYING.  */
	  if ((min_ovf && sym_min_op0 != sym_min_op1)
	      || (max_ovf && sym_max_op0 != sym_max_op1))
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }

	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      /* If overflow wraps, truncate the values and adjust the
		 range kind and bounds appropriately.  */
	      wide_int tmin = wide_int::from (wmin, prec, sgn);
	      wide_int tmax = wide_int::from (wmax, prec, sgn);
	      if (min_ovf == max_ovf)
		{
		  /* No overflow or both overflow or underflow.  The
		     range kind stays VR_RANGE.  */
		  min = wide_int_to_tree (expr_type, tmin);
		  max = wide_int_to_tree (expr_type, tmax);
		}
	      else if ((min_ovf == -1 && max_ovf == 0)
		       || (max_ovf == 1 && min_ovf == 0))
		{
		  /* Min underflow or max overflow.  The range kind
		     changes to VR_ANTI_RANGE.  */
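		  /* For example (an illustrative sketch, not from the
		     original sources): in an 8-bit unsigned type,
		     [250, 250] + [1, 10] gives WMIN = 251, WMAX = 260
		     with MAX_OVF = 1; the truncated bounds wrap to the
		     anti-range ~[5, 250], i.e. [251, 255] U [0, 4].  */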
1648 		  bool covers = false;
1649 		  wide_int tem = tmin;
1650 		  type = VR_ANTI_RANGE;
1651 		  tmin = tmax + 1;
1652 		  if (wi::cmp (tmin, tmax, sgn) < 0)
1653 		    covers = true;
1654 		  tmax = tem - 1;
1655 		  if (wi::cmp (tmax, tem, sgn) > 0)
1656 		    covers = true;
1657 		  /* If the anti-range would cover nothing, drop to varying.
1658 		     Likewise if the anti-range bounds are outside of the
1659 		     types values.  */
1660 		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1661 		    {
1662 		      set_value_range_to_varying (vr);
1663 		      return;
1664 		    }
1665 		  min = wide_int_to_tree (expr_type, tmin);
1666 		  max = wide_int_to_tree (expr_type, tmax);
1667 		}
1668 	      else
1669 		{
1670 		  /* Other underflow and/or overflow, drop to VR_VARYING.  */
1671 		  set_value_range_to_varying (vr);
1672 		  return;
1673 		}
1674 	    }
1675 	  else
1676 	    {
1677 	      /* If overflow does not wrap, saturate to the type's min/max
1678 	         value.  */
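	      /* E.g., for a signed char type with undefined overflow,
	         [100, 120] + [20, 20] computes wmax == 140 > 127, so the
	         upper bound saturates and the result is [120, 127].  */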
1679 	      if (min_ovf == -1)
1680 		min = wide_int_to_tree (expr_type, type_min);
1681 	      else if (min_ovf == 1)
1682 		min = wide_int_to_tree (expr_type, type_max);
1683 	      else
1684 		min = wide_int_to_tree (expr_type, wmin);
1685 
1686 	      if (max_ovf == -1)
1687 		max = wide_int_to_tree (expr_type, type_min);
1688 	      else if (max_ovf == 1)
1689 		max = wide_int_to_tree (expr_type, type_max);
1690 	      else
1691 		max = wide_int_to_tree (expr_type, wmax);
1692 	    }
1693 
1694 	  /* If the result lower bound is constant, we're done;
1695 	     otherwise, build the symbolic lower bound.  */
1696 	  if (sym_min_op0 == sym_min_op1)
1697 	    ;
1698 	  else if (sym_min_op0)
1699 	    min = build_symbolic_expr (expr_type, sym_min_op0,
1700 				       neg_min_op0, min);
1701 	  else if (sym_min_op1)
1702 	    {
1703 	      /* We may not negate if that might introduce
1704 		 undefined overflow.  */
1705 	      if (! minus_p
1706 		  || neg_min_op1
1707 		  || TYPE_OVERFLOW_WRAPS (expr_type))
1708 		min = build_symbolic_expr (expr_type, sym_min_op1,
1709 					   neg_min_op1 ^ minus_p, min);
1710 	      else
1711 		min = NULL_TREE;
1712 	    }
1713 
1714 	  /* Likewise for the upper bound.  */
1715 	  if (sym_max_op0 == sym_max_op1)
1716 	    ;
1717 	  else if (sym_max_op0)
1718 	    max = build_symbolic_expr (expr_type, sym_max_op0,
1719 				       neg_max_op0, max);
1720 	  else if (sym_max_op1)
1721 	    {
1722 	      /* We may not negate if that might introduce
1723 		 undefined overflow.  */
1724 	      if (! minus_p
1725 		  || neg_max_op1
1726 		  || TYPE_OVERFLOW_WRAPS (expr_type))
1727 		max = build_symbolic_expr (expr_type, sym_max_op1,
1728 					   neg_max_op1 ^ minus_p, max);
1729 	      else
1730 		max = NULL_TREE;
1731 	    }
1732 	}
1733       else
1734 	{
1735 	  /* For other cases, for example if we have a PLUS_EXPR with two
1736 	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
1737 	     to compute a precise range for such a case.
1738 	     ???  Even general mixed range kind operations can be expressed
1739 	     by, for example, transforming ~[3, 5] + [1, 2] to range-only
1740 	     operations and a union primitive:
1741 	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
1742 	           [-INF+1, 4]     U    [6, +INF(OVF)]
1743 	     though usually the union is not exactly representable with
1744 	     a single range or anti-range as the above is
1745 		 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1746 	     but one could use a scheme similar to equivalences for this. */
1747 	  set_value_range_to_varying (vr);
1748 	  return;
1749 	}
1750     }
1751   else if (code == MIN_EXPR
1752 	   || code == MAX_EXPR)
1753     {
1754       if (vr0.type == VR_RANGE
1755 	  && !symbolic_range_p (&vr0))
1756 	{
1757 	  type = VR_RANGE;
1758 	  if (vr1.type == VR_RANGE
1759 	      && !symbolic_range_p (&vr1))
1760 	    {
1761 	      /* For operations that make the resulting range directly
1762 		 proportional to the original ranges, apply the operation to
1763 		 the same end of each range.  */
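	      /* E.g., MIN_EXPR of [1, 5] and [3, 9] is
		 [MIN (1, 3), MIN (5, 9)] == [1, 5].  */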
1764 	      min = int_const_binop (code, vr0.min, vr1.min);
1765 	      max = int_const_binop (code, vr0.max, vr1.max);
1766 	    }
1767 	  else if (code == MIN_EXPR)
1768 	    {
1769 	      min = vrp_val_min (expr_type);
1770 	      max = vr0.max;
1771 	    }
1772 	  else if (code == MAX_EXPR)
1773 	    {
1774 	      min = vr0.min;
1775 	      max = vrp_val_max (expr_type);
1776 	    }
1777 	}
1778       else if (vr1.type == VR_RANGE
1779 	       && !symbolic_range_p (&vr1))
1780 	{
1781 	  type = VR_RANGE;
1782 	  if (code == MIN_EXPR)
1783 	    {
1784 	      min = vrp_val_min (expr_type);
1785 	      max = vr1.max;
1786 	    }
1787 	  else if (code == MAX_EXPR)
1788 	    {
1789 	      min = vr1.min;
1790 	      max = vrp_val_max (expr_type);
1791 	    }
1792 	}
1793       else
1794 	{
1795 	  set_value_range_to_varying (vr);
1796 	  return;
1797 	}
1798     }
1799   else if (code == MULT_EXPR)
1800     {
1801       /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
1802 	 drop to varying.  This test requires 2*prec bits if both
1803 	 operands are signed and 2*prec + 2 bits if either is not.  */
1804 
1805       signop sign = TYPE_SIGN (expr_type);
1806       unsigned int prec = TYPE_PRECISION (expr_type);
1807 
1808       if (!range_int_cst_p (&vr0)
1809 	  || !range_int_cst_p (&vr1))
1810 	{
1811 	  set_value_range_to_varying (vr);
1812 	  return;
1813 	}
1814 
1815       if (TYPE_OVERFLOW_WRAPS (expr_type))
1816 	{
1817 	  typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
1818 	  typedef generic_wide_int
1819              <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
1820 	  vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
1821 	  vrp_int size = sizem1 + 1;
1822 
1823 	  /* Extend the values using the sign of the result to PREC2.
1824 	     From here on out, everything is just signed math no matter
1825 	     what the input types were.  */
1826           vrp_int min0 = vrp_int_cst (vr0.min);
1827           vrp_int max0 = vrp_int_cst (vr0.max);
1828           vrp_int min1 = vrp_int_cst (vr1.min);
1829           vrp_int max1 = vrp_int_cst (vr1.max);
1830 	  /* Canonicalize the intervals.  */
1831 	  if (sign == UNSIGNED)
1832 	    {
1833 	      if (wi::ltu_p (size, min0 + max0))
1834 		{
1835 		  min0 -= size;
1836 		  max0 -= size;
1837 		}
1838 
1839 	      if (wi::ltu_p (size, min1 + max1))
1840 		{
1841 		  min1 -= size;
1842 		  max1 -= size;
1843 		}
1844 	    }
1845 
1846 	  vrp_int prod0 = min0 * min1;
1847 	  vrp_int prod1 = min0 * max1;
1848 	  vrp_int prod2 = max0 * min1;
1849 	  vrp_int prod3 = max0 * max1;
1850 
1851 	  /* Sort the 4 products so that min is in prod0 and max is in
1852 	     prod3.  */
1853 	  /* min0min1 > max0max1 */
1854 	  if (prod0 > prod3)
1855 	    std::swap (prod0, prod3);
1856 
1857 	  /* min0max1 > max0min1 */
1858 	  if (prod1 > prod2)
1859 	    std::swap (prod1, prod2);
1860 
1861 	  if (prod0 > prod1)
1862 	    std::swap (prod0, prod1);
1863 
1864 	  if (prod2 > prod3)
1865 	    std::swap (prod2, prod3);
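	  /* A worked example: for 8-bit unsigned [253, 255] * [253, 255],
	     both operands canonicalize to [-3, -1]; the four products
	     {9, 3, 3, 1} sort so that prod0 == 1 and prod3 == 9, and the
	     result below is [1, 9], which is exact modulo 256.  */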
1866 
1867 	  /* diff = max - min.  */
1868 	  prod2 = prod3 - prod0;
1869 	  if (wi::geu_p (prod2, sizem1))
1870 	    {
1871 	      /* the range covers all values.  */
1872 	      set_value_range_to_varying (vr);
1873 	      return;
1874 	    }
1875 
1876 	  /* The following should handle the wrapping and selecting
1877 	     VR_ANTI_RANGE for us.  */
1878 	  min = wide_int_to_tree (expr_type, prod0);
1879 	  max = wide_int_to_tree (expr_type, prod3);
1880 	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
1881 	  return;
1882 	}
1883 
1884       /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
1885 	 drop to VR_VARYING.  It would take more effort to compute a
1886 	 precise range for such a case.  For example, if we have
1887 	 op0 == 65536 and op1 == 65536 with their ranges both being
1888 	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
1889 	 we cannot claim that the product is in ~[0,0].  Note that we
1890 	 are guaranteed to have vr0.type == vr1.type at this
1891 	 point.  */
1892       if (vr0.type == VR_ANTI_RANGE
1893 	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
1894 	{
1895 	  set_value_range_to_varying (vr);
1896 	  return;
1897 	}
1898 
1899       extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
1900       return;
1901     }
1902   else if (code == RSHIFT_EXPR
1903 	   || code == LSHIFT_EXPR)
1904     {
1905       /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
1906 	 then drop to VR_VARYING.  Outside of this range we get undefined
1907 	 behavior from the shift operation.  We cannot even trust
1908 	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
1909 	 shifts, and the operation at the tree level may be widened.  */
1910       if (range_int_cst_p (&vr1)
1911 	  && compare_tree_int (vr1.min, 0) >= 0
1912 	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
1913 	{
1914 	  if (code == RSHIFT_EXPR)
1915 	    {
1916 	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
1917 		 useful ranges just from the shift count.  E.g.
1918 		 x >> 63 for signed 64-bit x is always [-1, 0].  */
1919 	      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
1920 		{
1921 		  vr0.type = type = VR_RANGE;
1922 		  vr0.min = vrp_val_min (expr_type);
1923 		  vr0.max = vrp_val_max (expr_type);
1924 		}
1925 	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
1926 	      return;
1927 	    }
1928 	  /* We can map lshifts by constants to MULT_EXPR handling.  */
1929 	  else if (code == LSHIFT_EXPR
1930 		   && range_int_cst_singleton_p (&vr1))
1931 	    {
1932 	      bool saved_flag_wrapv;
1933 	      value_range vr1p = VR_INITIALIZER;
1934 	      vr1p.type = VR_RANGE;
1935 	      vr1p.min = (wide_int_to_tree
1936 			  (expr_type,
1937 			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
1938 						TYPE_PRECISION (expr_type))));
1939 	      vr1p.max = vr1p.min;
1940 	      /* We have to use a wrapping multiply though as signed overflow
1941 		 on lshifts is implementation defined in C89.  */
1942 	      saved_flag_wrapv = flag_wrapv;
1943 	      flag_wrapv = 1;
1944 	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
1945 						&vr0, &vr1p);
1946 	      flag_wrapv = saved_flag_wrapv;
1947 	      return;
1948 	    }
1949 	  else if (code == LSHIFT_EXPR
1950 		   && range_int_cst_p (&vr0))
1951 	    {
1952 	      int prec = TYPE_PRECISION (expr_type);
1953 	      int overflow_pos = prec;
1954 	      int bound_shift;
1955 	      wide_int low_bound, high_bound;
1956 	      bool uns = TYPE_UNSIGNED (expr_type);
1957 	      bool in_bounds = false;
1958 
1959 	      if (!uns)
1960 		overflow_pos -= 1;
1961 
1962 	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
1963 	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
1964 		 overflow.  However, for that to happen, vr1.max needs to be
1965 		 zero, which means vr1 is a singleton range of zero, which
1966 		 means it should be handled by the previous LSHIFT_EXPR
1967 		 if-clause.  */
1968 	      wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
1969 	      wide_int complement = ~(bound - 1);
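	      /* E.g., for a 32-bit unsigned type and vr1 == [1, 2],
		 bound_shift == 30, bound == 0x40000000 and complement
		 == 0xc0000000: values below BOUND shift out only
		 zeroes, values above COMPLEMENT shift out only ones.  */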
1970 
1971 	      if (uns)
1972 		{
1973 		  low_bound = bound;
1974 		  high_bound = complement;
1975 		  if (wi::ltu_p (wi::to_wide (vr0.max), low_bound))
1976 		    {
1977 		      /* [5, 6] << [1, 2] == [10, 24].  */
1978 		      /* We're shifting out only zeroes, the value increases
1979 			 monotonically.  */
1980 		      in_bounds = true;
1981 		    }
1982 		  else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min)))
1983 		    {
1984 		      /* [0xffffff00, 0xffffffff] << [1, 2]
1985 		         == [0xfffffc00, 0xfffffffe].  */
1986 		      /* We're shifting out only ones, the value decreases
1987 			 monotonically.  */
1988 		      in_bounds = true;
1989 		    }
1990 		}
1991 	      else
1992 		{
1993 		  /* [-1, 1] << [1, 2] == [-4, 4].  */
1994 		  low_bound = complement;
1995 		  high_bound = bound;
1996 		  if (wi::lts_p (wi::to_wide (vr0.max), high_bound)
1997 		      && wi::lts_p (low_bound, wi::to_wide (vr0.min)))
1998 		    {
1999 		      /* For non-negative numbers, we're shifting out only
2000 			 zeroes, the value increases monotonically.
2001 			 For negative numbers, we're shifting out only ones, the
2002 			 value decreases monotonically.  */
2003 		      in_bounds = true;
2004 		    }
2005 		}
2006 
2007 	      if (in_bounds)
2008 		{
2009 		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2010 		  return;
2011 		}
2012 	    }
2013 	}
2014       set_value_range_to_varying (vr);
2015       return;
2016     }
2017   else if (code == TRUNC_DIV_EXPR
2018 	   || code == FLOOR_DIV_EXPR
2019 	   || code == CEIL_DIV_EXPR
2020 	   || code == EXACT_DIV_EXPR
2021 	   || code == ROUND_DIV_EXPR)
2022     {
2023       if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2024 	{
2025 	  /* For division, if op1 has VR_RANGE but op0 does not, something
2026 	     can be deduced just from that range.  Say [min, max] / [4, max]
2027 	     gives the range [min / 4, max / 4].  */
2028 	  if (vr1.type == VR_RANGE
2029 	      && !symbolic_range_p (&vr1)
2030 	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
2031 	    {
2032 	      vr0.type = type = VR_RANGE;
2033 	      vr0.min = vrp_val_min (expr_type);
2034 	      vr0.max = vrp_val_max (expr_type);
2035 	    }
2036 	  else
2037 	    {
2038 	      set_value_range_to_varying (vr);
2039 	      return;
2040 	    }
2041 	}
2042 
2043       /* For divisions, if flag_non_call_exceptions is true, we must
2044 	 not eliminate a division by zero.  */
2045       if (cfun->can_throw_non_call_exceptions
2046 	  && (vr1.type != VR_RANGE
2047 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2048 	{
2049 	  set_value_range_to_varying (vr);
2050 	  return;
2051 	}
2052 
2053       /* For divisions, if op0 is VR_RANGE, we can deduce a range
2054 	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2055 	 include 0.  */
2056       if (vr0.type == VR_RANGE
2057 	  && (vr1.type != VR_RANGE
2058 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2059 	{
2060 	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2061 	  int cmp;
2062 
2063 	  min = NULL_TREE;
2064 	  max = NULL_TREE;
2065 	  if (TYPE_UNSIGNED (expr_type)
2066 	      || value_range_nonnegative_p (&vr1))
2067 	    {
2068 	      /* For unsigned division or when divisor is known
2069 		 to be non-negative, the range has to cover
2070 		 all numbers from 0 to max for positive max
2071 		 and all numbers from min to 0 for negative min.  */
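	      /* E.g., for vr0 == [-50, -10] and a non-negative divisor
		 vr1 == [0, 4], the code below computes max == 0 (as
		 vr1.min is zero) and min == vr0.min, giving [-50, 0].  */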
2072 	      cmp = compare_values (vr0.max, zero);
2073 	      if (cmp == -1)
2074 		{
2075 		  /* When vr0.max < 0, vr1.min != 0 and value
2076 		     ranges for dividend and divisor are available.  */
2077 		  if (vr1.type == VR_RANGE
2078 		      && !symbolic_range_p (&vr0)
2079 		      && !symbolic_range_p (&vr1)
2080 		      && compare_values (vr1.min, zero) != 0)
2081 		    max = int_const_binop (code, vr0.max, vr1.min);
2082 		  else
2083 		    max = zero;
2084 		}
2085 	      else if (cmp == 0 || cmp == 1)
2086 		max = vr0.max;
2087 	      else
2088 		type = VR_VARYING;
2089 	      cmp = compare_values (vr0.min, zero);
2090 	      if (cmp == 1)
2091 		{
2092 		  /* For unsigned division when value ranges for dividend
2093 		     and divisor are available.  */
2094 		  if (vr1.type == VR_RANGE
2095 		      && !symbolic_range_p (&vr0)
2096 		      && !symbolic_range_p (&vr1)
2097 		      && compare_values (vr1.max, zero) != 0)
2098 		    min = int_const_binop (code, vr0.min, vr1.max);
2099 		  else
2100 		    min = zero;
2101 		}
2102 	      else if (cmp == 0 || cmp == -1)
2103 		min = vr0.min;
2104 	      else
2105 		type = VR_VARYING;
2106 	    }
2107 	  else
2108 	    {
2109 	      /* Otherwise the range is -max .. max or min .. -min
2110 		 depending on which bound is bigger in absolute value,
2111 		 as the division can change the sign.  */
2112 	      abs_extent_range (vr, vr0.min, vr0.max);
2113 	      return;
2114 	    }
2115 	  if (type == VR_VARYING)
2116 	    {
2117 	      set_value_range_to_varying (vr);
2118 	      return;
2119 	    }
2120 	}
2121       else if (range_int_cst_p (&vr0) && range_int_cst_p (&vr1))
2122 	{
2123 	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2124 	  return;
2125 	}
2126     }
2127   else if (code == TRUNC_MOD_EXPR)
2128     {
2129       if (range_is_null (&vr1))
2130 	{
2131 	  set_value_range_to_undefined (vr);
2132 	  return;
2133 	}
2134       /* ABS (A % B) < ABS (B) and either
2135 	 0 <= A % B <= A or A <= A % B <= 0.  */
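      /* A worked example: for signed operands with vr0 == [0, 100] and
	 vr1 == [3, 5], the code below computes wmax == 4 and wmin == 0,
	 i.e. the range [0, 4].  */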
2136       type = VR_RANGE;
2137       signop sgn = TYPE_SIGN (expr_type);
2138       unsigned int prec = TYPE_PRECISION (expr_type);
2139       wide_int wmin, wmax, tmp;
2140       if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
2141 	{
2142 	  wmax = wi::to_wide (vr1.max) - 1;
2143 	  if (sgn == SIGNED)
2144 	    {
2145 	      tmp = -1 - wi::to_wide (vr1.min);
2146 	      wmax = wi::smax (wmax, tmp);
2147 	    }
2148 	}
2149       else
2150 	{
2151 	  wmax = wi::max_value (prec, sgn);
2152 	  /* X % INT_MIN may be INT_MAX.  */
2153 	  if (sgn == UNSIGNED)
2154 	    wmax = wmax - 1;
2155 	}
2156 
2157       if (sgn == UNSIGNED)
2158 	wmin = wi::zero (prec);
2159       else
2160 	{
2161 	  wmin = -wmax;
2162 	  if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
2163 	    {
2164 	      tmp = wi::to_wide (vr0.min);
2165 	      if (wi::gts_p (tmp, 0))
2166 		tmp = wi::zero (prec);
2167 	      wmin = wi::smax (wmin, tmp);
2168 	    }
2169 	}
2170 
2171       if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
2172 	{
2173 	  tmp = wi::to_wide (vr0.max);
2174 	  if (sgn == SIGNED && wi::neg_p (tmp))
2175 	    tmp = wi::zero (prec);
2176 	  wmax = wi::min (wmax, tmp, sgn);
2177 	}
2178 
2179       min = wide_int_to_tree (expr_type, wmin);
2180       max = wide_int_to_tree (expr_type, wmax);
2181     }
2182   else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2183     {
2184       bool int_cst_range0, int_cst_range1;
2185       wide_int may_be_nonzero0, may_be_nonzero1;
2186       wide_int must_be_nonzero0, must_be_nonzero1;
2187 
2188       int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
2189 						  &may_be_nonzero0,
2190 						  &must_be_nonzero0);
2191       int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
2192 						  &may_be_nonzero1,
2193 						  &must_be_nonzero1);
2194 
2195       if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2196 	{
2197 	  value_range *vr0p = NULL, *vr1p = NULL;
2198 	  if (range_int_cst_singleton_p (&vr1))
2199 	    {
2200 	      vr0p = &vr0;
2201 	      vr1p = &vr1;
2202 	    }
2203 	  else if (range_int_cst_singleton_p (&vr0))
2204 	    {
2205 	      vr0p = &vr1;
2206 	      vr1p = &vr0;
2207 	    }
2208 	  /* For op & or |, attempt to optimize
2209 	     [x, y] op z into [x op z, y op z]
2210 	     if z is a constant which (for op |, its bitwise not) has n
2211 	     consecutive least significant bits cleared followed by m
2212 	     consecutive 1 bits set immediately above them and either
2213 	     m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2214 	     The least significant n bits of all the values in the range are
2215 	     cleared or set, the m bits above it are preserved and any bits
2216 	     above these are required to be the same for all values in the
2217 	     range.  */
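	  /* E.g., [0x10, 0x1f] | 0x3: here ~0x3 has n == 2 low bits
	     clear and all higher bits set, so m + n == precision and
	     the result is [0x10 | 0x3, 0x1f | 0x3] == [0x13, 0x1f].  */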
2218 	  if (vr0p && range_int_cst_p (vr0p))
2219 	    {
2220 	      wide_int w = wi::to_wide (vr1p->min);
2221 	      int m = 0, n = 0;
2222 	      if (code == BIT_IOR_EXPR)
2223 		w = ~w;
2224 	      if (wi::eq_p (w, 0))
2225 		n = TYPE_PRECISION (expr_type);
2226 	      else
2227 		{
2228 		  n = wi::ctz (w);
2229 		  w = ~(w | wi::mask (n, false, w.get_precision ()));
2230 		  if (wi::eq_p (w, 0))
2231 		    m = TYPE_PRECISION (expr_type) - n;
2232 		  else
2233 		    m = wi::ctz (w) - n;
2234 		}
2235 	      wide_int mask = wi::mask (m + n, true, w.get_precision ());
2236 	      if ((mask & wi::to_wide (vr0p->min))
2237 		  == (mask & wi::to_wide (vr0p->max)))
2238 		{
2239 		  min = int_const_binop (code, vr0p->min, vr1p->min);
2240 		  max = int_const_binop (code, vr0p->max, vr1p->min);
2241 		}
2242 	    }
2243 	}
2244 
2245       type = VR_RANGE;
2246       if (min && max)
2247 	/* Optimized above already.  */;
2248       else if (code == BIT_AND_EXPR)
2249 	{
2250 	  min = wide_int_to_tree (expr_type,
2251 				  must_be_nonzero0 & must_be_nonzero1);
2252 	  wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
2253 	  /* If both input ranges contain only negative values we can
2254 	     truncate the result range maximum to the minimum of the
2255 	     input range maxima.  */
2256 	  if (int_cst_range0 && int_cst_range1
2257 	      && tree_int_cst_sgn (vr0.max) < 0
2258 	      && tree_int_cst_sgn (vr1.max) < 0)
2259 	    {
2260 	      wmax = wi::min (wmax, wi::to_wide (vr0.max),
2261 			      TYPE_SIGN (expr_type));
2262 	      wmax = wi::min (wmax, wi::to_wide (vr1.max),
2263 			      TYPE_SIGN (expr_type));
2264 	    }
2265 	  /* If either input range contains only non-negative values
2266 	     we can truncate the result range maximum to the respective
2267 	     maximum of the input range.  */
2268 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2269 	    wmax = wi::min (wmax, wi::to_wide (vr0.max),
2270 			    TYPE_SIGN (expr_type));
2271 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2272 	    wmax = wi::min (wmax, wi::to_wide (vr1.max),
2273 			    TYPE_SIGN (expr_type));
2274 	  max = wide_int_to_tree (expr_type, wmax);
2275 	  cmp = compare_values (min, max);
2276 	  /* PR68217: In the case of signed & sign-bit-CST, the result
2277 	     should be [-INF, 0] instead of [-INF, INF].  */
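	  /* E.g., for a signed char X, X & -128 is either -128 or 0,
	     so the result range becomes [-128, 0].  */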
2278 	  if (cmp == -2 || cmp == 1)
2279 	    {
2280 	      wide_int sign_bit
2281 		= wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
2282 				       TYPE_PRECISION (expr_type));
2283 	      if (!TYPE_UNSIGNED (expr_type)
2284 		  && ((int_cst_range0
2285 		       && value_range_constant_singleton (&vr0)
2286 		       && !wi::cmps (wi::to_wide (vr0.min), sign_bit))
2287 		      || (int_cst_range1
2288 			  && value_range_constant_singleton (&vr1)
2289 			  && !wi::cmps (wi::to_wide (vr1.min), sign_bit))))
2290 		{
2291 		  min = TYPE_MIN_VALUE (expr_type);
2292 		  max = build_int_cst (expr_type, 0);
2293 		}
2294 	    }
2295 	}
2296       else if (code == BIT_IOR_EXPR)
2297 	{
2298 	  max = wide_int_to_tree (expr_type,
2299 				  may_be_nonzero0 | may_be_nonzero1);
2300 	  wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
2301 	  /* If the input ranges contain only non-negative values we can
2302 	     truncate the minimum of the result range to the maximum
2303 	     of the input range minima.  */
2304 	  if (int_cst_range0 && int_cst_range1
2305 	      && tree_int_cst_sgn (vr0.min) >= 0
2306 	      && tree_int_cst_sgn (vr1.min) >= 0)
2307 	    {
2308 	      wmin = wi::max (wmin, wi::to_wide (vr0.min),
2309 			      TYPE_SIGN (expr_type));
2310 	      wmin = wi::max (wmin, wi::to_wide (vr1.min),
2311 			      TYPE_SIGN (expr_type));
2312 	    }
2313 	  /* If either input range contains only negative values
2314 	     we can truncate the minimum of the result range to the
2315 	     respective input range minimum.  */
2316 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2317 	    wmin = wi::max (wmin, wi::to_wide (vr0.min),
2318 			    TYPE_SIGN (expr_type));
2319 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2320 	    wmin = wi::max (wmin, wi::to_wide (vr1.min),
2321 			    TYPE_SIGN (expr_type));
2322 	  min = wide_int_to_tree (expr_type, wmin);
2323 	}
2324       else if (code == BIT_XOR_EXPR)
2325 	{
2326 	  wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
2327 				       | ~(may_be_nonzero0 | may_be_nonzero1));
2328 	  wide_int result_one_bits
2329 	    = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1)
2330 	       | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0));
2331 	  max = wide_int_to_tree (expr_type, ~result_zero_bits);
2332 	  min = wide_int_to_tree (expr_type, result_one_bits);
2333 	  /* If the range has all positive or all negative values the
2334 	     result is better than VARYING.  */
2335 	  if (tree_int_cst_sgn (min) < 0
2336 	      || tree_int_cst_sgn (max) >= 0)
2337 	    ;
2338 	  else
2339 	    max = min = NULL_TREE;
2340 	}
2341     }
2342   else
2343     gcc_unreachable ();
2344 
2345   /* If either MIN or MAX overflowed, then set the resulting range to
2346      VARYING.  */
2347   if (min == NULL_TREE
2348       || TREE_OVERFLOW_P (min)
2349       || max == NULL_TREE
2350       || TREE_OVERFLOW_P (max))
2351     {
2352       set_value_range_to_varying (vr);
2353       return;
2354     }
2355 
2356   /* We punt for [-INF, +INF].
2357      We learn nothing when we have INF on both sides.
2358      Note that we do accept [-INF, -INF] and [+INF, +INF].  */
2359   if (vrp_val_is_min (min) && vrp_val_is_max (max))
2360     {
2361       set_value_range_to_varying (vr);
2362       return;
2363     }
2364 
2365   cmp = compare_values (min, max);
2366   if (cmp == -2 || cmp == 1)
2367     {
2368       /* If the new range has its limits swapped around (MIN > MAX),
2369 	 then the operation caused one of them to wrap around, mark
2370 	 the new range VARYING.  */
2371       set_value_range_to_varying (vr);
2372     }
2373   else
2374     set_value_range (vr, type, min, max, NULL);
2375 }
2376 
2377 /* Extract range information from a unary operation CODE based on
2378    the range of its operand *VR0 of type OP0_TYPE, with result type TYPE.
2379    The resulting range is stored in *VR.  */
2380 
2381 void
2382 extract_range_from_unary_expr (value_range *vr,
2383 			       enum tree_code code, tree type,
2384 			       value_range *vr0_, tree op0_type)
2385 {
2386   value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2387 
2388   /* VRP only operates on integral and pointer types.  */
2389   if (!(INTEGRAL_TYPE_P (op0_type)
2390 	|| POINTER_TYPE_P (op0_type))
2391       || !(INTEGRAL_TYPE_P (type)
2392 	   || POINTER_TYPE_P (type)))
2393     {
2394       set_value_range_to_varying (vr);
2395       return;
2396     }
2397 
2398   /* If VR0 is UNDEFINED, so is the result.  */
2399   if (vr0.type == VR_UNDEFINED)
2400     {
2401       set_value_range_to_undefined (vr);
2402       return;
2403     }
2404 
2405   /* Handle operations that we express in terms of others.  */
2406   if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
2407     {
2408       /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
2409       copy_value_range (vr, &vr0);
2410       return;
2411     }
2412   else if (code == NEGATE_EXPR)
2413     {
2414       /* -X is simply 0 - X, so re-use existing code that also handles
2415          anti-ranges fine.  */
2416       value_range zero = VR_INITIALIZER;
2417       set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
2418       extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
2419       return;
2420     }
2421   else if (code == BIT_NOT_EXPR)
2422     {
2423       /* ~X is simply -1 - X, so re-use existing code that also handles
2424          anti-ranges fine.  */
2425       value_range minusone = VR_INITIALIZER;
2426       set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
2427       extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
2428 					type, &minusone, &vr0);
2429       return;
2430     }
2431 
2432   /* Now canonicalize anti-ranges to ranges when they are not symbolic
2433      and express op ~[]  as (op []') U (op []'').  */
2434   if (vr0.type == VR_ANTI_RANGE
2435       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2436     {
2437       extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2438       if (vrtem1.type != VR_UNDEFINED)
2439 	{
2440 	  value_range vrres = VR_INITIALIZER;
2441 	  extract_range_from_unary_expr (&vrres, code, type,
2442 					 &vrtem1, op0_type);
2443 	  vrp_meet (vr, &vrres);
2444 	}
2445       return;
2446     }
2447 
2448   if (CONVERT_EXPR_CODE_P (code))
2449     {
2450       tree inner_type = op0_type;
2451       tree outer_type = type;
2452 
2453       /* If the expression evaluates to a pointer, we are only interested in
2454 	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
2455       if (POINTER_TYPE_P (type))
2456 	{
2457 	  if (range_is_nonnull (&vr0))
2458 	    set_value_range_to_nonnull (vr, type);
2459 	  else if (range_is_null (&vr0))
2460 	    set_value_range_to_null (vr, type);
2461 	  else
2462 	    set_value_range_to_varying (vr);
2463 	  return;
2464 	}
2465 
2466       /* If VR0 is varying and we increase the type precision, assume
2467 	 a full range for the following transformation.  */
2468       if (vr0.type == VR_VARYING
2469 	  && INTEGRAL_TYPE_P (inner_type)
2470 	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2471 	{
2472 	  vr0.type = VR_RANGE;
2473 	  vr0.min = TYPE_MIN_VALUE (inner_type);
2474 	  vr0.max = TYPE_MAX_VALUE (inner_type);
2475 	}
2476 
2477       /* If VR0 is a constant range or anti-range and the conversion is
2478 	 not truncating we can convert the min and max values and
2479 	 canonicalize the resulting range.  Otherwise we can do the
2480 	 conversion if the size of the range is less than what the
2481 	 precision of the target type can represent and the range is
2482 	 not an anti-range.  */
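      /* E.g., converting the unsigned char range [130, 140] to signed
	 char refits both bounds and yields [-126, -116].  */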
2483       if ((vr0.type == VR_RANGE
2484 	   || vr0.type == VR_ANTI_RANGE)
2485 	  && TREE_CODE (vr0.min) == INTEGER_CST
2486 	  && TREE_CODE (vr0.max) == INTEGER_CST
2487 	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2488 	      || (vr0.type == VR_RANGE
2489 		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
2490 		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2491 		         size_int (TYPE_PRECISION (outer_type)))))))
2492 	{
2493 	  tree new_min, new_max;
2494 	  new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
2495 				    0, false);
2496 	  new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
2497 				    0, false);
2498 	  set_and_canonicalize_value_range (vr, vr0.type,
2499 					    new_min, new_max, NULL);
2500 	  return;
2501 	}
2502 
2503       set_value_range_to_varying (vr);
2504       return;
2505     }
2506   else if (code == ABS_EXPR)
2507     {
2508       tree min, max;
2509       int cmp;
2510 
2511       /* Pass through vr0 in the easy cases.  */
2512       if (TYPE_UNSIGNED (type)
2513 	  || value_range_nonnegative_p (&vr0))
2514 	{
2515 	  copy_value_range (vr, &vr0);
2516 	  return;
2517 	}
2518 
2519       /* For the remaining varying or symbolic ranges we can't do anything
2520 	 useful.  */
2521       if (vr0.type == VR_VARYING
2522 	  || symbolic_range_p (&vr0))
2523 	{
2524 	  set_value_range_to_varying (vr);
2525 	  return;
2526 	}
2527 
2528       /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
2529          useful range.  */
2530       if (!TYPE_OVERFLOW_UNDEFINED (type)
2531 	  && ((vr0.type == VR_RANGE
2532 	       && vrp_val_is_min (vr0.min))
2533 	      || (vr0.type == VR_ANTI_RANGE
2534 		  && !vrp_val_is_min (vr0.min))))
2535 	{
2536 	  set_value_range_to_varying (vr);
2537 	  return;
2538 	}
2539 
2540       /* ABS_EXPR may flip the range around, if the original range
2541 	 included negative values.  */
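      /* E.g., for vr0 == [-5, 3] this computes min == 5 and max == 3;
	 since the range includes zero, the code below then produces
	 [0, 5].  */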
2542       if (!vrp_val_is_min (vr0.min))
2543 	min = fold_unary_to_constant (code, type, vr0.min);
2544       else
2545 	min = TYPE_MAX_VALUE (type);
2546 
2547       if (!vrp_val_is_min (vr0.max))
2548 	max = fold_unary_to_constant (code, type, vr0.max);
2549       else
2550 	max = TYPE_MAX_VALUE (type);
2551 
2552       cmp = compare_values (min, max);
2553 
2554       /* If a VR_ANTI_RANGE contains zero, then we have
2555 	 ~[-INF, min(MIN, MAX)].  */
2556       if (vr0.type == VR_ANTI_RANGE)
2557 	{
2558 	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2559 	    {
2560 	      /* Take the lower of the two values.  */
2561 	      if (cmp != 1)
2562 		max = min;
2563 
2564 	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2565 	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2566 		 flag_wrapv is set and the original anti-range doesn't include
2567 	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
2568 	      if (TYPE_OVERFLOW_WRAPS (type))
2569 		{
2570 		  tree type_min_value = TYPE_MIN_VALUE (type);
2571 
2572 		  min = (vr0.min != type_min_value
2573 			 ? int_const_binop (PLUS_EXPR, type_min_value,
2574 					    build_int_cst (TREE_TYPE (type_min_value), 1))
2575 			 : type_min_value);
2576 		}
2577 	      else
2578 		min = TYPE_MIN_VALUE (type);
2579 	    }
2580 	  else
2581 	    {
2582 	      /* All else has failed, so create the range [0, INF], even for
2583 	         flag_wrapv since TYPE_MIN_VALUE is in the original
2584 	         anti-range.  */
2585 	      vr0.type = VR_RANGE;
2586 	      min = build_int_cst (type, 0);
2587 	      max = TYPE_MAX_VALUE (type);
2588 	    }
2589 	}
2590 
2591       /* If the range contains zero then we know that the minimum value in the
2592          range will be zero.  */
2593       else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2594 	{
2595 	  if (cmp == 1)
2596 	    max = min;
2597 	  min = build_int_cst (type, 0);
2598 	}
2599       else
2600 	{
2601           /* If the range was reversed, swap MIN and MAX.  */
2602 	  if (cmp == 1)
2603 	    std::swap (min, max);
2604 	}
2605 
2606       cmp = compare_values (min, max);
2607       if (cmp == -2 || cmp == 1)
2608 	{
2609 	  /* If the new range has its limits swapped around (MIN > MAX),
2610 	     then the operation caused one of them to wrap around, mark
2611 	     the new range VARYING.  */
2612 	  set_value_range_to_varying (vr);
2613 	}
2614       else
2615 	set_value_range (vr, vr0.type, min, max, NULL);
2616       return;
2617     }
2618 
2619   /* For unhandled operations fall back to varying.  */
2620   set_value_range_to_varying (vr);
2621   return;
2622 }
2623 
2624 /* Debugging dumps.  */
2625 
2626 void dump_value_range (FILE *, const value_range *);
2627 void debug_value_range (value_range *);
2628 void dump_all_value_ranges (FILE *);
2629 void dump_vr_equiv (FILE *, bitmap);
2630 void debug_vr_equiv (bitmap);
2631 
2632 
2633 /* Dump value range VR to FILE.  */
2634 
2635 void
2636 dump_value_range (FILE *file, const value_range *vr)
2637 {
2638   if (vr == NULL)
2639     fprintf (file, "[]");
2640   else if (vr->type == VR_UNDEFINED)
2641     fprintf (file, "UNDEFINED");
2642   else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
2643     {
2644       tree type = TREE_TYPE (vr->min);
2645 
2646       fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
2647 
2648       if (INTEGRAL_TYPE_P (type)
2649 	  && !TYPE_UNSIGNED (type)
2650 	  && vrp_val_is_min (vr->min))
2651 	fprintf (file, "-INF");
2652       else
2653 	print_generic_expr (file, vr->min);
2654 
2655       fprintf (file, ", ");
2656 
2657       if (INTEGRAL_TYPE_P (type)
2658 	  && vrp_val_is_max (vr->max))
2659 	fprintf (file, "+INF");
2660       else
2661 	print_generic_expr (file, vr->max);
2662 
2663       fprintf (file, "]");
2664 
2665       if (vr->equiv)
2666 	{
2667 	  bitmap_iterator bi;
2668 	  unsigned i, c = 0;
2669 
2670 	  fprintf (file, "  EQUIVALENCES: { ");
2671 
2672 	  EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
2673 	    {
2674 	      print_generic_expr (file, ssa_name (i));
2675 	      fprintf (file, " ");
2676 	      c++;
2677 	    }
2678 
2679 	  fprintf (file, "} (%u elements)", c);
2680 	}
2681     }
2682   else if (vr->type == VR_VARYING)
2683     fprintf (file, "VARYING");
2684   else
2685     fprintf (file, "INVALID RANGE");
2686 }
2687 
2688 
2689 /* Dump value range VR to stderr.  */
2690 
2691 DEBUG_FUNCTION void
2692 debug_value_range (value_range *vr)
2693 {
2694   dump_value_range (stderr, vr);
2695   fprintf (stderr, "\n");
2696 }
2697 
2698 
2699 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2700    create a new SSA name N and return the assertion assignment
2701    'N = ASSERT_EXPR <V, V OP W>'.  */
2702 
2703 static gimple *
2704 build_assert_expr_for (tree cond, tree v)
2705 {
2706   tree a;
2707   gassign *assertion;
2708 
2709   gcc_assert (TREE_CODE (v) == SSA_NAME
2710 	      && COMPARISON_CLASS_P (cond));
2711 
2712   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2713   assertion = gimple_build_assign (NULL_TREE, a);
2714 
2715   /* The new ASSERT_EXPR creates a new SSA name that replaces the
2716      operand of the ASSERT_EXPR.  Create it so the new name and the old one
2717      are registered in the replacement table so that we can fix the SSA web
2718      after adding all the ASSERT_EXPRs.  */
2719   tree new_def = create_new_def_for (v, assertion, NULL);
2720   /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2721      given we have to be able to fully propagate those out to re-create
2722      valid SSA when removing the asserts.  */
2723   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2724     SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2725 
2726   return assertion;
2727 }
2728 
2729 
2730 /* Return true if STMT is a GIMPLE_COND comparing floating
2731    point values.  */
2732 
2733 static inline bool
2734 fp_predicate (gimple *stmt)
2735 {
2736   GIMPLE_CHECK (stmt, GIMPLE_COND);
2737 
2738   return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2739 }
2740 
2741 /* If the range of values taken by OP can be inferred after STMT executes,
2742    return the comparison code (COMP_CODE_P) and value (VAL_P) that
2743    describes the inferred range.  Return true if a range could be
2744    inferred.  */
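/* E.g., after the store *P_1 = X_2 executes it is safe to infer
   P_1 != 0; infer_nonnull_range below detects such cases.  */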
2745 
2746 bool
2747 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2748 {
2749   *val_p = NULL_TREE;
2750   *comp_code_p = ERROR_MARK;
2751 
2752   /* Do not attempt to infer anything about names that flow through
2753      abnormal edges.  */
2754   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2755     return false;
2756 
2757   /* If STMT is the last statement of a basic block with no normal
2758      successors, there is no point inferring anything about any of its
2759      operands.  We would not be able to find a proper insertion point
2760      for the assertion, anyway.  */
2761   if (stmt_ends_bb_p (stmt))
2762     {
2763       edge_iterator ei;
2764       edge e;
2765 
2766       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2767 	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2768 	  break;
2769       if (e == NULL)
2770 	return false;
2771     }
2772 
2773   if (infer_nonnull_range (stmt, op))
2774     {
2775       *val_p = build_int_cst (TREE_TYPE (op), 0);
2776       *comp_code_p = NE_EXPR;
2777       return true;
2778     }
2779 
2780   return false;
2781 }
2782 
2783 
2784 void dump_asserts_for (FILE *, tree);
2785 void debug_asserts_for (tree);
2786 void dump_all_asserts (FILE *);
2787 void debug_all_asserts (void);
2788 
2789 /* Dump all the registered assertions for NAME to FILE.  */
2790 
2791 void
2792 dump_asserts_for (FILE *file, tree name)
2793 {
2794   assert_locus *loc;
2795 
2796   fprintf (file, "Assertions to be inserted for ");
2797   print_generic_expr (file, name);
2798   fprintf (file, "\n");
2799 
2800   loc = asserts_for[SSA_NAME_VERSION (name)];
2801   while (loc)
2802     {
2803       fprintf (file, "\t");
2804       print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2805       fprintf (file, "\n\tBB #%d", loc->bb->index);
2806       if (loc->e)
2807 	{
2808 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2809 	           loc->e->dest->index);
2810 	  dump_edge_info (file, loc->e, dump_flags, 0);
2811 	}
2812       fprintf (file, "\n\tPREDICATE: ");
2813       print_generic_expr (file, loc->expr);
2814       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2815       print_generic_expr (file, loc->val);
2816       fprintf (file, "\n\n");
2817       loc = loc->next;
2818     }
2819 
2820   fprintf (file, "\n");
2821 }
2822 
2823 
2824 /* Dump all the registered assertions for NAME to stderr.  */
2825 
2826 DEBUG_FUNCTION void
2827 debug_asserts_for (tree name)
2828 {
2829   dump_asserts_for (stderr, name);
2830 }
2831 
2832 
2833 /* Dump all the registered assertions for all the names to FILE.  */
2834 
2835 void
2836 dump_all_asserts (FILE *file)
2837 {
2838   unsigned i;
2839   bitmap_iterator bi;
2840 
2841   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2842   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2843     dump_asserts_for (file, ssa_name (i));
2844   fprintf (file, "\n");
2845 }
2846 
2847 
2848 /* Dump all the registered assertions for all the names to stderr.  */
2849 
2850 DEBUG_FUNCTION void
2851 debug_all_asserts (void)
2852 {
2853   dump_all_asserts (stderr);
2854 }
2855 
2856 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
2857 
2858 static void
2859 add_assert_info (vec<assert_info> &asserts,
2860 		 tree name, tree expr, enum tree_code comp_code, tree val)
2861 {
2862   assert_info info;
2863   info.comp_code = comp_code;
2864   info.name = name;
2865   if (TREE_OVERFLOW_P (val))
2866     val = drop_tree_overflow (val);
2867   info.val = val;
2868   info.expr = expr;
2869   asserts.safe_push (info);
2870 }
2871 
2872 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2873    'EXPR COMP_CODE VAL' at a location that dominates block BB or
2874    E->DEST, then register this location as a possible insertion point
2875    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2876 
2877    BB, E and SI provide the exact insertion point for the new
2878    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2879    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2880    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2881    must not be NULL.  */
2882 
2883 static void
2884 register_new_assert_for (tree name, tree expr,
2885 			 enum tree_code comp_code,
2886 			 tree val,
2887 			 basic_block bb,
2888 			 edge e,
2889 			 gimple_stmt_iterator si)
2890 {
2891   assert_locus *n, *loc, *last_loc;
2892   basic_block dest_bb;
2893 
2894   gcc_checking_assert (bb == NULL || e == NULL);
2895 
2896   if (e == NULL)
2897     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2898 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2899 
2900   /* Never build an assert comparing against an integer constant with
2901      TREE_OVERFLOW set.  This confuses our undefined overflow warning
2902      machinery.  */
2903   if (TREE_OVERFLOW_P (val))
2904     val = drop_tree_overflow (val);
2905 
2906   /* The new assertion A will be inserted at BB or E.  We need to
2907      determine if the new location is dominated by a previously
2908      registered location for A.  If we are doing an edge insertion,
2909      assume that A will be inserted at E->DEST.  Note that this is not
2910      necessarily true.
2911 
2912      If E is a critical edge, it will be split.  But even if E is
2913      split, the new block will dominate the same set of blocks that
2914      E->DEST dominates.
2915 
2916      The reverse, however, is not true: blocks dominated by E->DEST
2917      will not be dominated by the new block created to split E.  So,
2918      if the insertion location is on a critical edge, we will not use
2919      the new location to move another assertion previously registered
2920      at a block dominated by E->DEST.  */
2921   dest_bb = (bb) ? bb : e->dest;
2922 
2923   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2924      VAL at a block dominating DEST_BB, then we don't need to insert a new
2925      one.  Similarly, if the same assertion already exists at a block
2926      dominated by DEST_BB and the new location is not on a critical
2927      edge, then update the existing location for the assertion (i.e.,
2928      move the assertion up in the dominance tree).
2929 
2930      Note, this is implemented as a simple linked list because there
2931      should not be more than a handful of assertions registered per
2932      name.  If this becomes a performance problem, a table hashed by
2933      COMP_CODE and VAL could be implemented.  */
2934   loc = asserts_for[SSA_NAME_VERSION (name)];
2935   last_loc = loc;
2936   while (loc)
2937     {
2938       if (loc->comp_code == comp_code
2939 	  && (loc->val == val
2940 	      || operand_equal_p (loc->val, val, 0))
2941 	  && (loc->expr == expr
2942 	      || operand_equal_p (loc->expr, expr, 0)))
2943 	{
2944 	  /* If E is not a critical edge and DEST_BB
2945 	     dominates the existing location for the assertion, move
2946 	     the assertion up in the dominance tree by updating its
2947 	     location information.  */
2948 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
2949 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2950 	    {
2951 	      loc->bb = dest_bb;
2952 	      loc->e = e;
2953 	      loc->si = si;
2954 	      return;
2955 	    }
2956 	}
2957 
2958       /* Update the last node of the list and move to the next one.  */
2959       last_loc = loc;
2960       loc = loc->next;
2961     }
2962 
2963   /* If we didn't find an assertion already registered for
2964      NAME COMP_CODE VAL, add a new one at the end of the list of
2965      assertions associated with NAME.  */
2966   n = XNEW (struct assert_locus);
2967   n->bb = dest_bb;
2968   n->e = e;
2969   n->si = si;
2970   n->comp_code = comp_code;
2971   n->val = val;
2972   n->expr = expr;
2973   n->next = NULL;
2974 
2975   if (last_loc)
2976     last_loc->next = n;
2977   else
2978     asserts_for[SSA_NAME_VERSION (name)] = n;
2979 
2980   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2981 }
2982 
2983 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2984    Extract a suitable test code and value and store them into *CODE_P and
2985    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2986 
2987    If no extraction was possible, return FALSE, otherwise return TRUE.
2988 
2989    If INVERT is true, then we invert the result stored into *CODE_P.  */
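/* E.g., for the predicate 10 < X_3 with NAME == X_3, the comparison is
   flipped to X_3 > 10; with INVERT set it further becomes X_3 <= 10.  */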
2990 
2991 static bool
2992 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2993 					 tree cond_op0, tree cond_op1,
2994 					 bool invert, enum tree_code *code_p,
2995 					 tree *val_p)
2996 {
2997   enum tree_code comp_code;
2998   tree val;
2999 
3000   /* We have a comparison of the form NAME COMP VAL
3001      or VAL COMP NAME.  */
3002   if (name == cond_op1)
3003     {
3004       /* If the predicate is of the form VAL COMP NAME, flip
3005 	 COMP around because we need to register NAME as the
3006 	 first operand in the predicate.  */
3007       comp_code = swap_tree_comparison (cond_code);
3008       val = cond_op0;
3009     }
3010   else if (name == cond_op0)
3011     {
3012       /* The comparison is of the form NAME COMP VAL, so the
3013 	 comparison code remains unchanged.  */
3014       comp_code = cond_code;
3015       val = cond_op1;
3016     }
3017   else
3018     gcc_unreachable ();
3019 
3020   /* Invert the comparison code as necessary.  */
3021   if (invert)
3022     comp_code = invert_tree_comparison (comp_code, 0);
3023 
3024   /* VRP only handles integral and pointer types.  */
3025   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
3026       && ! POINTER_TYPE_P (TREE_TYPE (val)))
3027     return false;
3028 
3029   /* Do not register always-false predicates.
3030      FIXME:  this works around a limitation in fold() when dealing with
3031      enumerations.  Given 'enum { N1, N2 } x;', fold will not
3032      fold 'if (x > N2)' to 'if (0)'.  */
3033   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
3034       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
3035     {
3036       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
3037       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
3038 
3039       if (comp_code == GT_EXPR
3040 	  && (!max
3041 	      || compare_values (val, max) == 0))
3042 	return false;
3043 
3044       if (comp_code == LT_EXPR
3045 	  && (!min
3046 	      || compare_values (val, min) == 0))
3047 	return false;
3048     }
3049   *code_p = comp_code;
3050   *val_p = val;
3051   return true;
3052 }
3053 
3054 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
3055    (otherwise return VAL).  VAL and MASK must be zero-extended for
3056    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
3057    (to transform signed values into unsigned) and at the end xor
3058    SGNBIT back.  */
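/* A worked example: with PREC == 4, VAL == 5 (0b0101), MASK == 0b1010
   and SGNBIT == 0, the values RES satisfying (RES & MASK) == RES are
   {0, 2, 8, 10}; the loop below returns 8, the smallest of them above
   VAL, at BIT == 8, where ((5 + 8) & ~7) & MASK == 8 > 5.  */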
3059 
3060 static wide_int
3061 masked_increment (const wide_int &val_in, const wide_int &mask,
3062 		  const wide_int &sgnbit, unsigned int prec)
3063 {
3064   wide_int bit = wi::one (prec), res;
3065   unsigned int i;
3066 
3067   wide_int val = val_in ^ sgnbit;
3068   for (i = 0; i < prec; i++, bit += bit)
3069     {
3070       res = mask;
3071       if ((res & bit) == 0)
3072 	continue;
3073       res = bit - 1;
3074       res = wi::bit_and_not (val + bit, res);
3075       res &= mask;
3076       if (wi::gtu_p (res, val))
3077 	return res ^ sgnbit;
3078     }
3079   return val ^ sgnbit;
3080 }
3081 
3082 /* Helper for overflow_comparison_p.
3083 
3084    OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
3085    OP1's defining statement to see if it ultimately has the form
3086    OP0 CODE (OP0 PLUS INTEGER_CST)
3087 
3088    If so, return TRUE indicating this is an overflow test and store into
3089    *NEW_CST an updated constant that can be used in a narrowed range test.
3090 
3091    REVERSED indicates if the comparison was originally:
3092 
3093    OP1 CODE' OP0.
3094 
3095    This affects how we build the updated constant.  */
3096 
3097 static bool
3098 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
3099 		         bool follow_assert_exprs, bool reversed, tree *new_cst)
3100 {
3101   /* See if this is a relational operation between two SSA_NAMES with
3102      unsigned, overflow wrapping values.  If so, check it more deeply.  */
3103   if ((code == LT_EXPR || code == LE_EXPR
3104        || code == GE_EXPR || code == GT_EXPR)
3105       && TREE_CODE (op0) == SSA_NAME
3106       && TREE_CODE (op1) == SSA_NAME
3107       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
3108       && TYPE_UNSIGNED (TREE_TYPE (op0))
3109       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
3110     {
3111       gimple *op1_def = SSA_NAME_DEF_STMT (op1);
3112 
3113       /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
3114       if (follow_assert_exprs)
3115 	{
3116 	  while (gimple_assign_single_p (op1_def)
3117 		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
3118 	    {
3119 	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
3120 	      if (TREE_CODE (op1) != SSA_NAME)
3121 		break;
3122 	      op1_def = SSA_NAME_DEF_STMT (op1);
3123 	    }
3124 	}
3125 
3126       /* Now look at the defining statement of OP1 to see if it adds
3127 	 or subtracts a nonzero constant from another operand.  */
3128       if (op1_def
3129 	  && is_gimple_assign (op1_def)
3130 	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
3131 	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
3132 	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
3133 	{
3134 	  tree target = gimple_assign_rhs1 (op1_def);
3135 
3136 	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
3137 	     for one where TARGET appears on the RHS.  */
3138 	  if (follow_assert_exprs)
3139 	    {
3140 	      /* Now see if that "other operand" is op0, following the chain
3141 		 of ASSERT_EXPRs if necessary.  */
3142 	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
3143 	      while (op0 != target
3144 		     && gimple_assign_single_p (op0_def)
3145 		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
3146 		{
3147 		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
3148 		  if (TREE_CODE (op0) != SSA_NAME)
3149 		    break;
3150 		  op0_def = SSA_NAME_DEF_STMT (op0);
3151 		}
3152 	    }
3153 
3154 	  /* If we did not find our target SSA_NAME, then this is not
3155 	     an overflow test.  */
3156 	  if (op0 != target)
3157 	    return false;
3158 
3159 	  tree type = TREE_TYPE (op0);
3160 	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
3161 	  tree inc = gimple_assign_rhs2 (op1_def);
3162 	  if (reversed)
3163 	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
3164 	  else
3165 	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
3166 	  return true;
3167 	}
3168     }
3169   return false;
3170 }
3171 
3172 /* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
3173    OP1's defining statement to see if it ultimately has the form
3174    OP0 CODE (OP0 PLUS INTEGER_CST)
3175 
3176    If so, return TRUE indicating this is an overflow test and store into
3177    *NEW_CST an updated constant that can be used in a narrowed range test.
3178 
3179    These statements are left as-is in the IL to facilitate discovery of
3180    {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
3181    the alternate range representation is often useful within VRP.  */
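/* E.g., for unsigned X the test X < X + 10 is true exactly when X + 10
   does not wrap, so it can be narrowed to X <= UINT_MAX - 10; *NEW_CST
   then receives that adjusted constant.  */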
3182 
3183 bool
3184 overflow_comparison_p (tree_code code, tree name, tree val,
3185 		       bool use_equiv_p, tree *new_cst)
3186 {
3187   if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
3188     return true;
3189   return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
3190 				  use_equiv_p, true, new_cst);
3191 }
3192 
3193 
3194 /* Try to register an edge assertion for SSA name NAME on edge E for
3195    the condition COND contributing to the conditional jump pointed to by BSI.
3196    Invert the condition COND if INVERT is true.  */
3197 
3198 static void
3199 register_edge_assert_for_2 (tree name, edge e,
3200 			    enum tree_code cond_code,
3201 			    tree cond_op0, tree cond_op1, bool invert,
3202 			    vec<assert_info> &asserts)
3203 {
3204   tree val;
3205   enum tree_code comp_code;
3206 
3207   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3208 						cond_op0,
3209 						cond_op1,
3210 						invert, &comp_code, &val))
3211     return;
3212 
3213   /* Queue the assert.  */
3214   tree x;
3215   if (overflow_comparison_p (comp_code, name, val, false, &x))
3216     {
3217       enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
3218 				 ? GT_EXPR : LE_EXPR);
3219       add_assert_info (asserts, name, name, new_code, x);
3220     }
3221   add_assert_info (asserts, name, name, comp_code, val);
3222 
3223   /* In the case of NAME <= CST and NAME being defined as
3224      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
3225      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
3226      This catches range and anti-range tests.  */
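  /* E.g., with CST == 10 and CST2 == 3, asserting NAME <= 10 lets us
     also record NAME2 >= -3 and NAME2 <= 7 for the unsigned addition.  */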
3227   if ((comp_code == LE_EXPR
3228        || comp_code == GT_EXPR)
3229       && TREE_CODE (val) == INTEGER_CST
3230       && TYPE_UNSIGNED (TREE_TYPE (val)))
3231     {
3232       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3233       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
3234 
3235       /* Extract CST2 from the (optional) addition.  */
3236       if (is_gimple_assign (def_stmt)
3237 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
3238 	{
3239 	  name2 = gimple_assign_rhs1 (def_stmt);
3240 	  cst2 = gimple_assign_rhs2 (def_stmt);
3241 	  if (TREE_CODE (name2) == SSA_NAME
3242 	      && TREE_CODE (cst2) == INTEGER_CST)
3243 	    def_stmt = SSA_NAME_DEF_STMT (name2);
3244 	}
3245 
3246       /* Extract NAME2 from the (optional) sign-changing cast.  */
3247       if (gimple_assign_cast_p (def_stmt))
3248 	{
3249 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
3250 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3251 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
3252 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
3253 	    name3 = gimple_assign_rhs1 (def_stmt);
3254 	}
3255 
3256       /* If name3 is used later, create an ASSERT_EXPR for it.  */
3257       if (name3 != NULL_TREE
3258       	  && TREE_CODE (name3) == SSA_NAME
3259 	  && (cst2 == NULL_TREE
3260 	      || TREE_CODE (cst2) == INTEGER_CST)
3261 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
3262 	{
3263 	  tree tmp;
3264 
3265 	  /* Build an expression for the range test.  */
3266 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
3267 	  if (cst2 != NULL_TREE)
3268 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3269 
3270 	  if (dump_file)
3271 	    {
3272 	      fprintf (dump_file, "Adding assert for ");
3273 	      print_generic_expr (dump_file, name3);
3274 	      fprintf (dump_file, " from ");
3275 	      print_generic_expr (dump_file, tmp);
3276 	      fprintf (dump_file, "\n");
3277 	    }
3278 
3279 	  add_assert_info (asserts, name3, tmp, comp_code, val);
3280 	}
3281 
3282       /* If name2 is used later, create an ASSERT_EXPR for it.  */
3283       if (name2 != NULL_TREE
3284       	  && TREE_CODE (name2) == SSA_NAME
3285 	  && TREE_CODE (cst2) == INTEGER_CST
3286 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
3287 	{
3288 	  tree tmp;
3289 
3290 	  /* Build an expression for the range test.  */
3291 	  tmp = name2;
3292 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
3293 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
3294 	  if (cst2 != NULL_TREE)
3295 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3296 
3297 	  if (dump_file)
3298 	    {
3299 	      fprintf (dump_file, "Adding assert for ");
3300 	      print_generic_expr (dump_file, name2);
3301 	      fprintf (dump_file, " from ");
3302 	      print_generic_expr (dump_file, tmp);
3303 	      fprintf (dump_file, "\n");
3304 	    }
3305 
3306 	  add_assert_info (asserts, name2, tmp, comp_code, val);
3307 	}
3308     }
3309 
3310   /* In the case of post-increment/decrement tests like if (i++) and uses
3311      of the in/decremented value on the edge, the extra name we want to
3312      assert for is not on the def chain of the name compared.  Instead,
3313      it is in the set of use stmts.
3314      Similar cases happen for conversions that were simplified through
3315      fold_{sign_changed,widened}_comparison.  */
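  /* For instance (a hedged sketch; SSA names are invented): "if (i++ == 5)"
     is gimplified to

       i_2 = i_1 + 1;
       if (i_1 == 5) ...

     and on the true edge we can additionally assert i_2 == 6, even though
     i_2 is not on the def chain of i_1.  */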
3316   if ((comp_code == NE_EXPR
3317        || comp_code == EQ_EXPR)
3318       && TREE_CODE (val) == INTEGER_CST)
3319     {
3320       imm_use_iterator ui;
3321       gimple *use_stmt;
3322       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
3323 	{
3324 	  if (!is_gimple_assign (use_stmt))
3325 	    continue;
3326 
3327 	  /* Restrict to use stmts whose block dominates the edge's source block.  */
3328 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
3329 	    continue;
3330 
3331 	  tree name2 = gimple_assign_lhs (use_stmt);
3332 	  if (TREE_CODE (name2) != SSA_NAME)
3333 	    continue;
3334 
3335 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
3336 	  tree cst;
3337 	  if (code == PLUS_EXPR
3338 	      || code == MINUS_EXPR)
3339 	    {
3340 	      cst = gimple_assign_rhs2 (use_stmt);
3341 	      if (TREE_CODE (cst) != INTEGER_CST)
3342 		continue;
3343 	      cst = int_const_binop (code, val, cst);
3344 	    }
3345 	  else if (CONVERT_EXPR_CODE_P (code))
3346 	    {
3347 	      /* For truncating conversions we cannot record
3348 		 an inequality.  */
3349 	      if (comp_code == NE_EXPR
3350 		  && (TYPE_PRECISION (TREE_TYPE (name2))
3351 		      < TYPE_PRECISION (TREE_TYPE (name))))
3352 		continue;
3353 	      cst = fold_convert (TREE_TYPE (name2), val);
3354 	    }
3355 	  else
3356 	    continue;
3357 
3358 	  if (TREE_OVERFLOW_P (cst))
3359 	    cst = drop_tree_overflow (cst);
3360 	  add_assert_info (asserts, name2, name2, comp_code, cst);
3361 	}
3362     }
3363 
3364   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
3365       && TREE_CODE (val) == INTEGER_CST)
3366     {
3367       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3368       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
3369       tree val2 = NULL_TREE;
3370       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
3371       wide_int mask = wi::zero (prec);
3372       unsigned int nprec = prec;
3373       enum tree_code rhs_code = ERROR_MARK;
3374 
3375       if (is_gimple_assign (def_stmt))
3376 	rhs_code = gimple_assign_rhs_code (def_stmt);
3377 
3378       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
3379          assert that A != CST1 -+ CST2.  */
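      /* E.g. (an invented illustration): if b_1 = a_2 + 3 and the
	 condition is b_1 != 10, we can equivalently assert a_2 != 7.  */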
3380       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3381 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
3382 	{
3383 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3384 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3385 	  if (TREE_CODE (op0) == SSA_NAME
3386 	      && TREE_CODE (op1) == INTEGER_CST)
3387 	    {
3388 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
3389 					   ? MINUS_EXPR : PLUS_EXPR);
3390 	      op1 = int_const_binop (reverse_op, val, op1);
3391 	      if (TREE_OVERFLOW (op1))
3392 		op1 = drop_tree_overflow (op1);
3393 	      add_assert_info (asserts, op0, op0, comp_code, op1);
3394 	    }
3395 	}
3396 
3397       /* Add asserts for NAME cmp CST and NAME being defined
3398 	 as NAME = (int) NAME2.  */
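      /* For example (an invented sketch, assuming 32-bit int): for

	   i_1 = (int) u_2;
	   if (i_1 < 0) ...

	 with u_2 unsigned of equal precision, the true edge gets the
	 assertion u_2 + 0x80000000 <= 0x7fffffff (evaluated unsigned),
	 i.e. u_2 >= 0x80000000.  */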
3399       if (!TYPE_UNSIGNED (TREE_TYPE (val))
3400 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
3401 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
3402 	  && gimple_assign_cast_p (def_stmt))
3403 	{
3404 	  name2 = gimple_assign_rhs1 (def_stmt);
3405 	  if (CONVERT_EXPR_CODE_P (rhs_code)
3406 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3407 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
3408 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
3409 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
3410 		  || !tree_int_cst_equal (val,
3411 					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
3412 	    {
3413 	      tree tmp, cst;
3414 	      enum tree_code new_comp_code = comp_code;
3415 
3416 	      cst = fold_convert (TREE_TYPE (name2),
3417 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
3418 	      /* Build an expression for the range test.  */
3419 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
3420 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
3421 				 fold_convert (TREE_TYPE (name2), val));
3422 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3423 		{
3424 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
3425 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
3426 				     build_int_cst (TREE_TYPE (name2), 1));
3427 		}
3428 
3429 	      if (dump_file)
3430 		{
3431 		  fprintf (dump_file, "Adding assert for ");
3432 		  print_generic_expr (dump_file, name2);
3433 		  fprintf (dump_file, " from ");
3434 		  print_generic_expr (dump_file, tmp);
3435 		  fprintf (dump_file, "\n");
3436 		}
3437 
3438 	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
3439 	    }
3440 	}
3441 
3442       /* Add asserts for NAME cmp CST and NAME being defined as
3443 	 NAME = NAME2 >> CST2.
3444 
3445 	 Extract CST2 from the right shift.  */
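      /* For instance (an invented sketch): for

	   y_1 = x_2 >> 4;
	   if (y_1 == 3) ...

	 VAL2 is 3 << 4 = 48 and MASK is 0xf, so the true edge can assert
	 (x_2 - 48) <= 15 in the unsigned domain, i.e. x_2 in [48, 63].  */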
3446       if (rhs_code == RSHIFT_EXPR)
3447 	{
3448 	  name2 = gimple_assign_rhs1 (def_stmt);
3449 	  cst2 = gimple_assign_rhs2 (def_stmt);
3450 	  if (TREE_CODE (name2) == SSA_NAME
3451 	      && tree_fits_uhwi_p (cst2)
3452 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3453 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
3454 	      && type_has_mode_precision_p (TREE_TYPE (val)))
3455 	    {
3456 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
3457 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
3458 	    }
3459 	}
3460       if (val2 != NULL_TREE
3461 	  && TREE_CODE (val2) == INTEGER_CST
3462 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
3463 					    TREE_TYPE (val),
3464 					    val2, cst2), val))
3465 	{
3466 	  enum tree_code new_comp_code = comp_code;
3467 	  tree tmp, new_val;
3468 
3469 	  tmp = name2;
3470 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
3471 	    {
3472 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
3473 		{
3474 		  tree type = build_nonstandard_integer_type (prec, 1);
3475 		  tmp = build1 (NOP_EXPR, type, name2);
3476 		  val2 = fold_convert (type, val2);
3477 		}
3478 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
3479 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
3480 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
3481 	    }
3482 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3483 	    {
3484 	      wide_int minval
3485 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3486 	      new_val = val2;
3487 	      if (minval == wi::to_wide (new_val))
3488 		new_val = NULL_TREE;
3489 	    }
3490 	  else
3491 	    {
3492 	      wide_int maxval
3493 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3494 	      mask |= wi::to_wide (val2);
3495 	      if (wi::eq_p (mask, maxval))
3496 		new_val = NULL_TREE;
3497 	      else
3498 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
3499 	    }
3500 
3501 	  if (new_val)
3502 	    {
3503 	      if (dump_file)
3504 		{
3505 		  fprintf (dump_file, "Adding assert for ");
3506 		  print_generic_expr (dump_file, name2);
3507 		  fprintf (dump_file, " from ");
3508 		  print_generic_expr (dump_file, tmp);
3509 		  fprintf (dump_file, "\n");
3510 		}
3511 
3512 	      add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
3513 	    }
3514 	}
3515 
3516       /* Add asserts for NAME cmp CST and NAME being defined as
3517 	 NAME = NAME2 & CST2.
3518 
3519 	 Extract CST2 from the and.
3520 
3521 	 Also handle
3522 	 NAME = (unsigned) NAME2;
3523 	 casts where NAME's type is unsigned and has smaller precision
3524 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
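      /* For example (an invented sketch, with x_2 of type unsigned char):

	   t_1 = x_2 & 0xfc;
	   if (t_1 == 0x10) ...

	 gives a minimum of 0x10 (VAL & CST2) and a maximum of 0x13
	 (VAL | ~CST2) for x_2 on the true edge, so we can assert
	 x_2 - 0x10 <= 3.  */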
3525       names[0] = NULL_TREE;
3526       names[1] = NULL_TREE;
3527       cst2 = NULL_TREE;
3528       if (rhs_code == BIT_AND_EXPR
3529 	  || (CONVERT_EXPR_CODE_P (rhs_code)
3530 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
3531 	      && TYPE_UNSIGNED (TREE_TYPE (val))
3532 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3533 		 > prec))
3534 	{
3535 	  name2 = gimple_assign_rhs1 (def_stmt);
3536 	  if (rhs_code == BIT_AND_EXPR)
3537 	    cst2 = gimple_assign_rhs2 (def_stmt);
3538 	  else
3539 	    {
3540 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3541 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
3542 	    }
3543 	  if (TREE_CODE (name2) == SSA_NAME
3544 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3545 	      && TREE_CODE (cst2) == INTEGER_CST
3546 	      && !integer_zerop (cst2)
3547 	      && (nprec > 1
3548 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
3549 	    {
3550 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3551 	      if (gimple_assign_cast_p (def_stmt2))
3552 		{
3553 		  names[1] = gimple_assign_rhs1 (def_stmt2);
3554 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3555 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3556 		      || (TYPE_PRECISION (TREE_TYPE (name2))
3557 			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
3558 		    names[1] = NULL_TREE;
3559 		}
3560 	      names[0] = name2;
3561 	    }
3562 	}
3563       if (names[0] || names[1])
3564 	{
3565 	  wide_int minv, maxv, valv, cst2v;
3566 	  wide_int tem, sgnbit;
3567 	  bool valid_p = false, valn, cst2n;
3568 	  enum tree_code ccode = comp_code;
3569 
3570 	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3571 	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3572 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3573 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3574 	  /* If CST2 doesn't have the most significant bit set,
3575 	     but VAL is negative, we have a comparison like
3576 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
3577 	  if (!cst2n && valn)
3578 	    ccode = ERROR_MARK;
3579 	  if (cst2n)
3580 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3581 	  else
3582 	    sgnbit = wi::zero (nprec);
3583 	  minv = valv & cst2v;
3584 	  switch (ccode)
3585 	    {
3586 	    case EQ_EXPR:
3587 	      /* Minimum unsigned value for equality is VAL & CST2
3588 		 (should be equal to VAL, otherwise we probably should
3589 		 have folded the comparison into false) and
3590 		 maximum unsigned value is VAL | ~CST2.  */
3591 	      maxv = valv | ~cst2v;
3592 	      valid_p = true;
3593 	      break;
3594 
3595 	    case NE_EXPR:
3596 	      tem = valv | ~cst2v;
3597 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
3598 	      if (valv == 0)
3599 		{
3600 		  cst2n = false;
3601 		  sgnbit = wi::zero (nprec);
3602 		  goto gt_expr;
3603 		}
3604 	      /* If (VAL | ~CST2) is all ones, handle it as
3605 		 (X & CST2) < VAL.  */
3606 	      if (tem == -1)
3607 		{
3608 		  cst2n = false;
3609 		  valn = false;
3610 		  sgnbit = wi::zero (nprec);
3611 		  goto lt_expr;
3612 		}
3613 	      if (!cst2n && wi::neg_p (cst2v))
3614 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3615 	      if (sgnbit != 0)
3616 		{
3617 		  if (valv == sgnbit)
3618 		    {
3619 		      cst2n = true;
3620 		      valn = true;
3621 		      goto gt_expr;
3622 		    }
3623 		  if (tem == wi::mask (nprec - 1, false, nprec))
3624 		    {
3625 		      cst2n = true;
3626 		      goto lt_expr;
3627 		    }
3628 		  if (!cst2n)
3629 		    sgnbit = wi::zero (nprec);
3630 		}
3631 	      break;
3632 
3633 	    case GE_EXPR:
3634 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3635 		 is VAL and maximum unsigned value is ~0.  For signed
3636 		 comparison, if CST2 doesn't have most significant bit
3637 		 set, handle it similarly.  If CST2 has MSB set,
3638 		 the minimum is the same, and maximum is ~0U/2.  */
3639 	      if (minv != valv)
3640 		{
3641 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3642 		     VAL.  */
3643 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
3644 		  if (minv == valv)
3645 		    break;
3646 		}
3647 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3648 	      valid_p = true;
3649 	      break;
3650 
3651 	    case GT_EXPR:
3652 	    gt_expr:
3653 	      /* Find out smallest MINV where MINV > VAL
3654 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
3655 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
3656 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
3657 	      if (minv == valv)
3658 		break;
3659 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3660 	      valid_p = true;
3661 	      break;
3662 
3663 	    case LE_EXPR:
3664 	      /* Minimum unsigned value for <= is 0 and maximum
3665 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3666 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3667 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3668 		 as maximum.
3669 		 For signed comparison, if CST2 doesn't have most
3670 		 significant bit set, handle it similarly.  If CST2 has
3671 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3672 	      if (minv == valv)
3673 		maxv = valv;
3674 	      else
3675 		{
3676 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3677 		  if (maxv == valv)
3678 		    break;
3679 		  maxv -= 1;
3680 		}
3681 	      maxv |= ~cst2v;
3682 	      minv = sgnbit;
3683 	      valid_p = true;
3684 	      break;
3685 
3686 	    case LT_EXPR:
3687 	    lt_expr:
3688 	      /* Minimum unsigned value for < is 0 and maximum
3689 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3690 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3691 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3692 		 as maximum.
3693 		 For signed comparison, if CST2 doesn't have most
3694 		 significant bit set, handle it similarly.  If CST2 has
3695 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3696 	      if (minv == valv)
3697 		{
3698 		  if (valv == sgnbit)
3699 		    break;
3700 		  maxv = valv;
3701 		}
3702 	      else
3703 		{
3704 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3705 		  if (maxv == valv)
3706 		    break;
3707 		}
3708 	      maxv -= 1;
3709 	      maxv |= ~cst2v;
3710 	      minv = sgnbit;
3711 	      valid_p = true;
3712 	      break;
3713 
3714 	    default:
3715 	      break;
3716 	    }
3717 	  if (valid_p
3718 	      && (maxv - minv) != -1)
3719 	    {
3720 	      tree tmp, new_val, type;
3721 	      int i;
3722 
3723 	      for (i = 0; i < 2; i++)
3724 		if (names[i])
3725 		  {
3726 		    wide_int maxv2 = maxv;
3727 		    tmp = names[i];
3728 		    type = TREE_TYPE (names[i]);
3729 		    if (!TYPE_UNSIGNED (type))
3730 		      {
3731 			type = build_nonstandard_integer_type (nprec, 1);
3732 			tmp = build1 (NOP_EXPR, type, names[i]);
3733 		      }
3734 		    if (minv != 0)
3735 		      {
3736 			tmp = build2 (PLUS_EXPR, type, tmp,
3737 				      wide_int_to_tree (type, -minv));
3738 			maxv2 = maxv - minv;
3739 		      }
3740 		    new_val = wide_int_to_tree (type, maxv2);
3741 
3742 		    if (dump_file)
3743 		      {
3744 			fprintf (dump_file, "Adding assert for ");
3745 			print_generic_expr (dump_file, names[i]);
3746 			fprintf (dump_file, " from ");
3747 			print_generic_expr (dump_file, tmp);
3748 			fprintf (dump_file, "\n");
3749 		      }
3750 
3751 		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3752 		  }
3753 	    }
3754 	}
3755     }
3756 }
3757 
3758 /* OP is an operand of a truth value expression which is known to have
3759    a particular value.  Register any asserts for OP and for any
3760    operands in OP's defining statement.
3761 
3762    If CODE is EQ_EXPR, then we want to register that OP is zero (false);
3763    if CODE is NE_EXPR, that OP is nonzero (true).  */
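
/* For instance (an illustrative sketch; SSA names are invented): for

     t_3 = t_1 & t_2;
     if (t_3 != 0) ...

   both t_1 and t_2 are known nonzero on the true edge, so we recurse on
   each operand; if t_1 is itself set from a comparison, say
   t_1 = a_4 < b_5, that in turn yields range assertions for a_4 and b_5.  */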
3764 
3765 static void
3766 register_edge_assert_for_1 (tree op, enum tree_code code,
3767 			    edge e, vec<assert_info> &asserts)
3768 {
3769   gimple *op_def;
3770   tree val;
3771   enum tree_code rhs_code;
3772 
3773   /* We only care about SSA_NAMEs.  */
3774   if (TREE_CODE (op) != SSA_NAME)
3775     return;
3776 
3777   /* We know that OP will have a zero or nonzero value.  */
3778   val = build_int_cst (TREE_TYPE (op), 0);
3779   add_assert_info (asserts, op, op, code, val);
3780 
3781   /* Now look at how OP is set.  If it's set from a comparison,
3782      a truth operation or some bit operations, then we may be able
3783      to register information about the operands of that assignment.  */
3784   op_def = SSA_NAME_DEF_STMT (op);
3785   if (gimple_code (op_def) != GIMPLE_ASSIGN)
3786     return;
3787 
3788   rhs_code = gimple_assign_rhs_code (op_def);
3789 
3790   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3791     {
3792       bool invert = (code == EQ_EXPR ? true : false);
3793       tree op0 = gimple_assign_rhs1 (op_def);
3794       tree op1 = gimple_assign_rhs2 (op_def);
3795 
3796       if (TREE_CODE (op0) == SSA_NAME)
3797         register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3798       if (TREE_CODE (op1) == SSA_NAME)
3799         register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3800     }
3801   else if ((code == NE_EXPR
3802 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3803 	   || (code == EQ_EXPR
3804 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3805     {
3806       /* Recurse on each operand.  */
3807       tree op0 = gimple_assign_rhs1 (op_def);
3808       tree op1 = gimple_assign_rhs2 (op_def);
3809       if (TREE_CODE (op0) == SSA_NAME
3810 	  && has_single_use (op0))
3811 	register_edge_assert_for_1 (op0, code, e, asserts);
3812       if (TREE_CODE (op1) == SSA_NAME
3813 	  && has_single_use (op1))
3814 	register_edge_assert_for_1 (op1, code, e, asserts);
3815     }
3816   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3817 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3818     {
3819       /* Recurse, flipping CODE.  */
3820       code = invert_tree_comparison (code, false);
3821       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3822     }
3823   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3824     {
3825       /* Recurse through the copy.  */
3826       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3827     }
3828   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3829     {
3830       /* Recurse through the type conversion, unless it is a narrowing
3831 	 conversion or conversion from non-integral type.  */
3832       tree rhs = gimple_assign_rhs1 (op_def);
3833       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3834 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
3835 	      <= TYPE_PRECISION (TREE_TYPE (op))))
3836 	register_edge_assert_for_1 (rhs, code, e, asserts);
3837     }
3838 }
3839 
3840 /* Check if comparison
3841      NAME COND_OP INTEGER_CST
3842    has a form of
3843      (X & 11...100..0) COND_OP XX...X00...0
3844    Such comparison can yield assertions like
3845      X >= XX...X00...0
3846      X <= XX...X11...1
3847    in case of COND_OP being EQ_EXPR or
3848      X < XX...X00...0
3849      X > XX...X11...1
3850    in case of NE_EXPR.  */
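
/* For example (illustrative, with x of type unsigned char):
   (x & 0xf0) == 0x30 yields the assertions x >= 0x30 and x <= 0x3f,
   while (x & 0xf0) != 0x30 can only yield an assertion when one
   alternative is statically false, i.e. when 0x30 is the type's
   minimum (then x > 0x3f) or 0x30 | 0x0f is its maximum (then
   x < 0x30).  */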
3851 
3852 static bool
3853 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3854 		      tree *new_name, tree *low, enum tree_code *low_code,
3855 		      tree *high, enum tree_code *high_code)
3856 {
3857   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3858 
3859   if (!is_gimple_assign (def_stmt)
3860       || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3861     return false;
3862 
3863   tree t = gimple_assign_rhs1 (def_stmt);
3864   tree maskt = gimple_assign_rhs2 (def_stmt);
3865   if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3866     return false;
3867 
3868   wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3869   wide_int inv_mask = ~mask;
3870   /* A degenerate mask (zero or all ones) must have been folded away by now; don't bother optimizing.  */
3871   if (mask == 0 || inv_mask == 0)
3872     return false;
3873 
3874   /* Assume VALT is INTEGER_CST.  */
3875   wi::tree_to_wide_ref val = wi::to_wide (valt);
3876 
3877   if ((inv_mask & (inv_mask + 1)) != 0
3878       || (val & mask) != val)
3879     return false;
3880 
3881   bool is_range = cond_code == EQ_EXPR;
3882 
3883   tree type = TREE_TYPE (t);
3884   wide_int min = wi::min_value (type),
3885     max = wi::max_value (type);
3886 
3887   if (is_range)
3888     {
3889       *low_code = val == min ? ERROR_MARK : GE_EXPR;
3890       *high_code = val == max ? ERROR_MARK : LE_EXPR;
3891     }
3892   else
3893     {
3894       /* We can still generate assertion if one of alternatives
3895 	 is known to always be false.  */
3896       if (val == min)
3897 	{
3898 	  *low_code = (enum tree_code) 0;
3899 	  *high_code = GT_EXPR;
3900 	}
3901       else if ((val | inv_mask) == max)
3902 	{
3903 	  *low_code = LT_EXPR;
3904 	  *high_code = (enum tree_code) 0;
3905 	}
3906       else
3907 	return false;
3908     }
3909 
3910   *new_name = t;
3911   *low = wide_int_to_tree (type, val);
3912   *high = wide_int_to_tree (type, val | inv_mask);
3913 
3914   return true;
3915 }
3916 
3917 /* Try to register an edge assertion for SSA name NAME on edge E for
3918    the condition COND contributing to the conditional jump at the end
3919    of E's source block.  Queue the assertions in ASSERTS.  */
3920 
3921 void
3922 register_edge_assert_for (tree name, edge e,
3923 			  enum tree_code cond_code, tree cond_op0,
3924 			  tree cond_op1, vec<assert_info> &asserts)
3925 {
3926   tree val;
3927   enum tree_code comp_code;
3928   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3929 
3930   /* Do not attempt to infer anything in names that flow through
3931      abnormal edges.  */
3932   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3933     return;
3934 
3935   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3936 						cond_op0, cond_op1,
3937 						is_else_edge,
3938 						&comp_code, &val))
3939     return;
3940 
3941   /* Register ASSERT_EXPRs for name.  */
3942   register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3943 			      cond_op1, is_else_edge, asserts);
3944 
3945 
3946   /* If COND is effectively an equality test of an SSA_NAME against
3947      the value zero or one, then we may be able to assert values
3948      for SSA_NAMEs which flow into COND.  */
3949 
3950   /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3951      statement of NAME we can assert both operands of the BIT_AND_EXPR
3952      have nonzero value.  */
3953   if (((comp_code == EQ_EXPR && integer_onep (val))
3954        || (comp_code == NE_EXPR && integer_zerop (val))))
3955     {
3956       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3957 
3958       if (is_gimple_assign (def_stmt)
3959 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3960 	{
3961 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3962 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3963 	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3964 	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3965 	}
3966     }
3967 
3968   /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3969      statement of NAME we can assert both operands of the BIT_IOR_EXPR
3970      have zero value.  */
3971   if (((comp_code == EQ_EXPR && integer_zerop (val))
3972        || (comp_code == NE_EXPR && integer_onep (val))))
3973     {
3974       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3975 
3976       /* For BIT_IOR_EXPR only if NAME == 0 both operands have
3977 	 necessarily zero value, or if type-precision is one.  */
3978       if (is_gimple_assign (def_stmt)
3979 	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3980 	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3981 	          || comp_code == EQ_EXPR)))
3982 	{
3983 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3984 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3985 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3986 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3987 	}
3988     }
3989 
3990   /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
3991   if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3992       && TREE_CODE (val) == INTEGER_CST)
3993     {
3994       enum tree_code low_code, high_code;
3995       tree low, high;
3996       if (is_masked_range_test (name, val, comp_code, &name, &low,
3997 				&low_code, &high, &high_code))
3998 	{
3999 	  if (low_code != ERROR_MARK)
4000 	    register_edge_assert_for_2 (name, e, low_code, name,
4001 					low, /*invert*/false, asserts);
4002 	  if (high_code != ERROR_MARK)
4003 	    register_edge_assert_for_2 (name, e, high_code, name,
4004 					high, /*invert*/false, asserts);
4005 	}
4006     }
4007 }
4008 
4009 /* Finish found ASSERTS for E and register them at GSI.  */
4010 
4011 static void
4012 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
4013 				 vec<assert_info> &asserts)
4014 {
4015   for (unsigned i = 0; i < asserts.length (); ++i)
4016     /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4017        reachable from E.  */
4018     if (live_on_edge (e, asserts[i].name))
4019       register_new_assert_for (asserts[i].name, asserts[i].expr,
4020 			       asserts[i].comp_code, asserts[i].val,
4021 			       NULL, e, gsi);
4022 }
4023 
4024 
4025 
4026 /* Determine whether the outgoing edges of BB should receive an
4027    ASSERT_EXPR for each of the operands of BB's LAST statement.
4028    The last statement of BB must be a COND_EXPR.
4029 
4030    If any of the sub-graphs rooted at BB have an interesting use of
4031    the predicate operands, an assert location node is added to the
4032    list of assertions for the corresponding operands.  */
4033 
4034 static void
4035 find_conditional_asserts (basic_block bb, gcond *last)
4036 {
4037   gimple_stmt_iterator bsi;
4038   tree op;
4039   edge_iterator ei;
4040   edge e;
4041   ssa_op_iter iter;
4042 
4043   bsi = gsi_for_stmt (last);
4044 
4045   /* Look for uses of the operands in each of the sub-graphs
4046      rooted at BB.  We need to check each of the outgoing edges
4047      separately, so that we know what kind of ASSERT_EXPR to
4048      insert.  */
4049   FOR_EACH_EDGE (e, ei, bb->succs)
4050     {
4051       if (e->dest == bb)
4052 	continue;
4053 
4054       /* Register the necessary assertions for each operand in the
4055 	 conditional predicate.  */
4056       auto_vec<assert_info, 8> asserts;
4057       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4058 	register_edge_assert_for (op, e,
4059 				  gimple_cond_code (last),
4060 				  gimple_cond_lhs (last),
4061 				  gimple_cond_rhs (last), asserts);
4062       finish_register_edge_assert_for (e, bsi, asserts);
4063     }
4064 }
4065 
4066 struct case_info
4067 {
4068   tree expr;
4069   basic_block bb;
4070 };
4071 
4072 /* Compare two case labels sorting first by the destination bb index
4073    and then by the case value.  */
4074 
4075 static int
4076 compare_case_labels (const void *p1, const void *p2)
4077 {
4078   const struct case_info *ci1 = (const struct case_info *) p1;
4079   const struct case_info *ci2 = (const struct case_info *) p2;
4080   int idx1 = ci1->bb->index;
4081   int idx2 = ci2->bb->index;
4082 
4083   if (idx1 < idx2)
4084     return -1;
4085   else if (idx1 == idx2)
4086     {
4087       /* Make sure the default label is first in a group.  */
4088       if (!CASE_LOW (ci1->expr))
4089 	return -1;
4090       else if (!CASE_LOW (ci2->expr))
4091 	return 1;
4092       else
4093 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
4094 				     CASE_LOW (ci2->expr));
4095     }
4096   else
4097     return 1;
4098 }
4099 
4100 /* Determine whether the outgoing edges of BB should receive an
4101    ASSERT_EXPR for each of the operands of BB's LAST statement.
4102    The last statement of BB must be a SWITCH_EXPR.
4103 
4104    If any of the sub-graphs rooted at BB have an interesting use of
4105    the predicate operands, an assert location node is added to the
4106    list of assertions for the corresponding operands.  */
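
/* For instance (a hedged sketch): for

     switch (x_1) { case 3 ... 5: ... default: ... }

   the edge to the "case 3 ... 5" block receives the assertions x_1 >= 3
   and x_1 <= 5, while the default edge can receive the anti-range
   assertion (unsigned) x_1 - 3 > 2, i.e. x_1 ~[3, 5].  */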
4107 
4108 static void
4109 find_switch_asserts (basic_block bb, gswitch *last)
4110 {
4111   gimple_stmt_iterator bsi;
4112   tree op;
4113   edge e;
4114   struct case_info *ci;
4115   size_t n = gimple_switch_num_labels (last);
4116 #if GCC_VERSION >= 4000
4117   unsigned int idx;
4118 #else
4119   /* Work around GCC 3.4 bug (PR 37086).  */
4120   volatile unsigned int idx;
4121 #endif
4122 
4123   bsi = gsi_for_stmt (last);
4124   op = gimple_switch_index (last);
4125   if (TREE_CODE (op) != SSA_NAME)
4126     return;
4127 
4128   /* Build a vector of case labels sorted by destination label.  */
4129   ci = XNEWVEC (struct case_info, n);
4130   for (idx = 0; idx < n; ++idx)
4131     {
4132       ci[idx].expr = gimple_switch_label (last, idx);
4133       ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4134     }
4135   edge default_edge = find_edge (bb, ci[0].bb);
4136   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4137 
4138   for (idx = 0; idx < n; ++idx)
4139     {
4140       tree min, max;
4141       tree cl = ci[idx].expr;
4142       basic_block cbb = ci[idx].bb;
4143 
4144       min = CASE_LOW (cl);
4145       max = CASE_HIGH (cl);
4146 
4147       /* If there are multiple case labels with the same destination
4148 	 we need to combine them into a single value range for the edge.  */
4149       if (idx + 1 < n && cbb == ci[idx + 1].bb)
4150 	{
4151 	  /* Skip labels until the last of the group.  */
4152 	  do {
4153 	    ++idx;
4154 	  } while (idx < n && cbb == ci[idx].bb);
4155 	  --idx;
4156 
4157 	  /* Pick up the maximum of the case label range.  */
4158 	  if (CASE_HIGH (ci[idx].expr))
4159 	    max = CASE_HIGH (ci[idx].expr);
4160 	  else
4161 	    max = CASE_LOW (ci[idx].expr);
4162 	}
4163 
4164       /* Can't extract a useful assertion out of a range that includes the
4165 	 default label.  */
4166       if (min == NULL_TREE)
4167 	continue;
4168 
4169       /* Find the edge to register the assert expr on.  */
4170       e = find_edge (bb, cbb);
4171 
4172       /* Register the necessary assertions for the operand in the
4173 	 SWITCH_EXPR.  */
4174       auto_vec<assert_info, 8> asserts;
4175       register_edge_assert_for (op, e,
4176 				max ? GE_EXPR : EQ_EXPR,
4177 				op, fold_convert (TREE_TYPE (op), min),
4178 				asserts);
4179       if (max)
4180 	register_edge_assert_for (op, e, LE_EXPR, op,
4181 				  fold_convert (TREE_TYPE (op), max),
4182 				  asserts);
4183       finish_register_edge_assert_for (e, bsi, asserts);
4184     }
4185 
4186   XDELETEVEC (ci);
4187 
4188   if (!live_on_edge (default_edge, op))
4189     return;
4190 
4191   /* Now register along the default label assertions that correspond to the
4192      anti-range of each label.  */
4193   int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
4194   if (insertion_limit == 0)
4195     return;
4196 
4197   /* We can't do this if the default case shares a label with another case.  */
4198   tree default_cl = gimple_switch_default_label (last);
4199   for (idx = 1; idx < n; idx++)
4200     {
4201       tree min, max;
4202       tree cl = gimple_switch_label (last, idx);
4203       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
4204 	continue;
4205 
4206       min = CASE_LOW (cl);
4207       max = CASE_HIGH (cl);
4208 
4209       /* Combine contiguous case ranges to reduce the number of assertions
4210 	 to insert.  */
4211       for (idx = idx + 1; idx < n; idx++)
4212 	{
4213 	  tree next_min, next_max;
4214 	  tree next_cl = gimple_switch_label (last, idx);
4215 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
4216 	    break;
4217 
4218 	  next_min = CASE_LOW (next_cl);
4219 	  next_max = CASE_HIGH (next_cl);
4220 
4221 	  wide_int difference = (wi::to_wide (next_min)
4222 				 - wi::to_wide (max ? max : min));
4223 	  if (wi::eq_p (difference, 1))
4224 	    max = next_max ? next_max : next_min;
4225 	  else
4226 	    break;
4227 	}
4228       idx--;
4229 
4230       if (max == NULL_TREE)
4231 	{
4232 	  /* Register the assertion OP != MIN.  */
4233 	  auto_vec<assert_info, 8> asserts;
4234 	  min = fold_convert (TREE_TYPE (op), min);
4235 	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
4236 				    asserts);
4237 	  finish_register_edge_assert_for (default_edge, bsi, asserts);
4238 	}
4239       else
4240 	{
4241 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
4242 	     which will give OP the anti-range ~[MIN,MAX].  */
4243 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
4244 	  min = fold_convert (TREE_TYPE (uop), min);
4245 	  max = fold_convert (TREE_TYPE (uop), max);
4246 
4247 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
4248 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
4249 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
4250 				   NULL, default_edge, bsi);
4251 	}
4252 
4253       if (--insertion_limit == 0)
4254 	break;
4255     }
4256 }
4257 
4258 
4259 /* Traverse all the statements in block BB looking for statements that
4260    may generate useful assertions for the SSA names in their operand.
4261    If a statement produces a useful assertion A for name N_i, then the
4262    list of assertions already generated for N_i is scanned to
4263    determine if A is actually needed.
4264 
4265    If N_i already had the assertion A at a location dominating the
4266    current location, then nothing needs to be done.  Otherwise, the
4267    new location for A is recorded instead.
4268 
4269    1- For every statement S in BB, all the variables used by S are
4270       added to bitmap FOUND_IN_SUBGRAPH.
4271 
4272    2- If statement S uses an operand N in a way that exposes a known
4273       value range for N, then if N was not already generated by an
4274       ASSERT_EXPR, create a new assert location for N.  For instance,
4275       if N is a pointer and the statement dereferences it, we can
4276       assume that N is not NULL.
4277 
4278    3- COND_EXPRs are a special case of #2.  We can derive range
4279       information from the predicate but need to insert different
4280       ASSERT_EXPRs for each of the sub-graphs rooted at the
4281       conditional block.  If the last statement of BB is a conditional
4282       expression of the form 'X op Y', then
4283 
4284       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4285 
4286       b) If the conditional is the only entry point to the sub-graph
4287 	 corresponding to the THEN_CLAUSE, recurse into it.  On
4288 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4289 	 an ASSERT_EXPR is added for the corresponding variable.
4290 
4291       c) Repeat step (b) on the ELSE_CLAUSE.
4292 
4293       d) Mark X and Y in FOUND_IN_SUBGRAPH.
4294 
4295       For instance,
4296 
4297 	    if (a == 9)
4298 	      b = a;
4299 	    else
4300 	      b = c + 1;
4301 
4302       In this case, an assertion on the THEN clause is useful to
4303       determine that 'a' is always 9 on that edge.  However, an assertion
4304       on the ELSE clause would be unnecessary.
4305 
4306    4- If BB does not end in a conditional expression, then we recurse
4307       into BB's dominator children.
4308 
4309    At the end of the recursive traversal, every SSA name will have a
4310    list of locations where ASSERT_EXPRs should be added.  When a new
4311    location for name N is found, it is registered by calling
4312    register_new_assert_for.  That function keeps track of all the
4313    registered assertions to prevent adding unnecessary assertions.
4314    For instance, if a pointer P_4 is dereferenced more than once in a
4315    dominator tree, only the location dominating all the dereference of
4316    P_4 will receive an ASSERT_EXPR.  */
4317 
4318 static void
4319 find_assert_locations_1 (basic_block bb, sbitmap live)
4320 {
4321   gimple *last;
4322 
4323   last = last_stmt (bb);
4324 
4325   /* If BB's last statement is a conditional statement involving integer
4326      operands, determine if we need to add ASSERT_EXPRs.  */
4327   if (last
4328       && gimple_code (last) == GIMPLE_COND
4329       && !fp_predicate (last)
4330       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4331     find_conditional_asserts (bb, as_a <gcond *> (last));
4332 
4333   /* If BB's last statement is a switch statement involving integer
4334      operands, determine if we need to add ASSERT_EXPRs.  */
4335   if (last
4336       && gimple_code (last) == GIMPLE_SWITCH
4337       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4338     find_switch_asserts (bb, as_a <gswitch *> (last));
4339 
4340   /* Traverse all the statements in BB marking used names and looking
4341      for statements that may infer assertions for their used operands.  */
4342   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
4343        gsi_prev (&si))
4344     {
4345       gimple *stmt;
4346       tree op;
4347       ssa_op_iter i;
4348 
4349       stmt = gsi_stmt (si);
4350 
4351       if (is_gimple_debug (stmt))
4352 	continue;
4353 
4354       /* See if we can derive an assertion for any of STMT's operands.  */
4355       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4356 	{
4357 	  tree value;
4358 	  enum tree_code comp_code;
4359 
4360 	  /* If op is not live beyond this stmt, do not bother to insert
4361 	     asserts for it.  */
4362 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
4363 	    continue;
4364 
4365 	  /* If OP is used in such a way that we can infer a value
4366 	     range for it, and we don't find a previous assertion for
4367 	     it, create a new assertion location node for OP.  */
4368 	  if (infer_value_range (stmt, op, &comp_code, &value))
4369 	    {
4370 	      /* If we are able to infer a nonzero value range for OP,
4371 		 then walk backwards through the use-def chain to see if OP
4372 		 was set via a typecast.
4373 
4374 		 If so, then we can also infer a nonzero value range
4375 		 for the operand of the NOP_EXPR.  */
4376 	      if (comp_code == NE_EXPR && integer_zerop (value))
4377 		{
4378 		  tree t = op;
4379 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
4380 
4381 		  while (is_gimple_assign (def_stmt)
4382 			 && CONVERT_EXPR_CODE_P
4383 			     (gimple_assign_rhs_code (def_stmt))
4384 			 && TREE_CODE
4385 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4386 			 && POINTER_TYPE_P
4387 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4388 		    {
4389 		      t = gimple_assign_rhs1 (def_stmt);
4390 		      def_stmt = SSA_NAME_DEF_STMT (t);
4391 
4392 		      /* Note we want to register the assert for the
4393 			 operand of the NOP_EXPR after SI, not after the
4394 			 conversion.  */
4395 		      if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
4396 			register_new_assert_for (t, t, comp_code, value,
4397 						 bb, NULL, si);
4398 		    }
4399 		}
4400 
4401 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
4402 	    }
4403 	}
4404 
4405       /* Update live.  */
4406       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4407 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
4408       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
4409 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
4410     }
4411 
4412   /* Traverse all PHI nodes in BB, updating live.  */
4413   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4414        gsi_next (&si))
4415     {
4416       use_operand_p arg_p;
4417       ssa_op_iter i;
4418       gphi *phi = si.phi ();
4419       tree res = gimple_phi_result (phi);
4420 
4421       if (virtual_operand_p (res))
4422 	continue;
4423 
4424       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4425 	{
4426 	  tree arg = USE_FROM_PTR (arg_p);
4427 	  if (TREE_CODE (arg) == SSA_NAME)
4428 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
4429 	}
4430 
4431       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
4432     }
4433 }
4434 
4435 /* Do an RPO walk over the function computing SSA name liveness
4436    on-the-fly and deciding on assert expressions to insert.  */
4437 
4438 static void
4439 find_assert_locations (void)
4440 {
4441   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4442   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4443   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
4444   int rpo_cnt, i;
4445 
4446   live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
4447   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4448   for (i = 0; i < rpo_cnt; ++i)
4449     bb_rpo[rpo[i]] = i;
4450 
4451   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
4452      the order in which we compute liveness and insert asserts, we would
4453      otherwise fail to insert asserts into the loop latch.  */
4454   loop_p loop;
4455   FOR_EACH_LOOP (loop, 0)
4456     {
4457       i = loop->latch->index;
4458       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
4459       for (gphi_iterator gsi = gsi_start_phis (loop->header);
4460 	   !gsi_end_p (gsi); gsi_next (&gsi))
4461 	{
4462 	  gphi *phi = gsi.phi ();
4463 	  if (virtual_operand_p (gimple_phi_result (phi)))
4464 	    continue;
4465 	  tree arg = gimple_phi_arg_def (phi, j);
4466 	  if (TREE_CODE (arg) == SSA_NAME)
4467 	    {
4468 	      if (live[i] == NULL)
4469 		{
4470 		  live[i] = sbitmap_alloc (num_ssa_names);
4471 		  bitmap_clear (live[i]);
4472 		}
4473 	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
4474 	    }
4475 	}
4476     }
4477 
4478   for (i = rpo_cnt - 1; i >= 0; --i)
4479     {
4480       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
4481       edge e;
4482       edge_iterator ei;
4483 
4484       if (!live[rpo[i]])
4485 	{
4486 	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4487 	  bitmap_clear (live[rpo[i]]);
4488 	}
4489 
4490       /* Process BB and update the live information with uses in
4491          this block.  */
4492       find_assert_locations_1 (bb, live[rpo[i]]);
4493 
4494       /* Merge liveness into the predecessor blocks and free it.  */
4495       if (!bitmap_empty_p (live[rpo[i]]))
4496 	{
4497 	  int pred_rpo = i;
4498 	  FOR_EACH_EDGE (e, ei, bb->preds)
4499 	    {
4500 	      int pred = e->src->index;
4501 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
4502 		continue;
4503 
4504 	      if (!live[pred])
4505 		{
4506 		  live[pred] = sbitmap_alloc (num_ssa_names);
4507 		  bitmap_clear (live[pred]);
4508 		}
4509 	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4510 
4511 	      if (bb_rpo[pred] < pred_rpo)
4512 		pred_rpo = bb_rpo[pred];
4513 	    }
4514 
4515 	  /* Record the RPO number of the last visited block that needs
4516 	     live information from this block.  */
4517 	  last_rpo[rpo[i]] = pred_rpo;
4518 	}
4519       else
4520 	{
4521 	  sbitmap_free (live[rpo[i]]);
4522 	  live[rpo[i]] = NULL;
4523 	}
4524 
4525       /* We can free all successors live bitmaps if all their
4526          predecessors have been visited already.  */
4527       FOR_EACH_EDGE (e, ei, bb->succs)
4528 	if (last_rpo[e->dest->index] == i
4529 	    && live[e->dest->index])
4530 	  {
4531 	    sbitmap_free (live[e->dest->index]);
4532 	    live[e->dest->index] = NULL;
4533 	  }
4534     }
4535 
4536   XDELETEVEC (rpo);
4537   XDELETEVEC (bb_rpo);
4538   XDELETEVEC (last_rpo);
4539   for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4540     if (live[i])
4541       sbitmap_free (live[i]);
4542   XDELETEVEC (live);
4543 }
4544 
4545 /* Create an ASSERT_EXPR for NAME and insert it in the location
4546    indicated by LOC.  Return true if we made any edge insertions.  */
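
/* For example (an illustrative sketch; the name x_7 is invented): for an
   assertion x_1 > 0 registered on the true edge of "if (x_1 > 0)", this
   inserts

     x_7 = ASSERT_EXPR <x_1, x_1 > 0>;

   on that edge; when the SSA form is updated, dominated uses of x_1 are
   rewritten to use x_7.  */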
4547 
4548 static bool
4549 process_assert_insertions_for (tree name, assert_locus *loc)
4550 {
4551   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
4552   gimple *stmt;
4553   tree cond;
4554   gimple *assert_stmt;
4555   edge_iterator ei;
4556   edge e;
4557 
4558   /* If we have X <=> X do not insert an assert expr for that.  */
4559   if (loc->expr == loc->val)
4560     return false;
4561 
4562   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4563   assert_stmt = build_assert_expr_for (cond, name);
4564   if (loc->e)
4565     {
4566       /* We have been asked to insert the assertion on an edge.  This
4567 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
4568       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4569 			   || (gimple_code (gsi_stmt (loc->si))
4570 			       == GIMPLE_SWITCH));
4571 
4572       gsi_insert_on_edge (loc->e, assert_stmt);
4573       return true;
4574     }
4575 
4576   /* If the stmt iterator points at the end then this is an insertion
4577      at the beginning of a block.  */
4578   if (gsi_end_p (loc->si))
4579     {
4580       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4581       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4582       return false;
4583 
4584     }
4585   /* Otherwise, we can insert right after LOC->SI, provided the
4586      statement is not the last statement in the block.  */
4587   stmt = gsi_stmt (loc->si);
4588   if (!stmt_ends_bb_p (stmt))
4589     {
4590       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4591       return false;
4592     }
4593 
4594   /* If STMT must be the last statement in BB, we can only insert new
4595      assertions on the non-abnormal edge out of BB.  Note that since
4596      STMT is not control flow, there may only be one non-abnormal/eh edge
4597      out of BB.  */
4598   FOR_EACH_EDGE (e, ei, loc->bb->succs)
4599     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4600       {
4601 	gsi_insert_on_edge (e, assert_stmt);
4602 	return true;
4603       }
4604 
4605   gcc_unreachable ();
4606 }
4607 
4608 /* Qsort helper for sorting assert locations.  If STABLE is true, don't
4609    use iterative_hash_expr because it can be unstable for -fcompare-debug;
4610    in that mode some pointers might also be NULL.  */
4611 
4612 template <bool stable>
4613 static int
4614 compare_assert_loc (const void *pa, const void *pb)
4615 {
4616   assert_locus * const a = *(assert_locus * const *)pa;
4617   assert_locus * const b = *(assert_locus * const *)pb;
4618 
4619   /* If stable, some asserts might be optimized away already, sort
4620      them last.  */
4621   if (stable)
4622     {
4623       if (a == NULL)
4624 	return b != NULL;
4625       else if (b == NULL)
4626 	return -1;
4627     }
4628 
4629   if (a->e == NULL && b->e != NULL)
4630     return 1;
4631   else if (a->e != NULL && b->e == NULL)
4632     return -1;
4633 
4634   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4635      no need to test both a->e and b->e.  */
4636 
4637   /* Sort after destination index.  */
4638   if (a->e == NULL)
4639     ;
4640   else if (a->e->dest->index > b->e->dest->index)
4641     return 1;
4642   else if (a->e->dest->index < b->e->dest->index)
4643     return -1;
4644 
4645   /* Sort after comp_code.  */
4646   if (a->comp_code > b->comp_code)
4647     return 1;
4648   else if (a->comp_code < b->comp_code)
4649     return -1;
4650 
4651   hashval_t ha, hb;
4652 
4653   /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4654      uses DECL_UID of the VAR_DECL, so sorting might differ between
4655      -g and -g0.  When doing the removal of redundant assert exprs
4656      and commonization to successors, this does not matter, but for
4657      the final sort needs to be stable.  */
4658   if (stable)
4659     {
4660       ha = 0;
4661       hb = 0;
4662     }
4663   else
4664     {
4665       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4666       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4667     }
4668 
4669   /* Break the tie using hashing and source/bb index.  */
4670   if (ha == hb)
4671     return (a->e != NULL
4672 	    ? a->e->src->index - b->e->src->index
4673 	    : a->bb->index - b->bb->index);
4674   return ha > hb ? 1 : -1;
4675 }
4676 
4677 /* Process all the insertions registered for every name N_i registered
4678    in NEED_ASSERT_FOR.  The list of assertions to be inserted are
4679    found in ASSERTS_FOR[i].  */
4680 
4681 static void
4682 process_assert_insertions (void)
4683 {
4684   unsigned i;
4685   bitmap_iterator bi;
4686   bool update_edges_p = false;
4687   int num_asserts = 0;
4688 
4689   if (dump_file && (dump_flags & TDF_DETAILS))
4690     dump_all_asserts (dump_file);
4691 
4692   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4693     {
4694       assert_locus *loc = asserts_for[i];
4695       gcc_assert (loc);
4696 
4697       auto_vec<assert_locus *, 16> asserts;
4698       for (; loc; loc = loc->next)
4699 	asserts.safe_push (loc);
4700       asserts.qsort (compare_assert_loc<false>);
4701 
4702       /* Push down common asserts to successors and remove redundant ones.  */
4703       unsigned ecnt = 0;
4704       assert_locus *common = NULL;
4705       unsigned commonj = 0;
4706       for (unsigned j = 0; j < asserts.length (); ++j)
4707 	{
4708 	  loc = asserts[j];
4709 	  if (! loc->e)
4710 	    common = NULL;
4711 	  else if (! common
4712 		   || loc->e->dest != common->e->dest
4713 		   || loc->comp_code != common->comp_code
4714 		   || ! operand_equal_p (loc->val, common->val, 0)
4715 		   || ! operand_equal_p (loc->expr, common->expr, 0))
4716 	    {
4717 	      commonj = j;
4718 	      common = loc;
4719 	      ecnt = 1;
4720 	    }
4721 	  else if (loc->e == asserts[j-1]->e)
4722 	    {
4723 	      /* Remove duplicate asserts.  */
4724 	      if (commonj == j - 1)
4725 		{
4726 		  commonj = j;
4727 		  common = loc;
4728 		}
4729 	      free (asserts[j-1]);
4730 	      asserts[j-1] = NULL;
4731 	    }
4732 	  else
4733 	    {
4734 	      ecnt++;
4735 	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4736 		{
4737 		  /* We have the same assertion on all incoming edges of a BB.
4738 		     Insert it at the beginning of that block.  */
4739 		  loc->bb = loc->e->dest;
4740 		  loc->e = NULL;
4741 		  loc->si = gsi_none ();
4742 		  common = NULL;
4743 		  /* Clear asserts commoned.  */
4744 		  for (; commonj != j; ++commonj)
4745 		    if (asserts[commonj])
4746 		      {
4747 			free (asserts[commonj]);
4748 			asserts[commonj] = NULL;
4749 		      }
4750 		}
4751 	    }
4752 	}
4753 
4754       /* The asserts vector sorting above might be unstable for
4755 	 -fcompare-debug, sort again to ensure a stable sort.  */
4756       asserts.qsort (compare_assert_loc<true>);
4757       for (unsigned j = 0; j < asserts.length (); ++j)
4758 	{
4759 	  loc = asserts[j];
4760 	  if (! loc)
4761 	    break;
4762 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4763 	  num_asserts++;
4764 	  free (loc);
4765 	}
4766     }
4767 
4768   if (update_edges_p)
4769     gsi_commit_edge_inserts ();
4770 
4771   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4772 			    num_asserts);
4773 }
4774 
4775 
4776 /* Traverse the flowgraph looking for conditional jumps to insert range
4777    expressions.  These range expressions are meant to provide information
4778    to optimizations that need to reason in terms of value ranges.  They
4779    will not be expanded into RTL.  For instance, given:
4780 
4781    x = ...
4782    y = ...
4783    if (x < y)
4784      y = x - 2;
4785    else
4786      x = y + 3;
4787 
4788    this pass will transform the code into:
4789 
4790    x = ...
4791    y = ...
4792    if (x < y)
4793     {
4794       x = ASSERT_EXPR <x, x < y>
4795       y = x - 2
4796     }
4797    else
4798     {
4799       y = ASSERT_EXPR <y, x >= y>
4800       x = y + 3
4801     }
4802 
4803    The idea is that once copy and constant propagation have run, other
4804    optimizations will be able to determine what ranges of values can 'x'
4805    take in different paths of the code, simply by checking the reaching
4806    definition of 'x'.  */
4807 
4808 static void
4809 insert_range_assertions (void)
4810 {
4811   need_assert_for = BITMAP_ALLOC (NULL);
4812   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4813 
4814   calculate_dominance_info (CDI_DOMINATORS);
4815 
4816   find_assert_locations ();
4817   if (!bitmap_empty_p (need_assert_for))
4818     {
4819       process_assert_insertions ();
4820       update_ssa (TODO_update_ssa_no_phi);
4821     }
4822 
4823   if (dump_file && (dump_flags & TDF_DETAILS))
4824     {
4825       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4826       dump_function_to_file (current_function_decl, dump_file, dump_flags);
4827     }
4828 
4829   free (asserts_for);
4830   BITMAP_FREE (need_assert_for);
4831 }
4832 
4833 class vrp_prop : public ssa_propagation_engine
4834 {
4835  public:
4836   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4837   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4838 
4839   void vrp_initialize (void);
4840   void vrp_finalize (bool);
4841   void check_all_array_refs (void);
4842   void check_array_ref (location_t, tree, bool);
4843   void search_for_addr_array (tree, location_t);
4844 
4845   class vr_values vr_values;
4846   /* Temporary delegator to minimize code churn.  */
4847   value_range *get_value_range (const_tree op)
4848     { return vr_values.get_value_range (op); }
4849   void set_defs_to_varying (gimple *stmt)
4850     { return vr_values.set_defs_to_varying (stmt); }
extract_range_from_stmt(gimple * stmt,edge * taken_edge_p,tree * output_p,value_range * vr)4851   void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4852 				tree *output_p, value_range *vr)
4853     { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
update_value_range(const_tree op,value_range * vr)4854   bool update_value_range (const_tree op, value_range *vr)
4855     { return vr_values.update_value_range (op, vr); }
extract_range_basic(value_range * vr,gimple * stmt)4856   void extract_range_basic (value_range *vr, gimple *stmt)
4857     { vr_values.extract_range_basic (vr, stmt); }
extract_range_from_phi_node(gphi * phi,value_range * vr)4858   void extract_range_from_phi_node (gphi *phi, value_range *vr)
4859     { vr_values.extract_range_from_phi_node (phi, vr); }
4860 };
4861 /* Check one ARRAY_REF in REF, located at LOCUS.  Ignore flexible arrays
4862    and "struct" hacks.  If VRP can determine that the
4863    array subscript is a constant, check whether it is outside the valid
4864    range.  If the array subscript is a RANGE, warn if it does not
4865    overlap the valid range.
4866    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
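
/* An illustrative example (user-level C, not GCC internals) of the
   diagnostics this function is meant to produce:

     int a[10];
     int x = a[10];    -- warned: subscript 10 is above the bounds of 'a'
     int *p = &a[10];  -- not warned: with IGNORE_OFF_BY_ONE the
			  one-past-the-end address is allowed
     int y = a[-1];    -- warned: subscript -1 is below the bounds of 'a'  */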
4867 
4868 void
4869 vrp_prop::check_array_ref (location_t location, tree ref,
4870 			   bool ignore_off_by_one)
4871 {
4872   value_range *vr = NULL;
4873   tree low_sub, up_sub;
4874   tree low_bound, up_bound, up_bound_p1;
4875 
4876   if (TREE_NO_WARNING (ref))
4877     return;
4878 
4879   low_sub = up_sub = TREE_OPERAND (ref, 1);
4880   up_bound = array_ref_up_bound (ref);
4881 
4882   if (!up_bound
4883       || TREE_CODE (up_bound) != INTEGER_CST
4884       || (warn_array_bounds < 2
4885 	  && array_at_struct_end_p (ref)))
4886     {
4887       /* Accesses to trailing arrays via pointers may access storage
4888 	 beyond the type's array bounds.  For such arrays, or for flexible
4889 	 array members, as well as for other arrays of an unknown size,
4890 	 replace the upper bound with a more permissive one that assumes
4891 	 the size of the largest object is PTRDIFF_MAX.  */
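      /* A worked example (illustrative; assumes a 64-bit target where
	 PTRDIFF_MAX is 2^63 - 1): for a 4-byte element type and a base
	 object found at offset 8, the code below computes
	   up_bound_p1 = (PTRDIFF_MAX - 8) / 4   (roughly 2^61)
	 and up_bound = up_bound_p1 - 1, the largest index that still
	 fits within an object of at most PTRDIFF_MAX bytes.  */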
4892       tree eltsize = array_ref_element_size (ref);
4893 
4894       if (TREE_CODE (eltsize) != INTEGER_CST
4895 	  || integer_zerop (eltsize))
4896 	{
4897 	  up_bound = NULL_TREE;
4898 	  up_bound_p1 = NULL_TREE;
4899 	}
4900       else
4901 	{
4902 	  tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4903 	  tree arg = TREE_OPERAND (ref, 0);
4904 	  poly_int64 off;
4905 
4906 	  if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4907 	    maxbound = wide_int_to_tree (sizetype,
4908 					 wi::sub (wi::to_wide (maxbound),
4909 						  off));
4910 	  else
4911 	    maxbound = fold_convert (sizetype, maxbound);
4912 
4913 	  up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4914 
4915 	  up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4916 				      build_int_cst (ptrdiff_type_node, 1));
4917 	}
4918     }
4919   else
4920     up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4921 				   build_int_cst (TREE_TYPE (up_bound), 1));
4922 
4923   low_bound = array_ref_low_bound (ref);
4924 
4925   tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4926 
4927   /* Empty array.  */
4928   if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4929     {
4930       warning_at (location, OPT_Warray_bounds,
4931 		  "array subscript %E is above array bounds of %qT",
4932 		  low_bound, artype);
4933       TREE_NO_WARNING (ref) = 1;
4934     }
4935 
4936   if (TREE_CODE (low_sub) == SSA_NAME)
4937     {
4938       vr = get_value_range (low_sub);
4939       if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4940         {
4941           low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4942           up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4943         }
4944     }
4945 
4946   if (vr && vr->type == VR_ANTI_RANGE)
4947     {
4948       if (up_bound
4949 	  && TREE_CODE (up_sub) == INTEGER_CST
4950           && (ignore_off_by_one
4951 	      ? tree_int_cst_lt (up_bound, up_sub)
4952 	      : tree_int_cst_le (up_bound, up_sub))
4953           && TREE_CODE (low_sub) == INTEGER_CST
4954           && tree_int_cst_le (low_sub, low_bound))
4955         {
4956           warning_at (location, OPT_Warray_bounds,
4957 		      "array subscript [%E, %E] is outside array bounds of %qT",
4958 		      low_sub, up_sub, artype);
4959           TREE_NO_WARNING (ref) = 1;
4960         }
4961     }
4962   else if (up_bound
4963 	   && TREE_CODE (up_sub) == INTEGER_CST
4964 	   && (ignore_off_by_one
4965 	       ? !tree_int_cst_le (up_sub, up_bound_p1)
4966 	       : !tree_int_cst_le (up_sub, up_bound)))
4967     {
4968       if (dump_file && (dump_flags & TDF_DETAILS))
4969 	{
4970 	  fprintf (dump_file, "Array bound warning for ");
4971 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4972 	  fprintf (dump_file, "\n");
4973 	}
4974       warning_at (location, OPT_Warray_bounds,
4975 		  "array subscript %E is above array bounds of %qT",
4976 		  up_sub, artype);
4977       TREE_NO_WARNING (ref) = 1;
4978     }
4979   else if (TREE_CODE (low_sub) == INTEGER_CST
4980            && tree_int_cst_lt (low_sub, low_bound))
4981     {
4982       if (dump_file && (dump_flags & TDF_DETAILS))
4983 	{
4984 	  fprintf (dump_file, "Array bound warning for ");
4985 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4986 	  fprintf (dump_file, "\n");
4987 	}
4988       warning_at (location, OPT_Warray_bounds,
4989 		  "array subscript %E is below array bounds of %qT",
4990 		  low_sub, artype);
4991       TREE_NO_WARNING (ref) = 1;
4992     }
4993 }
4994 
4995 /* Search whether the expression T, located at LOCATION, computes the
4996    address of an ARRAY_REF, and call check_array_ref on it.  */
4997 
4998 void
4999 vrp_prop::search_for_addr_array (tree t, location_t location)
5000 {
5001   /* Check each ARRAY_REF in the reference chain.  */
5002   do
5003     {
5004       if (TREE_CODE (t) == ARRAY_REF)
5005 	check_array_ref (location, t, true /*ignore_off_by_one*/);
5006 
5007       t = TREE_OPERAND (t, 0);
5008     }
5009   while (handled_component_p (t));
5010 
5011   if (TREE_CODE (t) == MEM_REF
5012       && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5013       && !TREE_NO_WARNING (t))
5014     {
5015       tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5016       tree low_bound, up_bound, el_sz;
5017       offset_int idx;
5018       if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5019 	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5020 	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
5021 	return;
5022 
5023       low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5024       up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5025       el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5026       if (!low_bound
5027 	  || TREE_CODE (low_bound) != INTEGER_CST
5028 	  || !up_bound
5029 	  || TREE_CODE (up_bound) != INTEGER_CST
5030 	  || !el_sz
5031 	  || TREE_CODE (el_sz) != INTEGER_CST)
5032 	return;
5033 
5034       if (!mem_ref_offset (t).is_constant (&idx))
5035 	return;
5036 
5037       idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
5038       if (idx < 0)
5039 	{
5040 	  if (dump_file && (dump_flags & TDF_DETAILS))
5041 	    {
5042 	      fprintf (dump_file, "Array bound warning for ");
5043 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
5044 	      fprintf (dump_file, "\n");
5045 	    }
5046 	  warning_at (location, OPT_Warray_bounds,
5047 		      "array subscript %wi is below array bounds of %qT",
5048 		      idx.to_shwi (), TREE_TYPE (tem));
5049 	  TREE_NO_WARNING (t) = 1;
5050 	}
5051       else if (idx > (wi::to_offset (up_bound)
5052 		      - wi::to_offset (low_bound) + 1))
5053 	{
5054 	  if (dump_file && (dump_flags & TDF_DETAILS))
5055 	    {
5056 	      fprintf (dump_file, "Array bound warning for ");
5057 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
5058 	      fprintf (dump_file, "\n");
5059 	    }
5060 	  warning_at (location, OPT_Warray_bounds,
5061 		      "array subscript %wu is above array bounds of %qT",
5062 		      idx.to_uhwi (), TREE_TYPE (tem));
5063 	  TREE_NO_WARNING (t) = 1;
5064 	}
5065     }
5066 }
5067 
5068 /* walk_tree() callback that checks whether *TP is
5069    an ARRAY_REF inside an ADDR_EXPR (in which case an array
5070    subscript one beyond the valid range is allowed).  Call
5071    check_array_ref for each ARRAY_REF found.  The location is
5072    passed in DATA.  */
5073 
5074 static tree
5075 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5076 {
5077   tree t = *tp;
5078   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5079   location_t location;
5080 
5081   if (EXPR_HAS_LOCATION (t))
5082     location = EXPR_LOCATION (t);
5083   else
5084     location = gimple_location (wi->stmt);
5085 
5086   *walk_subtree = TRUE;
5087 
5088   vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
5089   if (TREE_CODE (t) == ARRAY_REF)
5090     vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
5091 
5092   else if (TREE_CODE (t) == ADDR_EXPR)
5093     {
5094       vrp_prop->search_for_addr_array (t, location);
5095       *walk_subtree = FALSE;
5096     }
5097 
5098   return NULL_TREE;
5099 }
5100 
5101 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
5102    to walk over all statements of all reachable BBs and call
5103    check_array_bounds on them.  */
5104 
5105 class check_array_bounds_dom_walker : public dom_walker
5106 {
5107  public:
5108   check_array_bounds_dom_walker (vrp_prop *prop)
5109     : dom_walker (CDI_DOMINATORS,
5110 		  /* Discover non-executable edges, preserving EDGE_EXECUTABLE
5111 		     flags, so that we can merge in information on
5112 		     non-executable edges from vrp_folder.  */
5113 		  REACHABLE_BLOCKS_PRESERVING_FLAGS),
5114       m_prop (prop) {}
5115   ~check_array_bounds_dom_walker () {}
5116 
5117   edge before_dom_children (basic_block) FINAL OVERRIDE;
5118 
5119  private:
5120   vrp_prop *m_prop;
5121 };
5122 
5123 /* Implementation of dom_walker::before_dom_children.
5124 
5125    Walk over all statements of BB and call check_array_bounds on them,
5126    and determine if there's a unique successor edge.  */
5127 
5128 edge
5129 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
5130 {
5131   gimple_stmt_iterator si;
5132   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5133     {
5134       gimple *stmt = gsi_stmt (si);
5135       struct walk_stmt_info wi;
5136       if (!gimple_has_location (stmt)
5137 	  || is_gimple_debug (stmt))
5138 	continue;
5139 
5140       memset (&wi, 0, sizeof (wi));
5141 
5142       wi.info = m_prop;
5143 
5144       walk_gimple_op (stmt, check_array_bounds, &wi);
5145     }
5146 
5147   /* Determine if there's a unique successor edge, and if so, return
5148      that back to dom_walker, ensuring that we don't visit blocks that
5149      became unreachable during the VRP propagation
5150      (PR tree-optimization/83312).  */
5151   return find_taken_edge (bb, NULL_TREE);
5152 }
5153 
5154 /* Walk over all statements of all reachable BBs and call check_array_bounds
5155    on them.  */
5156 
5157 void
5158 vrp_prop::check_all_array_refs ()
5159 {
5160   check_array_bounds_dom_walker w (this);
5161   w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
5162 }
5163 
5164 /* Return true if all imm uses of VAR are either in STMT, or
5165    feed (optionally through a chain of single imm uses) GIMPLE_COND
5166    in basic block COND_BB.  */
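
/* For instance (a sketch, not actual compiler output): given

     _1 = x_3 & 31;     -- the only use of x_3 outside STMT
     _2 = (int) _1;     -- single-use chains like this are followed
     if (_2 != 0) ...   -- a GIMPLE_COND in COND_BB

   the predicate holds, because the only use chain of x_3 that does not
   start in STMT ends in the GIMPLE_COND of COND_BB.  */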
5167 
5168 static bool
5169 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
5170 {
5171   use_operand_p use_p, use2_p;
5172   imm_use_iterator iter;
5173 
5174   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
5175     if (USE_STMT (use_p) != stmt)
5176       {
5177 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
5178 	if (is_gimple_debug (use_stmt))
5179 	  continue;
5180 	while (is_gimple_assign (use_stmt)
5181 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
5182 	       && single_imm_use (gimple_assign_lhs (use_stmt),
5183 				  &use2_p, &use_stmt2))
5184 	  use_stmt = use_stmt2;
5185 	if (gimple_code (use_stmt) != GIMPLE_COND
5186 	    || gimple_bb (use_stmt) != cond_bb)
5187 	  return false;
5188       }
5189   return true;
5190 }
5191 
5192 /* Handle
5193    _4 = x_3 & 31;
5194    if (_4 != 0)
5195      goto <bb 6>;
5196    else
5197      goto <bb 7>;
5198    <bb 6>:
5199    __builtin_unreachable ();
5200    <bb 7>:
5201    x_5 = ASSERT_EXPR <x_3, ...>;
5202    If x_3 has no other immediate uses (checked by the caller) and
5203    VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5 bits
5204    from the non-zero bitmask.  */
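
/* A minimal sketch of the mask arithmetic performed below (plain C for
   illustration; the real code uses wide_int):

     new_nonzero_bits = old_nonzero_bits & ~cst;

   e.g. with CST == 31, an old mask of 0xff becomes 0xe0: once the taken
   edge proves x_3 & 31 == 0, the low five bits of VAR are known to be
   zero.  */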
5205 
5206 void
5207 maybe_set_nonzero_bits (edge e, tree var)
5208 {
5209   basic_block cond_bb = e->src;
5210   gimple *stmt = last_stmt (cond_bb);
5211   tree cst;
5212 
5213   if (stmt == NULL
5214       || gimple_code (stmt) != GIMPLE_COND
5215       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
5216 				     ? EQ_EXPR : NE_EXPR)
5217       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
5218       || !integer_zerop (gimple_cond_rhs (stmt)))
5219     return;
5220 
5221   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5222   if (!is_gimple_assign (stmt)
5223       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5224       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5225     return;
5226   if (gimple_assign_rhs1 (stmt) != var)
5227     {
5228       gimple *stmt2;
5229 
5230       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5231 	return;
5232       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5233       if (!gimple_assign_cast_p (stmt2)
5234 	  || gimple_assign_rhs1 (stmt2) != var
5235 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5236 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5237 			      != TYPE_PRECISION (TREE_TYPE (var))))
5238 	return;
5239     }
5240   cst = gimple_assign_rhs2 (stmt);
5241   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5242 					  wi::to_wide (cst)));
5243 }
5244 
5245 /* Convert range assertion expressions into the implied copies and
5246    copy propagate away the copies.  Doing the trivial copy propagation
5247    here avoids the need to run the full copy propagation pass after
5248    VRP.
5249 
5250    FIXME, this will eventually lead to copy propagation removing the
5251    names that had useful range information attached to them.  For
5252    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5253    then N_i will have the range [3, +INF].
5254 
5255    However, by converting the assertion into the implied copy
5256    operation N_i = N_j, we will then copy-propagate N_j into the uses
5257    of N_i and lose the range information.  We may want to hold on to
5258    ASSERT_EXPRs a little while longer as the ranges could be used in
5259    things like jump threading.
5260 
5261    The problem with keeping ASSERT_EXPRs around is that passes after
5262    VRP need to handle them appropriately.
5263 
5264    Another approach would be to make the range information a first
5265    class property of the SSA_NAME so that it can be queried from
5266    any pass.  This is made somewhat more complex by the need for
5267    multiple ranges to be associated with one SSA_NAME.  */
5268 
5269 static void
5270 remove_range_assertions (void)
5271 {
5272   basic_block bb;
5273   gimple_stmt_iterator si;
5274   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5275      a basic block preceded by GIMPLE_COND branching to it and
5276      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
5277   int is_unreachable;
5278 
5279   /* Note that the GSI iterator bump happens at the bottom of the
5280      loop and no bump is necessary if we're removing the statement
5281      referenced by the current GSI.  */
5282   FOR_EACH_BB_FN (bb, cfun)
5283     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5284       {
5285 	gimple *stmt = gsi_stmt (si);
5286 
5287 	if (is_gimple_assign (stmt)
5288 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5289 	  {
5290 	    tree lhs = gimple_assign_lhs (stmt);
5291 	    tree rhs = gimple_assign_rhs1 (stmt);
5292 	    tree var;
5293 
5294 	    var = ASSERT_EXPR_VAR (rhs);
5295 
5296 	    if (TREE_CODE (var) == SSA_NAME
5297 		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
5298 		&& SSA_NAME_RANGE_INFO (lhs))
5299 	      {
5300 		if (is_unreachable == -1)
5301 		  {
5302 		    is_unreachable = 0;
5303 		    if (single_pred_p (bb)
5304 			&& assert_unreachable_fallthru_edge_p
5305 						    (single_pred_edge (bb)))
5306 		      is_unreachable = 1;
5307 		  }
5308 		/* Handle
5309 		   if (x_7 >= 10 && x_7 < 20)
5310 		     __builtin_unreachable ();
5311 		   x_8 = ASSERT_EXPR <x_7, ...>;
5312 		   if the only uses of x_7 are in the ASSERT_EXPR and
5313 		   in the condition.  In that case, we can copy the
5314 		   range info from x_8 computed in this pass also
5315 		   for x_7.  */
5316 		if (is_unreachable
5317 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5318 							  single_pred (bb)))
5319 		  {
5320 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5321 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
5322 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
5323 		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
5324 		  }
5325 	      }
5326 
5327 	    /* Propagate the RHS into every use of the LHS.  For SSA names
5328 	       also propagate abnormals as it merely restores the original
5329 	       IL in this case (replace_uses_by would assert).  */
5330 	    if (TREE_CODE (var) == SSA_NAME)
5331 	      {
5332 		imm_use_iterator iter;
5333 		use_operand_p use_p;
5334 		gimple *use_stmt;
5335 		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5336 		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5337 		    SET_USE (use_p, var);
5338 	      }
5339 	    else
5340 	      replace_uses_by (lhs, var);
5341 
5342 	    /* And finally, remove the copy, it is not needed.  */
5343 	    gsi_remove (&si, true);
5344 	    release_defs (stmt);
5345 	  }
5346 	else
5347 	  {
5348 	    if (!is_gimple_debug (gsi_stmt (si)))
5349 	      is_unreachable = 0;
5350 	    gsi_next (&si);
5351 	  }
5352       }
5353 }
5354 
5355 /* Return true if STMT is interesting for VRP.  */
5356 
5357 bool
5358 stmt_interesting_for_vrp (gimple *stmt)
5359 {
5360   if (gimple_code (stmt) == GIMPLE_PHI)
5361     {
5362       tree res = gimple_phi_result (stmt);
5363       return (!virtual_operand_p (res)
5364 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5365 		  || POINTER_TYPE_P (TREE_TYPE (res))));
5366     }
5367   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5368     {
5369       tree lhs = gimple_get_lhs (stmt);
5370 
5371       /* In general, assignments with virtual operands are not useful
5372 	 for deriving ranges, with the obvious exception of calls to
5373 	 builtin functions.  */
5374       if (lhs && TREE_CODE (lhs) == SSA_NAME
5375 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5376 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
5377 	  && (is_gimple_call (stmt)
5378 	      || !gimple_vuse (stmt)))
5379 	return true;
5380       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5381 	switch (gimple_call_internal_fn (stmt))
5382 	  {
5383 	  case IFN_ADD_OVERFLOW:
5384 	  case IFN_SUB_OVERFLOW:
5385 	  case IFN_MUL_OVERFLOW:
5386 	  case IFN_ATOMIC_COMPARE_EXCHANGE:
5387 	    /* These internal calls return _Complex integer type,
5388 	       but are interesting to VRP nevertheless.  */
5389 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
5390 	      return true;
5391 	    break;
5392 	  default:
5393 	    break;
5394 	  }
5395     }
5396   else if (gimple_code (stmt) == GIMPLE_COND
5397 	   || gimple_code (stmt) == GIMPLE_SWITCH)
5398     return true;
5399 
5400   return false;
5401 }
5402 
5403 /* Initialization required by ssa_propagate engine.  */
5404 
5405 void
5406 vrp_prop::vrp_initialize ()
5407 {
5408   basic_block bb;
5409 
5410   FOR_EACH_BB_FN (bb, cfun)
5411     {
5412       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5413 	   gsi_next (&si))
5414 	{
5415 	  gphi *phi = si.phi ();
5416 	  if (!stmt_interesting_for_vrp (phi))
5417 	    {
5418 	      tree lhs = PHI_RESULT (phi);
5419 	      set_value_range_to_varying (get_value_range (lhs));
5420 	      prop_set_simulate_again (phi, false);
5421 	    }
5422 	  else
5423 	    prop_set_simulate_again (phi, true);
5424 	}
5425 
5426       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5427 	   gsi_next (&si))
5428         {
5429 	  gimple *stmt = gsi_stmt (si);
5430 
5431 	  /* If the statement is a control insn, we must simulate it
5432 	     at least once; otherwise the outgoing edges it controls
5433 	     would never get added.  */
5434 	  if (stmt_ends_bb_p (stmt))
5435 	    prop_set_simulate_again (stmt, true);
5436 	  else if (!stmt_interesting_for_vrp (stmt))
5437 	    {
5438 	      set_defs_to_varying (stmt);
5439 	      prop_set_simulate_again (stmt, false);
5440 	    }
5441 	  else
5442 	    prop_set_simulate_again (stmt, true);
5443 	}
5444     }
5445 }
5446 
5447 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5448    that includes the value VAL.  The search is restricted to the range
5449    [START_IDX, n - 1] where n is the size of VEC.
5450 
5451    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5452    returned.
5453 
5454    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5455    it is placed in IDX and false is returned.
5456 
5457    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5458    returned. */
5459 
5460 bool
5461 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5462 {
5463   size_t n = gimple_switch_num_labels (stmt);
5464   size_t low, high;
5465 
5466   /* Find case label for minimum of the value range or the next one.
5467      At each iteration we are searching in [low, high - 1]. */
5468 
5469   for (low = start_idx, high = n; high != low; )
5470     {
5471       tree t;
5472       int cmp;
5473       /* Note that i != high, so we never ask for n. */
5474       size_t i = (high + low) / 2;
5475       t = gimple_switch_label (stmt, i);
5476 
5477       /* Cache the result of comparing CASE_LOW and val.  */
5478       cmp = tree_int_cst_compare (CASE_LOW (t), val);
5479 
5480       if (cmp == 0)
5481 	{
5482 	  /* Ranges cannot be empty. */
5483 	  *idx = i;
5484 	  return true;
5485 	}
5486       else if (cmp > 0)
5487         high = i;
5488       else
5489 	{
5490 	  low = i + 1;
5491 	  if (CASE_HIGH (t) != NULL
5492 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5493 	    {
5494 	      *idx = i;
5495 	      return true;
5496 	    }
5497         }
5498     }
5499 
5500   *idx = high;
5501   return false;
5502 }
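
/* A self-contained sketch (a hypothetical plain-C analogue, not GCC
   code) of the lower-bound binary search implemented above, using a
   sorted array of integer keys in place of the CASE_LOW trees; the
   CASE_HIGH range check of the real function is omitted.  */
#if 0
static bool
find_label_index_sketch (const int *keys, size_t n, size_t start_idx,
			 int val, size_t *idx)
{
  size_t low = start_idx, high = n;
  while (low < high)
    {
      size_t i = (high + low) / 2;  /* i != high, so keys[n] is never read.  */
      if (keys[i] == val)
	{
	  *idx = i;
	  return true;
	}
      else if (keys[i] > val)
	high = i;
      else
	low = i + 1;
    }
  *idx = high;  /* Index of the first key greater than VAL, or N.  */
  return false;
}
#endif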
5503 
5504 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5505    for values between MIN and MAX. The first index is placed in MIN_IDX. The
5506    last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5507    then MAX_IDX < MIN_IDX.
5508    Returns true if the default label is not needed. */
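
/* For example (illustrative only): given

     switch (x) { case 1: ... case 2: ... case 3: ... case 7: ... }

   a query with MIN = 1 and MAX = 3 selects the labels for 1, 2 and 3;
   they are contiguous, so true is returned and the default label is not
   needed for that range.  A query with MIN = 1 and MAX = 7 also selects
   the label for 7, but the gap between 3 and 7 means the default label
   is still needed, so false is returned.  */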
5509 
5510 bool
5511 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5512 		       size_t *max_idx)
5513 {
5514   size_t i, j;
5515   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5516   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5517 
5518   if (i == j
5519       && min_take_default
5520       && max_take_default)
5521     {
5522       /* Only the default case label reached.
5523          Return an empty range. */
5524       *min_idx = 1;
5525       *max_idx = 0;
5526       return false;
5527     }
5528   else
5529     {
5530       bool take_default = min_take_default || max_take_default;
5531       tree low, high;
5532       size_t k;
5533 
5534       if (max_take_default)
5535 	j--;
5536 
5537       /* If the case label range is continuous, we do not need
5538 	 the default case label.  Verify that.  */
5539       high = CASE_LOW (gimple_switch_label (stmt, i));
5540       if (CASE_HIGH (gimple_switch_label (stmt, i)))
5541 	high = CASE_HIGH (gimple_switch_label (stmt, i));
5542       for (k = i + 1; k <= j; ++k)
5543 	{
5544 	  low = CASE_LOW (gimple_switch_label (stmt, k));
5545 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5546 	    {
5547 	      take_default = true;
5548 	      break;
5549 	    }
5550 	  high = low;
5551 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
5552 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
5553 	}
5554 
5555       *min_idx = i;
5556       *max_idx = j;
5557       return !take_default;
5558     }
5559 }
5560 
5561 /* Evaluate statement STMT.  If the statement produces a useful range,
5562    return SSA_PROP_INTERESTING and record the SSA name with the
5563    interesting range into *OUTPUT_P.
5564 
5565    If STMT is a conditional branch and we can determine its truth
5566    value, the taken edge is recorded in *TAKEN_EDGE_P.
5567 
5568    If STMT produces a varying value, return SSA_PROP_VARYING.  */
5569 
5570 enum ssa_prop_result
visit_stmt(gimple * stmt,edge * taken_edge_p,tree * output_p)5571 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5572 {
5573   value_range vr = VR_INITIALIZER;
5574   tree lhs = gimple_get_lhs (stmt);
5575   extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5576 
5577   if (*output_p)
5578     {
5579       if (update_value_range (*output_p, &vr))
5580 	{
5581 	  if (dump_file && (dump_flags & TDF_DETAILS))
5582 	    {
5583 	      fprintf (dump_file, "Found new range for ");
5584 	      print_generic_expr (dump_file, *output_p);
5585 	      fprintf (dump_file, ": ");
5586 	      dump_value_range (dump_file, &vr);
5587 	      fprintf (dump_file, "\n");
5588 	    }
5589 
5590 	  if (vr.type == VR_VARYING)
5591 	    return SSA_PROP_VARYING;
5592 
5593 	  return SSA_PROP_INTERESTING;
5594 	}
5595       return SSA_PROP_NOT_INTERESTING;
5596     }
5597 
5598   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5599     switch (gimple_call_internal_fn (stmt))
5600       {
5601       case IFN_ADD_OVERFLOW:
5602       case IFN_SUB_OVERFLOW:
5603       case IFN_MUL_OVERFLOW:
5604       case IFN_ATOMIC_COMPARE_EXCHANGE:
5605 	/* These internal calls return _Complex integer type,
5606 	   which VRP does not track, but the immediate uses
5607 	   thereof might be interesting.  */
5608 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
5609 	  {
5610 	    imm_use_iterator iter;
5611 	    use_operand_p use_p;
5612 	    enum ssa_prop_result res = SSA_PROP_VARYING;
5613 
5614 	    set_value_range_to_varying (get_value_range (lhs));
5615 
5616 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5617 	      {
5618 		gimple *use_stmt = USE_STMT (use_p);
5619 		if (!is_gimple_assign (use_stmt))
5620 		  continue;
5621 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5622 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5623 		  continue;
5624 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
5625 		tree use_lhs = gimple_assign_lhs (use_stmt);
5626 		if (TREE_CODE (rhs1) != rhs_code
5627 		    || TREE_OPERAND (rhs1, 0) != lhs
5628 		    || TREE_CODE (use_lhs) != SSA_NAME
5629 		    || !stmt_interesting_for_vrp (use_stmt)
5630 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5631 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5632 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5633 		  continue;
5634 
5635 		/* If there is a change in the value range for any of the
5636 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5637 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
5638 		   or IMAGPART_EXPR immediate uses, but none of them have
5639 		   a change in their value ranges, return
5640 		   SSA_PROP_NOT_INTERESTING.  If there are no
5641 		   {REAL,IMAG}PART_EXPR uses at all,
5642 		   return SSA_PROP_VARYING.  */
5643 		value_range new_vr = VR_INITIALIZER;
5644 		extract_range_basic (&new_vr, use_stmt);
5645 		value_range *old_vr = get_value_range (use_lhs);
5646 		if (old_vr->type != new_vr.type
5647 		    || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5648 		    || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5649 		    || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5650 		  res = SSA_PROP_INTERESTING;
5651 		else
5652 		  res = SSA_PROP_NOT_INTERESTING;
5653 		BITMAP_FREE (new_vr.equiv);
5654 		if (res == SSA_PROP_INTERESTING)
5655 		  {
5656 		    *output_p = lhs;
5657 		    return res;
5658 		  }
5659 	      }
5660 
5661 	    return res;
5662 	  }
5663 	break;
5664       default:
5665 	break;
5666       }
5667 
5668   /* All other statements produce nothing of interest for VRP, so mark
5669      their outputs varying and prevent further simulation.  */
5670   set_defs_to_varying (stmt);
5671 
5672   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5673 }
5674 
5675 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5676    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5677    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5678    possible such range.  The resulting range is not canonicalized.  */
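
/* A few illustrative cases (ranges written as [lo, hi], anti-ranges as
   ~[lo, hi]; int operands assumed):

     [1, 5] union [3, 10]              -> [1, 10]  overlap: extend vr0
     [INT_MIN, 2] union [8, INT_MAX]   -> ~[3, 7]  the hull would span the
						   whole type, so the gap
						   becomes an anti-range
     ~[1, 5] union [3, 4]              -> ~[1, 2]  the anti-range shrinks
						   to a single gap
     ~[1, 9] union ~[5, 20]            -> ~[5, 9]  complement of the
						   intersection  */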
5679 
5680 static void
5681 union_ranges (enum value_range_type *vr0type,
5682 	      tree *vr0min, tree *vr0max,
5683 	      enum value_range_type vr1type,
5684 	      tree vr1min, tree vr1max)
5685 {
5686   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5687   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5688 
5689   /* [] is vr0, () is vr1 in the following classification comments.  */
5690   if (mineq && maxeq)
5691     {
5692       /* [(  )] */
5693       if (*vr0type == vr1type)
5694 	/* Nothing to do for equal ranges.  */
5695 	;
5696       else if ((*vr0type == VR_RANGE
5697 		&& vr1type == VR_ANTI_RANGE)
5698 	       || (*vr0type == VR_ANTI_RANGE
5699 		   && vr1type == VR_RANGE))
5700 	{
5701 	  /* For anti-range with range union the result is varying.  */
5702 	  goto give_up;
5703 	}
5704       else
5705 	gcc_unreachable ();
5706     }
5707   else if (operand_less_p (*vr0max, vr1min) == 1
5708 	   || operand_less_p (vr1max, *vr0min) == 1)
5709     {
5710       /* [ ] ( ) or ( ) [ ]
5711 	 If the ranges have an empty intersection, the result of the union
5712 	 operation is the anti-range, or, if both are anti-ranges,
5713 	 it covers everything.  */
5714       if (*vr0type == VR_ANTI_RANGE
5715 	  && vr1type == VR_ANTI_RANGE)
5716 	goto give_up;
5717       else if (*vr0type == VR_ANTI_RANGE
5718 	       && vr1type == VR_RANGE)
5719 	;
5720       else if (*vr0type == VR_RANGE
5721 	       && vr1type == VR_ANTI_RANGE)
5722 	{
5723 	  *vr0type = vr1type;
5724 	  *vr0min = vr1min;
5725 	  *vr0max = vr1max;
5726 	}
5727       else if (*vr0type == VR_RANGE
5728 	       && vr1type == VR_RANGE)
5729 	{
5730 	  /* The result is the convex hull of both ranges.  */
5731 	  if (operand_less_p (*vr0max, vr1min) == 1)
5732 	    {
5733 	      /* If the result can be an anti-range, create one.  */
5734 	      if (TREE_CODE (*vr0max) == INTEGER_CST
5735 		  && TREE_CODE (vr1min) == INTEGER_CST
5736 		  && vrp_val_is_min (*vr0min)
5737 		  && vrp_val_is_max (vr1max))
5738 		{
5739 		  tree min = int_const_binop (PLUS_EXPR,
5740 					      *vr0max,
5741 					      build_int_cst (TREE_TYPE (*vr0max), 1));
5742 		  tree max = int_const_binop (MINUS_EXPR,
5743 					      vr1min,
5744 					      build_int_cst (TREE_TYPE (vr1min), 1));
5745 		  if (!operand_less_p (max, min))
5746 		    {
5747 		      *vr0type = VR_ANTI_RANGE;
5748 		      *vr0min = min;
5749 		      *vr0max = max;
5750 		    }
5751 		  else
5752 		    *vr0max = vr1max;
5753 		}
5754 	      else
5755 		*vr0max = vr1max;
5756 	    }
5757 	  else
5758 	    {
5759 	      /* If the result can be an anti-range, create one.  */
5760 	      if (TREE_CODE (vr1max) == INTEGER_CST
5761 		  && TREE_CODE (*vr0min) == INTEGER_CST
5762 		  && vrp_val_is_min (vr1min)
5763 		  && vrp_val_is_max (*vr0max))
5764 		{
5765 		  tree min = int_const_binop (PLUS_EXPR,
5766 					      vr1max,
5767 					      build_int_cst (TREE_TYPE (vr1max), 1));
5768 		  tree max = int_const_binop (MINUS_EXPR,
5769 					      *vr0min,
5770 					      build_int_cst (TREE_TYPE (*vr0min), 1));
5771 		  if (!operand_less_p (max, min))
5772 		    {
5773 		      *vr0type = VR_ANTI_RANGE;
5774 		      *vr0min = min;
5775 		      *vr0max = max;
5776 		    }
5777 		  else
5778 		    *vr0min = vr1min;
5779 		}
5780 	      else
5781 		*vr0min = vr1min;
5782 	    }
5783 	}
5784       else
5785 	gcc_unreachable ();
5786     }
5787   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5788 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5789     {
5790       /* [ (  ) ] or [(  ) ] or [ (  )] */
5791       if (*vr0type == VR_RANGE
5792 	  && vr1type == VR_RANGE)
5793 	;
5794       else if (*vr0type == VR_ANTI_RANGE
5795 	       && vr1type == VR_ANTI_RANGE)
5796 	{
5797 	  *vr0type = vr1type;
5798 	  *vr0min = vr1min;
5799 	  *vr0max = vr1max;
5800 	}
5801       else if (*vr0type == VR_ANTI_RANGE
5802 	       && vr1type == VR_RANGE)
5803 	{
5804 	  /* Arbitrarily choose the right or left gap.  */
5805 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5806 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5807 				       build_int_cst (TREE_TYPE (vr1min), 1));
5808 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5809 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5810 				       build_int_cst (TREE_TYPE (vr1max), 1));
5811 	  else
5812 	    goto give_up;
5813 	}
5814       else if (*vr0type == VR_RANGE
5815 	       && vr1type == VR_ANTI_RANGE)
5816 	/* The result covers everything.  */
5817 	goto give_up;
5818       else
5819 	gcc_unreachable ();
5820     }
5821   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5822 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5823     {
5824       /* ( [  ] ) or ([  ] ) or ( [  ]) */
5825       if (*vr0type == VR_RANGE
5826 	  && vr1type == VR_RANGE)
5827 	{
5828 	  *vr0type = vr1type;
5829 	  *vr0min = vr1min;
5830 	  *vr0max = vr1max;
5831 	}
5832       else if (*vr0type == VR_ANTI_RANGE
5833 	       && vr1type == VR_ANTI_RANGE)
5834 	;
5835       else if (*vr0type == VR_RANGE
5836 	       && vr1type == VR_ANTI_RANGE)
5837 	{
5838 	  *vr0type = VR_ANTI_RANGE;
5839 	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5840 	    {
5841 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5842 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5843 	      *vr0min = vr1min;
5844 	    }
5845 	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5846 	    {
5847 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5848 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5849 	      *vr0max = vr1max;
5850 	    }
5851 	  else
5852 	    goto give_up;
5853 	}
5854       else if (*vr0type == VR_ANTI_RANGE
5855 	       && vr1type == VR_RANGE)
5856 	/* The result covers everything.  */
5857 	goto give_up;
5858       else
5859 	gcc_unreachable ();
5860     }
5861   else if ((operand_less_p (vr1min, *vr0max) == 1
5862 	    || operand_equal_p (vr1min, *vr0max, 0))
5863 	   && operand_less_p (*vr0min, vr1min) == 1
5864 	   && operand_less_p (*vr0max, vr1max) == 1)
5865     {
5866       /* [  (  ]  ) or [   ](   ) */
5867       if (*vr0type == VR_RANGE
5868 	  && vr1type == VR_RANGE)
5869 	*vr0max = vr1max;
5870       else if (*vr0type == VR_ANTI_RANGE
5871 	       && vr1type == VR_ANTI_RANGE)
5872 	*vr0min = vr1min;
5873       else if (*vr0type == VR_ANTI_RANGE
5874 	       && vr1type == VR_RANGE)
5875 	{
5876 	  if (TREE_CODE (vr1min) == INTEGER_CST)
5877 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5878 				       build_int_cst (TREE_TYPE (vr1min), 1));
5879 	  else
5880 	    goto give_up;
5881 	}
5882       else if (*vr0type == VR_RANGE
5883 	       && vr1type == VR_ANTI_RANGE)
5884 	{
5885 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
5886 	    {
5887 	      *vr0type = vr1type;
5888 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5889 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5890 	      *vr0max = vr1max;
5891 	    }
5892 	  else
5893 	    goto give_up;
5894 	}
5895       else
5896 	gcc_unreachable ();
5897     }
5898   else if ((operand_less_p (*vr0min, vr1max) == 1
5899 	    || operand_equal_p (*vr0min, vr1max, 0))
5900 	   && operand_less_p (vr1min, *vr0min) == 1
5901 	   && operand_less_p (vr1max, *vr0max) == 1)
5902     {
5903       /* (  [  )  ] or (   )[   ] */
5904       if (*vr0type == VR_RANGE
5905 	  && vr1type == VR_RANGE)
5906 	*vr0min = vr1min;
5907       else if (*vr0type == VR_ANTI_RANGE
5908 	       && vr1type == VR_ANTI_RANGE)
5909 	*vr0max = vr1max;
5910       else if (*vr0type == VR_ANTI_RANGE
5911 	       && vr1type == VR_RANGE)
5912 	{
5913 	  if (TREE_CODE (vr1max) == INTEGER_CST)
5914 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5915 				       build_int_cst (TREE_TYPE (vr1max), 1));
5916 	  else
5917 	    goto give_up;
5918 	}
5919       else if (*vr0type == VR_RANGE
5920 	       && vr1type == VR_ANTI_RANGE)
5921 	{
5922 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
5923 	    {
5924 	      *vr0type = vr1type;
5925 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5926 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5927 	      *vr0min = vr1min;
5928 	    }
5929 	  else
5930 	    goto give_up;
5931 	}
5932       else
5933 	gcc_unreachable ();
5934     }
5935   else
5936     goto give_up;
5937 
5938   return;
5939 
5940 give_up:
5941   *vr0type = VR_VARYING;
5942   *vr0min = NULL_TREE;
5943   *vr0max = NULL_TREE;
5944 }
5945 
5946 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5947    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5948    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5949    possible such range.  The resulting range is not canonicalized.  */
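
/* A few illustrative cases (same notation as for union_ranges above):

     [1, 10] intersect [5, 20]  -> [5, 10]    overlapping ranges
     [4, 10] intersect ~[4, 6]  -> [7, 10]    the shared low bound leaves
					      only the right gap
     [1, 10] intersect ~[4, 6]  -> [1, 10]    two gaps cannot be
					      represented, so vr0 is kept
     [1, 3] intersect [5, 9]    -> undefined  empty intersection
     ~[1, 3] intersect ~[4, 9]  -> ~[1, 9]    adjacent anti-ranges merge  */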
5950 
5951 static void
5952 intersect_ranges (enum value_range_type *vr0type,
5953 		  tree *vr0min, tree *vr0max,
5954 		  enum value_range_type vr1type,
5955 		  tree vr1min, tree vr1max)
5956 {
5957   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5958   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5959 
5960   /* [] is vr0, () is vr1 in the following classification comments.  */
5961   if (mineq && maxeq)
5962     {
5963       /* [(  )] */
5964       if (*vr0type == vr1type)
5965 	/* Nothing to do for equal ranges.  */
5966 	;
5967       else if ((*vr0type == VR_RANGE
5968 		&& vr1type == VR_ANTI_RANGE)
5969 	       || (*vr0type == VR_ANTI_RANGE
5970 		   && vr1type == VR_RANGE))
5971 	{
5972 	  /* For anti-range with range intersection the result is empty.  */
5973 	  *vr0type = VR_UNDEFINED;
5974 	  *vr0min = NULL_TREE;
5975 	  *vr0max = NULL_TREE;
5976 	}
5977       else
5978 	gcc_unreachable ();
5979     }
5980   else if (operand_less_p (*vr0max, vr1min) == 1
5981 	   || operand_less_p (vr1max, *vr0min) == 1)
5982     {
5983       /* [ ] ( ) or ( ) [ ]
5984 	 If the ranges have an empty intersection, the result of the
5985 	 intersect operation is the plain range when intersecting an
5986 	 anti-range with a range, and empty when intersecting two ranges.  */
5987       if (*vr0type == VR_RANGE
5988 	  && vr1type == VR_ANTI_RANGE)
5989 	;
5990       else if (*vr0type == VR_ANTI_RANGE
5991 	       && vr1type == VR_RANGE)
5992 	{
5993 	  *vr0type = vr1type;
5994 	  *vr0min = vr1min;
5995 	  *vr0max = vr1max;
5996 	}
5997       else if (*vr0type == VR_RANGE
5998 	       && vr1type == VR_RANGE)
5999 	{
6000 	  *vr0type = VR_UNDEFINED;
6001 	  *vr0min = NULL_TREE;
6002 	  *vr0max = NULL_TREE;
6003 	}
6004       else if (*vr0type == VR_ANTI_RANGE
6005 	       && vr1type == VR_ANTI_RANGE)
6006 	{
6007 	  /* If the anti-ranges are adjacent to each other merge them.  */
6008 	  if (TREE_CODE (*vr0max) == INTEGER_CST
6009 	      && TREE_CODE (vr1min) == INTEGER_CST
6010 	      && operand_less_p (*vr0max, vr1min) == 1
6011 	      && integer_onep (int_const_binop (MINUS_EXPR,
6012 						vr1min, *vr0max)))
6013 	    *vr0max = vr1max;
6014 	  else if (TREE_CODE (vr1max) == INTEGER_CST
6015 		   && TREE_CODE (*vr0min) == INTEGER_CST
6016 		   && operand_less_p (vr1max, *vr0min) == 1
6017 		   && integer_onep (int_const_binop (MINUS_EXPR,
6018 						     *vr0min, vr1max)))
6019 	    *vr0min = vr1min;
6020 	  /* Else arbitrarily take VR0.  */
6021 	}
6022     }
6023   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
6024 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
6025     {
6026       /* [ (  ) ] or [(  ) ] or [ (  )] */
6027       if (*vr0type == VR_RANGE
6028 	  && vr1type == VR_RANGE)
6029 	{
6030 	  /* If both are ranges the result is the inner one.  */
6031 	  *vr0type = vr1type;
6032 	  *vr0min = vr1min;
6033 	  *vr0max = vr1max;
6034 	}
6035       else if (*vr0type == VR_RANGE
6036 	       && vr1type == VR_ANTI_RANGE)
6037 	{
6038 	  /* Choose the right gap if the left one is empty.  */
6039 	  if (mineq)
6040 	    {
6041 	      if (TREE_CODE (vr1max) != INTEGER_CST)
6042 		*vr0min = vr1max;
6043 	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
6044 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
6045 		*vr0min
6046 		  = int_const_binop (MINUS_EXPR, vr1max,
6047 				     build_int_cst (TREE_TYPE (vr1max), -1));
6048 	      else
6049 		*vr0min
6050 		  = int_const_binop (PLUS_EXPR, vr1max,
6051 				     build_int_cst (TREE_TYPE (vr1max), 1));
6052 	    }
6053 	  /* Choose the left gap if the right one is empty.  */
6054 	  else if (maxeq)
6055 	    {
6056 	      if (TREE_CODE (vr1min) != INTEGER_CST)
6057 		*vr0max = vr1min;
6058 	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
6059 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
6060 		*vr0max
6061 		  = int_const_binop (PLUS_EXPR, vr1min,
6062 				     build_int_cst (TREE_TYPE (vr1min), -1));
6063 	      else
6064 		*vr0max
6065 		  = int_const_binop (MINUS_EXPR, vr1min,
6066 				     build_int_cst (TREE_TYPE (vr1min), 1));
6067 	    }
6068 	  /* Choose the anti-range if the range is effectively varying.  */
6069 	  else if (vrp_val_is_min (*vr0min)
6070 		   && vrp_val_is_max (*vr0max))
6071 	    {
6072 	      *vr0type = vr1type;
6073 	      *vr0min = vr1min;
6074 	      *vr0max = vr1max;
6075 	    }
6076 	  /* Else choose the range.  */
6077 	}
6078       else if (*vr0type == VR_ANTI_RANGE
6079 	       && vr1type == VR_ANTI_RANGE)
6080 	/* If both are anti-ranges the result is the outer one.  */
6081 	;
6082       else if (*vr0type == VR_ANTI_RANGE
6083 	       && vr1type == VR_RANGE)
6084 	{
6085 	  /* The intersection is empty.  */
6086 	  *vr0type = VR_UNDEFINED;
6087 	  *vr0min = NULL_TREE;
6088 	  *vr0max = NULL_TREE;
6089 	}
6090       else
6091 	gcc_unreachable ();
6092     }
6093   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
6094 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
6095     {
6096       /* ( [  ] ) or ([  ] ) or ( [  ]) */
6097       if (*vr0type == VR_RANGE
6098 	  && vr1type == VR_RANGE)
6099 	/* Choose the inner range.  */
6100 	;
6101       else if (*vr0type == VR_ANTI_RANGE
6102 	       && vr1type == VR_RANGE)
6103 	{
6104 	  /* Choose the right gap if the left is empty.  */
6105 	  if (mineq)
6106 	    {
6107 	      *vr0type = VR_RANGE;
6108 	      if (TREE_CODE (*vr0max) != INTEGER_CST)
6109 		*vr0min = *vr0max;
6110 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
6111 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
6112 		*vr0min
6113 		  = int_const_binop (MINUS_EXPR, *vr0max,
6114 				     build_int_cst (TREE_TYPE (*vr0max), -1));
6115 	      else
6116 		*vr0min
6117 		  = int_const_binop (PLUS_EXPR, *vr0max,
6118 				     build_int_cst (TREE_TYPE (*vr0max), 1));
6119 	      *vr0max = vr1max;
6120 	    }
6121 	  /* Choose the left gap if the right is empty.  */
6122 	  else if (maxeq)
6123 	    {
6124 	      *vr0type = VR_RANGE;
6125 	      if (TREE_CODE (*vr0min) != INTEGER_CST)
6126 		*vr0max = *vr0min;
6127 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
6128 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
6129 		*vr0max
6130 		  = int_const_binop (PLUS_EXPR, *vr0min,
6131 				     build_int_cst (TREE_TYPE (*vr0min), -1));
6132 	      else
6133 		*vr0max
6134 		  = int_const_binop (MINUS_EXPR, *vr0min,
6135 				     build_int_cst (TREE_TYPE (*vr0min), 1));
6136 	      *vr0min = vr1min;
6137 	    }
6138 	  /* Choose the anti-range if the range is effectively varying.  */
6139 	  else if (vrp_val_is_min (vr1min)
6140 		   && vrp_val_is_max (vr1max))
6141 	    ;
6142 	  /* Choose the anti-range if it is ~[0,0]; that range is special
6143 	     enough to special-case when VR1's range is relatively wide.
6144 	     This only applies to types at least as wide as int, which
6145 	     covers pointers and arguments to functions like ctz.  */
6146 	  else if (*vr0min == *vr0max
6147 		   && integer_zerop (*vr0min)
6148 		   && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
6149 			>= TYPE_PRECISION (integer_type_node))
6150 		       || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
6151 		   && TREE_CODE (vr1max) == INTEGER_CST
6152 		   && TREE_CODE (vr1min) == INTEGER_CST
6153 		   && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
6154 		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
6155 	    ;
6156 	  /* Else choose the range.  */
6157 	  else
6158 	    {
6159 	      *vr0type = vr1type;
6160 	      *vr0min = vr1min;
6161 	      *vr0max = vr1max;
6162 	    }
6163 	}
6164       else if (*vr0type == VR_ANTI_RANGE
6165 	       && vr1type == VR_ANTI_RANGE)
6166 	{
6167 	  /* If both are anti-ranges the result is the outer one.  */
6168 	  *vr0type = vr1type;
6169 	  *vr0min = vr1min;
6170 	  *vr0max = vr1max;
6171 	}
6172       else if (vr1type == VR_ANTI_RANGE
6173 	       && *vr0type == VR_RANGE)
6174 	{
6175 	  /* The intersection is empty.  */
6176 	  *vr0type = VR_UNDEFINED;
6177 	  *vr0min = NULL_TREE;
6178 	  *vr0max = NULL_TREE;
6179 	}
6180       else
6181 	gcc_unreachable ();
6182     }
6183   else if ((operand_less_p (vr1min, *vr0max) == 1
6184 	    || operand_equal_p (vr1min, *vr0max, 0))
6185 	   && operand_less_p (*vr0min, vr1min) == 1)
6186     {
6187       /* [  (  ]  ) or [  ](  ) */
6188       if (*vr0type == VR_ANTI_RANGE
6189 	  && vr1type == VR_ANTI_RANGE)
6190 	*vr0max = vr1max;
6191       else if (*vr0type == VR_RANGE
6192 	       && vr1type == VR_RANGE)
6193 	*vr0min = vr1min;
6194       else if (*vr0type == VR_RANGE
6195 	       && vr1type == VR_ANTI_RANGE)
6196 	{
6197 	  if (TREE_CODE (vr1min) == INTEGER_CST)
6198 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
6199 				       build_int_cst (TREE_TYPE (vr1min), 1));
6200 	  else
6201 	    *vr0max = vr1min;
6202 	}
6203       else if (*vr0type == VR_ANTI_RANGE
6204 	       && vr1type == VR_RANGE)
6205 	{
6206 	  *vr0type = VR_RANGE;
6207 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
6208 	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
6209 				       build_int_cst (TREE_TYPE (*vr0max), 1));
6210 	  else
6211 	    *vr0min = *vr0max;
6212 	  *vr0max = vr1max;
6213 	}
6214       else
6215 	gcc_unreachable ();
6216     }
6217   else if ((operand_less_p (*vr0min, vr1max) == 1
6218 	    || operand_equal_p (*vr0min, vr1max, 0))
6219 	   && operand_less_p (vr1min, *vr0min) == 1)
6220     {
6221       /* (  [  )  ] or (  )[  ] */
6222       if (*vr0type == VR_ANTI_RANGE
6223 	  && vr1type == VR_ANTI_RANGE)
6224 	*vr0min = vr1min;
6225       else if (*vr0type == VR_RANGE
6226 	       && vr1type == VR_RANGE)
6227 	*vr0max = vr1max;
6228       else if (*vr0type == VR_RANGE
6229 	       && vr1type == VR_ANTI_RANGE)
6230 	{
6231 	  if (TREE_CODE (vr1max) == INTEGER_CST)
6232 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6233 				       build_int_cst (TREE_TYPE (vr1max), 1));
6234 	  else
6235 	    *vr0min = vr1max;
6236 	}
6237       else if (*vr0type == VR_ANTI_RANGE
6238 	       && vr1type == VR_RANGE)
6239 	{
6240 	  *vr0type = VR_RANGE;
6241 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
6242 	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6243 				       build_int_cst (TREE_TYPE (*vr0min), 1));
6244 	  else
6245 	    *vr0max = *vr0min;
6246 	  *vr0min = vr1min;
6247 	}
6248       else
6249 	gcc_unreachable ();
6250     }
6251 
6252   /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
6253      the result for the intersection.  That's always a conservatively
6254      correct estimate unless VR1 is a constant singleton range,
6255      in which case we choose that.  */
6256   if (vr1type == VR_RANGE
6257       && is_gimple_min_invariant (vr1min)
6258       && vrp_operand_equal_p (vr1min, vr1max))
6259     {
6260       *vr0type = vr1type;
6261       *vr0min = vr1min;
6262       *vr0max = vr1max;
6263     }
6264 
6265   return;
6266 }
6267 
6268 
6269 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
6270    in *VR0.  This may not be the smallest possible such range.  */
6271 
6272 static void
6273 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
6274 {
6275   value_range saved;
6276 
6277   /* If either range is VR_VARYING the other one wins.  */
6278   if (vr1->type == VR_VARYING)
6279     return;
6280   if (vr0->type == VR_VARYING)
6281     {
6282       copy_value_range (vr0, vr1);
6283       return;
6284     }
6285 
6286   /* When either range is VR_UNDEFINED the resulting range is
6287      VR_UNDEFINED, too.  */
6288   if (vr0->type == VR_UNDEFINED)
6289     return;
6290   if (vr1->type == VR_UNDEFINED)
6291     {
6292       set_value_range_to_undefined (vr0);
6293       return;
6294     }
6295 
6296   /* Save the original vr0 so we can return it as conservative intersection
6297      result when our worker turns things to varying.  */
6298   saved = *vr0;
6299   intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
6300 		    vr1->type, vr1->min, vr1->max);
6301   /* Make sure to canonicalize the result though as the inversion of a
6302      VR_RANGE can still be a VR_RANGE.  */
6303   set_and_canonicalize_value_range (vr0, vr0->type,
6304 				    vr0->min, vr0->max, vr0->equiv);
6305   /* If that failed, use the saved original VR0.  */
6306   if (vr0->type == VR_VARYING)
6307     {
6308       *vr0 = saved;
6309       return;
6310     }
6311   /* If the result is VR_UNDEFINED there is no need to mess with
6312      the equivalencies.  */
6313   if (vr0->type == VR_UNDEFINED)
6314     return;
6315 
6316   /* The resulting set of equivalences for range intersection is the union of
6317      the two sets.  */
6318   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6319     bitmap_ior_into (vr0->equiv, vr1->equiv);
6320   else if (vr1->equiv && !vr0->equiv)
6321     {
6322       /* All equivalence bitmaps are allocated from the same obstack.  So
6323 	 we can use the obstack associated with VR to allocate vr0->equiv.  */
6324       vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
6325       bitmap_copy (vr0->equiv, vr1->equiv);
6326     }
6327 }
6328 
6329 void
6330 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
6331 {
6332   if (dump_file && (dump_flags & TDF_DETAILS))
6333     {
6334       fprintf (dump_file, "Intersecting\n  ");
6335       dump_value_range (dump_file, vr0);
6336       fprintf (dump_file, "\nand\n  ");
6337       dump_value_range (dump_file, vr1);
6338       fprintf (dump_file, "\n");
6339     }
6340   vrp_intersect_ranges_1 (vr0, vr1);
6341   if (dump_file && (dump_flags & TDF_DETAILS))
6342     {
6343       fprintf (dump_file, "to\n  ");
6344       dump_value_range (dump_file, vr0);
6345       fprintf (dump_file, "\n");
6346     }
6347 }
6348 
6349 /* Meet operation for value ranges.  Given two value ranges VR0 and
6350    VR1, store in VR0 a range that contains both VR0 and VR1.  This
6351    may not be the smallest possible such range.  */
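
/* For example (an illustration): the meet of [1, 5] and [10, 20] is
   [1, 20], and the meet of ~[0, 0] with [10, 20] stays ~[0, 0]; each
   result is a superset of both operands, which is all this operation
   guarantees.  */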
6352 
6353 static void
6354 vrp_meet_1 (value_range *vr0, const value_range *vr1)
6355 {
6356   value_range saved;
6357 
6358   if (vr0->type == VR_UNDEFINED)
6359     {
6360       set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
6361       return;
6362     }
6363 
6364   if (vr1->type == VR_UNDEFINED)
6365     {
6366       /* VR0 already has the resulting range.  */
6367       return;
6368     }
6369 
6370   if (vr0->type == VR_VARYING)
6371     {
6372       /* Nothing to do.  VR0 already has the resulting range.  */
6373       return;
6374     }
6375 
6376   if (vr1->type == VR_VARYING)
6377     {
6378       set_value_range_to_varying (vr0);
6379       return;
6380     }
6381 
6382   saved = *vr0;
6383   union_ranges (&vr0->type, &vr0->min, &vr0->max,
6384 		vr1->type, vr1->min, vr1->max);
6385   if (vr0->type == VR_VARYING)
6386     {
6387       /* Failed to find an efficient meet.  Before giving up and setting
6388 	 the result to VARYING, see if we can at least derive a useful
6389 	 anti-range.  FIXME, all this nonsense about distinguishing
6390 	 anti-ranges from ranges is necessary because of the odd
6391 	 semantics of range_includes_zero_p and friends.  */
6392       if (((saved.type == VR_RANGE
6393 	    && range_includes_zero_p (saved.min, saved.max) == 0)
6394 	   || (saved.type == VR_ANTI_RANGE
6395 	       && range_includes_zero_p (saved.min, saved.max) == 1))
6396 	  && ((vr1->type == VR_RANGE
6397 	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
6398 	      || (vr1->type == VR_ANTI_RANGE
6399 		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
6400 	{
6401 	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
6402 
6403 	  /* Since this meet operation did not result from the meeting of
6404 	     two equivalent names, VR0 cannot have any equivalences.  */
6405 	  if (vr0->equiv)
6406 	    bitmap_clear (vr0->equiv);
6407 	  return;
6408 	}
6409 
6410       set_value_range_to_varying (vr0);
6411       return;
6412     }
6413   set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
6414 				    vr0->equiv);
6415   if (vr0->type == VR_VARYING)
6416     return;
6417 
6418   /* The resulting set of equivalences is always the intersection of
6419      the two sets.  */
6420   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6421     bitmap_and_into (vr0->equiv, vr1->equiv);
6422   else if (vr0->equiv && !vr1->equiv)
6423     bitmap_clear (vr0->equiv);
6424 }
6425 
6426 void
6427 vrp_meet (value_range *vr0, const value_range *vr1)
6428 {
6429   if (dump_file && (dump_flags & TDF_DETAILS))
6430     {
6431       fprintf (dump_file, "Meeting\n  ");
6432       dump_value_range (dump_file, vr0);
6433       fprintf (dump_file, "\nand\n  ");
6434       dump_value_range (dump_file, vr1);
6435       fprintf (dump_file, "\n");
6436     }
6437   vrp_meet_1 (vr0, vr1);
6438   if (dump_file && (dump_flags & TDF_DETAILS))
6439     {
6440       fprintf (dump_file, "to\n  ");
6441       dump_value_range (dump_file, vr0);
6442       fprintf (dump_file, "\n");
6443     }
6444 }
6445 
6446 
6447 /* Visit all arguments for PHI node PHI that flow through executable
6448    edges.  If a valid value range can be derived from all the incoming
6449    value ranges, set a new range for the LHS of PHI.  */
6450 
6451 enum ssa_prop_result
6452 vrp_prop::visit_phi (gphi *phi)
6453 {
6454   tree lhs = PHI_RESULT (phi);
6455   value_range vr_result = VR_INITIALIZER;
6456   extract_range_from_phi_node (phi, &vr_result);
6457   if (update_value_range (lhs, &vr_result))
6458     {
6459       if (dump_file && (dump_flags & TDF_DETAILS))
6460 	{
6461 	  fprintf (dump_file, "Found new range for ");
6462 	  print_generic_expr (dump_file, lhs);
6463 	  fprintf (dump_file, ": ");
6464 	  dump_value_range (dump_file, &vr_result);
6465 	  fprintf (dump_file, "\n");
6466 	}
6467 
6468       if (vr_result.type == VR_VARYING)
6469 	return SSA_PROP_VARYING;
6470 
6471       return SSA_PROP_INTERESTING;
6472     }
6473 
6474   /* Nothing changed, don't add outgoing edges.  */
6475   return SSA_PROP_NOT_INTERESTING;
6476 }
6477 
6478 class vrp_folder : public substitute_and_fold_engine
6479 {
6480  public:
6481   tree get_value (tree) FINAL OVERRIDE;
6482   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6483   bool fold_predicate_in (gimple_stmt_iterator *);
6484 
6485   class vr_values *vr_values;
6486 
6487   /* Delegators.  */
6488   tree vrp_evaluate_conditional (tree_code code, tree op0,
6489 				 tree op1, gimple *stmt)
6490     { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6491   bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6492     { return vr_values->simplify_stmt_using_ranges (gsi); }
6493   tree op_with_constant_singleton_value_range (tree op)
6494     { return vr_values->op_with_constant_singleton_value_range (op); }
6495 };
6496 
6497 /* If the statement pointed by SI has a predicate whose value can be
6498    computed using the value range information computed by VRP, compute
6499    its value and return true.  Otherwise, return false.  */
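/* For example, if VRP has computed the range [10, 20] for x_3
   (hypothetical name), a conditional such as "if (x_3 > 5)" is known
   to be true and the GIMPLE_COND is rewritten accordingly.  */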
6500 
6501 bool
6502 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6503 {
6504   bool assignment_p = false;
6505   tree val;
6506   gimple *stmt = gsi_stmt (*si);
6507 
6508   if (is_gimple_assign (stmt)
6509       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6510     {
6511       assignment_p = true;
6512       val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6513 				      gimple_assign_rhs1 (stmt),
6514 				      gimple_assign_rhs2 (stmt),
6515 				      stmt);
6516     }
6517   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6518     val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6519 				    gimple_cond_lhs (cond_stmt),
6520 				    gimple_cond_rhs (cond_stmt),
6521 				    stmt);
6522   else
6523     return false;
6524 
6525   if (val)
6526     {
6527       if (assignment_p)
6528         val = fold_convert (gimple_expr_type (stmt), val);
6529 
6530       if (dump_file)
6531 	{
6532 	  fprintf (dump_file, "Folding predicate ");
6533 	  print_gimple_expr (dump_file, stmt, 0);
6534 	  fprintf (dump_file, " to ");
6535 	  print_generic_expr (dump_file, val);
6536 	  fprintf (dump_file, "\n");
6537 	}
6538 
6539       if (is_gimple_assign (stmt))
6540 	gimple_assign_set_rhs_from_tree (si, val);
6541       else
6542 	{
6543 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6544 	  gcond *cond_stmt = as_a <gcond *> (stmt);
6545 	  if (integer_zerop (val))
6546 	    gimple_cond_make_false (cond_stmt);
6547 	  else if (integer_onep (val))
6548 	    gimple_cond_make_true (cond_stmt);
6549 	  else
6550 	    gcc_unreachable ();
6551 	}
6552 
6553       return true;
6554     }
6555 
6556   return false;
6557 }
6558 
6559 /* Callback for substitute_and_fold folding the stmt at *SI.  */
6560 
6561 bool
6562 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6563 {
6564   if (fold_predicate_in (si))
6565     return true;
6566 
6567   return simplify_stmt_using_ranges (si);
6568 }
6569 
6570 /* If OP has a value range with a single constant value return that,
6571    otherwise return NULL_TREE.  This returns OP itself if OP is a
6572    constant.
6573 
6574    Implemented as a pure wrapper right now, but this will change.  */
6575 
6576 tree
6577 vrp_folder::get_value (tree op)
6578 {
6579   return op_with_constant_singleton_value_range (op);
6580 }
6581 
6582 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6583    argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6584    BB.  If no such ASSERT_EXPR is found, return OP.  */
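/* For example, if a dominating block contains the (hypothetical)
   statement x_7 = ASSERT_EXPR <x_3, x_3 != 0>, a use of x_3 in BB can
   be replaced by the more informative name x_7.  */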
6585 
6586 static tree
6587 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6588 {
6589   imm_use_iterator imm_iter;
6590   gimple *use_stmt;
6591   use_operand_p use_p;
6592 
6593   if (TREE_CODE (op) == SSA_NAME)
6594     {
6595       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6596 	{
6597 	  use_stmt = USE_STMT (use_p);
6598 	  if (use_stmt != stmt
6599 	      && gimple_assign_single_p (use_stmt)
6600 	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6601 	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6602 	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6603 	    return gimple_assign_lhs (use_stmt);
6604 	}
6605     }
6606   return op;
6607 }
6608 
6609 /* A hack.  */
6610 static class vr_values *x_vr_values;
6611 
6612 /* A trivial wrapper so that we can present the generic jump threading
6613    code with a simple API for simplifying statements.  STMT is the
6614    statement we want to simplify, WITHIN_STMT provides the location
6615    for any overflow warnings.  */
6616 
6617 static tree
6618 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6619     class avail_exprs_stack *avail_exprs_stack,
6620     basic_block bb)
6621 {
6622   /* First see if the conditional is in the hash table.  */
6623   tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6624   if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6625     return cached_lhs;
6626 
6627   vr_values *vr_values = x_vr_values;
6628   if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6629     {
6630       tree op0 = gimple_cond_lhs (cond_stmt);
6631       op0 = lhs_of_dominating_assert (op0, bb, stmt);
6632 
6633       tree op1 = gimple_cond_rhs (cond_stmt);
6634       op1 = lhs_of_dominating_assert (op1, bb, stmt);
6635 
6636       return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6637 						  op0, op1, within_stmt);
6638     }
6639 
6640   /* We simplify a switch statement by trying to determine which case label
6641      will be taken.  If we are successful then we return the corresponding
6642      CASE_LABEL_EXPR.  */
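      /* For example, if the index is known to lie in [3, 4] and the
	 switch has a label "case 3 ... 5:", that single label covers
	 the entire range and is returned; if no label intersects
	 [3, 4] at all, the default label is returned instead.  */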
6643   if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6644     {
6645       tree op = gimple_switch_index (switch_stmt);
6646       if (TREE_CODE (op) != SSA_NAME)
6647 	return NULL_TREE;
6648 
6649       op = lhs_of_dominating_assert (op, bb, stmt);
6650 
6651       value_range *vr = vr_values->get_value_range (op);
6652       if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
6653 	  || symbolic_range_p (vr))
6654 	return NULL_TREE;
6655 
6656       if (vr->type == VR_RANGE)
6657 	{
6658 	  size_t i, j;
6659 	  /* Get the range of labels that contain a part of the operand's
6660 	     value range.  */
6661 	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
6662 
6663 	  /* Is there only one such label?  */
6664 	  if (i == j)
6665 	    {
6666 	      tree label = gimple_switch_label (switch_stmt, i);
6667 
6668 	      /* The i'th label will be taken only if the value range of the
6669 		 operand is entirely within the bounds of this label.  */
6670 	      if (CASE_HIGH (label) != NULL_TREE
6671 		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
6672 		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
6673 		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
6674 		     && tree_int_cst_equal (vr->min, vr->max)))
6675 		return label;
6676 	    }
6677 
6678 	  /* If there are no such labels then the default label will be
6679 	     taken.  */
6680 	  if (i > j)
6681 	    return gimple_switch_label (switch_stmt, 0);
6682 	}
6683 
6684       if (vr->type == VR_ANTI_RANGE)
6685 	{
6686 	  unsigned n = gimple_switch_num_labels (switch_stmt);
6687 	  tree min_label = gimple_switch_label (switch_stmt, 1);
6688 	  tree max_label = gimple_switch_label (switch_stmt, n - 1);
6689 
6690 	  /* The default label will be taken only if the anti-range of the
6691 	     operand is entirely outside the bounds of all the (non-default)
6692 	     case labels.  */
6693 	  if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
6694 	      && (CASE_HIGH (max_label) != NULL_TREE
6695 		  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
6696 		  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
6697 	  return gimple_switch_label (switch_stmt, 0);
6698 	}
6699 
6700       return NULL_TREE;
6701     }
6702 
6703   if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6704     {
6705       tree lhs = gimple_assign_lhs (assign_stmt);
6706       if (TREE_CODE (lhs) == SSA_NAME
6707 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6708 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
6709 	  && stmt_interesting_for_vrp (stmt))
6710 	{
6711 	  edge dummy_e;
6712 	  tree dummy_tree;
6713 	  value_range new_vr = VR_INITIALIZER;
6714 	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
6715 					      &dummy_tree, &new_vr);
6716 	  if (range_int_cst_singleton_p (&new_vr))
6717 	    return new_vr.min;
6718 	}
6719     }
6720 
6721   return NULL_TREE;
6722 }
6723 
6724 class vrp_dom_walker : public dom_walker
6725 {
6726 public:
6727   vrp_dom_walker (cdi_direction direction,
6728 		  class const_and_copies *const_and_copies,
6729 		  class avail_exprs_stack *avail_exprs_stack)
6730     : dom_walker (direction, REACHABLE_BLOCKS),
6731       m_const_and_copies (const_and_copies),
6732       m_avail_exprs_stack (avail_exprs_stack),
6733       m_dummy_cond (NULL) {}
6734 
6735   virtual edge before_dom_children (basic_block);
6736   virtual void after_dom_children (basic_block);
6737 
6738   class vr_values *vr_values;
6739 
6740 private:
6741   class const_and_copies *m_const_and_copies;
6742   class avail_exprs_stack *m_avail_exprs_stack;
6743 
6744   gcond *m_dummy_cond;
6745 
6746 };
6747 
6748 /* Called before processing dominator children of BB.  We want to look
6749    at ASSERT_EXPRs and record information from them in the appropriate
6750    tables.
6751 
6752    We could look at other statements here.  It's not seen as likely
6753    to significantly increase the jump threads we discover.  */
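/* For example, from the (hypothetical) statement
   x_5 = ASSERT_EXPR <x_3, x_3 > 0> we record the condition x_3 > 0
   (and conditions derivable from it) in the expression table, and
   record that x_5 is a copy of x_3, so the jump threader can simplify
   conditionals in dominated blocks.  */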
6754 
6755 edge
6756 vrp_dom_walker::before_dom_children (basic_block bb)
6757 {
6758   gimple_stmt_iterator gsi;
6759 
6760   m_avail_exprs_stack->push_marker ();
6761   m_const_and_copies->push_marker ();
6762   for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6763     {
6764       gimple *stmt = gsi_stmt (gsi);
6765       if (gimple_assign_single_p (stmt)
6766          && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6767 	{
6768 	  tree rhs1 = gimple_assign_rhs1 (stmt);
6769 	  tree cond = TREE_OPERAND (rhs1, 1);
6770 	  tree inverted = invert_truthvalue (cond);
6771 	  vec<cond_equivalence> p;
6772 	  p.create (3);
6773 	  record_conditions (&p, cond, inverted);
6774 	  for (unsigned int i = 0; i < p.length (); i++)
6775 	    m_avail_exprs_stack->record_cond (&p[i]);
6776 
6777 	  tree lhs = gimple_assign_lhs (stmt);
6778 	  m_const_and_copies->record_const_or_copy (lhs,
6779 						    TREE_OPERAND (rhs1, 0));
6780 	  p.release ();
6781 	  continue;
6782 	}
6783       break;
6784     }
6785   return NULL;
6786 }
6787 
6788 /* Called after processing dominator children of BB.  This is where we
6789    actually call into the threader.  */
6790 void
6791 vrp_dom_walker::after_dom_children (basic_block bb)
6792 {
6793   if (!m_dummy_cond)
6794     m_dummy_cond = gimple_build_cond (NE_EXPR,
6795 				      integer_zero_node, integer_zero_node,
6796 				      NULL, NULL);
6797 
6798   x_vr_values = vr_values;
6799   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6800 			 m_avail_exprs_stack, NULL,
6801 			 simplify_stmt_for_jump_threading);
6802   x_vr_values = NULL;
6803 
6804   m_avail_exprs_stack->pop_to_marker ();
6805   m_const_and_copies->pop_to_marker ();
6806 }
6807 
6808 /* Blocks which have more than one predecessor and more than
6809    one successor present jump threading opportunities, i.e.,
6810    when the block is reached from a specific predecessor, we
6811    may be able to determine which of the outgoing edges will
6812    be traversed.  When this optimization applies, we are able
6813    to avoid conditionals at runtime and we may expose secondary
6814    optimization opportunities.
6815 
6816    This routine is effectively a driver for the generic jump
6817    threading code.  It basically just presents the generic code
6818    with edges that may be suitable for jump threading.
6819 
6820    Unlike DOM, we do not iterate VRP if jump threading was successful.
6821    While iterating may expose new opportunities for VRP, it is expected
6822    those opportunities would be very limited and the compile time cost
6823    to expose those opportunities would be significant.
6824 
6825    As jump threading opportunities are discovered, they are registered
6826    for later realization.  */
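/* A typical opportunity: a block ending in "if (x_2 != 0)" reached
   from a predecessor edge along which VRP knows x_2 is in [1, +INF]
   (hypothetical names).  The conditional is decidable on that path,
   so the path can be threaded directly to the taken successor.  */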
6827 
6828 static void
6829 identify_jump_threads (class vr_values *vr_values)
6830 {
6831   int i;
6832   edge e;
6833 
6834   /* Ugh.  When substituting values earlier in this pass we can
6835      wipe the dominance information.  So rebuild the dominator
6836      information as we need it within the jump threading code.  */
6837   calculate_dominance_info (CDI_DOMINATORS);
6838 
6839   /* We do not allow VRP information to be used for jump threading
6840      across a back edge in the CFG.  Otherwise it becomes too
6841      difficult to avoid eliminating loop exit tests.  Of course
6842      EDGE_DFS_BACK is not accurate at this time so we have to
6843      recompute it.  */
6844   mark_dfs_back_edges ();
6845 
6846   /* Do not thread across edges we are about to remove.  Just marking
6847      them as EDGE_IGNORE will do.  */
6848   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6849     e->flags |= EDGE_IGNORE;
6850 
6851   /* Allocate our unwinder stack to unwind any temporary equivalences
6852      that might be recorded.  */
6853   const_and_copies *equiv_stack = new const_and_copies ();
6854 
6855   hash_table<expr_elt_hasher> *avail_exprs
6856     = new hash_table<expr_elt_hasher> (1024);
6857   avail_exprs_stack *avail_exprs_stack
6858     = new class avail_exprs_stack (avail_exprs);
6859 
6860   vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6861   walker.vr_values = vr_values;
6862   walker.walk (cfun->cfg->x_entry_block_ptr);
6863 
6864   /* Clear EDGE_IGNORE.  */
6865   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6866     e->flags &= ~EDGE_IGNORE;
6867 
6868   /* We do not actually update the CFG or SSA graphs at this point as
6869      ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6870      handle ASSERT_EXPRs gracefully.  */
6871   delete equiv_stack;
6872   delete avail_exprs;
6873   delete avail_exprs_stack;
6874 }
6875 
6876 /* Traverse all the blocks folding conditionals with known ranges.  */
6877 
6878 void
6879 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6880 {
6881   size_t i;
6882 
6883   /* We have completed propagating through the lattice.  */
6884   vr_values.set_lattice_propagation_complete ();
6885 
6886   if (dump_file)
6887     {
6888       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6889       vr_values.dump_all_value_ranges (dump_file);
6890       fprintf (dump_file, "\n");
6891     }
6892 
6893   /* Record the computed ranges in the IL: non-null for pointer SSA_NAMEs, range info for non-pointer SSA_NAMEs.  */
6894   for (i = 0; i < num_ssa_names; i++)
6895     {
6896       tree name = ssa_name (i);
6897       if (!name)
6898 	continue;
6899 
6900       value_range *vr = get_value_range (name);
6901       if (vr->type == VR_VARYING
6902 	  || vr->type == VR_UNDEFINED
6903 	  || TREE_CODE (vr->min) != INTEGER_CST
6904 	  || TREE_CODE (vr->max) != INTEGER_CST)
6905 	continue;
6907 
6908       if (POINTER_TYPE_P (TREE_TYPE (name))
6909 	  && ((vr->type == VR_RANGE
6910 	       && range_includes_zero_p (vr->min, vr->max) == 0)
6911 	      || (vr->type == VR_ANTI_RANGE
6912 		  && range_includes_zero_p (vr->min, vr->max) == 1)))
6913 	set_ptr_nonnull (name);
6914       else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6915 	set_range_info (name, vr->type,
6916 			wi::to_wide (vr->min),
6917 			wi::to_wide (vr->max));
6918     }
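  /* For example, an SSA name whose lattice value ended up as
     VR_RANGE [0, 255] keeps that range in its SSA_NAME_RANGE_INFO,
     where later passes can consume it after VRP's own lattice has
     been discarded.  */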
6919 
6920   /* If we're checking array refs, we want to merge information on
6921      the executability of each edge between vrp_folder and the
6922      check_array_bounds_dom_walker: each can clear the
6923      EDGE_EXECUTABLE flag on edges, in different ways.
6924 
6925      Hence, if we're going to call check_all_array_refs, set
6926      the flag on every edge now, rather than in
6927      check_array_bounds_dom_walker's ctor; vrp_folder may clear
6928      it from some edges.  */
6929   if (warn_array_bounds && warn_array_bounds_p)
6930     set_all_edges_as_executable (cfun);
6931 
6932   class vrp_folder vrp_folder;
6933   vrp_folder.vr_values = &vr_values;
6934   vrp_folder.substitute_and_fold ();
6935 
6936   if (warn_array_bounds && warn_array_bounds_p)
6937     check_all_array_refs ();
6938 }
6939 
6940 /* Main entry point to VRP (Value Range Propagation).  This pass is
6941    loosely based on J. R. C. Patterson, ``Accurate Static Branch
6942    Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6943    Programming Language Design and Implementation, pp. 67-78, 1995.
6944    Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6945 
6946    This is essentially an SSA-CCP pass modified to deal with ranges
6947    instead of constants.
6948 
6949    While propagating ranges, we may find that two or more SSA names
6950    have equivalent, though distinct, ranges.  For instance,
6951 
6952      1	x_9 = p_3->a;
6953      2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6954      3	if (p_4 == q_2)
6955      4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6956      5	endif
6957      6	if (q_2)
6958 
6959    In the code above, pointer p_5 has range [q_2, q_2], but from the
6960    code we can also determine that p_5 cannot be NULL and, if q_2 had
6961    a non-varying range, p_5's range should also be compatible with it.
6962 
6963    These equivalences are created by two expressions: ASSERT_EXPR and
6964    copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
6965    result of another assertion, then we can use the fact that p_5 and
6966    p_4 are equivalent when evaluating p_5's range.
6967 
6968    Together with value ranges, we also propagate these equivalences
6969    between names so that we can take advantage of information from
6970    multiple ranges when doing final replacement.  Note that this
6971    equivalency relation is transitive but not symmetric.
6972 
6973    In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6974    cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6975    in contexts where that assertion does not hold (e.g., in line 6).
6976 
6977    TODO, the main difference between this pass and Patterson's is that
6978    we do not propagate edge probabilities.  We only compute whether
6979    edges can be taken or not.  That is, instead of having a spectrum
6980    of jump probabilities between 0 and 1, we only deal with 0, 1 and
6981    DON'T KNOW.  In the future, it may be worthwhile to propagate
6982    probabilities to aid branch prediction.  */
6983 
6984 static unsigned int
6985 execute_vrp (bool warn_array_bounds_p)
6986 {
6987   int i;
6988   edge e;
6989   switch_update *su;
6990 
6991   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6992   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6993   scev_initialize ();
6994 
6995   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
6996      Inserting assertions may split edges which will invalidate
6997      EDGE_DFS_BACK.  */
6998   insert_range_assertions ();
6999 
7000   to_remove_edges.create (10);
7001   to_update_switch_stmts.create (5);
7002   threadedge_initialize_values ();
7003 
7004   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
7005   mark_dfs_back_edges ();
7006 
7007   class vrp_prop vrp_prop;
7008   vrp_prop.vrp_initialize ();
7009   vrp_prop.ssa_propagate ();
7010   vrp_prop.vrp_finalize (warn_array_bounds_p);
7011 
7012   /* We must identify jump threading opportunities before we release
7013      the datastructures built by VRP.  */
7014   identify_jump_threads (&vrp_prop.vr_values);
7015 
7016   /* A comparison of an SSA_NAME against a constant where the SSA_NAME
7017      was set by a type conversion can often be rewritten to use the
7018      RHS of the type conversion.
7019 
7020      However, doing so inhibits jump threading through the comparison.
7021      So that transformation is not performed until after jump threading
7022      is complete.  */
7023   basic_block bb;
7024   FOR_EACH_BB_FN (bb, cfun)
7025     {
7026       gimple *last = last_stmt (bb);
7027       if (last && gimple_code (last) == GIMPLE_COND)
7028 	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
7029     }
7030 
7031   free_numbers_of_iterations_estimates (cfun);
7032 
7033   /* ASSERT_EXPRs must be removed before finalizing jump threads
7034      as finalizing jump threads calls the CFG cleanup code which
7035      does not properly handle ASSERT_EXPRs.  */
7036   remove_range_assertions ();
7037 
7038   /* If we exposed any new variables, go ahead and put them into
7039      SSA form now, before we handle jump threading.  This simplifies
7040      interactions between rewriting of _DECL nodes into SSA form
7041      and rewriting SSA_NAME nodes into SSA form after block
7042      duplication and CFG manipulation.  */
7043   update_ssa (TODO_update_ssa);
7044 
7045   /* We identified all the jump threading opportunities earlier, but could
7046      not transform the CFG at that time.  This routine transforms the
7047      CFG and arranges for the dominator tree to be rebuilt if necessary.
7048 
7049      Note the SSA graph update will occur during the normal TODO
7050      processing by the pass manager.  */
7051   thread_through_all_blocks (false);
7052 
7053   /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
7054      CFG in a broken state and requires a cfg_cleanup run.  */
7055   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
7056     remove_edge (e);
7057   /* Update SWITCH_EXPR case label vector.  */
7058   FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
7059     {
7060       size_t j;
7061       size_t n = TREE_VEC_LENGTH (su->vec);
7062       tree label;
7063       gimple_switch_set_num_labels (su->stmt, n);
7064       for (j = 0; j < n; j++)
7065 	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7066       /* As we may have replaced the default label with a regular one
7067 	 make sure to make it a real default label again.  This ensures
7068 	 optimal expansion.  */
7069       label = gimple_switch_label (su->stmt, 0);
7070       CASE_LOW (label) = NULL_TREE;
7071       CASE_HIGH (label) = NULL_TREE;
7072     }
7073 
7074   if (to_remove_edges.length () > 0)
7075     {
7076       free_dominance_info (CDI_DOMINATORS);
7077       loops_state_set (LOOPS_NEED_FIXUP);
7078     }
7079 
7080   to_remove_edges.release ();
7081   to_update_switch_stmts.release ();
7082   threadedge_finalize_values ();
7083 
7084   scev_finalize ();
7085   loop_optimizer_finalize ();
7086   return 0;
7087 }
7088 
7089 namespace {
7090 
7091 const pass_data pass_data_vrp =
7092 {
7093   GIMPLE_PASS, /* type */
7094   "vrp", /* name */
7095   OPTGROUP_NONE, /* optinfo_flags */
7096   TV_TREE_VRP, /* tv_id */
7097   PROP_ssa, /* properties_required */
7098   0, /* properties_provided */
7099   0, /* properties_destroyed */
7100   0, /* todo_flags_start */
7101   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
7102 };
7103 
7104 class pass_vrp : public gimple_opt_pass
7105 {
7106 public:
7107   pass_vrp (gcc::context *ctxt)
7108     : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
7109   {}
7110 
7111   /* opt_pass methods: */
7112   opt_pass * clone () { return new pass_vrp (m_ctxt); }
7113   void set_pass_param (unsigned int n, bool param)
7114     {
7115       gcc_assert (n == 0);
7116       warn_array_bounds_p = param;
7117     }
7118   virtual bool gate (function *) { return flag_tree_vrp != 0; }
7119   virtual unsigned int execute (function *)
7120     { return execute_vrp (warn_array_bounds_p); }
7121 
7122  private:
7123   bool warn_array_bounds_p;
7124 }; // class pass_vrp
7125 
7126 } // anon namespace
7127 
7128 gimple_opt_pass *
7129 make_pass_vrp (gcc::context *ctxt)
7130 {
7131   return new pass_vrp (ctxt);
7132 }
7133