/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of location lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of assert_locus nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

vec<edge> to_remove_edges;
vec<switch_update> to_update_switch_stmts;


/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
	  || (type_max != NULL_TREE
	      && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
	  || (type_min != NULL_TREE
	      && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */
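
/* As an illustrative example (assuming an 8-bit unsigned type, with
   masks written in hex): with NONZERO_BITS == 0xf0 only multiples of
   16 satisfy the mask.  The anti-range ~[20, 40] tightens to
   ~[17, 47], because 16 and 48 are the nearest mask-compatible
   values.  The anti-range ~[20, 30] excludes no mask-compatible
   value at all, so it becomes the full VR_RANGE [0, 240].  */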

enum value_range_type
intersect_range_with_nonzero_bits (enum value_range_type vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}

/* Set value range VR to VR_UNDEFINED.  */

static inline void
set_value_range_to_undefined (value_range *vr)
{
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to VR_VARYING.  */

void
set_value_range_to_varying (value_range *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

void
set_value_range (value_range *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
  /* Check the validity of the range.  */
  if (flag_checking
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
    {
      int cmp;

      gcc_assert (min && max);

      gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
    }

  if (flag_checking
      && (t == VR_UNDEFINED || t == VR_VARYING))
    {
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
    }

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}


/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX to represent the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */
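
/* As an illustrative example (assuming an 8-bit unsigned type): the
   wrapping range [250, 5], which covers [250, 255] U [0, 5], is
   canonicalized to the anti-range ~[6, 249].  */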

void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* If MIN and MAX are in the wrong order, swap them and adjust
     both the bounds and the VR type.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one-bit precision, if max < min the swapped range covers
	 all values, so for VR_RANGE it is varying and for VR_ANTI_RANGE
	 it is the empty range; drop to varying in both cases.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > +INF as -INF/+INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ???  This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
        {
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
        }
      else if (is_max)
        {
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
        }
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set_value_range (vr, t, min, max, equiv);
}

/* Copy value range FROM into value range TO.  */

void
copy_value_range (value_range *to, value_range *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag.  */

void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
set_value_range_to_nonnull (value_range *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}


/* Set value range VR to a NULL range of type TYPE.  */

void
set_value_range_to_null (value_range *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}


/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  */
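
/* As an illustrative example (assuming a signed type): [-3, 5]
   yields [-5, 5] and [-7, 2] yields [-7, 7].  */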

static void
abs_extent_range (value_range *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}

/* Return true if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
	  || ((!b1 || bitmap_empty_p (b1))
	      && (!b2 || bitmap_empty_p (b2)))
	  || (b1 && b2
	      && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is ~[0, 0].  */

bool
range_is_nonnull (value_range *vr)
{
  return vr->type == VR_ANTI_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}


/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (value_range *vr)
{
  return vr->type == VR_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It need not
   be a singleton.  */

bool
range_int_cst_p (value_range *vr)
{
  return (vr->type == VR_RANGE
	  && TREE_CODE (vr->max) == INTEGER_CST
	  && TREE_CODE (vr->min) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (value_range *vr)
{
  return (range_int_cst_p (vr)
	  && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

bool
symbolic_range_p (value_range *vr)
{
  return (!is_gimple_min_invariant (vr->min)
          || !is_gimple_min_invariant (vr->max));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */
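
/* As an illustrative example: for "x_1 + 5" this returns x_1 with
   *INV set to 5, and for "3 - x_1" it returns x_1 with *NEG set to
   true and *INV set to 3.  */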

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

   	-2 if VAL1 and VAL2 cannot be compared at compile-time,
   	-1 if VAL1 < VAL2,
   	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */
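
/* As an illustrative example: comparing "x_1 + 1" with "x_1 + 3"
   returns -1 when signed overflow is assumed to be undefined, and
   *STRICT_OVERFLOW_P is set to record that assumption.  */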

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}


/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
          0 if VAL is not inside [MIN, MAX],
	 -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}


/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.
   */

static inline bool
value_ranges_intersect_p (value_range *vr0, value_range *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}


/* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
   include the value zero, -2 if we cannot tell.  */

int
range_includes_zero_p (tree min, tree max)
{
  tree zero = build_int_cst (TREE_TYPE (min), 0);
  return value_inside_range (zero, min, max);
}

/* Return true if *VR is known to only contain nonnegative values.  */

static inline bool
value_range_nonnegative_p (value_range *vr)
{
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);
    }

  return false;
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

tree
value_range_constant_singleton (value_range *vr)
{
  if (vr->type == VR_RANGE
      && vrp_operand_equal_p (vr->min, vr->max)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}

/* Wrapper around int_const_binop.  Return true if we can compute the
   result; i.e. if the operation doesn't overflow or if the overflow is
   undefined.  In the latter case (if the operation overflows and
   overflow is undefined), then adjust the result to be -INF or +INF
   depending on CODE, VAL1 and VAL2.  Return the value in *RES.

   Return false for division by zero, for which the result is
   indeterminate.  */
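
/* As an illustrative example (assuming a signed 8-bit type): 100 * 2
   overflows, and when signed overflow is undefined the result is
   saturated to +127, since both operands have the same sign and the
   overflow is therefore towards +INF; true is returned.  */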

static bool
vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
{
  bool overflow = false;
  signop sign = TYPE_SIGN (TREE_TYPE (val1));

  switch (code)
    {
    case RSHIFT_EXPR:
    case LSHIFT_EXPR:
      {
	wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
	if (wi::neg_p (wval2))
	  {
	    wval2 = -wval2;
	    if (code == RSHIFT_EXPR)
	      code = LSHIFT_EXPR;
	    else
	      code = RSHIFT_EXPR;
	  }

	if (code == RSHIFT_EXPR)
	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  *res = wi::rshift (wi::to_wide (val1), wval2, sign);
	else
	  *res = wi::lshift (wi::to_wide (val1), wval2);
	break;
      }

    case MULT_EXPR:
      *res = wi::mul (wi::to_wide (val1),
		      wi::to_wide (val2), sign, &overflow);
      break;

    case TRUNC_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (integer_zerop (val2))
	return false;
      else
	*res = wi::div_trunc (wi::to_wide (val1),
			      wi::to_wide (val2), sign, &overflow);
      break;

    case FLOOR_DIV_EXPR:
      if (integer_zerop (val2))
	return false;
      *res = wi::div_floor (wi::to_wide (val1),
			    wi::to_wide (val2), sign, &overflow);
      break;

    case CEIL_DIV_EXPR:
      if (integer_zerop (val2))
	return false;
      *res = wi::div_ceil (wi::to_wide (val1),
			   wi::to_wide (val2), sign, &overflow);
      break;

    case ROUND_DIV_EXPR:
      if (integer_zerop (val2))
	return false;
      *res = wi::div_round (wi::to_wide (val1),
			    wi::to_wide (val2), sign, &overflow);
      break;

    default:
      gcc_unreachable ();
    }

  if (overflow
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
    {
      /* If the operation overflowed return -INF or +INF depending
	 on the operation and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  */
	  || (code == PLUS_EXPR && sgn1 >= 0)
	  /* For subtraction, operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  */
	  || (code == MINUS_EXPR && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	*res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)),
			      TYPE_SIGN (TREE_TYPE (val1)));
      else
	*res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)),
			      TYPE_SIGN (TREE_TYPE (val1)));
      return true;
    }

  return !overflow;
}


/* For range VR compute two wide_int bitmasks.  If some bit in
   *MAY_BE_NONZERO is unset, it means that for all numbers in the range
   the bit is 0; otherwise it might be 0 or 1.  If some bit in
   *MUST_BE_NONZERO is set, it means that for all numbers in the range
   the bit is 1; otherwise it might be 0 or 1.  */
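
/* As an illustrative example: for the range [4, 7] this computes
   *MAY_BE_NONZERO == 0b111 and *MUST_BE_NONZERO == 0b100; bit 2 is
   set in every value of the range, bits 0 and 1 vary, and all
   higher bits are zero throughout.  */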

bool
zero_nonzero_bits_from_vr (const tree expr_type,
			   value_range *vr,
			   wide_int *may_be_nonzero,
			   wide_int *must_be_nonzero)
{
  *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
  *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
  if (!range_int_cst_p (vr))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      *may_be_nonzero = wi::to_wide (vr->min);
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
	   || tree_int_cst_sgn (vr->max) < 0)
    {
      wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
      *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
      *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
      if (xor_mask != 0)
	{
	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
				    may_be_nonzero->get_precision ());
	  *may_be_nonzero = *may_be_nonzero | mask;
	  *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
	}
    }

  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */
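
/* As an illustrative example (assuming a 32-bit int): ~[0, 5] splits
   into *VR0 == [INT_MIN, -1] and *VR1 == [6, INT_MAX], whereas for an
   unsigned type ~[0, 5] is representable as the single range
   [6, UINT_MAX] and *VR1 is left VR_UNDEFINED.  */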

static bool
ranges_from_anti_range (value_range *ar,
			value_range *vr0, value_range *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
    }
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
      vr1->max = vrp_val_max (type);
    }
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}

/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  */
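
/* As an illustrative example: for [2, 4] * [3, 5] the four cross
   products are 6, 10, 12 and 20, so the resulting range is [6, 20].  */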

static void
extract_range_from_multiplicative_op_1 (value_range *vr,
					enum tree_code code,
					value_range *vr0, value_range *vr1)
{
  enum value_range_type rtype;
  wide_int val, min, max;
  tree type;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
     and then figure the smallest and largest values to form the
     new range.  */
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert (vr0->type == VR_RANGE
	      && vr0->type == vr1->type);

  rtype = vr0->type;
  type = TREE_TYPE (vr0->min);
  signop sgn = TYPE_SIGN (type);

  /* Compute the 4 cross operations and their minimum and maximum value.  */
  if (!vrp_int_const_binop (code, vr0->min, vr1->min, &val))
    {
      set_value_range_to_varying (vr);
      return;
    }
  min = max = val;

  if (vr1->max != vr1->min)
    {
      if (!vrp_int_const_binop (code, vr0->min, vr1->max, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  if (vr0->max != vr0->min)
    {
      if (!vrp_int_const_binop (code, vr0->max, vr1->min, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  if (vr0->min != vr0->max && vr1->min != vr1->max)
    {
      if (!vrp_int_const_binop (code, vr0->max, vr1->max, &val))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      if (wi::lt_p (val, min, sgn))
	min = val;
      else if (wi::gt_p (val, max, sgn))
	max = val;
    }

  /* If the new range has its limits swapped around (MIN > MAX),
     then the operation caused one of them to wrap around, mark
     the new range VARYING.  */
  if (wi::gt_p (min, max, sgn))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt for [-INF, +INF].
     We learn nothing when we have INF on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF].  */
  if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn))
      && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, rtype,
		   wide_int_to_tree (type, min),
		   wide_int_to_tree (type, max), NULL);
}

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

void
extract_range_from_binary_expr_1 (value_range *vr,
				  enum tree_code code, tree expr_type,
				  value_range *vr0_, value_range *vr1_)
{
  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_meet.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR
      && vr0.type == VR_ANTI_RANGE
      && vr0.min == vr0.max
      && integer_zerop (vr0.min))
    {
      set_value_range_to_nonnull (vr, expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness: if both are non-null, the result is non-null;
	     if both are null, the result is null; otherwise the
	     result is varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
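  /* As an illustrative example: [1, 3] + [10, 20] gives [11, 23],
     while [1, 3] - [10, 20] combines each bound with the opposite
     bound of the other range and gives [1 - 20, 3 - 10] == [-19, -7].  */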
1461   if (code == PLUS_EXPR || code == MINUS_EXPR)
1462     {
1463       const bool minus_p = (code == MINUS_EXPR);
1464       tree min_op0 = vr0.min;
1465       tree min_op1 = minus_p ? vr1.max : vr1.min;
1466       tree max_op0 = vr0.max;
1467       tree max_op1 = minus_p ? vr1.min : vr1.max;
1468       tree sym_min_op0 = NULL_TREE;
1469       tree sym_min_op1 = NULL_TREE;
1470       tree sym_max_op0 = NULL_TREE;
1471       tree sym_max_op1 = NULL_TREE;
1472       bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1473 
1474       /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1475 	 single-symbolic ranges, try to compute the precise resulting range,
1476 	 but only if we know that this resulting range will also be constant
1477 	 or single-symbolic.  */
1478       if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
1479 	  && (TREE_CODE (min_op0) == INTEGER_CST
1480 	      || (sym_min_op0
1481 		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1482 	  && (TREE_CODE (min_op1) == INTEGER_CST
1483 	      || (sym_min_op1
1484 		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1485 	  && (!(sym_min_op0 && sym_min_op1)
1486 	      || (sym_min_op0 == sym_min_op1
1487 		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1488 	  && (TREE_CODE (max_op0) == INTEGER_CST
1489 	      || (sym_max_op0
1490 		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1491 	  && (TREE_CODE (max_op1) == INTEGER_CST
1492 	      || (sym_max_op1
1493 		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1494 	  && (!(sym_max_op0 && sym_max_op1)
1495 	      || (sym_max_op0 == sym_max_op1
1496 		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1497 	{
1498 	  const signop sgn = TYPE_SIGN (expr_type);
1499 	  const unsigned int prec = TYPE_PRECISION (expr_type);
1500 	  wide_int type_min, type_max, wmin, wmax;
1501 	  int min_ovf = 0;
1502 	  int max_ovf = 0;
1503 
1504 	  /* Get the lower and upper bounds of the type.  */
1505 	  if (TYPE_OVERFLOW_WRAPS (expr_type))
1506 	    {
1507 	      type_min = wi::min_value (prec, sgn);
1508 	      type_max = wi::max_value (prec, sgn);
1509 	    }
1510 	  else
1511 	    {
1512 	      type_min = wi::to_wide (vrp_val_min (expr_type));
1513 	      type_max = wi::to_wide (vrp_val_max (expr_type));
1514 	    }
1515 
1516 	  /* Combine the lower bounds, if any.  */
1517 	  if (min_op0 && min_op1)
1518 	    {
1519 	      if (minus_p)
1520 		{
1521 		  wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1);
1522 
1523 		  /* Check for overflow.  */
1524 		  if (wi::cmp (0, wi::to_wide (min_op1), sgn)
1525 		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
1526 		    min_ovf = wi::cmp (wi::to_wide (min_op0),
1527 				       wi::to_wide (min_op1), sgn);
1528 		}
1529 	      else
1530 		{
1531 		  wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1);
1532 
1533 		  /* Check for overflow.  */
1534 		  if (wi::cmp (wi::to_wide (min_op1), 0, sgn)
1535 		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
1536 		    min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn);
1537 		}
1538 	    }
1539 	  else if (min_op0)
1540 	    wmin = wi::to_wide (min_op0);
1541 	  else if (min_op1)
1542 	    {
1543 	      if (minus_p)
1544 		{
1545 		  wmin = -wi::to_wide (min_op1);
1546 
1547 		  /* Check for overflow.  */
1548 		  if (sgn == SIGNED
1549 		      && wi::neg_p (wi::to_wide (min_op1))
1550 		      && wi::neg_p (wmin))
1551 		    min_ovf = 1;
1552 		  else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0)
1553 		    min_ovf = -1;
1554 		}
1555 	      else
1556 		wmin = wi::to_wide (min_op1);
1557 	    }
1558 	  else
1559 	    wmin = wi::shwi (0, prec);
1560 
1561 	  /* Combine the upper bounds, if any.  */
1562 	  if (max_op0 && max_op1)
1563 	    {
1564 	      if (minus_p)
1565 		{
1566 		  wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1);
1567 
1568 		  /* Check for overflow.  */
1569 		  if (wi::cmp (0, wi::to_wide (max_op1), sgn)
1570 		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
1571 		    max_ovf = wi::cmp (wi::to_wide (max_op0),
1572 				       wi::to_wide (max_op1), sgn);
1573 		}
1574 	      else
1575 		{
1576 		  wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1);
1577 
1578 		  if (wi::cmp (wi::to_wide (max_op1), 0, sgn)
1579 		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
1580 		    max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn);
1581 		}
1582 	    }
1583 	  else if (max_op0)
1584 	    wmax = wi::to_wide (max_op0);
1585 	  else if (max_op1)
1586 	    {
1587 	      if (minus_p)
1588 		{
1589 		  wmax = -wi::to_wide (max_op1);
1590 
1591 		  /* Check for overflow.  */
1592 		  if (sgn == SIGNED
1593 		      && wi::neg_p (wi::to_wide (max_op1))
1594 		      && wi::neg_p (wmax))
1595 		    max_ovf = 1;
1596 		  else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0)
1597 		    max_ovf = -1;
1598 		}
1599 	      else
1600 		wmax = wi::to_wide (max_op1);
1601 	    }
1602 	  else
1603 	    wmax = wi::shwi (0, prec);
1604 
1605 	  /* Check for type overflow.  */
1606 	  if (min_ovf == 0)
1607 	    {
1608 	      if (wi::cmp (wmin, type_min, sgn) == -1)
1609 		min_ovf = -1;
1610 	      else if (wi::cmp (wmin, type_max, sgn) == 1)
1611 		min_ovf = 1;
1612 	    }
1613 	  if (max_ovf == 0)
1614 	    {
1615 	      if (wi::cmp (wmax, type_min, sgn) == -1)
1616 		max_ovf = -1;
1617 	      else if (wi::cmp (wmax, type_max, sgn) == 1)
1618 		max_ovf = 1;
1619 	    }
1620 
1621 	  /* If the resulting range will be symbolic, we need to eliminate any
1622 	     explicit or implicit overflow introduced in the above computation
1623 	     because compare_values could make an incorrect use of it.  That's
1624 	     why we require one of the ranges to be a singleton.  */
1625 	  if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
1626 	      && (min_ovf || max_ovf
1627 		  || (min_op0 != max_op0 && min_op1 != max_op1)))
1628 	    {
1629 	      set_value_range_to_varying (vr);
1630 	      return;
1631 	    }
1632 
1633 	  if (TYPE_OVERFLOW_WRAPS (expr_type))
1634 	    {
1635 	      /* If overflow wraps, truncate the values and adjust the
1636 		 range kind and bounds appropriately.  */
1637 	      wide_int tmin = wide_int::from (wmin, prec, sgn);
1638 	      wide_int tmax = wide_int::from (wmax, prec, sgn);
1639 	      if (min_ovf == max_ovf)
1640 		{
1641 		  /* No overflow or both overflow or underflow.  The
1642 		     range kind stays VR_RANGE.  */
1643 		  min = wide_int_to_tree (expr_type, tmin);
1644 		  max = wide_int_to_tree (expr_type, tmax);
1645 		}
1646 	      else if ((min_ovf == -1 && max_ovf == 0)
1647 		       || (max_ovf == 1 && min_ovf == 0))
1648 		{
1649 		  /* Min underflow or max overflow.  The range kind
1650 		     changes to VR_ANTI_RANGE.  */
1651 		  bool covers = false;
1652 		  wide_int tem = tmin;
1653 		  type = VR_ANTI_RANGE;
1654 		  tmin = tmax + 1;
1655 		  if (wi::cmp (tmin, tmax, sgn) < 0)
1656 		    covers = true;
1657 		  tmax = tem - 1;
1658 		  if (wi::cmp (tmax, tem, sgn) > 0)
1659 		    covers = true;
1660 		  /* If the anti-range would cover nothing, drop to varying.
1661 		     Likewise if the anti-range bounds are outside of the
1662 		     type's values.  */
1663 		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1664 		    {
1665 		      set_value_range_to_varying (vr);
1666 		      return;
1667 		    }
1668 		  min = wide_int_to_tree (expr_type, tmin);
1669 		  max = wide_int_to_tree (expr_type, tmax);
1670 		}
1671 	      else
1672 		{
1673 		  /* Other underflow and/or overflow, drop to VR_VARYING.  */
1674 		  set_value_range_to_varying (vr);
1675 		  return;
1676 		}
1677 	    }
1678 	  else
1679 	    {
1680 	      /* If overflow does not wrap, saturate to the type's min/max
1681 	         value.  */
1682 	      if (min_ovf == -1)
1683 		min = wide_int_to_tree (expr_type, type_min);
1684 	      else if (min_ovf == 1)
1685 		min = wide_int_to_tree (expr_type, type_max);
1686 	      else
1687 		min = wide_int_to_tree (expr_type, wmin);
1688 
1689 	      if (max_ovf == -1)
1690 		max = wide_int_to_tree (expr_type, type_min);
1691 	      else if (max_ovf == 1)
1692 		max = wide_int_to_tree (expr_type, type_max);
1693 	      else
1694 		max = wide_int_to_tree (expr_type, wmax);
1695 	    }
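	  /* For example, when overflow does not wrap, signed char
	     [120, 127] + [0, 10] computes wmax = 137, which exceeds the
	     type maximum; the upper bound saturates and the result is
	     [120, 127].  */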
1696 
1697 	  /* If the result lower bound is constant, we're done;
1698 	     otherwise, build the symbolic lower bound.  */
1699 	  if (sym_min_op0 == sym_min_op1)
1700 	    ;
1701 	  else if (sym_min_op0)
1702 	    min = build_symbolic_expr (expr_type, sym_min_op0,
1703 				       neg_min_op0, min);
1704 	  else if (sym_min_op1)
1705 	    {
1706 	      /* We may not negate if that might introduce
1707 		 undefined overflow.  */
1708 	      if (! minus_p
1709 		  || neg_min_op1
1710 		  || TYPE_OVERFLOW_WRAPS (expr_type))
1711 		min = build_symbolic_expr (expr_type, sym_min_op1,
1712 					   neg_min_op1 ^ minus_p, min);
1713 	      else
1714 		min = NULL_TREE;
1715 	    }
1716 
1717 	  /* Likewise for the upper bound.  */
1718 	  if (sym_max_op0 == sym_max_op1)
1719 	    ;
1720 	  else if (sym_max_op0)
1721 	    max = build_symbolic_expr (expr_type, sym_max_op0,
1722 				       neg_max_op0, max);
1723 	  else if (sym_max_op1)
1724 	    {
1725 	      /* We may not negate if that might introduce
1726 		 undefined overflow.  */
1727 	      if (! minus_p
1728 		  || neg_max_op1
1729 		  || TYPE_OVERFLOW_WRAPS (expr_type))
1730 		max = build_symbolic_expr (expr_type, sym_max_op1,
1731 					   neg_max_op1 ^ minus_p, max);
1732 	      else
1733 		max = NULL_TREE;
1734 	    }
1735 	}
1736       else
1737 	{
1738 	  /* For other cases, for example if we have a PLUS_EXPR with two
1739 	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
1740 	     to compute a precise range for such a case.
1741 	     ???  Even general mixed range kind operations can be expressed
1742 	     by for example transforming ~[3, 5] + [1, 2] to range-only
1743 	     operations and a union primitive:
1744 	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
1745 	           [-INF+1, 4]     U    [6, +INF(OVF)]
1746 	     though usually the union is not exactly representable with
1747 	     a single range or anti-range as the above is
1748 		 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1749 	     but one could use a scheme similar to equivalences for this. */
1750 	  set_value_range_to_varying (vr);
1751 	  return;
1752 	}
1753     }
1754   else if (code == MIN_EXPR
1755 	   || code == MAX_EXPR)
1756     {
1757       if (vr0.type == VR_RANGE
1758 	  && !symbolic_range_p (&vr0))
1759 	{
1760 	  type = VR_RANGE;
1761 	  if (vr1.type == VR_RANGE
1762 	      && !symbolic_range_p (&vr1))
1763 	    {
1764 	      /* For operations that make the resulting range directly
1765 		 proportional to the original ranges, apply the operation to
1766 		 the same end of each range.  */
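	      /* For example, MIN_EXPR of [1, 5] and [3, 10] is
		 [MIN (1, 3), MIN (5, 10)] = [1, 5], and MAX_EXPR of the
		 same ranges is [3, 10].  */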
1767 	      min = int_const_binop (code, vr0.min, vr1.min);
1768 	      max = int_const_binop (code, vr0.max, vr1.max);
1769 	    }
1770 	  else if (code == MIN_EXPR)
1771 	    {
1772 	      min = vrp_val_min (expr_type);
1773 	      max = vr0.max;
1774 	    }
1775 	  else if (code == MAX_EXPR)
1776 	    {
1777 	      min = vr0.min;
1778 	      max = vrp_val_max (expr_type);
1779 	    }
1780 	}
1781       else if (vr1.type == VR_RANGE
1782 	       && !symbolic_range_p (&vr1))
1783 	{
1784 	  type = VR_RANGE;
1785 	  if (code == MIN_EXPR)
1786 	    {
1787 	      min = vrp_val_min (expr_type);
1788 	      max = vr1.max;
1789 	    }
1790 	  else if (code == MAX_EXPR)
1791 	    {
1792 	      min = vr1.min;
1793 	      max = vrp_val_max (expr_type);
1794 	    }
1795 	}
1796       else
1797 	{
1798 	  set_value_range_to_varying (vr);
1799 	  return;
1800 	}
1801     }
1802   else if (code == MULT_EXPR)
1803     {
1804       /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
1805 	 drop to varying.  This test requires 2*prec bits if both
1806 	 operands are signed and 2*prec + 2 bits if either is not.  */
1807 
1808       signop sign = TYPE_SIGN (expr_type);
1809       unsigned int prec = TYPE_PRECISION (expr_type);
1810 
1811       if (!range_int_cst_p (&vr0)
1812 	  || !range_int_cst_p (&vr1))
1813 	{
1814 	  set_value_range_to_varying (vr);
1815 	  return;
1816 	}
1817 
1818       if (TYPE_OVERFLOW_WRAPS (expr_type))
1819 	{
1820 	  typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
1821 	  typedef generic_wide_int
1822              <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
1823 	  vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
1824 	  vrp_int size = sizem1 + 1;
1825 
1826 	  /* Extend the values to PREC2 using the sign of the result.
1827 	     From here on out, everything is just signed math no matter
1828 	     what the input types were.  */
1829           vrp_int min0 = vrp_int_cst (vr0.min);
1830           vrp_int max0 = vrp_int_cst (vr0.max);
1831           vrp_int min1 = vrp_int_cst (vr1.min);
1832           vrp_int max1 = vrp_int_cst (vr1.max);
1833 	  /* Canonicalize the intervals.  */
1834 	  if (sign == UNSIGNED)
1835 	    {
1836 	      if (wi::ltu_p (size, min0 + max0))
1837 		{
1838 		  min0 -= size;
1839 		  max0 -= size;
1840 		}
1841 
1842 	      if (wi::ltu_p (size, min1 + max1))
1843 		{
1844 		  min1 -= size;
1845 		  max1 -= size;
1846 		}
1847 	    }
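	  /* For example, with prec == 8 the unsigned range [253, 255]
	     (i.e. [-3, -1] in 8 bits) has min0 + max0 == 508 > 256 in the
	     extended precision, so it is canonicalized to the signed
	     interval [-3, -1] before the products are formed.  */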
1848 
1849 	  vrp_int prod0 = min0 * min1;
1850 	  vrp_int prod1 = min0 * max1;
1851 	  vrp_int prod2 = max0 * min1;
1852 	  vrp_int prod3 = max0 * max1;
1853 
1854 	  /* Sort the 4 products so that min is in prod0 and max is in
1855 	     prod3.  */
1856 	  /* min0min1 > max0max1 */
1857 	  if (prod0 > prod3)
1858 	    std::swap (prod0, prod3);
1859 
1860 	  /* min0max1 > max0min1 */
1861 	  if (prod1 > prod2)
1862 	    std::swap (prod1, prod2);
1863 
1864 	  if (prod0 > prod1)
1865 	    std::swap (prod0, prod1);
1866 
1867 	  if (prod2 > prod3)
1868 	    std::swap (prod2, prod3);
1869 
1870 	  /* diff = max - min.  */
1871 	  prod2 = prod3 - prod0;
1872 	  if (wi::geu_p (prod2, sizem1))
1873 	    {
1874 	      /* The range covers all values.  */
1875 	      set_value_range_to_varying (vr);
1876 	      return;
1877 	    }
1878 
1879 	  /* The following should handle the wrapping and selecting
1880 	     VR_ANTI_RANGE for us.  */
1881 	  min = wide_int_to_tree (expr_type, prod0);
1882 	  max = wide_int_to_tree (expr_type, prod3);
1883 	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
1884 	  return;
1885 	}
1886 
1887       /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
1888 	 drop to VR_VARYING.  It would take more effort to compute a
1889 	 precise range for such a case.  For example, if we have
1890 	 op0 == 65536 and op1 == 65536 with their ranges both being
1891 	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
1892 	 we cannot claim that the product is in ~[0,0].  Note that we
1893 	 are guaranteed to have vr0.type == vr1.type at this
1894 	 point.  */
1895       if (vr0.type == VR_ANTI_RANGE
1896 	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
1897 	{
1898 	  set_value_range_to_varying (vr);
1899 	  return;
1900 	}
1901 
1902       extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
1903       return;
1904     }
1905   else if (code == RSHIFT_EXPR
1906 	   || code == LSHIFT_EXPR)
1907     {
1908       /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
1909 	 then drop to VR_VARYING.  Outside of this range we get undefined
1910 	 behavior from the shift operation.  We cannot even trust
1911 	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
1912 	 shifts, and the operation at the tree level may be widened.  */
1913       if (range_int_cst_p (&vr1)
1914 	  && compare_tree_int (vr1.min, 0) >= 0
1915 	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
1916 	{
1917 	  if (code == RSHIFT_EXPR)
1918 	    {
1919 	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
1920 		 useful ranges just from the shift count.  E.g.
1921 		 x >> 63 for signed 64-bit x is always [-1, 0].  */
1922 	      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
1923 		{
1924 		  vr0.type = type = VR_RANGE;
1925 		  vr0.min = vrp_val_min (expr_type);
1926 		  vr0.max = vrp_val_max (expr_type);
1927 		}
1928 	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
1929 	      return;
1930 	    }
1931 	  /* We can map lshifts by constants to MULT_EXPR handling.  */
1932 	  else if (code == LSHIFT_EXPR
1933 		   && range_int_cst_singleton_p (&vr1))
1934 	    {
1935 	      bool saved_flag_wrapv;
1936 	      value_range vr1p = VR_INITIALIZER;
1937 	      vr1p.type = VR_RANGE;
1938 	      vr1p.min = (wide_int_to_tree
1939 			  (expr_type,
1940 			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
1941 						TYPE_PRECISION (expr_type))));
1942 	      vr1p.max = vr1p.min;
1943 	      /* We have to use a wrapping multiply though as signed overflow
1944 		 on lshifts is implementation defined in C89.  */
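	      /* For example, a singleton shift count [3, 3] becomes the
		 singleton multiplier [8, 8], so vr0 << 3 is computed as
		 vr0 * 8 with wrapping semantics forced on.  */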
1945 	      saved_flag_wrapv = flag_wrapv;
1946 	      flag_wrapv = 1;
1947 	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
1948 						&vr0, &vr1p);
1949 	      flag_wrapv = saved_flag_wrapv;
1950 	      return;
1951 	    }
1952 	  else if (code == LSHIFT_EXPR
1953 		   && range_int_cst_p (&vr0))
1954 	    {
1955 	      int prec = TYPE_PRECISION (expr_type);
1956 	      int overflow_pos = prec;
1957 	      int bound_shift;
1958 	      wide_int low_bound, high_bound;
1959 	      bool uns = TYPE_UNSIGNED (expr_type);
1960 	      bool in_bounds = false;
1961 
1962 	      if (!uns)
1963 		overflow_pos -= 1;
1964 
1965 	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
1966 	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
1967 		 overflow.  However, for that to happen, vr1.max needs to be
1968 		 zero, which means vr1 is a singleton range of zero, which
1969 		 means it should be handled by the previous LSHIFT_EXPR
1970 		 if-clause.  */
1971 	      wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
1972 	      wide_int complement = ~(bound - 1);
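	      /* For example, for a 32-bit unsigned vr0 shifted by [1, 2],
		 bound_shift is 30, bound is 1 << 30 and complement is
		 0xc0000000: values below 2^30 shift out only zeroes and
		 values at or above 0xc0000000 shift out only ones.  */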
1973 
1974 	      if (uns)
1975 		{
1976 		  low_bound = bound;
1977 		  high_bound = complement;
1978 		  if (wi::ltu_p (wi::to_wide (vr0.max), low_bound))
1979 		    {
1980 		      /* [5, 6] << [1, 2] == [10, 24].  */
1981 		      /* We're shifting out only zeroes, the value increases
1982 			 monotonically.  */
1983 		      in_bounds = true;
1984 		    }
1985 		  else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min)))
1986 		    {
1987 		      /* [0xffffff00, 0xffffffff] << [1, 2]
1988 		         == [0xfffffc00, 0xfffffffe].  */
1989 		      /* We're shifting out only ones, the value decreases
1990 			 monotonically.  */
1991 		      in_bounds = true;
1992 		    }
1993 		}
1994 	      else
1995 		{
1996 		  /* [-1, 1] << [1, 2] == [-4, 4].  */
1997 		  low_bound = complement;
1998 		  high_bound = bound;
1999 		  if (wi::lts_p (wi::to_wide (vr0.max), high_bound)
2000 		      && wi::lts_p (low_bound, wi::to_wide (vr0.min)))
2001 		    {
2002 		      /* For non-negative numbers, we're shifting out only
2003 			 zeroes, the value increases monotonically.
2004 			 For negative numbers, we're shifting out only ones, the
2005 			 value decreases monotonically.  */
2006 		      in_bounds = true;
2007 		    }
2008 		}
2009 
2010 	      if (in_bounds)
2011 		{
2012 		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2013 		  return;
2014 		}
2015 	    }
2016 	}
2017       set_value_range_to_varying (vr);
2018       return;
2019     }
2020   else if (code == TRUNC_DIV_EXPR
2021 	   || code == FLOOR_DIV_EXPR
2022 	   || code == CEIL_DIV_EXPR
2023 	   || code == EXACT_DIV_EXPR
2024 	   || code == ROUND_DIV_EXPR)
2025     {
2026       if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2027 	{
2028 	  /* For division, if op1 has VR_RANGE but op0 does not, something
2029 	     can be deduced just from that range.  Say [min, max] / [4, max]
2030 	     gives the range [min / 4, max / 4].  */
2031 	  if (vr1.type == VR_RANGE
2032 	      && !symbolic_range_p (&vr1)
2033 	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
2034 	    {
2035 	      vr0.type = type = VR_RANGE;
2036 	      vr0.min = vrp_val_min (expr_type);
2037 	      vr0.max = vrp_val_max (expr_type);
2038 	    }
2039 	  else
2040 	    {
2041 	      set_value_range_to_varying (vr);
2042 	      return;
2043 	    }
2044 	}
2045 
2046       /* For divisions, if flag_non_call_exceptions is true, we must
2047 	 not eliminate a division by zero.  */
2048       if (cfun->can_throw_non_call_exceptions
2049 	  && (vr1.type != VR_RANGE
2050 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2051 	{
2052 	  set_value_range_to_varying (vr);
2053 	  return;
2054 	}
2055 
2056       /* For divisions, if op0 is VR_RANGE, we can deduce a range
2057 	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2058 	 include 0.  */
2059       if (vr0.type == VR_RANGE
2060 	  && (vr1.type != VR_RANGE
2061 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2062 	{
2063 	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2064 	  int cmp;
2065 
2066 	  min = NULL_TREE;
2067 	  max = NULL_TREE;
2068 	  if (TYPE_UNSIGNED (expr_type)
2069 	      || value_range_nonnegative_p (&vr1))
2070 	    {
2071 	      /* For unsigned division or when divisor is known
2072 		 to be non-negative, the range has to cover
2073 		 all numbers from 0 to max for positive max
2074 		 and all numbers from min to 0 for negative min.  */
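	      /* For example, [-8, 20] divided by an unknown non-negative
		 divisor stays within [-8, 20]: dividing by 1 reproduces
		 the bounds and larger divisors only move the result
		 towards zero.  */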
2075 	      cmp = compare_values (vr0.max, zero);
2076 	      if (cmp == -1)
2077 		{
2078 		  /* When vr0.max < 0, vr1.min != 0 and value
2079 		     ranges for dividend and divisor are available.  */
2080 		  if (vr1.type == VR_RANGE
2081 		      && !symbolic_range_p (&vr0)
2082 		      && !symbolic_range_p (&vr1)
2083 		      && compare_values (vr1.min, zero) != 0)
2084 		    max = int_const_binop (code, vr0.max, vr1.min);
2085 		  else
2086 		    max = zero;
2087 		}
2088 	      else if (cmp == 0 || cmp == 1)
2089 		max = vr0.max;
2090 	      else
2091 		type = VR_VARYING;
2092 	      cmp = compare_values (vr0.min, zero);
2093 	      if (cmp == 1)
2094 		{
2095 		  /* For unsigned division when value ranges for dividend
2096 		     and divisor are available.  */
2097 		  if (vr1.type == VR_RANGE
2098 		      && !symbolic_range_p (&vr0)
2099 		      && !symbolic_range_p (&vr1)
2100 		      && compare_values (vr1.max, zero) != 0)
2101 		    min = int_const_binop (code, vr0.min, vr1.max);
2102 		  else
2103 		    min = zero;
2104 		}
2105 	      else if (cmp == 0 || cmp == -1)
2106 		min = vr0.min;
2107 	      else
2108 		type = VR_VARYING;
2109 	    }
2110 	  else
2111 	    {
2112 	      /* Otherwise the range is -max .. max or min .. -min
2113 		 depending on which bound is bigger in absolute value,
2114 		 as the division can change the sign.  */
2115 	      abs_extent_range (vr, vr0.min, vr0.max);
2116 	      return;
2117 	    }
2118 	  if (type == VR_VARYING)
2119 	    {
2120 	      set_value_range_to_varying (vr);
2121 	      return;
2122 	    }
2123 	}
2124       else if (range_int_cst_p (&vr0) && range_int_cst_p (&vr1))
2125 	{
2126 	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2127 	  return;
2128 	}
2129     }
2130   else if (code == TRUNC_MOD_EXPR)
2131     {
2132       if (range_is_null (&vr1))
2133 	{
2134 	  set_value_range_to_undefined (vr);
2135 	  return;
2136 	}
2137       /* ABS (A % B) < ABS (B) and either
2138 	 0 <= A % B <= A or A <= A % B <= 0.  */
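      /* For example, A in [-10, 5] and B in [-3, 4] gives
	 wmax = MAX (4 - 1, -1 - -3) = 3 and wmin = -3, so the result
	 range is [-3, 3].  */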
2139       type = VR_RANGE;
2140       signop sgn = TYPE_SIGN (expr_type);
2141       unsigned int prec = TYPE_PRECISION (expr_type);
2142       wide_int wmin, wmax, tmp;
2143       if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
2144 	{
2145 	  wmax = wi::to_wide (vr1.max) - 1;
2146 	  if (sgn == SIGNED)
2147 	    {
2148 	      tmp = -1 - wi::to_wide (vr1.min);
2149 	      wmax = wi::smax (wmax, tmp);
2150 	    }
2151 	}
2152       else
2153 	{
2154 	  wmax = wi::max_value (prec, sgn);
2155 	  /* X % INT_MIN may be INT_MAX.  */
2156 	  if (sgn == UNSIGNED)
2157 	    wmax = wmax - 1;
2158 	}
2159 
2160       if (sgn == UNSIGNED)
2161 	wmin = wi::zero (prec);
2162       else
2163 	{
2164 	  wmin = -wmax;
2165 	  if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
2166 	    {
2167 	      tmp = wi::to_wide (vr0.min);
2168 	      if (wi::gts_p (tmp, 0))
2169 		tmp = wi::zero (prec);
2170 	      wmin = wi::smax (wmin, tmp);
2171 	    }
2172 	}
2173 
2174       if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
2175 	{
2176 	  tmp = wi::to_wide (vr0.max);
2177 	  if (sgn == SIGNED && wi::neg_p (tmp))
2178 	    tmp = wi::zero (prec);
2179 	  wmax = wi::min (wmax, tmp, sgn);
2180 	}
2181 
2182       min = wide_int_to_tree (expr_type, wmin);
2183       max = wide_int_to_tree (expr_type, wmax);
2184     }
2185   else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2186     {
2187       bool int_cst_range0, int_cst_range1;
2188       wide_int may_be_nonzero0, may_be_nonzero1;
2189       wide_int must_be_nonzero0, must_be_nonzero1;
2190 
2191       int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
2192 						  &may_be_nonzero0,
2193 						  &must_be_nonzero0);
2194       int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
2195 						  &may_be_nonzero1,
2196 						  &must_be_nonzero1);
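      /* For example, vr0 = [12, 14] yields must_be_nonzero0 = 0b1100
	 (bits set in every value of the range) and may_be_nonzero0
	 = 0b1111 (the low two bits may vary across the range).  */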
2197 
2198       if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2199 	{
2200 	  value_range *vr0p = NULL, *vr1p = NULL;
2201 	  if (range_int_cst_singleton_p (&vr1))
2202 	    {
2203 	      vr0p = &vr0;
2204 	      vr1p = &vr1;
2205 	    }
2206 	  else if (range_int_cst_singleton_p (&vr0))
2207 	    {
2208 	      vr0p = &vr1;
2209 	      vr1p = &vr0;
2210 	    }
2211 	  /* For op & or | attempt to optimize:
2212 	     [x, y] op z into [x op z, y op z]
2213 	     if z is a constant which (for op | its bitwise not) has n
2214 	     consecutive least significant bits cleared followed by m
2215 	     consecutive 1 bits set immediately above it and either
2216 	     m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2217 	     The least significant n bits of all the values in the range are
2218 	     cleared or set, the m bits above it are preserved and any bits
2219 	     above these are required to be the same for all values in the
2220 	     range.  */
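	  /* For example, [0x10, 0x1f] & 0xf0 has z = 0xf0 with n = 4 low
	     bits cleared and m = 4 bits set above them; all values in the
	     range agree on the bits above m + n, so the result is
	     [0x10 & 0xf0, 0x1f & 0xf0] = [0x10, 0x10].  */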
2221 	  if (vr0p && range_int_cst_p (vr0p))
2222 	    {
2223 	      wide_int w = wi::to_wide (vr1p->min);
2224 	      int m = 0, n = 0;
2225 	      if (code == BIT_IOR_EXPR)
2226 		w = ~w;
2227 	      if (wi::eq_p (w, 0))
2228 		n = TYPE_PRECISION (expr_type);
2229 	      else
2230 		{
2231 		  n = wi::ctz (w);
2232 		  w = ~(w | wi::mask (n, false, w.get_precision ()));
2233 		  if (wi::eq_p (w, 0))
2234 		    m = TYPE_PRECISION (expr_type) - n;
2235 		  else
2236 		    m = wi::ctz (w) - n;
2237 		}
2238 	      wide_int mask = wi::mask (m + n, true, w.get_precision ());
2239 	      if ((mask & wi::to_wide (vr0p->min))
2240 		  == (mask & wi::to_wide (vr0p->max)))
2241 		{
2242 		  min = int_const_binop (code, vr0p->min, vr1p->min);
2243 		  max = int_const_binop (code, vr0p->max, vr1p->min);
2244 		}
2245 	    }
2246 	}
2247 
2248       type = VR_RANGE;
2249       if (min && max)
2250 	/* Optimized above already.  */;
2251       else if (code == BIT_AND_EXPR)
2252 	{
2253 	  min = wide_int_to_tree (expr_type,
2254 				  must_be_nonzero0 & must_be_nonzero1);
2255 	  wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
2256 	  /* If both input ranges contain only negative values we can
2257 	     truncate the result range maximum to the minimum of the
2258 	     input range maxima.  */
2259 	  if (int_cst_range0 && int_cst_range1
2260 	      && tree_int_cst_sgn (vr0.max) < 0
2261 	      && tree_int_cst_sgn (vr1.max) < 0)
2262 	    {
2263 	      wmax = wi::min (wmax, wi::to_wide (vr0.max),
2264 			      TYPE_SIGN (expr_type));
2265 	      wmax = wi::min (wmax, wi::to_wide (vr1.max),
2266 			      TYPE_SIGN (expr_type));
2267 	    }
2268 	  /* If either input range contains only non-negative values
2269 	     we can truncate the result range maximum to the respective
2270 	     maximum of the input range.  */
2271 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2272 	    wmax = wi::min (wmax, wi::to_wide (vr0.max),
2273 			    TYPE_SIGN (expr_type));
2274 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2275 	    wmax = wi::min (wmax, wi::to_wide (vr1.max),
2276 			    TYPE_SIGN (expr_type));
2277 	  max = wide_int_to_tree (expr_type, wmax);
2278 	  cmp = compare_values (min, max);
2279 	  /* PR68217: In the case of signed & sign-bit-CST, the result
2280 	     should be [-INF, 0] instead of [-INF, INF].  */
2281 	  if (cmp == -2 || cmp == 1)
2282 	    {
2283 	      wide_int sign_bit
2284 		= wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
2285 				       TYPE_PRECISION (expr_type));
2286 	      if (!TYPE_UNSIGNED (expr_type)
2287 		  && ((int_cst_range0
2288 		       && value_range_constant_singleton (&vr0)
2289 		       && !wi::cmps (wi::to_wide (vr0.min), sign_bit))
2290 		      || (int_cst_range1
2291 			  && value_range_constant_singleton (&vr1)
2292 			  && !wi::cmps (wi::to_wide (vr1.min), sign_bit))))
2293 		{
2294 		  min = TYPE_MIN_VALUE (expr_type);
2295 		  max = build_int_cst (expr_type, 0);
2296 		}
2297 	    }
2298 	}
2299       else if (code == BIT_IOR_EXPR)
2300 	{
2301 	  max = wide_int_to_tree (expr_type,
2302 				  may_be_nonzero0 | may_be_nonzero1);
2303 	  wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
2304 	  /* If the input ranges contain only positive values we can
2305 	     truncate the minimum of the result range to the maximum
2306 	     of the input range minima.  */
2307 	  if (int_cst_range0 && int_cst_range1
2308 	      && tree_int_cst_sgn (vr0.min) >= 0
2309 	      && tree_int_cst_sgn (vr1.min) >= 0)
2310 	    {
2311 	      wmin = wi::max (wmin, wi::to_wide (vr0.min),
2312 			      TYPE_SIGN (expr_type));
2313 	      wmin = wi::max (wmin, wi::to_wide (vr1.min),
2314 			      TYPE_SIGN (expr_type));
2315 	    }
2316 	  /* If either input range contains only negative values
2317 	     we can truncate the minimum of the result range to the
2318 	     respective input range minimum.  */
2319 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2320 	    wmin = wi::max (wmin, wi::to_wide (vr0.min),
2321 			    TYPE_SIGN (expr_type));
2322 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2323 	    wmin = wi::max (wmin, wi::to_wide (vr1.min),
2324 			    TYPE_SIGN (expr_type));
2325 	  min = wide_int_to_tree (expr_type, wmin);
2326 	}
2327       else if (code == BIT_XOR_EXPR)
2328 	{
2329 	  wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
2330 				       | ~(may_be_nonzero0 | may_be_nonzero1));
2331 	  wide_int result_one_bits
2332 	    = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1)
2333 	       | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0));
2334 	  max = wide_int_to_tree (expr_type, ~result_zero_bits);
2335 	  min = wide_int_to_tree (expr_type, result_one_bits);
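	  /* For example, [4, 4] ^ [1, 1] has result_zero_bits = ~0b0101
	     and result_one_bits = 0b0101, giving the exact singleton
	     [5, 5].  */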
2336 	  /* If the range has all non-negative or all negative values, the
2337 	     result is better than VARYING.  */
2338 	  if (tree_int_cst_sgn (min) < 0
2339 	      || tree_int_cst_sgn (max) >= 0)
2340 	    ;
2341 	  else
2342 	    max = min = NULL_TREE;
2343 	}
2344     }
2345   else
2346     gcc_unreachable ();
2347 
2348   /* If either MIN or MAX overflowed, then set the resulting range to
2349      VARYING.  */
2350   if (min == NULL_TREE
2351       || TREE_OVERFLOW_P (min)
2352       || max == NULL_TREE
2353       || TREE_OVERFLOW_P (max))
2354     {
2355       set_value_range_to_varying (vr);
2356       return;
2357     }
2358 
2359   /* We punt for [-INF, +INF].
2360      We learn nothing when we have INF on both sides.
2361      Note that we do accept [-INF, -INF] and [+INF, +INF].  */
2362   if (vrp_val_is_min (min) && vrp_val_is_max (max))
2363     {
2364       set_value_range_to_varying (vr);
2365       return;
2366     }
2367 
2368   cmp = compare_values (min, max);
2369   if (cmp == -2 || cmp == 1)
2370     {
2371       /* If the new range has its limits swapped around (MIN > MAX),
2372 	 then the operation caused one of them to wrap around, mark
2373 	 the new range VARYING.  */
2374       set_value_range_to_varying (vr);
2375     }
2376   else
2377     set_value_range (vr, type, min, max, NULL);
2378 }
2379 
2380 /* Extract range information from a unary operation CODE based on
2381    the range of its operand *VR0 (of type OP0_TYPE) with resulting type TYPE.
2382    The resulting range is stored in *VR.  */
2383 
2384 void
2385 extract_range_from_unary_expr (value_range *vr,
2386 			       enum tree_code code, tree type,
2387 			       value_range *vr0_, tree op0_type)
2388 {
2389   value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2390 
2391   /* VRP only operates on integral and pointer types.  */
2392   if (!(INTEGRAL_TYPE_P (op0_type)
2393 	|| POINTER_TYPE_P (op0_type))
2394       || !(INTEGRAL_TYPE_P (type)
2395 	   || POINTER_TYPE_P (type)))
2396     {
2397       set_value_range_to_varying (vr);
2398       return;
2399     }
2400 
2401   /* If VR0 is UNDEFINED, so is the result.  */
2402   if (vr0.type == VR_UNDEFINED)
2403     {
2404       set_value_range_to_undefined (vr);
2405       return;
2406     }
2407 
2408   /* Handle operations that we express in terms of others.  */
2409   if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
2410     {
2411       /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
2412       copy_value_range (vr, &vr0);
2413       return;
2414     }
2415   else if (code == NEGATE_EXPR)
2416     {
2417       /* -X is simply 0 - X, so re-use existing code that also handles
2418          anti-ranges fine.  */
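      /* For example, -[3, 7] is computed as [0, 0] - [3, 7] = [-7, -3].  */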
2419       value_range zero = VR_INITIALIZER;
2420       set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
2421       extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
2422       return;
2423     }
2424   else if (code == BIT_NOT_EXPR)
2425     {
2426       /* ~X is simply -1 - X, so re-use existing code that also handles
2427          anti-ranges fine.  */
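      /* For example, ~[3, 7] is computed as [-1, -1] - [3, 7] = [-8, -4].  */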
2428       value_range minusone = VR_INITIALIZER;
2429       set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
2430       extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
2431 					type, &minusone, &vr0);
2432       return;
2433     }
2434 
2435   /* Now canonicalize anti-ranges to ranges when they are not symbolic
2436      and express op ~[]  as (op []') U (op []'').  */
2437   if (vr0.type == VR_ANTI_RANGE
2438       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2439     {
2440       extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2441       if (vrtem1.type != VR_UNDEFINED)
2442 	{
2443 	  value_range vrres = VR_INITIALIZER;
2444 	  extract_range_from_unary_expr (&vrres, code, type,
2445 					 &vrtem1, op0_type);
2446 	  vrp_meet (vr, &vrres);
2447 	}
2448       return;
2449     }
2450 
2451   if (CONVERT_EXPR_CODE_P (code))
2452     {
2453       tree inner_type = op0_type;
2454       tree outer_type = type;
2455 
2456       /* If the expression evaluates to a pointer, we are only interested in
2457 	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
2458       if (POINTER_TYPE_P (type))
2459 	{
2460 	  if (range_is_nonnull (&vr0))
2461 	    set_value_range_to_nonnull (vr, type);
2462 	  else if (range_is_null (&vr0))
2463 	    set_value_range_to_null (vr, type);
2464 	  else
2465 	    set_value_range_to_varying (vr);
2466 	  return;
2467 	}
2468 
2469       /* If VR0 is varying and we increase the type precision, assume
2470 	 a full range for the following transformation.  */
2471       if (vr0.type == VR_VARYING
2472 	  && INTEGRAL_TYPE_P (inner_type)
2473 	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2474 	{
2475 	  vr0.type = VR_RANGE;
2476 	  vr0.min = TYPE_MIN_VALUE (inner_type);
2477 	  vr0.max = TYPE_MAX_VALUE (inner_type);
2478 	}
2479 
2480       /* If VR0 is a constant range or anti-range and the conversion is
2481 	 not truncating we can convert the min and max values and
2482 	 canonicalize the resulting range.  Otherwise we can do the
2483 	 conversion if the size of the range is less than what the
2484 	 precision of the target type can represent and the range is
2485 	 not an anti-range.  */
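      /* For example, converting the signed char range [-2, 1] to unsigned
	 char yields new_min = 254 and new_max = 1; the canonicalization
	 below represents this swapped range as the anti-range ~[2, 253],
	 i.e. { 254, 255, 0, 1 }.  */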
2486       if ((vr0.type == VR_RANGE
2487 	   || vr0.type == VR_ANTI_RANGE)
2488 	  && TREE_CODE (vr0.min) == INTEGER_CST
2489 	  && TREE_CODE (vr0.max) == INTEGER_CST
2490 	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2491 	      || (vr0.type == VR_RANGE
2492 		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
2493 		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2494 		         size_int (TYPE_PRECISION (outer_type)))))))
2495 	{
2496 	  tree new_min, new_max;
2497 	  new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
2498 				    0, false);
2499 	  new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
2500 				    0, false);
2501 	  set_and_canonicalize_value_range (vr, vr0.type,
2502 					    new_min, new_max, NULL);
2503 	  return;
2504 	}
2505 
2506       set_value_range_to_varying (vr);
2507       return;
2508     }
2509   else if (code == ABS_EXPR)
2510     {
2511       tree min, max;
2512       int cmp;
2513 
2514       /* Pass through vr0 in the easy cases.  */
2515       if (TYPE_UNSIGNED (type)
2516 	  || value_range_nonnegative_p (&vr0))
2517 	{
2518 	  copy_value_range (vr, &vr0);
2519 	  return;
2520 	}
2521 
2522       /* For the remaining varying or symbolic ranges we can't do anything
2523 	 useful.  */
2524       if (vr0.type == VR_VARYING
2525 	  || symbolic_range_p (&vr0))
2526 	{
2527 	  set_value_range_to_varying (vr);
2528 	  return;
2529 	}
2530 
2531       /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
2532          useful range.  */
2533       if (!TYPE_OVERFLOW_UNDEFINED (type)
2534 	  && ((vr0.type == VR_RANGE
2535 	       && vrp_val_is_min (vr0.min))
2536 	      || (vr0.type == VR_ANTI_RANGE
2537 		  && !vrp_val_is_min (vr0.min))))
2538 	{
2539 	  set_value_range_to_varying (vr);
2540 	  return;
2541 	}
2542 
2543       /* ABS_EXPR may flip the range around, if the original range
2544 	 included negative values.  */
2545       if (!vrp_val_is_min (vr0.min))
2546 	min = fold_unary_to_constant (code, type, vr0.min);
2547       else
2548 	min = TYPE_MAX_VALUE (type);
2549 
2550       if (!vrp_val_is_min (vr0.max))
2551 	max = fold_unary_to_constant (code, type, vr0.max);
2552       else
2553 	max = TYPE_MAX_VALUE (type);
2554 
2555       cmp = compare_values (min, max);
2556 
2557       /* If a VR_ANTI_RANGE contains zero, then we have
2558 	 ~[-INF, min(MIN, MAX)].  */
2559       if (vr0.type == VR_ANTI_RANGE)
2560 	{
2561 	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2562 	    {
2563 	      /* Take the lower of the two values.  */
2564 	      if (cmp != 1)
2565 		max = min;
2566 
2567 	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2568 	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2569 		 flag_wrapv is set and the original anti-range doesn't include
2570 	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
2571 	      if (TYPE_OVERFLOW_WRAPS (type))
2572 		{
2573 		  tree type_min_value = TYPE_MIN_VALUE (type);
2574 
2575 		  min = (vr0.min != type_min_value
2576 			 ? int_const_binop (PLUS_EXPR, type_min_value,
2577 					    build_int_cst (TREE_TYPE (type_min_value), 1))
2578 			 : type_min_value);
2579 		}
2580 	      else
2581 		min = TYPE_MIN_VALUE (type);
2582 	    }
2583 	  else
2584 	    {
2585 	      /* All else has failed, so create the range [0, INF], even for
2586 	         flag_wrapv since TYPE_MIN_VALUE is in the original
2587 	         anti-range.  */
2588 	      vr0.type = VR_RANGE;
2589 	      min = build_int_cst (type, 0);
2590 	      max = TYPE_MAX_VALUE (type);
2591 	    }
2592 	}
2593 
2594       /* If the range contains zero then we know that the minimum value in the
2595          range will be zero.  */
2596       else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2597 	{
2598 	  if (cmp == 1)
2599 	    max = min;
2600 	  min = build_int_cst (type, 0);
2601 	}
2602       else
2603 	{
2604           /* If the range was reversed, swap MIN and MAX.  */
2605 	  if (cmp == 1)
2606 	    std::swap (min, max);
2607 	}
2608 
2609       cmp = compare_values (min, max);
2610       if (cmp == -2 || cmp == 1)
2611 	{
2612 	  /* If the new range has its limits swapped around (MIN > MAX),
2613 	     then the operation caused one of them to wrap around, mark
2614 	     the new range VARYING.  */
2615 	  set_value_range_to_varying (vr);
2616 	}
2617       else
2618 	set_value_range (vr, vr0.type, min, max, NULL);
2619       return;
2620     }
2621 
2622   /* For unhandled operations fall back to varying.  */
2623   set_value_range_to_varying (vr);
2624   return;
2625 }
2626 
2627 /* Debugging dumps.  */
2628 
2629 void dump_value_range (FILE *, const value_range *);
2630 void debug_value_range (value_range *);
2631 void dump_all_value_ranges (FILE *);
2632 void dump_vr_equiv (FILE *, bitmap);
2633 void debug_vr_equiv (bitmap);
2634 
2635 
2636 /* Dump value range VR to FILE.  */
2637 
2638 void
2639 dump_value_range (FILE *file, const value_range *vr)
2640 {
2641   if (vr == NULL)
2642     fprintf (file, "[]");
2643   else if (vr->type == VR_UNDEFINED)
2644     fprintf (file, "UNDEFINED");
2645   else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
2646     {
2647       tree type = TREE_TYPE (vr->min);
2648 
2649       fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
2650 
2651       if (INTEGRAL_TYPE_P (type)
2652 	  && !TYPE_UNSIGNED (type)
2653 	  && vrp_val_is_min (vr->min))
2654 	fprintf (file, "-INF");
2655       else
2656 	print_generic_expr (file, vr->min);
2657 
2658       fprintf (file, ", ");
2659 
2660       if (INTEGRAL_TYPE_P (type)
2661 	  && vrp_val_is_max (vr->max))
2662 	fprintf (file, "+INF");
2663       else
2664 	print_generic_expr (file, vr->max);
2665 
2666       fprintf (file, "]");
2667 
2668       if (vr->equiv)
2669 	{
2670 	  bitmap_iterator bi;
2671 	  unsigned i, c = 0;
2672 
2673 	  fprintf (file, "  EQUIVALENCES: { ");
2674 
2675 	  EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
2676 	    {
2677 	      print_generic_expr (file, ssa_name (i));
2678 	      fprintf (file, " ");
2679 	      c++;
2680 	    }
2681 
2682 	  fprintf (file, "} (%u elements)", c);
2683 	}
2684     }
2685   else if (vr->type == VR_VARYING)
2686     fprintf (file, "VARYING");
2687   else
2688     fprintf (file, "INVALID RANGE");
2689 }
2690 
2691 
2692 /* Dump value range VR to stderr.  */
2693 
2694 DEBUG_FUNCTION void
2695 debug_value_range (value_range *vr)
2696 {
2697   dump_value_range (stderr, vr);
2698   fprintf (stderr, "\n");
2699 }
2700 
2701 
2702 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2703    create a new SSA name N and return the assertion assignment
2704    'N = ASSERT_EXPR <V, V OP W>'.  */
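/* For example, for COND 'x_1 > 5' this returns the new statement
   'x_2 = ASSERT_EXPR <x_1, x_1 > 5>'; uses reached by x_2 may then
   assume the range [6, +INF].  */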
2705 
2706 static gimple *
2707 build_assert_expr_for (tree cond, tree v)
2708 {
2709   tree a;
2710   gassign *assertion;
2711 
2712   gcc_assert (TREE_CODE (v) == SSA_NAME
2713 	      && COMPARISON_CLASS_P (cond));
2714 
2715   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2716   assertion = gimple_build_assign (NULL_TREE, a);
2717 
2718   /* The new ASSERT_EXPR creates a new SSA name that replaces the
2719      operand of the ASSERT_EXPR.  Create it so the new name and the old one
2720      are registered in the replacement table so that we can fix the SSA web
2721      after adding all the ASSERT_EXPRs.  */
2722   tree new_def = create_new_def_for (v, assertion, NULL);
2723   /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2724      given we have to be able to fully propagate those out to re-create
2725      valid SSA when removing the asserts.  */
2726   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2727     SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2728 
2729   return assertion;
2730 }
2731 
2732 
2733 /* Return true if STMT is a GIMPLE_COND whose predicate involves
2734    floating point values.  */
2735 
2736 static inline bool
2737 fp_predicate (gimple *stmt)
2738 {
2739   GIMPLE_CHECK (stmt, GIMPLE_COND);
2740 
2741   return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2742 }
2743 
2744 /* If the range of values taken by OP can be inferred after STMT executes,
2745    return the comparison code (COMP_CODE_P) and value (VAL_P) that
2746    describe the inferred range.  Return true if a range could be
2747    inferred.  */
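/* For example, if STMT dereferences pointer OP, then once STMT has
   executed OP must have been non-NULL, so the inferred range is
   OP != 0.  */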
2748 
2749 bool
2750 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2751 {
2752   *val_p = NULL_TREE;
2753   *comp_code_p = ERROR_MARK;
2754 
2755   /* Do not attempt to infer anything in names that flow through
2756      abnormal edges.  */
2757   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2758     return false;
2759 
2760   /* If STMT is the last statement of a basic block with no normal
2761      successors, there is no point inferring anything about any of its
2762      operands.  We would not be able to find a proper insertion point
2763      for the assertion, anyway.  */
2764   if (stmt_ends_bb_p (stmt))
2765     {
2766       edge_iterator ei;
2767       edge e;
2768 
2769       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2770 	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2771 	  break;
2772       if (e == NULL)
2773 	return false;
2774     }
2775 
2776   if (infer_nonnull_range (stmt, op))
2777     {
2778       *val_p = build_int_cst (TREE_TYPE (op), 0);
2779       *comp_code_p = NE_EXPR;
2780       return true;
2781     }
2782 
2783   return false;
2784 }
2785 
2786 
2787 void dump_asserts_for (FILE *, tree);
2788 void debug_asserts_for (tree);
2789 void dump_all_asserts (FILE *);
2790 void debug_all_asserts (void);
2791 
2792 /* Dump all the registered assertions for NAME to FILE.  */
2793 
2794 void
2795 dump_asserts_for (FILE *file, tree name)
2796 {
2797   assert_locus *loc;
2798 
2799   fprintf (file, "Assertions to be inserted for ");
2800   print_generic_expr (file, name);
2801   fprintf (file, "\n");
2802 
2803   loc = asserts_for[SSA_NAME_VERSION (name)];
2804   while (loc)
2805     {
2806       fprintf (file, "\t");
2807       print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2808       fprintf (file, "\n\tBB #%d", loc->bb->index);
2809       if (loc->e)
2810 	{
2811 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2812 	           loc->e->dest->index);
2813 	  dump_edge_info (file, loc->e, dump_flags, 0);
2814 	}
2815       fprintf (file, "\n\tPREDICATE: ");
2816       print_generic_expr (file, loc->expr);
2817       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2818       print_generic_expr (file, loc->val);
2819       fprintf (file, "\n\n");
2820       loc = loc->next;
2821     }
2822 
2823   fprintf (file, "\n");
2824 }
2825 
2826 
2827 /* Dump all the registered assertions for NAME to stderr.  */
2828 
2829 DEBUG_FUNCTION void
2830 debug_asserts_for (tree name)
2831 {
2832   dump_asserts_for (stderr, name);
2833 }
2834 
2835 
2836 /* Dump all the registered assertions for all the names to FILE.  */
2837 
2838 void
2839 dump_all_asserts (FILE *file)
2840 {
2841   unsigned i;
2842   bitmap_iterator bi;
2843 
2844   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2845   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2846     dump_asserts_for (file, ssa_name (i));
2847   fprintf (file, "\n");
2848 }
2849 
2850 
2851 /* Dump all the registered assertions for all the names to stderr.  */
2852 
2853 DEBUG_FUNCTION void
2854 debug_all_asserts (void)
2855 {
2856   dump_all_asserts (stderr);
2857 }
2858 
2859 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
2860 
2861 static void
2862 add_assert_info (vec<assert_info> &asserts,
2863 		 tree name, tree expr, enum tree_code comp_code, tree val)
2864 {
2865   assert_info info;
2866   info.comp_code = comp_code;
2867   info.name = name;
2868   if (TREE_OVERFLOW_P (val))
2869     val = drop_tree_overflow (val);
2870   info.val = val;
2871   info.expr = expr;
2872   asserts.safe_push (info);
2873 }
2874 
2875 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2876    'EXPR COMP_CODE VAL' at a location that dominates block BB or
2877    E->DEST, then register this location as a possible insertion point
2878    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2879 
2880    BB, E and SI provide the exact insertion point for the new
2881    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2882    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2883    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2884    must not be NULL.  */
2885 
2886 static void
2887 register_new_assert_for (tree name, tree expr,
2888 			 enum tree_code comp_code,
2889 			 tree val,
2890 			 basic_block bb,
2891 			 edge e,
2892 			 gimple_stmt_iterator si)
2893 {
2894   assert_locus *n, *loc, *last_loc;
2895   basic_block dest_bb;
2896 
2897   gcc_checking_assert (bb == NULL || e == NULL);
2898 
2899   if (e == NULL)
2900     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2901 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2902 
2903   /* Never build an assert comparing against an integer constant with
2904      TREE_OVERFLOW set.  This confuses our undefined overflow warning
2905      machinery.  */
2906   if (TREE_OVERFLOW_P (val))
2907     val = drop_tree_overflow (val);
2908 
2909   /* The new assertion A will be inserted at BB or E.  We need to
2910      determine if the new location is dominated by a previously
2911      registered location for A.  If we are doing an edge insertion,
2912      assume that A will be inserted at E->DEST.  Note that this is not
2913      necessarily true.
2914 
2915      If E is a critical edge, it will be split.  But even if E is
2916      split, the new block will dominate the same set of blocks that
2917      E->DEST dominates.
2918 
2919      The reverse, however, is not true: blocks dominated by E->DEST
2920      will not be dominated by the new block created to split E.  So,
2921      if the insertion location is on a critical edge, we will not use
2922      the new location to move another assertion previously registered
2923      at a block dominated by E->DEST.  */
2924   dest_bb = (bb) ? bb : e->dest;
2925 
2926   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2927      VAL at a block dominating DEST_BB, then we don't need to insert a new
2928      one.  Similarly, if the same assertion already exists at a block
2929      dominated by DEST_BB and the new location is not on a critical
2930      edge, then update the existing location for the assertion (i.e.,
2931      move the assertion up in the dominance tree).
2932 
2933      Note, this is implemented as a simple linked list because there
2934      should not be more than a handful of assertions registered per
2935      name.  If this becomes a performance problem, a table hashed by
2936      COMP_CODE and VAL could be implemented.  */
2937   loc = asserts_for[SSA_NAME_VERSION (name)];
2938   last_loc = loc;
2939   while (loc)
2940     {
2941       if (loc->comp_code == comp_code
2942 	  && (loc->val == val
2943 	      || operand_equal_p (loc->val, val, 0))
2944 	  && (loc->expr == expr
2945 	      || operand_equal_p (loc->expr, expr, 0)))
2946 	{
2947 	  /* If E is not a critical edge and DEST_BB
2948 	     dominates the existing location for the assertion, move
2949 	     the assertion up in the dominance tree by updating its
2950 	     location information.  */
2951 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
2952 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2953 	    {
2954 	      loc->bb = dest_bb;
2955 	      loc->e = e;
2956 	      loc->si = si;
2957 	      return;
2958 	    }
2959 	}
2960 
2961       /* Update the last node of the list and move to the next one.  */
2962       last_loc = loc;
2963       loc = loc->next;
2964     }
2965 
2966   /* If we didn't find an assertion already registered for
2967      NAME COMP_CODE VAL, add a new one at the end of the list of
2968      assertions associated with NAME.  */
2969   n = XNEW (struct assert_locus);
2970   n->bb = dest_bb;
2971   n->e = e;
2972   n->si = si;
2973   n->comp_code = comp_code;
2974   n->val = val;
2975   n->expr = expr;
2976   n->next = NULL;
2977 
2978   if (last_loc)
2979     last_loc->next = n;
2980   else
2981     asserts_for[SSA_NAME_VERSION (name)] = n;
2982 
2983   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2984 }
2985 
2986 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2987    Extract a suitable test code and value and store them into *CODE_P and
2988    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2989 
2990    If no extraction was possible, return FALSE, otherwise return TRUE.
2991 
2992    If INVERT is true, then we invert the result stored into *CODE_P.  */
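/* For example, the predicate '5 < x_1' is normalized to 'x_1 > 5';
   with INVERT set it becomes 'x_1 <= 5'.  */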
2993 
2994 static bool
2995 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2996 					 tree cond_op0, tree cond_op1,
2997 					 bool invert, enum tree_code *code_p,
2998 					 tree *val_p)
2999 {
3000   enum tree_code comp_code;
3001   tree val;
3002 
3003   /* We have a comparison of the form NAME COMP VAL
3004      or VAL COMP NAME.  */
3005   if (name == cond_op1)
3006     {
3007       /* If the predicate is of the form VAL COMP NAME, flip
3008 	 COMP around because we need to register NAME as the
3009 	 first operand in the predicate.  */
3010       comp_code = swap_tree_comparison (cond_code);
3011       val = cond_op0;
3012     }
3013   else if (name == cond_op0)
3014     {
3015       /* The comparison is of the form NAME COMP VAL, so the
3016 	 comparison code remains unchanged.  */
3017       comp_code = cond_code;
3018       val = cond_op1;
3019     }
3020   else
3021     gcc_unreachable ();
3022 
3023   /* Invert the comparison code as necessary.  */
3024   if (invert)
3025     comp_code = invert_tree_comparison (comp_code, 0);
3026 
3027   /* VRP only handles integral and pointer types.  */
3028   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
3029       && ! POINTER_TYPE_P (TREE_TYPE (val)))
3030     return false;
3031 
3032   /* Do not register always-false predicates.
3033      FIXME:  this works around a limitation in fold() when dealing with
3034      enumerations.  Given 'enum { N1, N2 } x;', fold will not
3035      fold 'if (x > N2)' to 'if (0)'.  */
3036   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
3037       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
3038     {
3039       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
3040       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
3041 
3042       if (comp_code == GT_EXPR
3043 	  && (!max
3044 	      || compare_values (val, max) == 0))
3045 	return false;
3046 
3047       if (comp_code == LT_EXPR
3048 	  && (!min
3049 	      || compare_values (val, min) == 0))
3050 	return false;
3051     }
3052   *code_p = comp_code;
3053   *val_p = val;
3054   return true;
3055 }
3056 
3057 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
3058    (otherwise return VAL).  VAL and MASK must be zero-extended for
3059    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
3060    (to transform signed values into unsigned) and at the end xor
3061    SGNBIT back.  */
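/* For example, VAL = 5 and MASK = 0b1100 give RES = 8, the smallest
   value above 5 whose set bits all lie within MASK.  */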
3062 
3063 static wide_int
3064 masked_increment (const wide_int &val_in, const wide_int &mask,
3065 		  const wide_int &sgnbit, unsigned int prec)
3066 {
3067   wide_int bit = wi::one (prec), res;
3068   unsigned int i;
3069 
3070   wide_int val = val_in ^ sgnbit;
3071   for (i = 0; i < prec; i++, bit += bit)
3072     {
3073       res = mask;
3074       if ((res & bit) == 0)
3075 	continue;
3076       res = bit - 1;
3077       res = wi::bit_and_not (val + bit, res);
3078       res &= mask;
3079       if (wi::gtu_p (res, val))
3080 	return res ^ sgnbit;
3081     }
3082   return val ^ sgnbit;
3083 }
3084 
3085 /* Helper for overflow_comparison_p.
3086 
3087    OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
3088    OP1's defining statement to see if it ultimately has the form
3089    OP0 CODE (OP0 PLUS INTEGER_CST)
3090 
3091    If so, return TRUE indicating this is an overflow test and store into
3092    *NEW_CST an updated constant that can be used in a narrowed range test.
3093 
3094    REVERSED indicates if the comparison was originally:
3095 
3096    OP1 CODE' OP0.
3097 
3098    This affects how we build the updated constant.  */
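/* For example, for unsigned 32-bit x the wraparound test
   'x <= x + 0xffffffff' (i.e. x <= x - 1) holds iff x == 0, so the
   updated constant is 0xffffffff - 0xffffffff == 0 and the narrowed
   test is x <= 0.  */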
3099 
3100 static bool
3101 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
3102 		         bool follow_assert_exprs, bool reversed, tree *new_cst)
3103 {
3104   /* See if this is a relational operation between two SSA_NAMES with
3105      unsigned, overflow wrapping values.  If so, check it more deeply.  */
3106   if ((code == LT_EXPR || code == LE_EXPR
3107        || code == GE_EXPR || code == GT_EXPR)
3108       && TREE_CODE (op0) == SSA_NAME
3109       && TREE_CODE (op1) == SSA_NAME
3110       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
3111       && TYPE_UNSIGNED (TREE_TYPE (op0))
3112       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
3113     {
3114       gimple *op1_def = SSA_NAME_DEF_STMT (op1);
3115 
3116       /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
3117       if (follow_assert_exprs)
3118 	{
3119 	  while (gimple_assign_single_p (op1_def)
3120 		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
3121 	    {
3122 	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
3123 	      if (TREE_CODE (op1) != SSA_NAME)
3124 		break;
3125 	      op1_def = SSA_NAME_DEF_STMT (op1);
3126 	    }
3127 	}
3128 
3129       /* Now look at the defining statement of OP1 to see if it adds
3130 	 or subtracts a nonzero constant from another operand.  */
3131       if (op1_def
3132 	  && is_gimple_assign (op1_def)
3133 	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
3134 	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
3135 	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
3136 	{
3137 	  tree target = gimple_assign_rhs1 (op1_def);
3138 
3139 	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
3140 	     for one where TARGET appears on the RHS.  */
3141 	  if (follow_assert_exprs)
3142 	    {
3143 	      /* Now see if that "other operand" is op0, following the chain
3144 		 of ASSERT_EXPRs if necessary.  */
3145 	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
3146 	      while (op0 != target
3147 		     && gimple_assign_single_p (op0_def)
3148 		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
3149 		{
3150 		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
3151 		  if (TREE_CODE (op0) != SSA_NAME)
3152 		    break;
3153 		  op0_def = SSA_NAME_DEF_STMT (op0);
3154 		}
3155 	    }
3156 
3157 	  /* If we did not find our target SSA_NAME, then this is not
3158 	     an overflow test.  */
3159 	  if (op0 != target)
3160 	    return false;
3161 
3162 	  tree type = TREE_TYPE (op0);
3163 	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
3164 	  tree inc = gimple_assign_rhs2 (op1_def);
3165 	  if (reversed)
3166 	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
3167 	  else
3168 	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
3169 	  return true;
3170 	}
3171     }
3172   return false;
3173 }
3174 
3175 /* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
3176    OP1's defining statement to see if it ultimately has the form
3177    OP0 CODE (OP0 PLUS INTEGER_CST)
3178 
3179    If so, return TRUE indicating this is an overflow test and store into
3180    *NEW_CST an updated constant that can be used in a narrowed range test.
3181 
3182    These statements are left as-is in the IL to facilitate discovery of
3183    {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
3184    the alternate range representation is often useful within VRP.  */
3185 
3186 bool
3187 overflow_comparison_p (tree_code code, tree name, tree val,
3188 		       bool use_equiv_p, tree *new_cst)
3189 {
3190   if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
3191     return true;
3192   return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
3193 				  use_equiv_p, true, new_cst);
3194 }
3195 
3196 
3197 /* Try to register an edge assertion for SSA name NAME on edge E for
3198    the condition COND contributing to the conditional jump pointed to by BSI.
3199    Invert the condition COND if INVERT is true.  */
3200 
3201 static void
3202 register_edge_assert_for_2 (tree name, edge e,
3203 			    enum tree_code cond_code,
3204 			    tree cond_op0, tree cond_op1, bool invert,
3205 			    vec<assert_info> &asserts)
3206 {
3207   tree val;
3208   enum tree_code comp_code;
3209 
3210   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3211 						cond_op0,
3212 						cond_op1,
3213 						invert, &comp_code, &val))
3214     return;
3215 
3216   /* Queue the assert.  */
3217   tree x;
3218   if (overflow_comparison_p (comp_code, name, val, false, &x))
3219     {
3220       enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
3221 				 ? GT_EXPR : LE_EXPR);
3222       add_assert_info (asserts, name, name, new_code, x);
3223     }
3224   add_assert_info (asserts, name, name, comp_code, val);
3225 
3226   /* In the case of NAME <= CST and NAME being defined as
3227      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
3228      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
3229      This catches range and anti-range tests.  */
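  /* For example, if n_1 = (unsigned) i_2 + 10 and n_1 <= 14, the
     unsigned sum did not wrap past 14, so i_2 must lie in [-10, 4].  */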
3230   if ((comp_code == LE_EXPR
3231        || comp_code == GT_EXPR)
3232       && TREE_CODE (val) == INTEGER_CST
3233       && TYPE_UNSIGNED (TREE_TYPE (val)))
3234     {
3235       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3236       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
3237 
3238       /* Extract CST2 from the (optional) addition.  */
3239       if (is_gimple_assign (def_stmt)
3240 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
3241 	{
3242 	  name2 = gimple_assign_rhs1 (def_stmt);
3243 	  cst2 = gimple_assign_rhs2 (def_stmt);
3244 	  if (TREE_CODE (name2) == SSA_NAME
3245 	      && TREE_CODE (cst2) == INTEGER_CST)
3246 	    def_stmt = SSA_NAME_DEF_STMT (name2);
3247 	}
3248 
3249       /* Extract NAME2 from the (optional) sign-changing cast.  */
3250       if (gimple_assign_cast_p (def_stmt))
3251 	{
3252 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
3253 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3254 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
3255 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
3256 	    name3 = gimple_assign_rhs1 (def_stmt);
3257 	}
3258 
3259       /* If name3 is used later, create an ASSERT_EXPR for it.  */
3260       if (name3 != NULL_TREE
3261       	  && TREE_CODE (name3) == SSA_NAME
3262 	  && (cst2 == NULL_TREE
3263 	      || TREE_CODE (cst2) == INTEGER_CST)
3264 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
3265 	{
3266 	  tree tmp;
3267 
3268 	  /* Build an expression for the range test.  */
3269 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
3270 	  if (cst2 != NULL_TREE)
3271 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3272 
3273 	  if (dump_file)
3274 	    {
3275 	      fprintf (dump_file, "Adding assert for ");
3276 	      print_generic_expr (dump_file, name3);
3277 	      fprintf (dump_file, " from ");
3278 	      print_generic_expr (dump_file, tmp);
3279 	      fprintf (dump_file, "\n");
3280 	    }
3281 
3282 	  add_assert_info (asserts, name3, tmp, comp_code, val);
3283 	}
3284 
3285       /* If name2 is used later, create an ASSERT_EXPR for it.  */
3286       if (name2 != NULL_TREE
3287       	  && TREE_CODE (name2) == SSA_NAME
3288 	  && TREE_CODE (cst2) == INTEGER_CST
3289 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
3290 	{
3291 	  tree tmp;
3292 
3293 	  /* Build an expression for the range test.  */
3294 	  tmp = name2;
3295 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
3296 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
3297 	  if (cst2 != NULL_TREE)
3298 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3299 
3300 	  if (dump_file)
3301 	    {
3302 	      fprintf (dump_file, "Adding assert for ");
3303 	      print_generic_expr (dump_file, name2);
3304 	      fprintf (dump_file, " from ");
3305 	      print_generic_expr (dump_file, tmp);
3306 	      fprintf (dump_file, "\n");
3307 	    }
3308 
3309 	  add_assert_info (asserts, name2, tmp, comp_code, val);
3310 	}
3311     }
3312 
3313   /* For post-in/decrement tests like if (i++) ... where the
3314      in/decremented value is used on the edge, the extra name we want
3315      to assert for is not on the def chain of the compared name.
3316      Instead it is found among that name's use statements.
3317      Similar cases happen for conversions that were simplified through
3318      fold_{sign_changed,widened}_comparison.  */
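  /* E.g. for the hypothetical sequence "i_2 = i_1 + 1; if (i_1 != 0)"
     the interesting name i_2 is found among the uses of i_1, and on
     the edge we can additionally assert i_2 != 0 + 1, i.e. i_2 != 1.  */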
3319   if ((comp_code == NE_EXPR
3320        || comp_code == EQ_EXPR)
3321       && TREE_CODE (val) == INTEGER_CST)
3322     {
3323       imm_use_iterator ui;
3324       gimple *use_stmt;
3325       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
3326 	{
3327 	  if (!is_gimple_assign (use_stmt))
3328 	    continue;
3329 
3330 	  /* Only consider use statements whose block dominates E's source.  */
3331 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
3332 	    continue;
3333 
3334 	  tree name2 = gimple_assign_lhs (use_stmt);
3335 	  if (TREE_CODE (name2) != SSA_NAME)
3336 	    continue;
3337 
3338 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
3339 	  tree cst;
3340 	  if (code == PLUS_EXPR
3341 	      || code == MINUS_EXPR)
3342 	    {
3343 	      cst = gimple_assign_rhs2 (use_stmt);
3344 	      if (TREE_CODE (cst) != INTEGER_CST)
3345 		continue;
3346 	      cst = int_const_binop (code, val, cst);
3347 	    }
3348 	  else if (CONVERT_EXPR_CODE_P (code))
3349 	    {
3350 	      /* For truncating conversions we cannot record
3351 		 an inequality.  */
3352 	      if (comp_code == NE_EXPR
3353 		  && (TYPE_PRECISION (TREE_TYPE (name2))
3354 		      < TYPE_PRECISION (TREE_TYPE (name))))
3355 		continue;
3356 	      cst = fold_convert (TREE_TYPE (name2), val);
3357 	    }
3358 	  else
3359 	    continue;
3360 
3361 	  if (TREE_OVERFLOW_P (cst))
3362 	    cst = drop_tree_overflow (cst);
3363 	  add_assert_info (asserts, name2, name2, comp_code, cst);
3364 	}
3365     }
3366 
3367   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
3368       && TREE_CODE (val) == INTEGER_CST)
3369     {
3370       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3371       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
3372       tree val2 = NULL_TREE;
3373       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
3374       wide_int mask = wi::zero (prec);
3375       unsigned int nprec = prec;
3376       enum tree_code rhs_code = ERROR_MARK;
3377 
3378       if (is_gimple_assign (def_stmt))
3379 	rhs_code = gimple_assign_rhs_code (def_stmt);
3380 
3381       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
3382          assert that A != CST1 -+ CST2.  */
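      /* For instance, given the hypothetical "name = a_1 + 5" and the
	 test "name == 10", the equivalent assertion a_1 == 10 - 5 is
	 recorded.  */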
3383       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3384 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
3385 	{
3386 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3387 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3388 	  if (TREE_CODE (op0) == SSA_NAME
3389 	      && TREE_CODE (op1) == INTEGER_CST)
3390 	    {
3391 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
3392 					   ? MINUS_EXPR : PLUS_EXPR);
3393 	      op1 = int_const_binop (reverse_op, val, op1);
3394 	      if (TREE_OVERFLOW (op1))
3395 		op1 = drop_tree_overflow (op1);
3396 	      add_assert_info (asserts, op0, op0, comp_code, op1);
3397 	    }
3398 	}
3399 
3400       /* Add asserts for NAME cmp CST and NAME being defined
3401 	 as NAME = (int) NAME2.  */
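      /* A worked example, assuming 8-bit types for brevity: for
	 "name = (signed char) u_1; if (name <= 5)" the signed test is
	 equivalent to the biased unsigned test (u_1 + 128) <= 133,
	 which is the assertion recorded for u_1 below.  */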
3402       if (!TYPE_UNSIGNED (TREE_TYPE (val))
3403 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
3404 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
3405 	  && gimple_assign_cast_p (def_stmt))
3406 	{
3407 	  name2 = gimple_assign_rhs1 (def_stmt);
3408 	  if (CONVERT_EXPR_CODE_P (rhs_code)
3409 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3410 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
3411 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
3412 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
3413 		  || !tree_int_cst_equal (val,
3414 					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
3415 	    {
3416 	      tree tmp, cst;
3417 	      enum tree_code new_comp_code = comp_code;
3418 
3419 	      cst = fold_convert (TREE_TYPE (name2),
3420 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
3421 	      /* Build an expression for the range test.  */
3422 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
3423 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
3424 				 fold_convert (TREE_TYPE (name2), val));
3425 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3426 		{
3427 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
3428 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
3429 				     build_int_cst (TREE_TYPE (name2), 1));
3430 		}
3431 
3432 	      if (dump_file)
3433 		{
3434 		  fprintf (dump_file, "Adding assert for ");
3435 		  print_generic_expr (dump_file, name2);
3436 		  fprintf (dump_file, " from ");
3437 		  print_generic_expr (dump_file, tmp);
3438 		  fprintf (dump_file, "\n");
3439 		}
3440 
3441 	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
3442 	    }
3443 	}
3444 
3445       /* Add asserts for NAME cmp CST and NAME being defined as
3446 	 NAME = NAME2 >> CST2.
3447 
3448 	 Extract CST2 from the right shift.  */
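      /* E.g. for "name = x_1 >> 4" and the test "name == 3" we know
	 x_1 is in [0x30, 0x3f], which is encoded below as the
	 unsigned test (x_1 - 0x30) <= 0xf.  */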
3449       if (rhs_code == RSHIFT_EXPR)
3450 	{
3451 	  name2 = gimple_assign_rhs1 (def_stmt);
3452 	  cst2 = gimple_assign_rhs2 (def_stmt);
3453 	  if (TREE_CODE (name2) == SSA_NAME
3454 	      && tree_fits_uhwi_p (cst2)
3455 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3456 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
3457 	      && type_has_mode_precision_p (TREE_TYPE (val)))
3458 	    {
3459 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
3460 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
3461 	    }
3462 	}
3463       if (val2 != NULL_TREE
3464 	  && TREE_CODE (val2) == INTEGER_CST
3465 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
3466 					    TREE_TYPE (val),
3467 					    val2, cst2), val))
3468 	{
3469 	  enum tree_code new_comp_code = comp_code;
3470 	  tree tmp, new_val;
3471 
3472 	  tmp = name2;
3473 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
3474 	    {
3475 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
3476 		{
3477 		  tree type = build_nonstandard_integer_type (prec, 1);
3478 		  tmp = build1 (NOP_EXPR, type, name2);
3479 		  val2 = fold_convert (type, val2);
3480 		}
3481 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
3482 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
3483 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
3484 	    }
3485 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3486 	    {
3487 	      wide_int minval
3488 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3489 	      new_val = val2;
3490 	      if (minval == wi::to_wide (new_val))
3491 		new_val = NULL_TREE;
3492 	    }
3493 	  else
3494 	    {
3495 	      wide_int maxval
3496 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3497 	      mask |= wi::to_wide (val2);
3498 	      if (wi::eq_p (mask, maxval))
3499 		new_val = NULL_TREE;
3500 	      else
3501 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
3502 	    }
3503 
3504 	  if (new_val)
3505 	    {
3506 	      if (dump_file)
3507 		{
3508 		  fprintf (dump_file, "Adding assert for ");
3509 		  print_generic_expr (dump_file, name2);
3510 		  fprintf (dump_file, " from ");
3511 		  print_generic_expr (dump_file, tmp);
3512 		  fprintf (dump_file, "\n");
3513 		}
3514 
3515 	      add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
3516 	    }
3517 	}
3518 
3519       /* Add asserts for NAME cmp CST and NAME being defined as
3520 	 NAME = NAME2 & CST2.
3521 
3522 	 Extract CST2 from the and.
3523 
3524 	 Also handle
3525 	 NAME = (unsigned) NAME2;
3526 	 casts where NAME's type is unsigned and has smaller precision
3527 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
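      /* E.g. for "name = x_1 & 0xf0" and the test "name == 0x30" the
	 minimum unsigned value of x_1 is 0x30 and the maximum is
	 0x30 | ~0xf0, i.e. x_1 is in [0x30, 0x3f].  A narrowing cast
	 "name = (unsigned char) x_1" acts like x_1 & 0xff here.  */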
3528       names[0] = NULL_TREE;
3529       names[1] = NULL_TREE;
3530       cst2 = NULL_TREE;
3531       if (rhs_code == BIT_AND_EXPR
3532 	  || (CONVERT_EXPR_CODE_P (rhs_code)
3533 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
3534 	      && TYPE_UNSIGNED (TREE_TYPE (val))
3535 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3536 		 > prec))
3537 	{
3538 	  name2 = gimple_assign_rhs1 (def_stmt);
3539 	  if (rhs_code == BIT_AND_EXPR)
3540 	    cst2 = gimple_assign_rhs2 (def_stmt);
3541 	  else
3542 	    {
3543 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3544 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
3545 	    }
3546 	  if (TREE_CODE (name2) == SSA_NAME
3547 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3548 	      && TREE_CODE (cst2) == INTEGER_CST
3549 	      && !integer_zerop (cst2)
3550 	      && (nprec > 1
3551 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
3552 	    {
3553 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3554 	      if (gimple_assign_cast_p (def_stmt2))
3555 		{
3556 		  names[1] = gimple_assign_rhs1 (def_stmt2);
3557 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3558 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3559 		      || (TYPE_PRECISION (TREE_TYPE (name2))
3560 			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
3561 		    names[1] = NULL_TREE;
3562 		}
3563 	      names[0] = name2;
3564 	    }
3565 	}
3566       if (names[0] || names[1])
3567 	{
3568 	  wide_int minv, maxv, valv, cst2v;
3569 	  wide_int tem, sgnbit;
3570 	  bool valid_p = false, valn, cst2n;
3571 	  enum tree_code ccode = comp_code;
3572 
3573 	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3574 	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3575 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3576 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3577 	  /* If CST2 doesn't have the most significant bit set,
3578 	     but VAL is negative, we have a comparison like
3579 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
3580 	  if (!cst2n && valn)
3581 	    ccode = ERROR_MARK;
3582 	  if (cst2n)
3583 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3584 	  else
3585 	    sgnbit = wi::zero (nprec);
3586 	  minv = valv & cst2v;
3587 	  switch (ccode)
3588 	    {
3589 	    case EQ_EXPR:
3590 	      /* Minimum unsigned value for equality is VAL & CST2
3591 		 (should be equal to VAL, otherwise we probably should
3592 		 have folded the comparison into false) and
3593 		 maximum unsigned value is VAL | ~CST2.  */
3594 	      maxv = valv | ~cst2v;
3595 	      valid_p = true;
3596 	      break;
3597 
3598 	    case NE_EXPR:
3599 	      tem = valv | ~cst2v;
3600 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
3601 	      if (valv == 0)
3602 		{
3603 		  cst2n = false;
3604 		  sgnbit = wi::zero (nprec);
3605 		  goto gt_expr;
3606 		}
3607 	      /* If (VAL | ~CST2) is all ones, handle it as
3608 		 (X & CST2) < VAL.  */
3609 	      if (tem == -1)
3610 		{
3611 		  cst2n = false;
3612 		  valn = false;
3613 		  sgnbit = wi::zero (nprec);
3614 		  goto lt_expr;
3615 		}
3616 	      if (!cst2n && wi::neg_p (cst2v))
3617 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3618 	      if (sgnbit != 0)
3619 		{
3620 		  if (valv == sgnbit)
3621 		    {
3622 		      cst2n = true;
3623 		      valn = true;
3624 		      goto gt_expr;
3625 		    }
3626 		  if (tem == wi::mask (nprec - 1, false, nprec))
3627 		    {
3628 		      cst2n = true;
3629 		      goto lt_expr;
3630 		    }
3631 		  if (!cst2n)
3632 		    sgnbit = wi::zero (nprec);
3633 		}
3634 	      break;
3635 
3636 	    case GE_EXPR:
3637 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3638 		 is VAL and maximum unsigned value is ~0.  For signed
3639 		 comparison, if CST2 doesn't have most significant bit
3640 		 set, handle it similarly.  If CST2 has MSB set,
3641 		 the minimum is the same, and maximum is ~0U/2.  */
3642 	      if (minv != valv)
3643 		{
3644 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3645 		     VAL.  */
3646 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
3647 		  if (minv == valv)
3648 		    break;
3649 		}
3650 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3651 	      valid_p = true;
3652 	      break;
3653 
3654 	    case GT_EXPR:
3655 	    gt_expr:
3656 	      /* Find out smallest MINV where MINV > VAL
3657 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
3658 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
3659 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
3660 	      if (minv == valv)
3661 		break;
3662 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3663 	      valid_p = true;
3664 	      break;
3665 
3666 	    case LE_EXPR:
3667 	      /* Minimum unsigned value for <= is 0 and maximum
3668 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3669 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3670 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3671 		 as maximum.
3672 		 For signed comparison, if CST2 doesn't have most
3673 		 significant bit set, handle it similarly.  If CST2 has
3674 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3675 	      if (minv == valv)
3676 		maxv = valv;
3677 	      else
3678 		{
3679 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3680 		  if (maxv == valv)
3681 		    break;
3682 		  maxv -= 1;
3683 		}
3684 	      maxv |= ~cst2v;
3685 	      minv = sgnbit;
3686 	      valid_p = true;
3687 	      break;
3688 
3689 	    case LT_EXPR:
3690 	    lt_expr:
3691 	      /* Minimum unsigned value for < is 0 and maximum
3692 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3693 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3694 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3695 		 as maximum.
3696 		 For signed comparison, if CST2 doesn't have most
3697 		 significant bit set, handle it similarly.  If CST2 has
3698 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3699 	      if (minv == valv)
3700 		{
3701 		  if (valv == sgnbit)
3702 		    break;
3703 		  maxv = valv;
3704 		}
3705 	      else
3706 		{
3707 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3708 		  if (maxv == valv)
3709 		    break;
3710 		}
3711 	      maxv -= 1;
3712 	      maxv |= ~cst2v;
3713 	      minv = sgnbit;
3714 	      valid_p = true;
3715 	      break;
3716 
3717 	    default:
3718 	      break;
3719 	    }
3720 	  if (valid_p
3721 	      && (maxv - minv) != -1)
3722 	    {
3723 	      tree tmp, new_val, type;
3724 	      int i;
3725 
3726 	      for (i = 0; i < 2; i++)
3727 		if (names[i])
3728 		  {
3729 		    wide_int maxv2 = maxv;
3730 		    tmp = names[i];
3731 		    type = TREE_TYPE (names[i]);
3732 		    if (!TYPE_UNSIGNED (type))
3733 		      {
3734 			type = build_nonstandard_integer_type (nprec, 1);
3735 			tmp = build1 (NOP_EXPR, type, names[i]);
3736 		      }
3737 		    if (minv != 0)
3738 		      {
3739 			tmp = build2 (PLUS_EXPR, type, tmp,
3740 				      wide_int_to_tree (type, -minv));
3741 			maxv2 = maxv - minv;
3742 		      }
3743 		    new_val = wide_int_to_tree (type, maxv2);
3744 
3745 		    if (dump_file)
3746 		      {
3747 			fprintf (dump_file, "Adding assert for ");
3748 			print_generic_expr (dump_file, names[i]);
3749 			fprintf (dump_file, " from ");
3750 			print_generic_expr (dump_file, tmp);
3751 			fprintf (dump_file, "\n");
3752 		      }
3753 
3754 		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3755 		  }
3756 	    }
3757 	}
3758     }
3759 }
3760 
3761 /* OP is an operand of a truth value expression which is known to have
3762    a particular value.  Register any asserts for OP and for any
3763    operands in OP's defining statement.
3764 
3765    If CODE is EQ_EXPR, then we want to register that OP is zero (false);
3766    if CODE is NE_EXPR, that OP is nonzero (true).  */
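/* E.g. if OP is defined as "op_1 = a_2 < b_3" and is known to be true
   (CODE is NE_EXPR), we can recurse and register the edge assertions
   implied by a_2 < b_3 for both a_2 and b_3.  */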
3767 
3768 static void
3769 register_edge_assert_for_1 (tree op, enum tree_code code,
3770 			    edge e, vec<assert_info> &asserts)
3771 {
3772   gimple *op_def;
3773   tree val;
3774   enum tree_code rhs_code;
3775 
3776   /* We only care about SSA_NAMEs.  */
3777   if (TREE_CODE (op) != SSA_NAME)
3778     return;
3779 
3780   /* We know that OP will have a zero or nonzero value.  */
3781   val = build_int_cst (TREE_TYPE (op), 0);
3782   add_assert_info (asserts, op, op, code, val);
3783 
3784   /* Now look at how OP is set.  If it's set from a comparison,
3785      a truth operation or some bit operations, then we may be able
3786      to register information about the operands of that assignment.  */
3787   op_def = SSA_NAME_DEF_STMT (op);
3788   if (gimple_code (op_def) != GIMPLE_ASSIGN)
3789     return;
3790 
3791   rhs_code = gimple_assign_rhs_code (op_def);
3792 
3793   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3794     {
3795       bool invert = (code == EQ_EXPR ? true : false);
3796       tree op0 = gimple_assign_rhs1 (op_def);
3797       tree op1 = gimple_assign_rhs2 (op_def);
3798 
3799       if (TREE_CODE (op0) == SSA_NAME)
3800         register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3801       if (TREE_CODE (op1) == SSA_NAME)
3802         register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3803     }
3804   else if ((code == NE_EXPR
3805 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3806 	   || (code == EQ_EXPR
3807 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3808     {
3809       /* Recurse on each operand.  */
3810       tree op0 = gimple_assign_rhs1 (op_def);
3811       tree op1 = gimple_assign_rhs2 (op_def);
3812       if (TREE_CODE (op0) == SSA_NAME
3813 	  && has_single_use (op0))
3814 	register_edge_assert_for_1 (op0, code, e, asserts);
3815       if (TREE_CODE (op1) == SSA_NAME
3816 	  && has_single_use (op1))
3817 	register_edge_assert_for_1 (op1, code, e, asserts);
3818     }
3819   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3820 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3821     {
3822       /* Recurse, flipping CODE.  */
3823       code = invert_tree_comparison (code, false);
3824       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3825     }
3826   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3827     {
3828       /* Recurse through the copy.  */
3829       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3830     }
3831   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3832     {
3833       /* Recurse through the type conversion, unless it is a narrowing
3834 	 conversion or conversion from non-integral type.  */
3835       tree rhs = gimple_assign_rhs1 (op_def);
3836       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3837 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
3838 	      <= TYPE_PRECISION (TREE_TYPE (op))))
3839 	register_edge_assert_for_1 (rhs, code, e, asserts);
3840     }
3841 }
3842 
3843 /* Check if the comparison
3844      NAME COND_OP INTEGER_CST
3845    has the form
3846      (X & 11...100..0) COND_OP XX...X00...0
3847    Such a comparison can yield assertions like
3848      X >= XX...X00...0
3849      X <= XX...X11...1
3850    in case of COND_OP being EQ_EXPR or
3851      X < XX...X00...0
3852      X > XX...X11...1
3853    in case of NE_EXPR.  */
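/* For example, "(x & 0xfffffff0) == 0x40" yields x >= 0x40 and
   x <= 0x4f, while "(x & 0xfffffff0) != 0x40" is only useful when one
   side of the resulting range test is statically false, i.e. when VAL
   is the type minimum or VAL | ~MASK is the type maximum.  */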
3854 
3855 static bool
3856 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3857 		      tree *new_name, tree *low, enum tree_code *low_code,
3858 		      tree *high, enum tree_code *high_code)
3859 {
3860   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3861 
3862   if (!is_gimple_assign (def_stmt)
3863       || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3864     return false;
3865 
3866   tree t = gimple_assign_rhs1 (def_stmt);
3867   tree maskt = gimple_assign_rhs2 (def_stmt);
3868   if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3869     return false;
3870 
3871   wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3872   wide_int inv_mask = ~mask;
3873   /* An all-zero or all-one mask should have been folded away by now.  */
3874   if (mask == 0 || inv_mask == 0)
3875     return false;
3876 
3877   /* Assume VALT is INTEGER_CST.  */
3878   wi::tree_to_wide_ref val = wi::to_wide (valt);
3879 
3880   if ((inv_mask & (inv_mask + 1)) != 0
3881       || (val & mask) != val)
3882     return false;
3883 
3884   bool is_range = cond_code == EQ_EXPR;
3885 
3886   tree type = TREE_TYPE (t);
3887   wide_int min = wi::min_value (type),
3888     max = wi::max_value (type);
3889 
3890   if (is_range)
3891     {
3892       *low_code = val == min ? ERROR_MARK : GE_EXPR;
3893       *high_code = val == max ? ERROR_MARK : LE_EXPR;
3894     }
3895   else
3896     {
3897       /* We can still generate an assertion if one of the
3898 	 alternatives is known to always be false.  */
3899       if (val == min)
3900 	{
3901 	  *low_code = (enum tree_code) 0;
3902 	  *high_code = GT_EXPR;
3903 	}
3904       else if ((val | inv_mask) == max)
3905 	{
3906 	  *low_code = LT_EXPR;
3907 	  *high_code = (enum tree_code) 0;
3908 	}
3909       else
3910 	return false;
3911     }
3912 
3913   *new_name = t;
3914   *low = wide_int_to_tree (type, val);
3915   *high = wide_int_to_tree (type, val | inv_mask);
3916 
3917   return true;
3918 }
3919 
3920 /* Try to register an edge assertion for SSA name NAME on edge E for
3921    the condition COND given by COND_CODE, COND_OP0 and COND_OP1,
3922    appending the assertions found to ASSERTS.  */
3923 
3924 void
3925 register_edge_assert_for (tree name, edge e,
3926 			  enum tree_code cond_code, tree cond_op0,
3927 			  tree cond_op1, vec<assert_info> &asserts)
3928 {
3929   tree val;
3930   enum tree_code comp_code;
3931   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3932 
3933   /* Do not attempt to infer anything in names that flow through
3934      abnormal edges.  */
3935   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3936     return;
3937 
3938   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3939 						cond_op0, cond_op1,
3940 						is_else_edge,
3941 						&comp_code, &val))
3942     return;
3943 
3944   /* Register ASSERT_EXPRs for name.  */
3945   register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3946 			      cond_op1, is_else_edge, asserts);
3947 
3948 
3949   /* If COND is effectively an equality test of an SSA_NAME against
3950      the value zero or one, then we may be able to assert values
3951      for SSA_NAMEs which flow into COND.  */
3952 
3953   /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3954      statement of NAME we can assert both operands of the BIT_AND_EXPR
3955      have nonzero value.  */
3956   if (((comp_code == EQ_EXPR && integer_onep (val))
3957        || (comp_code == NE_EXPR && integer_zerop (val))))
3958     {
3959       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3960 
3961       if (is_gimple_assign (def_stmt)
3962 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3963 	{
3964 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3965 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3966 	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3967 	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3968 	}
3969     }
3970 
3971   /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3972      statement of NAME we can assert both operands of the BIT_IOR_EXPR
3973      have zero value.  */
3974   if (((comp_code == EQ_EXPR && integer_zerop (val))
3975        || (comp_code == NE_EXPR && integer_onep (val))))
3976     {
3977       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3978 
3979       /* For BIT_IOR_EXPR both operands necessarily have zero value
3980 	 only if NAME == 0, or if the type precision is one.  */
3981       if (is_gimple_assign (def_stmt)
3982 	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3983 	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3984 	          || comp_code == EQ_EXPR)))
3985 	{
3986 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3987 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3988 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3989 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3990 	}
3991     }
3992 
3993   /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
3994   if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3995       && TREE_CODE (val) == INTEGER_CST)
3996     {
3997       enum tree_code low_code, high_code;
3998       tree low, high;
3999       if (is_masked_range_test (name, val, comp_code, &name, &low,
4000 				&low_code, &high, &high_code))
4001 	{
4002 	  if (low_code != ERROR_MARK)
4003 	    register_edge_assert_for_2 (name, e, low_code, name,
4004 					low, /*invert*/false, asserts);
4005 	  if (high_code != ERROR_MARK)
4006 	    register_edge_assert_for_2 (name, e, high_code, name,
4007 					high, /*invert*/false, asserts);
4008 	}
4009     }
4010 }
4011 
4012 /* Finish found ASSERTS for E and register them at GSI.  */
4013 
4014 static void
4015 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
4016 				 vec<assert_info> &asserts)
4017 {
4018   for (unsigned i = 0; i < asserts.length (); ++i)
4019     /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4020        reachable from E.  */
4021     if (live_on_edge (e, asserts[i].name))
4022       register_new_assert_for (asserts[i].name, asserts[i].expr,
4023 			       asserts[i].comp_code, asserts[i].val,
4024 			       NULL, e, gsi);
4025 }
4026 
4027 
4028 
4029 /* Determine whether the outgoing edges of BB should receive an
4030    ASSERT_EXPR for each of the operands of BB's LAST statement.
4031    The last statement of BB must be a COND_EXPR.
4032 
4033    If any of the sub-graphs rooted at BB have an interesting use of
4034    the predicate operands, an assert location node is added to the
4035    list of assertions for the corresponding operands.  */
4036 
4037 static void
4038 find_conditional_asserts (basic_block bb, gcond *last)
4039 {
4040   gimple_stmt_iterator bsi;
4041   tree op;
4042   edge_iterator ei;
4043   edge e;
4044   ssa_op_iter iter;
4045 
4046   bsi = gsi_for_stmt (last);
4047 
4048   /* Look for uses of the operands in each of the sub-graphs
4049      rooted at BB.  We need to check each of the outgoing edges
4050      separately, so that we know what kind of ASSERT_EXPR to
4051      insert.  */
4052   FOR_EACH_EDGE (e, ei, bb->succs)
4053     {
4054       if (e->dest == bb)
4055 	continue;
4056 
4057       /* Register the necessary assertions for each operand in the
4058 	 conditional predicate.  */
4059       auto_vec<assert_info, 8> asserts;
4060       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4061 	register_edge_assert_for (op, e,
4062 				  gimple_cond_code (last),
4063 				  gimple_cond_lhs (last),
4064 				  gimple_cond_rhs (last), asserts);
4065       finish_register_edge_assert_for (e, bsi, asserts);
4066     }
4067 }
4068 
4069 struct case_info
4070 {
4071   tree expr;
4072   basic_block bb;
4073 };
4074 
4075 /* Compare two case labels sorting first by the destination bb index
4076    and then by the case value.  */
4077 
4078 static int
4079 compare_case_labels (const void *p1, const void *p2)
4080 {
4081   const struct case_info *ci1 = (const struct case_info *) p1;
4082   const struct case_info *ci2 = (const struct case_info *) p2;
4083   int idx1 = ci1->bb->index;
4084   int idx2 = ci2->bb->index;
4085 
4086   if (idx1 < idx2)
4087     return -1;
4088   else if (idx1 == idx2)
4089     {
4090       /* Make sure the default label is first in a group.  */
4091       if (!CASE_LOW (ci1->expr))
4092 	return -1;
4093       else if (!CASE_LOW (ci2->expr))
4094 	return 1;
4095       else
4096 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
4097 				     CASE_LOW (ci2->expr));
4098     }
4099   else
4100     return 1;
4101 }
4102 
4103 /* Determine whether the outgoing edges of BB should receive an
4104    ASSERT_EXPR for each of the operands of BB's LAST statement.
4105    The last statement of BB must be a SWITCH_EXPR.
4106 
4107    If any of the sub-graphs rooted at BB have an interesting use of
4108    the predicate operands, an assert location node is added to the
4109    list of assertions for the corresponding operands.  */
4110 
4111 static void
4112 find_switch_asserts (basic_block bb, gswitch *last)
4113 {
4114   gimple_stmt_iterator bsi;
4115   tree op;
4116   edge e;
4117   struct case_info *ci;
4118   size_t n = gimple_switch_num_labels (last);
4119 #if GCC_VERSION >= 4000
4120   unsigned int idx;
4121 #else
4122   /* Work around GCC 3.4 bug (PR 37086).  */
4123   volatile unsigned int idx;
4124 #endif
4125 
4126   bsi = gsi_for_stmt (last);
4127   op = gimple_switch_index (last);
4128   if (TREE_CODE (op) != SSA_NAME)
4129     return;
4130 
4131   /* Build a vector of case labels sorted by destination label.  */
4132   ci = XNEWVEC (struct case_info, n);
4133   for (idx = 0; idx < n; ++idx)
4134     {
4135       ci[idx].expr = gimple_switch_label (last, idx);
4136       ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4137     }
4138   edge default_edge = find_edge (bb, ci[0].bb);
4139   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4140 
4141   for (idx = 0; idx < n; ++idx)
4142     {
4143       tree min, max;
4144       tree cl = ci[idx].expr;
4145       basic_block cbb = ci[idx].bb;
4146 
4147       min = CASE_LOW (cl);
4148       max = CASE_HIGH (cl);
4149 
4150       /* If there are multiple case labels with the same destination
4151 	 we need to combine them to a single value range for the edge.  */
4152       if (idx + 1 < n && cbb == ci[idx + 1].bb)
4153 	{
4154 	  /* Skip labels until the last of the group.  */
4155 	  do {
4156 	    ++idx;
4157 	  } while (idx < n && cbb == ci[idx].bb);
4158 	  --idx;
4159 
4160 	  /* Pick up the maximum of the case label range.  */
4161 	  if (CASE_HIGH (ci[idx].expr))
4162 	    max = CASE_HIGH (ci[idx].expr);
4163 	  else
4164 	    max = CASE_LOW (ci[idx].expr);
4165 	}
4166 
4167       /* Can't extract a useful assertion out of a range that includes the
4168 	 default label.  */
4169       if (min == NULL_TREE)
4170 	continue;
4171 
4172       /* Find the edge to register the assert expr on.  */
4173       e = find_edge (bb, cbb);
4174 
4175       /* Register the necessary assertions for the operand in the
4176 	 SWITCH_EXPR.  */
4177       auto_vec<assert_info, 8> asserts;
4178       register_edge_assert_for (op, e,
4179 				max ? GE_EXPR : EQ_EXPR,
4180 				op, fold_convert (TREE_TYPE (op), min),
4181 				asserts);
4182       if (max)
4183 	register_edge_assert_for (op, e, LE_EXPR, op,
4184 				  fold_convert (TREE_TYPE (op), max),
4185 				  asserts);
4186       finish_register_edge_assert_for (e, bsi, asserts);
4187     }
4188 
4189   XDELETEVEC (ci);
4190 
4191   if (!live_on_edge (default_edge, op))
4192     return;
4193 
4194   /* Now register along the default label assertions that correspond to the
4195      anti-range of each label.  */
4196   int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
4197   if (insertion_limit == 0)
4198     return;
4199 
4200   /* We can't do this if the default case shares a label with another case.  */
4201   tree default_cl = gimple_switch_default_label (last);
4202   for (idx = 1; idx < n; idx++)
4203     {
4204       tree min, max;
4205       tree cl = gimple_switch_label (last, idx);
4206       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
4207 	continue;
4208 
4209       min = CASE_LOW (cl);
4210       max = CASE_HIGH (cl);
4211 
4212       /* Combine contiguous case ranges to reduce the number of assertions
4213 	 to insert.  */
4214       for (idx = idx + 1; idx < n; idx++)
4215 	{
4216 	  tree next_min, next_max;
4217 	  tree next_cl = gimple_switch_label (last, idx);
4218 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
4219 	    break;
4220 
4221 	  next_min = CASE_LOW (next_cl);
4222 	  next_max = CASE_HIGH (next_cl);
4223 
4224 	  wide_int difference = (wi::to_wide (next_min)
4225 				 - wi::to_wide (max ? max : min));
4226 	  if (wi::eq_p (difference, 1))
4227 	    max = next_max ? next_max : next_min;
4228 	  else
4229 	    break;
4230 	}
4231       idx--;
4232 
4233       if (max == NULL_TREE)
4234 	{
4235 	  /* Register the assertion OP != MIN.  */
4236 	  auto_vec<assert_info, 8> asserts;
4237 	  min = fold_convert (TREE_TYPE (op), min);
4238 	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
4239 				    asserts);
4240 	  finish_register_edge_assert_for (default_edge, bsi, asserts);
4241 	}
4242       else
4243 	{
4244 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
4245 	     which will give OP the anti-range ~[MIN,MAX].  */
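	  /* E.g. for a combined case range [3, 7] this registers
	     (unsigned) op - 3 > 4 on the default edge, which in
	     wrapping arithmetic holds exactly when op < 3 or op > 7.  */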
4246 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
4247 	  min = fold_convert (TREE_TYPE (uop), min);
4248 	  max = fold_convert (TREE_TYPE (uop), max);
4249 
4250 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
4251 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
4252 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
4253 				   NULL, default_edge, bsi);
4254 	}
4255 
4256       if (--insertion_limit == 0)
4257 	break;
4258     }
4259 }
4260 
4261 
4262 /* Traverse all the statements in block BB looking for statements that
4263    may generate useful assertions for the SSA names in their operand.
4264    If a statement produces a useful assertion A for name N_i, then the
4265    list of assertions already generated for N_i is scanned to
4266    determine if A is actually needed.
4267 
4268    If N_i already had the assertion A at a location dominating the
4269    current location, then nothing needs to be done.  Otherwise, the
4270    new location for A is recorded instead.
4271 
4272    1- For every statement S in BB, all the variables used by S are
4273       added to bitmap FOUND_IN_SUBGRAPH.
4274 
4275    2- If statement S uses an operand N in a way that exposes a known
4276       value range for N, then if N was not already generated by an
4277       ASSERT_EXPR, create a new assert location for N.  For instance,
4278       if N is a pointer and the statement dereferences it, we can
4279       assume that N is not NULL.
4280 
4281    3- COND_EXPRs are a special case of #2.  We can derive range
4282       information from the predicate but need to insert different
4283       ASSERT_EXPRs for each of the sub-graphs rooted at the
4284       conditional block.  If the last statement of BB is a conditional
4285       expression of the form 'X op Y', then
4286 
4287       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4288 
4289       b) If the conditional is the only entry point to the sub-graph
4290 	 corresponding to the THEN_CLAUSE, recurse into it.  On
4291 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4292 	 an ASSERT_EXPR is added for the corresponding variable.
4293 
4294       c) Repeat step (b) on the ELSE_CLAUSE.
4295 
4296       d) Mark X and Y in FOUND_IN_SUBGRAPH.
4297 
4298       For instance,
4299 
4300 	    if (a == 9)
4301 	      b = a;
4302 	    else
4303 	      b = c + 1;
4304 
4305       In this case, an assertion on the THEN clause is useful to
4306       determine that 'a' is always 9 on that edge.  However, an assertion
4307       on the ELSE clause would be unnecessary.
4308 
4309    4- If BB does not end in a conditional expression, then we recurse
4310       into BB's dominator children.
4311 
4312    At the end of the recursive traversal, every SSA name will have a
4313    list of locations where ASSERT_EXPRs should be added.  When a new
4314    location for name N is found, it is registered by calling
4315    register_new_assert_for.  That function keeps track of all the
4316    registered assertions to prevent adding unnecessary assertions.
4317    For instance, if a pointer P_4 is dereferenced more than once in a
4318    dominator tree, only the location dominating all the dereference of
4319    P_4 will receive an ASSERT_EXPR.  */
4320 
4321 static void
4322 find_assert_locations_1 (basic_block bb, sbitmap live)
4323 {
4324   gimple *last;
4325 
4326   last = last_stmt (bb);
4327 
4328   /* If BB's last statement is a conditional statement involving integer
4329      operands, determine if we need to add ASSERT_EXPRs.  */
4330   if (last
4331       && gimple_code (last) == GIMPLE_COND
4332       && !fp_predicate (last)
4333       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4334     find_conditional_asserts (bb, as_a <gcond *> (last));
4335 
4336   /* If BB's last statement is a switch statement involving integer
4337      operands, determine if we need to add ASSERT_EXPRs.  */
4338   if (last
4339       && gimple_code (last) == GIMPLE_SWITCH
4340       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4341     find_switch_asserts (bb, as_a <gswitch *> (last));
4342 
4343   /* Traverse all the statements in BB marking used names and looking
4344      for statements that may infer assertions for their used operands.  */
4345   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
4346        gsi_prev (&si))
4347     {
4348       gimple *stmt;
4349       tree op;
4350       ssa_op_iter i;
4351 
4352       stmt = gsi_stmt (si);
4353 
4354       if (is_gimple_debug (stmt))
4355 	continue;
4356 
4357       /* See if we can derive an assertion for any of STMT's operands.  */
4358       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4359 	{
4360 	  tree value;
4361 	  enum tree_code comp_code;
4362 
4363 	  /* If op is not live beyond this stmt, do not bother to insert
4364 	     asserts for it.  */
4365 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
4366 	    continue;
4367 
4368 	  /* If OP is used in such a way that we can infer a value
4369 	     range for it, and we don't find a previous assertion for
4370 	     it, create a new assertion location node for OP.  */
4371 	  if (infer_value_range (stmt, op, &comp_code, &value))
4372 	    {
4373 	      /* If we are able to infer a nonzero value range for OP,
4374 		 then walk backwards through the use-def chain to see if OP
4375 		 was set via a typecast.
4376 
4377 		 If so, then we can also infer a nonzero value range
4378 		 for the operand of the NOP_EXPR.  */
4379 	      if (comp_code == NE_EXPR && integer_zerop (value))
4380 		{
4381 		  tree t = op;
4382 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
4383 
4384 		  while (is_gimple_assign (def_stmt)
4385 			 && CONVERT_EXPR_CODE_P
4386 			     (gimple_assign_rhs_code (def_stmt))
4387 			 && TREE_CODE
4388 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4389 			 && POINTER_TYPE_P
4390 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4391 		    {
4392 		      t = gimple_assign_rhs1 (def_stmt);
4393 		      def_stmt = SSA_NAME_DEF_STMT (t);
4394 
4395 		      /* Note we want to register the assert for the
4396 			 operand of the NOP_EXPR after SI, not after the
4397 			 conversion.  */
4398 		      if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
4399 			register_new_assert_for (t, t, comp_code, value,
4400 						 bb, NULL, si);
4401 		    }
4402 		}
4403 
4404 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
4405 	    }
4406 	}
4407 
4408       /* Update live.  */
4409       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4410 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
4411       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
4412 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
4413     }
4414 
4415   /* Traverse all PHI nodes in BB, updating live.  */
4416   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4417        gsi_next (&si))
4418     {
4419       use_operand_p arg_p;
4420       ssa_op_iter i;
4421       gphi *phi = si.phi ();
4422       tree res = gimple_phi_result (phi);
4423 
4424       if (virtual_operand_p (res))
4425 	continue;
4426 
4427       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4428 	{
4429 	  tree arg = USE_FROM_PTR (arg_p);
4430 	  if (TREE_CODE (arg) == SSA_NAME)
4431 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
4432 	}
4433 
4434       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
4435     }
4436 }
4437 
4438 /* Do an RPO walk over the function computing SSA name liveness
4439    on-the-fly and deciding on assert expressions to insert.  */
4440 
4441 static void
4442 find_assert_locations (void)
4443 {
4444   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4445   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4446   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
4447   int rpo_cnt, i;
4448 
4449   live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
4450   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4451   for (i = 0; i < rpo_cnt; ++i)
4452     bb_rpo[rpo[i]] = i;
4453 
4454   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
4455      the order in which we compute liveness and insert asserts, we
4456      would otherwise fail to insert asserts into the loop latch.  */
4457   loop_p loop;
4458   FOR_EACH_LOOP (loop, 0)
4459     {
4460       i = loop->latch->index;
4461       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
4462       for (gphi_iterator gsi = gsi_start_phis (loop->header);
4463 	   !gsi_end_p (gsi); gsi_next (&gsi))
4464 	{
4465 	  gphi *phi = gsi.phi ();
4466 	  if (virtual_operand_p (gimple_phi_result (phi)))
4467 	    continue;
4468 	  tree arg = gimple_phi_arg_def (phi, j);
4469 	  if (TREE_CODE (arg) == SSA_NAME)
4470 	    {
4471 	      if (live[i] == NULL)
4472 		{
4473 		  live[i] = sbitmap_alloc (num_ssa_names);
4474 		  bitmap_clear (live[i]);
4475 		}
4476 	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
4477 	    }
4478 	}
4479     }
4480 
4481   for (i = rpo_cnt - 1; i >= 0; --i)
4482     {
4483       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
4484       edge e;
4485       edge_iterator ei;
4486 
4487       if (!live[rpo[i]])
4488 	{
4489 	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4490 	  bitmap_clear (live[rpo[i]]);
4491 	}
4492 
4493       /* Process BB and update the live information with uses in
4494          this block.  */
4495       find_assert_locations_1 (bb, live[rpo[i]]);
4496 
4497       /* Merge liveness into the predecessor blocks and free it.  */
4498       if (!bitmap_empty_p (live[rpo[i]]))
4499 	{
4500 	  int pred_rpo = i;
4501 	  FOR_EACH_EDGE (e, ei, bb->preds)
4502 	    {
4503 	      int pred = e->src->index;
4504 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
4505 		continue;
4506 
4507 	      if (!live[pred])
4508 		{
4509 		  live[pred] = sbitmap_alloc (num_ssa_names);
4510 		  bitmap_clear (live[pred]);
4511 		}
4512 	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4513 
4514 	      if (bb_rpo[pred] < pred_rpo)
4515 		pred_rpo = bb_rpo[pred];
4516 	    }
4517 
4518 	  /* Record the RPO number of the last visited block that needs
4519 	     live information from this block.  */
4520 	  last_rpo[rpo[i]] = pred_rpo;
4521 	}
4522       else
4523 	{
4524 	  sbitmap_free (live[rpo[i]]);
4525 	  live[rpo[i]] = NULL;
4526 	}
4527 
4528       /* We can free all successors' live bitmaps if all their
4529          predecessors have been visited already.  */
4530       FOR_EACH_EDGE (e, ei, bb->succs)
4531 	if (last_rpo[e->dest->index] == i
4532 	    && live[e->dest->index])
4533 	  {
4534 	    sbitmap_free (live[e->dest->index]);
4535 	    live[e->dest->index] = NULL;
4536 	  }
4537     }
4538 
4539   XDELETEVEC (rpo);
4540   XDELETEVEC (bb_rpo);
4541   XDELETEVEC (last_rpo);
4542   for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4543     if (live[i])
4544       sbitmap_free (live[i]);
4545   XDELETEVEC (live);
4546 }
4547 
4548 /* Create an ASSERT_EXPR for NAME and insert it in the location
4549    indicated by LOC.  Return true if we made any edge insertions.  */
4550 
4551 static bool
4552 process_assert_insertions_for (tree name, assert_locus *loc)
4553 {
4554   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
4555   gimple *stmt;
4556   tree cond;
4557   gimple *assert_stmt;
4558   edge_iterator ei;
4559   edge e;
4560 
4561   /* If we have X <=> X do not insert an assert expr for that.  */
4562   if (loc->expr == loc->val)
4563     return false;
4564 
4565   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4566   assert_stmt = build_assert_expr_for (cond, name);
4567   if (loc->e)
4568     {
4569       /* We have been asked to insert the assertion on an edge.  This
4570 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
4571       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4572 			   || (gimple_code (gsi_stmt (loc->si))
4573 			       == GIMPLE_SWITCH));
4574 
4575       gsi_insert_on_edge (loc->e, assert_stmt);
4576       return true;
4577     }
4578 
4579   /* If the stmt iterator points at the end then this is an insertion
4580      at the beginning of a block.  */
4581   if (gsi_end_p (loc->si))
4582     {
4583       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4584       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4585       return false;
4587     }
4588   /* Otherwise, we can insert right after LOC->SI as long as the
4589      statement is not the last statement in the block.  */
4590   stmt = gsi_stmt (loc->si);
4591   if (!stmt_ends_bb_p (stmt))
4592     {
4593       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4594       return false;
4595     }
4596 
4597   /* If STMT must be the last statement in BB, we can only insert new
4598      assertions on the non-abnormal edge out of BB.  Note that since
4599      STMT is not control flow, there may only be one non-abnormal/eh edge
4600      out of BB.  */
4601   FOR_EACH_EDGE (e, ei, loc->bb->succs)
4602     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4603       {
4604 	gsi_insert_on_edge (e, assert_stmt);
4605 	return true;
4606       }
4607 
4608   gcc_unreachable ();
4609 }
4610 
4611 /* Qsort helper for sorting assert locations.  If stable is true, don't
4612    use iterative_hash_expr because it can be unstable for -fcompare-debug;
4613    on the other hand, some pointers might be NULL.  */
4614 
4615 template <bool stable>
4616 static int
4617 compare_assert_loc (const void *pa, const void *pb)
4618 {
4619   assert_locus * const a = *(assert_locus * const *)pa;
4620   assert_locus * const b = *(assert_locus * const *)pb;
4621 
4622   /* If stable, some asserts might be optimized away already, sort
4623      them last.  */
4624   if (stable)
4625     {
4626       if (a == NULL)
4627 	return b != NULL;
4628       else if (b == NULL)
4629 	return -1;
4630     }
4631 
4632   if (a->e == NULL && b->e != NULL)
4633     return 1;
4634   else if (a->e != NULL && b->e == NULL)
4635     return -1;
4636 
4637   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4638      no need to test both a->e and b->e.  */
4639 
4640   /* Sort after destination index.  */
4641   if (a->e == NULL)
4642     ;
4643   else if (a->e->dest->index > b->e->dest->index)
4644     return 1;
4645   else if (a->e->dest->index < b->e->dest->index)
4646     return -1;
4647 
4648   /* Sort after comp_code.  */
4649   if (a->comp_code > b->comp_code)
4650     return 1;
4651   else if (a->comp_code < b->comp_code)
4652     return -1;
4653 
4654   hashval_t ha, hb;
4655 
4656   /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4657      uses DECL_UID of the VAR_DECL, so sorting might differ between
4658      -g and -g0.  When doing the removal of redundant assert exprs
4659      and commonization to successors, this does not matter, but the
4660      final sort needs to be stable.  */
4661   if (stable)
4662     {
4663       ha = 0;
4664       hb = 0;
4665     }
4666   else
4667     {
4668       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4669       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4670     }
4671 
4672   /* Break the tie using hashing and source/bb index.  */
4673   if (ha == hb)
4674     return (a->e != NULL
4675 	    ? a->e->src->index - b->e->src->index
4676 	    : a->bb->index - b->bb->index);
4677   return ha > hb ? 1 : -1;
4678 }
4679 
4680 /* Process all the insertions registered for every name N_i registered
4681    in NEED_ASSERT_FOR.  The list of assertions to be inserted are
4682    found in ASSERTS_FOR[i].  */
4683 
4684 static void
4685 process_assert_insertions (void)
4686 {
4687   unsigned i;
4688   bitmap_iterator bi;
4689   bool update_edges_p = false;
4690   int num_asserts = 0;
4691 
4692   if (dump_file && (dump_flags & TDF_DETAILS))
4693     dump_all_asserts (dump_file);
4694 
4695   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4696     {
4697       assert_locus *loc = asserts_for[i];
4698       gcc_assert (loc);
4699 
4700       auto_vec<assert_locus *, 16> asserts;
4701       for (; loc; loc = loc->next)
4702 	asserts.safe_push (loc);
4703       asserts.qsort (compare_assert_loc<false>);
4704 
4705       /* Push down common asserts to successors and remove redundant ones.  */
4706       unsigned ecnt = 0;
4707       assert_locus *common = NULL;
4708       unsigned commonj = 0;
4709       for (unsigned j = 0; j < asserts.length (); ++j)
4710 	{
4711 	  loc = asserts[j];
4712 	  if (! loc->e)
4713 	    common = NULL;
4714 	  else if (! common
4715 		   || loc->e->dest != common->e->dest
4716 		   || loc->comp_code != common->comp_code
4717 		   || ! operand_equal_p (loc->val, common->val, 0)
4718 		   || ! operand_equal_p (loc->expr, common->expr, 0))
4719 	    {
4720 	      commonj = j;
4721 	      common = loc;
4722 	      ecnt = 1;
4723 	    }
4724 	  else if (loc->e == asserts[j-1]->e)
4725 	    {
4726 	      /* Remove duplicate asserts.  */
4727 	      if (commonj == j - 1)
4728 		{
4729 		  commonj = j;
4730 		  common = loc;
4731 		}
4732 	      free (asserts[j-1]);
4733 	      asserts[j-1] = NULL;
4734 	    }
4735 	  else
4736 	    {
4737 	      ecnt++;
4738 	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4739 		{
4740 		  /* We have the same assertion on all incoming edges of a BB.
4741 		     Insert it at the beginning of that block.  */
4742 		  loc->bb = loc->e->dest;
4743 		  loc->e = NULL;
4744 		  loc->si = gsi_none ();
4745 		  common = NULL;
4746 		  /* Clear asserts commoned.  */
4747 		  for (; commonj != j; ++commonj)
4748 		    if (asserts[commonj])
4749 		      {
4750 			free (asserts[commonj]);
4751 			asserts[commonj] = NULL;
4752 		      }
4753 		}
4754 	    }
4755 	}
4756 
4757       /* The asserts vector sorting above might be unstable for
4758 	 -fcompare-debug, sort again to ensure a stable sort.  */
4759       asserts.qsort (compare_assert_loc<true>);
4760       for (unsigned j = 0; j < asserts.length (); ++j)
4761 	{
4762 	  loc = asserts[j];
4763 	  if (! loc)
4764 	    break;
4765 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4766 	  num_asserts++;
4767 	  free (loc);
4768 	}
4769     }
4770 
4771   if (update_edges_p)
4772     gsi_commit_edge_inserts ();
4773 
4774   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4775 			    num_asserts);
4776 }
4777 
4778 
4779 /* Traverse the flowgraph looking for conditional jumps to insert range
4780    expressions.  These range expressions are meant to provide information
4781    to optimizations that need to reason in terms of value ranges.  They
4782    will not be expanded into RTL.  For instance, given:
4783 
4784    x = ...
4785    y = ...
4786    if (x < y)
4787      y = x - 2;
4788    else
4789      x = y + 3;
4790 
4791    this pass will transform the code into:
4792 
4793    x = ...
4794    y = ...
4795    if (x < y)
4796     {
4797       x = ASSERT_EXPR <x, x < y>
4798       y = x - 2
4799     }
4800    else
4801     {
4802       y = ASSERT_EXPR <y, x >= y>
4803       x = y + 3
4804     }
4805 
4806    The idea is that once copy and constant propagation have run, other
4807    optimizations will be able to determine what ranges of values can 'x'
4808    take in different paths of the code, simply by checking the reaching
4809    definition of 'x'.  */
4810 
4811 static void
4812 insert_range_assertions (void)
4813 {
4814   need_assert_for = BITMAP_ALLOC (NULL);
4815   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4816 
4817   calculate_dominance_info (CDI_DOMINATORS);
4818 
4819   find_assert_locations ();
4820   if (!bitmap_empty_p (need_assert_for))
4821     {
4822       process_assert_insertions ();
4823       update_ssa (TODO_update_ssa_no_phi);
4824     }
4825 
4826   if (dump_file && (dump_flags & TDF_DETAILS))
4827     {
4828       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4829       dump_function_to_file (current_function_decl, dump_file, dump_flags);
4830     }
4831 
4832   free (asserts_for);
4833   BITMAP_FREE (need_assert_for);
4834 }
4835 
4836 class vrp_prop : public ssa_propagation_engine
4837 {
4838  public:
4839   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4840   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4841 
4842   void vrp_initialize (void);
4843   void vrp_finalize (bool);
4844   void check_all_array_refs (void);
4845   void check_array_ref (location_t, tree, bool);
4846   void search_for_addr_array (tree, location_t);
4847 
4848   class vr_values vr_values;
4849   /* Temporary delegator to minimize code churn.  */
4850   value_range *get_value_range (const_tree op)
4851     { return vr_values.get_value_range (op); }
4852   void set_defs_to_varying (gimple *stmt)
4853     { return vr_values.set_defs_to_varying (stmt); }
4854   void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4855 				tree *output_p, value_range *vr)
4856     { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4857   bool update_value_range (const_tree op, value_range *vr)
4858     { return vr_values.update_value_range (op, vr); }
4859   void extract_range_basic (value_range *vr, gimple *stmt)
4860     { vr_values.extract_range_basic (vr, stmt); }
4861   void extract_range_from_phi_node (gphi *phi, value_range *vr)
4862     { vr_values.extract_range_from_phi_node (phi, vr); }
4863 };
4864 /* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
4865    arrays and "struct" hacks.  If VRP can determine that the array
4866    subscript is a constant, check if it is outside the valid range.
4867    If the array subscript is a RANGE, warn if it is non-overlapping
4868    with the valid range.
4869    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
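/* For illustration, a hypothetical C fragment (not compiler code)
   that exercises both cases:

     int a[10];
     int  f (void) { return a[10]; }    // subscript 10 > bound 9: warn
     int *g (void) { return &a[10]; }   // one past the end; allowed
                                        // when IGNORE_OFF_BY_ONE  */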
4870 
4871 void
4872 vrp_prop::check_array_ref (location_t location, tree ref,
4873 			   bool ignore_off_by_one)
4874 {
4875   value_range *vr = NULL;
4876   tree low_sub, up_sub;
4877   tree low_bound, up_bound, up_bound_p1;
4878 
4879   if (TREE_NO_WARNING (ref))
4880     return;
4881 
4882   low_sub = up_sub = TREE_OPERAND (ref, 1);
4883   up_bound = array_ref_up_bound (ref);
4884 
4885   if (!up_bound
4886       || TREE_CODE (up_bound) != INTEGER_CST
4887       || (warn_array_bounds < 2
4888 	  && array_at_struct_end_p (ref)))
4889     {
4890       /* Accesses to trailing arrays via pointers may access storage
4891 	 beyond the type's array bounds.  For such arrays, or for flexible
4892 	 array members, as well as for other arrays of an unknown size,
4893 	 replace the upper bound with a more permissive one that assumes
4894 	 the size of the largest object is PTRDIFF_MAX.  */
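	      /* E.g., for a trailing array of 4-byte elements at offset zero
		 this computes the assumed upper bound PTRDIFF_MAX / 4 - 1.  */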
4895       tree eltsize = array_ref_element_size (ref);
4896 
4897       if (TREE_CODE (eltsize) != INTEGER_CST
4898 	  || integer_zerop (eltsize))
4899 	{
4900 	  up_bound = NULL_TREE;
4901 	  up_bound_p1 = NULL_TREE;
4902 	}
4903       else
4904 	{
4905 	  tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4906 	  tree arg = TREE_OPERAND (ref, 0);
4907 	  poly_int64 off;
4908 
4909 	  if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4910 	    maxbound = wide_int_to_tree (sizetype,
4911 					 wi::sub (wi::to_wide (maxbound),
4912 						  off));
4913 	  else
4914 	    maxbound = fold_convert (sizetype, maxbound);
4915 
4916 	  up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4917 
4918 	  up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4919 				      build_int_cst (ptrdiff_type_node, 1));
4920 	}
4921     }
4922   else
4923     up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4924 				   build_int_cst (TREE_TYPE (up_bound), 1));
4925 
4926   low_bound = array_ref_low_bound (ref);
4927 
4928   tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4929 
4930   /* Empty array.  */
4931   if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4932     {
4933       warning_at (location, OPT_Warray_bounds,
4934 		  "array subscript %E is above array bounds of %qT",
4935 		  low_bound, artype);
4936       TREE_NO_WARNING (ref) = 1;
4937     }
4938 
4939   if (TREE_CODE (low_sub) == SSA_NAME)
4940     {
4941       vr = get_value_range (low_sub);
4942       if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4943         {
4944           low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4945           up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4946         }
4947     }
4948 
4949   if (vr && vr->type == VR_ANTI_RANGE)
4950     {
4951       if (up_bound
4952 	  && TREE_CODE (up_sub) == INTEGER_CST
4953           && (ignore_off_by_one
4954 	      ? tree_int_cst_lt (up_bound, up_sub)
4955 	      : tree_int_cst_le (up_bound, up_sub))
4956           && TREE_CODE (low_sub) == INTEGER_CST
4957           && tree_int_cst_le (low_sub, low_bound))
4958         {
4959           warning_at (location, OPT_Warray_bounds,
4960 		      "array subscript [%E, %E] is outside array bounds of %qT",
4961 		      low_sub, up_sub, artype);
4962           TREE_NO_WARNING (ref) = 1;
4963         }
4964     }
4965   else if (up_bound
4966 	   && TREE_CODE (up_sub) == INTEGER_CST
4967 	   && (ignore_off_by_one
4968 	       ? !tree_int_cst_le (up_sub, up_bound_p1)
4969 	       : !tree_int_cst_le (up_sub, up_bound)))
4970     {
4971       if (dump_file && (dump_flags & TDF_DETAILS))
4972 	{
4973 	  fprintf (dump_file, "Array bound warning for ");
4974 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4975 	  fprintf (dump_file, "\n");
4976 	}
4977       warning_at (location, OPT_Warray_bounds,
4978 		  "array subscript %E is above array bounds of %qT",
4979 		  up_sub, artype);
4980       TREE_NO_WARNING (ref) = 1;
4981     }
4982   else if (TREE_CODE (low_sub) == INTEGER_CST
4983            && tree_int_cst_lt (low_sub, low_bound))
4984     {
4985       if (dump_file && (dump_flags & TDF_DETAILS))
4986 	{
4987 	  fprintf (dump_file, "Array bound warning for ");
4988 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4989 	  fprintf (dump_file, "\n");
4990 	}
4991       warning_at (location, OPT_Warray_bounds,
4992 		  "array subscript %E is below array bounds of %qT",
4993 		  low_sub, artype);
4994       TREE_NO_WARNING (ref) = 1;
4995     }
4996 }
4997 
4998 /* Check whether the expr T, located at LOCATION, computes the
4999    address of an ARRAY_REF, and call check_array_ref on it.  */
5000 
5001 void
5002 vrp_prop::search_for_addr_array (tree t, location_t location)
5003 {
5004   /* Check each ARRAY_REF in the reference chain.  */
5005   do
5006     {
5007       if (TREE_CODE (t) == ARRAY_REF)
5008 	check_array_ref (location, t, true /*ignore_off_by_one*/);
5009 
5010       t = TREE_OPERAND (t, 0);
5011     }
5012   while (handled_component_p (t));
5013 
5014   if (TREE_CODE (t) == MEM_REF
5015       && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5016       && !TREE_NO_WARNING (t))
5017     {
5018       tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5019       tree low_bound, up_bound, el_sz;
5020       offset_int idx;
5021       if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5022 	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5023 	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
5024 	return;
5025 
5026       low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5027       up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5028       el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5029       if (!low_bound
5030 	  || TREE_CODE (low_bound) != INTEGER_CST
5031 	  || !up_bound
5032 	  || TREE_CODE (up_bound) != INTEGER_CST
5033 	  || !el_sz
5034 	  || TREE_CODE (el_sz) != INTEGER_CST)
5035 	return;
5036 
5037       if (!mem_ref_offset (t).is_constant (&idx))
5038 	return;
5039 
5040       idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
5041       if (idx < 0)
5042 	{
5043 	  if (dump_file && (dump_flags & TDF_DETAILS))
5044 	    {
5045 	      fprintf (dump_file, "Array bound warning for ");
5046 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
5047 	      fprintf (dump_file, "\n");
5048 	    }
5049 	  warning_at (location, OPT_Warray_bounds,
5050 		      "array subscript %wi is below array bounds of %qT",
5051 		      idx.to_shwi (), TREE_TYPE (tem));
5052 	  TREE_NO_WARNING (t) = 1;
5053 	}
5054       else if (idx > (wi::to_offset (up_bound)
5055 		      - wi::to_offset (low_bound) + 1))
5056 	{
5057 	  if (dump_file && (dump_flags & TDF_DETAILS))
5058 	    {
5059 	      fprintf (dump_file, "Array bound warning for ");
5060 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
5061 	      fprintf (dump_file, "\n");
5062 	    }
5063 	  warning_at (location, OPT_Warray_bounds,
5064 		      "array subscript %wu is above array bounds of %qT",
5065 		      idx.to_uhwi (), TREE_TYPE (tem));
5066 	  TREE_NO_WARNING (t) = 1;
5067 	}
5068     }
5069 }
5070 
5071 /* walk_tree() callback that checks if *TP is
5072    an ARRAY_REF inside an ADDR_EXPR (in which case an array
5073    subscript one past the valid range is allowed).  Call
5074    check_array_ref for each ARRAY_REF found.  The location is
5075    passed in DATA.  */
5076 
5077 static tree
5078 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5079 {
5080   tree t = *tp;
5081   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5082   location_t location;
5083 
5084   if (EXPR_HAS_LOCATION (t))
5085     location = EXPR_LOCATION (t);
5086   else
5087     location = gimple_location (wi->stmt);
5088 
5089   *walk_subtree = TRUE;
5090 
5091   vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
5092   if (TREE_CODE (t) == ARRAY_REF)
5093     vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
5094 
5095   else if (TREE_CODE (t) == ADDR_EXPR)
5096     {
5097       vrp_prop->search_for_addr_array (t, location);
5098       *walk_subtree = FALSE;
5099     }
5100 
5101   return NULL_TREE;
5102 }
5103 
5104 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
5105    to walk over all statements of all reachable BBs and call
5106    check_array_bounds on them.  */
5107 
5108 class check_array_bounds_dom_walker : public dom_walker
5109 {
5110  public:
5111   check_array_bounds_dom_walker (vrp_prop *prop)
5112     : dom_walker (CDI_DOMINATORS,
5113 		  /* Discover non-executable edges, preserving EDGE_EXECUTABLE
5114 		     flags, so that we can merge in information on
5115 		     non-executable edges from vrp_folder.  */
5116 		  REACHABLE_BLOCKS_PRESERVING_FLAGS),
5117       m_prop (prop) {}
5118   ~check_array_bounds_dom_walker () {}
5119 
5120   edge before_dom_children (basic_block) FINAL OVERRIDE;
5121 
5122  private:
5123   vrp_prop *m_prop;
5124 };
5125 
5126 /* Implementation of dom_walker::before_dom_children.
5127 
5128    Walk over all statements of BB and call check_array_bounds on them,
5129    and determine if there's a unique successor edge.  */
5130 
5131 edge
5132 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
5133 {
5134   gimple_stmt_iterator si;
5135   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5136     {
5137       gimple *stmt = gsi_stmt (si);
5138       struct walk_stmt_info wi;
5139       if (!gimple_has_location (stmt)
5140 	  || is_gimple_debug (stmt))
5141 	continue;
5142 
5143       memset (&wi, 0, sizeof (wi));
5144 
5145       wi.info = m_prop;
5146 
5147       walk_gimple_op (stmt, check_array_bounds, &wi);
5148     }
5149 
5150   /* Determine if there's a unique successor edge, and if so, return
5151      that back to dom_walker, ensuring that we don't visit blocks that
5152      became unreachable during the VRP propagation
5153      (PR tree-optimization/83312).  */
5154   return find_taken_edge (bb, NULL_TREE);
5155 }
5156 
5157 /* Walk over all statements of all reachable BBs and call check_array_bounds
5158    on them.  */
5159 
5160 void
5161 vrp_prop::check_all_array_refs ()
5162 {
5163   check_array_bounds_dom_walker w (this);
5164   w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
5165 }
5166 
5167 /* Return true if all imm uses of VAR are either in STMT, or
5168    feed (optionally through a chain of single imm uses) GIMPLE_COND
5169    in basic block COND_BB.  */
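/* For instance, given the GIMPLE sketch

     _1 = x_3 > 5;
     if (_1 != 0) ...

   with the GIMPLE_COND in COND_BB, this returns true for VAR == x_3
   provided the only other use of x_3 is STMT itself.  */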
5170 
5171 static bool
5172 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
5173 {
5174   use_operand_p use_p, use2_p;
5175   imm_use_iterator iter;
5176 
5177   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
5178     if (USE_STMT (use_p) != stmt)
5179       {
5180 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
5181 	if (is_gimple_debug (use_stmt))
5182 	  continue;
5183 	while (is_gimple_assign (use_stmt)
5184 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
5185 	       && single_imm_use (gimple_assign_lhs (use_stmt),
5186 				  &use2_p, &use_stmt2))
5187 	  use_stmt = use_stmt2;
5188 	if (gimple_code (use_stmt) != GIMPLE_COND
5189 	    || gimple_bb (use_stmt) != cond_bb)
5190 	  return false;
5191       }
5192   return true;
5193 }
5194 
5195 /* Handle
5196    _4 = x_3 & 31;
5197    if (_4 != 0)
5198      goto <bb 6>;
5199    else
5200      goto <bb 7>;
5201    <bb 6>:
5202    __builtin_unreachable ();
5203    <bb 7>:
5204    x_5 = ASSERT_EXPR <x_3, ...>;
5205    If x_3 has no other immediate uses (checked by the caller) and
5206    VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5 bits
5207    from its non-zero bitmask.  */
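/* Concretely, on the edge to <bb 7> above we know _4 == 0, i.e.
   x_3 & 31 == 0, so bits 0-4 cannot be set in x_3 and may be removed
   from the nonzero-bits mask recorded for it.  */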
5208 
5209 void
5210 maybe_set_nonzero_bits (edge e, tree var)
5211 {
5212   basic_block cond_bb = e->src;
5213   gimple *stmt = last_stmt (cond_bb);
5214   tree cst;
5215 
5216   if (stmt == NULL
5217       || gimple_code (stmt) != GIMPLE_COND
5218       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
5219 				     ? EQ_EXPR : NE_EXPR)
5220       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
5221       || !integer_zerop (gimple_cond_rhs (stmt)))
5222     return;
5223 
5224   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5225   if (!is_gimple_assign (stmt)
5226       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5227       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5228     return;
5229   if (gimple_assign_rhs1 (stmt) != var)
5230     {
5231       gimple *stmt2;
5232 
5233       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5234 	return;
5235       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5236       if (!gimple_assign_cast_p (stmt2)
5237 	  || gimple_assign_rhs1 (stmt2) != var
5238 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5239 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5240 			      != TYPE_PRECISION (TREE_TYPE (var))))
5241 	return;
5242     }
5243   cst = gimple_assign_rhs2 (stmt);
5244   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5245 					  wi::to_wide (cst)));
5246 }
5247 
5248 /* Convert range assertion expressions into the implied copies and
5249    copy propagate away the copies.  Doing the trivial copy propagation
5250    here avoids the need to run the full copy propagation pass after
5251    VRP.
5252 
5253    FIXME, this will eventually lead to copy propagation removing the
5254    names that had useful range information attached to them.  For
5255    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5256    then N_i will have the range [3, +INF].
5257 
5258    However, by converting the assertion into the implied copy
5259    operation N_i = N_j, we will then copy-propagate N_j into the uses
5260    of N_i and lose the range information.  We may want to hold on to
5261    ASSERT_EXPRs a little while longer as the ranges could be used in
5262    things like jump threading.
5263 
5264    The problem with keeping ASSERT_EXPRs around is that passes after
5265    VRP need to handle them appropriately.
5266 
5267    Another approach would be to make the range information a first
5268    class property of the SSA_NAME so that it can be queried from
5269    any pass.  This is made somewhat more complex by the need for
5270    multiple ranges to be associated with one SSA_NAME.  */
5271 
5272 static void
5273 remove_range_assertions (void)
5274 {
5275   basic_block bb;
5276   gimple_stmt_iterator si;
5277   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5278      a basic block preceded by GIMPLE_COND branching to it and
5279      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
5280   int is_unreachable;
5281 
5282   /* Note that the GSI iterator bump happens at the bottom of the
5283      loop and no bump is necessary if we're removing the statement
5284      referenced by the current GSI.  */
5285   FOR_EACH_BB_FN (bb, cfun)
5286     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5287       {
5288 	gimple *stmt = gsi_stmt (si);
5289 
5290 	if (is_gimple_assign (stmt)
5291 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5292 	  {
5293 	    tree lhs = gimple_assign_lhs (stmt);
5294 	    tree rhs = gimple_assign_rhs1 (stmt);
5295 	    tree var;
5296 
5297 	    var = ASSERT_EXPR_VAR (rhs);
5298 
5299 	    if (TREE_CODE (var) == SSA_NAME
5300 		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
5301 		&& SSA_NAME_RANGE_INFO (lhs))
5302 	      {
5303 		if (is_unreachable == -1)
5304 		  {
5305 		    is_unreachable = 0;
5306 		    if (single_pred_p (bb)
5307 			&& assert_unreachable_fallthru_edge_p
5308 						    (single_pred_edge (bb)))
5309 		      is_unreachable = 1;
5310 		  }
5311 		/* Handle
5312 		   if (x_7 >= 10 && x_7 < 20)
5313 		     __builtin_unreachable ();
5314 		   x_8 = ASSERT_EXPR <x_7, ...>;
5315 		   if the only uses of x_7 are in the ASSERT_EXPR and
5316 		   in the condition.  In that case, we can copy the
5317 		   range info from x_8 computed in this pass also
5318 		   for x_7.  */
5319 		if (is_unreachable
5320 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5321 							  single_pred (bb)))
5322 		  {
5323 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5324 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
5325 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
5326 		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
5327 		  }
5328 	      }
5329 
5330 	    /* Propagate the RHS into every use of the LHS.  For SSA names
5331 	       also propagate abnormals as it merely restores the original
5332 	       IL in this case (a replace_uses_by would assert).  */
5333 	    if (TREE_CODE (var) == SSA_NAME)
5334 	      {
5335 		imm_use_iterator iter;
5336 		use_operand_p use_p;
5337 		gimple *use_stmt;
5338 		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5339 		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5340 		    SET_USE (use_p, var);
5341 	      }
5342 	    else
5343 	      replace_uses_by (lhs, var);
5344 
5345 	    /* And finally, remove the copy, it is not needed.  */
5346 	    gsi_remove (&si, true);
5347 	    release_defs (stmt);
5348 	  }
5349 	else
5350 	  {
5351 	    if (!is_gimple_debug (gsi_stmt (si)))
5352 	      is_unreachable = 0;
5353 	    gsi_next (&si);
5354 	  }
5355       }
5356 }
5357 
5358 /* Return true if STMT is interesting for VRP.  */
5359 
5360 bool
5361 stmt_interesting_for_vrp (gimple *stmt)
5362 {
5363   if (gimple_code (stmt) == GIMPLE_PHI)
5364     {
5365       tree res = gimple_phi_result (stmt);
5366       return (!virtual_operand_p (res)
5367 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5368 		  || POINTER_TYPE_P (TREE_TYPE (res))));
5369     }
5370   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5371     {
5372       tree lhs = gimple_get_lhs (stmt);
5373 
5374       /* In general, assignments with virtual operands are not useful
5375 	 for deriving ranges, with the obvious exception of calls to
5376 	 builtin functions.  */
5377       if (lhs && TREE_CODE (lhs) == SSA_NAME
5378 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5379 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
5380 	  && (is_gimple_call (stmt)
5381 	      || !gimple_vuse (stmt)))
5382 	return true;
5383       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5384 	switch (gimple_call_internal_fn (stmt))
5385 	  {
5386 	  case IFN_ADD_OVERFLOW:
5387 	  case IFN_SUB_OVERFLOW:
5388 	  case IFN_MUL_OVERFLOW:
5389 	  case IFN_ATOMIC_COMPARE_EXCHANGE:
5390 	    /* These internal calls return _Complex integer type,
5391 	       but are interesting to VRP nevertheless.  */
5392 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
5393 	      return true;
5394 	    break;
5395 	  default:
5396 	    break;
5397 	  }
5398     }
5399   else if (gimple_code (stmt) == GIMPLE_COND
5400 	   || gimple_code (stmt) == GIMPLE_SWITCH)
5401     return true;
5402 
5403   return false;
5404 }
5405 
5406 /* Initialization required by ssa_propagate engine.  */
5407 
5408 void
5409 vrp_prop::vrp_initialize ()
5410 {
5411   basic_block bb;
5412 
5413   FOR_EACH_BB_FN (bb, cfun)
5414     {
5415       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5416 	   gsi_next (&si))
5417 	{
5418 	  gphi *phi = si.phi ();
5419 	  if (!stmt_interesting_for_vrp (phi))
5420 	    {
5421 	      tree lhs = PHI_RESULT (phi);
5422 	      set_value_range_to_varying (get_value_range (lhs));
5423 	      prop_set_simulate_again (phi, false);
5424 	    }
5425 	  else
5426 	    prop_set_simulate_again (phi, true);
5427 	}
5428 
5429       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5430 	   gsi_next (&si))
5431         {
5432 	  gimple *stmt = gsi_stmt (si);
5433 
5434 	  /* If the statement is a control insn, then we must simulate
5435 	     it at least once.  Failure to do so means that the edges it
5436 	     introduces will never get added.  */
5437 	  if (stmt_ends_bb_p (stmt))
5438 	    prop_set_simulate_again (stmt, true);
5439 	  else if (!stmt_interesting_for_vrp (stmt))
5440 	    {
5441 	      set_defs_to_varying (stmt);
5442 	      prop_set_simulate_again (stmt, false);
5443 	    }
5444 	  else
5445 	    prop_set_simulate_again (stmt, true);
5446 	}
5447     }
5448 }
5449 
5450 /* Searches the case label vector of switch statement STMT for the index
5451    *IDX of the CASE_LABEL that includes the value VAL.  The search is
5452    restricted to the range [START_IDX, n - 1], n being the vector size.
5453 
5454    If there is a CASE_LABEL for VAL, its index is placed in IDX and true
5455    is returned.
5456 
5457    If there is no CASE_LABEL for VAL and there is one larger than VAL,
5458    its index is placed in IDX and false is returned.
5459 
5460    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5461    returned.  */
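/* For example, given the sorted label vector { default, 1, 5 ... 8, 10 }
   and START_IDX == 1 (a worked sketch; index 0 is always the default
   label):

     VAL == 6   ->  true,  *IDX == 2  (the 5 ... 8 label contains 6)
     VAL == 9   ->  false, *IDX == 3  (the 10 label is the next larger)
     VAL == 11  ->  false, *IDX == n  */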
5462 
5463 bool
5464 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5465 {
5466   size_t n = gimple_switch_num_labels (stmt);
5467   size_t low, high;
5468 
5469   /* Find case label for minimum of the value range or the next one.
5470      At each iteration we are searching in [low, high - 1]. */
5471 
5472   for (low = start_idx, high = n; high != low; )
5473     {
5474       tree t;
5475       int cmp;
5476       /* Note that i != high, so we never ask for n. */
5477       size_t i = (high + low) / 2;
5478       t = gimple_switch_label (stmt, i);
5479 
5480       /* Cache the result of comparing CASE_LOW and val.  */
5481       cmp = tree_int_cst_compare (CASE_LOW (t), val);
5482 
5483       if (cmp == 0)
5484 	{
5485 	  /* Ranges cannot be empty. */
5486 	  *idx = i;
5487 	  return true;
5488 	}
5489       else if (cmp > 0)
5490         high = i;
5491       else
5492 	{
5493 	  low = i + 1;
5494 	  if (CASE_HIGH (t) != NULL
5495 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5496 	    {
5497 	      *idx = i;
5498 	      return true;
5499 	    }
5500         }
5501     }
5502 
5503   *idx = high;
5504   return false;
5505 }
5506 
5507 /* Searches the case label vector of switch statement STMT for the range
5508    of CASE_LABELs used for values between MIN and MAX.  The first index
5509    is placed in MIN_IDX and the last index in MAX_IDX.  If the range of
5510    CASE_LABELs is empty, then MAX_IDX < MIN_IDX.
5511    Returns true if the default label is not needed.  */
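/* Continuing the example above, MIN == 5 and MAX == 10 yield
   *MIN_IDX == 2 and *MAX_IDX == 3, and the function returns false:
   the default label is still needed because the value 9 falls in the
   gap between the 5 ... 8 and 10 labels.  */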
5512 
5513 bool
5514 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5515 		       size_t *max_idx)
5516 {
5517   size_t i, j;
5518   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5519   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5520 
5521   if (i == j
5522       && min_take_default
5523       && max_take_default)
5524     {
5525       /* Only the default case label reached.
5526          Return an empty range. */
5527       *min_idx = 1;
5528       *max_idx = 0;
5529       return false;
5530     }
5531   else
5532     {
5533       bool take_default = min_take_default || max_take_default;
5534       tree low, high;
5535       size_t k;
5536 
5537       if (max_take_default)
5538 	j--;
5539 
5540       /* If the case label range is continuous, we do not need
5541 	 the default case label.  Verify that.  */
5542       high = CASE_LOW (gimple_switch_label (stmt, i));
5543       if (CASE_HIGH (gimple_switch_label (stmt, i)))
5544 	high = CASE_HIGH (gimple_switch_label (stmt, i));
5545       for (k = i + 1; k <= j; ++k)
5546 	{
5547 	  low = CASE_LOW (gimple_switch_label (stmt, k));
5548 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5549 	    {
5550 	      take_default = true;
5551 	      break;
5552 	    }
5553 	  high = low;
5554 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
5555 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
5556 	}
5557 
5558       *min_idx = i;
5559       *max_idx = j;
5560       return !take_default;
5561     }
5562 }
5563 
5564 /* Evaluate statement STMT.  If the statement produces a useful range,
5565    return SSA_PROP_INTERESTING and record the SSA name with the
5566    interesting range into *OUTPUT_P.
5567 
5568    If STMT is a conditional branch and we can determine its truth
5569    value, the taken edge is recorded in *TAKEN_EDGE_P.
5570 
5571    If STMT produces a varying value, return SSA_PROP_VARYING.  */
5572 
5573 enum ssa_prop_result
5574 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5575 {
5576   value_range vr = VR_INITIALIZER;
5577   tree lhs = gimple_get_lhs (stmt);
5578   extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5579 
5580   if (*output_p)
5581     {
5582       if (update_value_range (*output_p, &vr))
5583 	{
5584 	  if (dump_file && (dump_flags & TDF_DETAILS))
5585 	    {
5586 	      fprintf (dump_file, "Found new range for ");
5587 	      print_generic_expr (dump_file, *output_p);
5588 	      fprintf (dump_file, ": ");
5589 	      dump_value_range (dump_file, &vr);
5590 	      fprintf (dump_file, "\n");
5591 	    }
5592 
5593 	  if (vr.type == VR_VARYING)
5594 	    return SSA_PROP_VARYING;
5595 
5596 	  return SSA_PROP_INTERESTING;
5597 	}
5598       return SSA_PROP_NOT_INTERESTING;
5599     }
5600 
5601   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5602     switch (gimple_call_internal_fn (stmt))
5603       {
5604       case IFN_ADD_OVERFLOW:
5605       case IFN_SUB_OVERFLOW:
5606       case IFN_MUL_OVERFLOW:
5607       case IFN_ATOMIC_COMPARE_EXCHANGE:
5608 	/* These internal calls return _Complex integer type,
5609 	   which VRP does not track, but the immediate uses
5610 	   thereof might be interesting.  */
5611 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
5612 	  {
5613 	    imm_use_iterator iter;
5614 	    use_operand_p use_p;
5615 	    enum ssa_prop_result res = SSA_PROP_VARYING;
5616 
5617 	    set_value_range_to_varying (get_value_range (lhs));
5618 
5619 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5620 	      {
5621 		gimple *use_stmt = USE_STMT (use_p);
5622 		if (!is_gimple_assign (use_stmt))
5623 		  continue;
5624 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5625 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5626 		  continue;
5627 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
5628 		tree use_lhs = gimple_assign_lhs (use_stmt);
5629 		if (TREE_CODE (rhs1) != rhs_code
5630 		    || TREE_OPERAND (rhs1, 0) != lhs
5631 		    || TREE_CODE (use_lhs) != SSA_NAME
5632 		    || !stmt_interesting_for_vrp (use_stmt)
5633 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5634 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5635 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5636 		  continue;
5637 
5638 		/* If there is a change in the value range for any of the
5639 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5640 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
5641 		   or IMAGPART_EXPR immediate uses, but none of them have
5642 		   a change in their value ranges, return
5643 		   SSA_PROP_NOT_INTERESTING.  If there are no
5644 		   {REAL,IMAG}PART_EXPR uses at all,
5645 		   return SSA_PROP_VARYING.  */
5646 		value_range new_vr = VR_INITIALIZER;
5647 		extract_range_basic (&new_vr, use_stmt);
5648 		value_range *old_vr = get_value_range (use_lhs);
5649 		if (old_vr->type != new_vr.type
5650 		    || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5651 		    || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5652 		    || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5653 		  res = SSA_PROP_INTERESTING;
5654 		else
5655 		  res = SSA_PROP_NOT_INTERESTING;
5656 		BITMAP_FREE (new_vr.equiv);
5657 		if (res == SSA_PROP_INTERESTING)
5658 		  {
5659 		    *output_p = lhs;
5660 		    return res;
5661 		  }
5662 	      }
5663 
5664 	    return res;
5665 	  }
5666 	break;
5667       default:
5668 	break;
5669       }
5670 
5671   /* All other statements produce nothing of interest for VRP, so mark
5672      their outputs varying and prevent further simulation.  */
5673   set_defs_to_varying (stmt);
5674 
5675   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5676 }
5677 
5678 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5679    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5680    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5681    possible such range.  The resulting range is not canonicalized.  */
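/* A few concrete cases (worked examples, writing MIN and MAX for the
   type's extreme values):

     [1, 5]    union  [3, 10]   ->  [1, 10]
     [MIN, 2]  union  [8, MAX]  ->  ~[3, 7]
     ~[0, 0]   union  [0, 0]    ->  VR_VARYING  */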
5682 
5683 static void
5684 union_ranges (enum value_range_type *vr0type,
5685 	      tree *vr0min, tree *vr0max,
5686 	      enum value_range_type vr1type,
5687 	      tree vr1min, tree vr1max)
5688 {
5689   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5690   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5691 
5692   /* [] is vr0, () is vr1 in the following classification comments.  */
5693   if (mineq && maxeq)
5694     {
5695       /* [(  )] */
5696       if (*vr0type == vr1type)
5697 	/* Nothing to do for equal ranges.  */
5698 	;
5699       else if ((*vr0type == VR_RANGE
5700 		&& vr1type == VR_ANTI_RANGE)
5701 	       || (*vr0type == VR_ANTI_RANGE
5702 		   && vr1type == VR_RANGE))
5703 	{
5704 	  /* For anti-range with range union the result is varying.  */
5705 	  goto give_up;
5706 	}
5707       else
5708 	gcc_unreachable ();
5709     }
5710   else if (operand_less_p (*vr0max, vr1min) == 1
5711 	   || operand_less_p (vr1max, *vr0min) == 1)
5712     {
5713       /* [ ] ( ) or ( ) [ ]
5714 	 If the ranges have an empty intersection, result of the union
5715 	 operation is the anti-range or if both are anti-ranges
5716 	 it covers all.  */
5717       if (*vr0type == VR_ANTI_RANGE
5718 	  && vr1type == VR_ANTI_RANGE)
5719 	goto give_up;
5720       else if (*vr0type == VR_ANTI_RANGE
5721 	       && vr1type == VR_RANGE)
5722 	;
5723       else if (*vr0type == VR_RANGE
5724 	       && vr1type == VR_ANTI_RANGE)
5725 	{
5726 	  *vr0type = vr1type;
5727 	  *vr0min = vr1min;
5728 	  *vr0max = vr1max;
5729 	}
5730       else if (*vr0type == VR_RANGE
5731 	       && vr1type == VR_RANGE)
5732 	{
5733 	  /* The result is the convex hull of both ranges.  */
5734 	  if (operand_less_p (*vr0max, vr1min) == 1)
5735 	    {
5736 	      /* If the result can be an anti-range, create one.  */
5737 	      if (TREE_CODE (*vr0max) == INTEGER_CST
5738 		  && TREE_CODE (vr1min) == INTEGER_CST
5739 		  && vrp_val_is_min (*vr0min)
5740 		  && vrp_val_is_max (vr1max))
5741 		{
5742 		  tree min = int_const_binop (PLUS_EXPR,
5743 					      *vr0max,
5744 					      build_int_cst (TREE_TYPE (*vr0max), 1));
5745 		  tree max = int_const_binop (MINUS_EXPR,
5746 					      vr1min,
5747 					      build_int_cst (TREE_TYPE (vr1min), 1));
5748 		  if (!operand_less_p (max, min))
5749 		    {
5750 		      *vr0type = VR_ANTI_RANGE;
5751 		      *vr0min = min;
5752 		      *vr0max = max;
5753 		    }
5754 		  else
5755 		    *vr0max = vr1max;
5756 		}
5757 	      else
5758 		*vr0max = vr1max;
5759 	    }
5760 	  else
5761 	    {
5762 	      /* If the result can be an anti-range, create one.  */
5763 	      if (TREE_CODE (vr1max) == INTEGER_CST
5764 		  && TREE_CODE (*vr0min) == INTEGER_CST
5765 		  && vrp_val_is_min (vr1min)
5766 		  && vrp_val_is_max (*vr0max))
5767 		{
5768 		  tree min = int_const_binop (PLUS_EXPR,
5769 					      vr1max,
5770 					      build_int_cst (TREE_TYPE (vr1max), 1));
5771 		  tree max = int_const_binop (MINUS_EXPR,
5772 					      *vr0min,
5773 					      build_int_cst (TREE_TYPE (*vr0min), 1));
5774 		  if (!operand_less_p (max, min))
5775 		    {
5776 		      *vr0type = VR_ANTI_RANGE;
5777 		      *vr0min = min;
5778 		      *vr0max = max;
5779 		    }
5780 		  else
5781 		    *vr0min = vr1min;
5782 		}
5783 	      else
5784 		*vr0min = vr1min;
5785 	    }
5786 	}
5787       else
5788 	gcc_unreachable ();
5789     }
5790   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5791 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5792     {
5793       /* [ (  ) ] or [(  ) ] or [ (  )] */
5794       if (*vr0type == VR_RANGE
5795 	  && vr1type == VR_RANGE)
5796 	;
5797       else if (*vr0type == VR_ANTI_RANGE
5798 	       && vr1type == VR_ANTI_RANGE)
5799 	{
5800 	  *vr0type = vr1type;
5801 	  *vr0min = vr1min;
5802 	  *vr0max = vr1max;
5803 	}
5804       else if (*vr0type == VR_ANTI_RANGE
5805 	       && vr1type == VR_RANGE)
5806 	{
5807 	  /* Arbitrarily choose the right or left gap.  */
5808 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5809 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5810 				       build_int_cst (TREE_TYPE (vr1min), 1));
5811 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5812 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5813 				       build_int_cst (TREE_TYPE (vr1max), 1));
5814 	  else
5815 	    goto give_up;
5816 	}
5817       else if (*vr0type == VR_RANGE
5818 	       && vr1type == VR_ANTI_RANGE)
5819 	/* The result covers everything.  */
5820 	goto give_up;
5821       else
5822 	gcc_unreachable ();
5823     }
5824   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5825 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5826     {
5827       /* ( [  ] ) or ([  ] ) or ( [  ]) */
5828       if (*vr0type == VR_RANGE
5829 	  && vr1type == VR_RANGE)
5830 	{
5831 	  *vr0type = vr1type;
5832 	  *vr0min = vr1min;
5833 	  *vr0max = vr1max;
5834 	}
5835       else if (*vr0type == VR_ANTI_RANGE
5836 	       && vr1type == VR_ANTI_RANGE)
5837 	;
5838       else if (*vr0type == VR_RANGE
5839 	       && vr1type == VR_ANTI_RANGE)
5840 	{
5841 	  *vr0type = VR_ANTI_RANGE;
5842 	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5843 	    {
5844 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5845 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5846 	      *vr0min = vr1min;
5847 	    }
5848 	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5849 	    {
5850 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5851 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5852 	      *vr0max = vr1max;
5853 	    }
5854 	  else
5855 	    goto give_up;
5856 	}
5857       else if (*vr0type == VR_ANTI_RANGE
5858 	       && vr1type == VR_RANGE)
5859 	/* The result covers everything.  */
5860 	goto give_up;
5861       else
5862 	gcc_unreachable ();
5863     }
5864   else if ((operand_less_p (vr1min, *vr0max) == 1
5865 	    || operand_equal_p (vr1min, *vr0max, 0))
5866 	   && operand_less_p (*vr0min, vr1min) == 1
5867 	   && operand_less_p (*vr0max, vr1max) == 1)
5868     {
5869       /* [  (  ]  ) or [   ](   ) */
5870       if (*vr0type == VR_RANGE
5871 	  && vr1type == VR_RANGE)
5872 	*vr0max = vr1max;
5873       else if (*vr0type == VR_ANTI_RANGE
5874 	       && vr1type == VR_ANTI_RANGE)
5875 	*vr0min = vr1min;
5876       else if (*vr0type == VR_ANTI_RANGE
5877 	       && vr1type == VR_RANGE)
5878 	{
5879 	  if (TREE_CODE (vr1min) == INTEGER_CST)
5880 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5881 				       build_int_cst (TREE_TYPE (vr1min), 1));
5882 	  else
5883 	    goto give_up;
5884 	}
5885       else if (*vr0type == VR_RANGE
5886 	       && vr1type == VR_ANTI_RANGE)
5887 	{
5888 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
5889 	    {
5890 	      *vr0type = vr1type;
5891 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5892 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5893 	      *vr0max = vr1max;
5894 	    }
5895 	  else
5896 	    goto give_up;
5897 	}
5898       else
5899 	gcc_unreachable ();
5900     }
5901   else if ((operand_less_p (*vr0min, vr1max) == 1
5902 	    || operand_equal_p (*vr0min, vr1max, 0))
5903 	   && operand_less_p (vr1min, *vr0min) == 1
5904 	   && operand_less_p (vr1max, *vr0max) == 1)
5905     {
5906       /* (  [  )  ] or (   )[   ] */
5907       if (*vr0type == VR_RANGE
5908 	  && vr1type == VR_RANGE)
5909 	*vr0min = vr1min;
5910       else if (*vr0type == VR_ANTI_RANGE
5911 	       && vr1type == VR_ANTI_RANGE)
5912 	*vr0max = vr1max;
5913       else if (*vr0type == VR_ANTI_RANGE
5914 	       && vr1type == VR_RANGE)
5915 	{
5916 	  if (TREE_CODE (vr1max) == INTEGER_CST)
5917 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5918 				       build_int_cst (TREE_TYPE (vr1max), 1));
5919 	  else
5920 	    goto give_up;
5921 	}
5922       else if (*vr0type == VR_RANGE
5923 	       && vr1type == VR_ANTI_RANGE)
5924 	{
5925 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
5926 	    {
5927 	      *vr0type = vr1type;
5928 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5929 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5930 	      *vr0min = vr1min;
5931 	    }
5932 	  else
5933 	    goto give_up;
5934 	}
5935       else
5936 	gcc_unreachable ();
5937     }
5938   else
5939     goto give_up;
5940 
5941   return;
5942 
5943 give_up:
5944   *vr0type = VR_VARYING;
5945   *vr0min = NULL_TREE;
5946   *vr0max = NULL_TREE;
5947 }
5948 
5949 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5950    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5951    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5952    possible such range.  The resulting range is not canonicalized.  */
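/* A few concrete cases (worked examples):

     [1, 10]   intersect  [5, 20]   ->  [5, 10]
     [0, 100]  intersect  ~[0, 0]   ->  [1, 100]
     [1, 10]   intersect  [20, 30]  ->  VR_UNDEFINED (empty)  */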
5953 
5954 static void
5955 intersect_ranges (enum value_range_type *vr0type,
5956 		  tree *vr0min, tree *vr0max,
5957 		  enum value_range_type vr1type,
5958 		  tree vr1min, tree vr1max)
5959 {
5960   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5961   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5962 
5963   /* [] is vr0, () is vr1 in the following classification comments.  */
5964   if (mineq && maxeq)
5965     {
5966       /* [(  )] */
5967       if (*vr0type == vr1type)
5968 	/* Nothing to do for equal ranges.  */
5969 	;
5970       else if ((*vr0type == VR_RANGE
5971 		&& vr1type == VR_ANTI_RANGE)
5972 	       || (*vr0type == VR_ANTI_RANGE
5973 		   && vr1type == VR_RANGE))
5974 	{
5975 	  /* For anti-range with range intersection the result is empty.  */
5976 	  *vr0type = VR_UNDEFINED;
5977 	  *vr0min = NULL_TREE;
5978 	  *vr0max = NULL_TREE;
5979 	}
5980       else
5981 	gcc_unreachable ();
5982     }
5983   else if (operand_less_p (*vr0max, vr1min) == 1
5984 	   || operand_less_p (vr1max, *vr0min) == 1)
5985     {
5986       /* [ ] ( ) or ( ) [ ]
5987 	 If the ranges have an empty intersection, the result of the
5988 	 intersect operation is the range for intersecting an
5989 	 anti-range with a range or empty when intersecting two ranges.  */
5990       if (*vr0type == VR_RANGE
5991 	  && vr1type == VR_ANTI_RANGE)
5992 	;
5993       else if (*vr0type == VR_ANTI_RANGE
5994 	       && vr1type == VR_RANGE)
5995 	{
5996 	  *vr0type = vr1type;
5997 	  *vr0min = vr1min;
5998 	  *vr0max = vr1max;
5999 	}
6000       else if (*vr0type == VR_RANGE
6001 	       && vr1type == VR_RANGE)
6002 	{
6003 	  *vr0type = VR_UNDEFINED;
6004 	  *vr0min = NULL_TREE;
6005 	  *vr0max = NULL_TREE;
6006 	}
6007       else if (*vr0type == VR_ANTI_RANGE
6008 	       && vr1type == VR_ANTI_RANGE)
6009 	{
6010 	  /* If the anti-ranges are adjacent to each other merge them.  */
6011 	  if (TREE_CODE (*vr0max) == INTEGER_CST
6012 	      && TREE_CODE (vr1min) == INTEGER_CST
6013 	      && operand_less_p (*vr0max, vr1min) == 1
6014 	      && integer_onep (int_const_binop (MINUS_EXPR,
6015 						vr1min, *vr0max)))
6016 	    *vr0max = vr1max;
6017 	  else if (TREE_CODE (vr1max) == INTEGER_CST
6018 		   && TREE_CODE (*vr0min) == INTEGER_CST
6019 		   && operand_less_p (vr1max, *vr0min) == 1
6020 		   && integer_onep (int_const_binop (MINUS_EXPR,
6021 						     *vr0min, vr1max)))
6022 	    *vr0min = vr1min;
6023 	  /* Else arbitrarily take VR0.  */
6024 	}
6025     }
6026   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
6027 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
6028     {
6029       /* [ (  ) ] or [(  ) ] or [ (  )] */
6030       if (*vr0type == VR_RANGE
6031 	  && vr1type == VR_RANGE)
6032 	{
6033 	  /* If both are ranges the result is the inner one.  */
6034 	  *vr0type = vr1type;
6035 	  *vr0min = vr1min;
6036 	  *vr0max = vr1max;
6037 	}
6038       else if (*vr0type == VR_RANGE
6039 	       && vr1type == VR_ANTI_RANGE)
6040 	{
6041 	  /* Choose the right gap if the left one is empty.  */
6042 	  if (mineq)
6043 	    {
6044 	      if (TREE_CODE (vr1max) != INTEGER_CST)
6045 		*vr0min = vr1max;
6046 	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
6047 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
6048 		*vr0min
6049 		  = int_const_binop (MINUS_EXPR, vr1max,
6050 				     build_int_cst (TREE_TYPE (vr1max), -1));
6051 	      else
6052 		*vr0min
6053 		  = int_const_binop (PLUS_EXPR, vr1max,
6054 				     build_int_cst (TREE_TYPE (vr1max), 1));
6055 	    }
6056 	  /* Choose the left gap if the right one is empty.  */
6057 	  else if (maxeq)
6058 	    {
6059 	      if (TREE_CODE (vr1min) != INTEGER_CST)
6060 		*vr0max = vr1min;
6061 	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
6062 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
6063 		*vr0max
6064 		  = int_const_binop (PLUS_EXPR, vr1min,
6065 				     build_int_cst (TREE_TYPE (vr1min), -1));
6066 	      else
6067 		*vr0max
6068 		  = int_const_binop (MINUS_EXPR, vr1min,
6069 				     build_int_cst (TREE_TYPE (vr1min), 1));
6070 	    }
6071 	  /* Choose the anti-range if the range is effectively varying.  */
6072 	  else if (vrp_val_is_min (*vr0min)
6073 		   && vrp_val_is_max (*vr0max))
6074 	    {
6075 	      *vr0type = vr1type;
6076 	      *vr0min = vr1min;
6077 	      *vr0max = vr1max;
6078 	    }
6079 	  /* Else choose the range.  */
6080 	}
6081       else if (*vr0type == VR_ANTI_RANGE
6082 	       && vr1type == VR_ANTI_RANGE)
6083 	/* If both are anti-ranges the result is the outer one.  */
6084 	;
6085       else if (*vr0type == VR_ANTI_RANGE
6086 	       && vr1type == VR_RANGE)
6087 	{
6088 	  /* The intersection is empty.  */
6089 	  *vr0type = VR_UNDEFINED;
6090 	  *vr0min = NULL_TREE;
6091 	  *vr0max = NULL_TREE;
6092 	}
6093       else
6094 	gcc_unreachable ();
6095     }
6096   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
6097 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
6098     {
6099       /* ( [  ] ) or ([  ] ) or ( [  ]) */
6100       if (*vr0type == VR_RANGE
6101 	  && vr1type == VR_RANGE)
6102 	/* Choose the inner range.  */
6103 	;
6104       else if (*vr0type == VR_ANTI_RANGE
6105 	       && vr1type == VR_RANGE)
6106 	{
6107 	  /* Choose the right gap if the left is empty.  */
6108 	  if (mineq)
6109 	    {
6110 	      *vr0type = VR_RANGE;
6111 	      if (TREE_CODE (*vr0max) != INTEGER_CST)
6112 		*vr0min = *vr0max;
6113 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
6114 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
6115 		*vr0min
6116 		  = int_const_binop (MINUS_EXPR, *vr0max,
6117 				     build_int_cst (TREE_TYPE (*vr0max), -1));
6118 	      else
6119 		*vr0min
6120 		  = int_const_binop (PLUS_EXPR, *vr0max,
6121 				     build_int_cst (TREE_TYPE (*vr0max), 1));
6122 	      *vr0max = vr1max;
6123 	    }
6124 	  /* Choose the left gap if the right is empty.  */
6125 	  else if (maxeq)
6126 	    {
6127 	      *vr0type = VR_RANGE;
6128 	      if (TREE_CODE (*vr0min) != INTEGER_CST)
6129 		*vr0max = *vr0min;
6130 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
6131 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
6132 		*vr0max
6133 		  = int_const_binop (PLUS_EXPR, *vr0min,
6134 				     build_int_cst (TREE_TYPE (*vr0min), -1));
6135 	      else
6136 		*vr0max
6137 		  = int_const_binop (MINUS_EXPR, *vr0min,
6138 				     build_int_cst (TREE_TYPE (*vr0min), 1));
6139 	      *vr0min = vr1min;
6140 	    }
6141 	  /* Choose the anti-range if the range is effectively varying.  */
6142 	  else if (vrp_val_is_min (vr1min)
6143 		   && vrp_val_is_max (vr1max))
6144 	    ;
6145 	  /* Choose the anti-range if it is ~[0,0], that range is special
6146 	     enough to special case when vr1's range is relatively wide.
6147 	     At least for types bigger than int - this covers pointers
6148 	     and arguments to functions like ctz.  */
6149 	  else if (*vr0min == *vr0max
6150 		   && integer_zerop (*vr0min)
6151 		   && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
6152 			>= TYPE_PRECISION (integer_type_node))
6153 		       || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
6154 		   && TREE_CODE (vr1max) == INTEGER_CST
6155 		   && TREE_CODE (vr1min) == INTEGER_CST
6156 		   && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
6157 		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
6158 	    ;
6159 	  /* Else choose the range.  */
6160 	  else
6161 	    {
6162 	      *vr0type = vr1type;
6163 	      *vr0min = vr1min;
6164 	      *vr0max = vr1max;
6165 	    }
6166 	}
6167       else if (*vr0type == VR_ANTI_RANGE
6168 	       && vr1type == VR_ANTI_RANGE)
6169 	{
6170 	  /* If both are anti-ranges the result is the outer one.  */
6171 	  *vr0type = vr1type;
6172 	  *vr0min = vr1min;
6173 	  *vr0max = vr1max;
6174 	}
6175       else if (vr1type == VR_ANTI_RANGE
6176 	       && *vr0type == VR_RANGE)
6177 	{
6178 	  /* The intersection is empty.  */
6179 	  *vr0type = VR_UNDEFINED;
6180 	  *vr0min = NULL_TREE;
6181 	  *vr0max = NULL_TREE;
6182 	}
6183       else
6184 	gcc_unreachable ();
6185     }
6186   else if ((operand_less_p (vr1min, *vr0max) == 1
6187 	    || operand_equal_p (vr1min, *vr0max, 0))
6188 	   && operand_less_p (*vr0min, vr1min) == 1)
6189     {
6190       /* [  (  ]  ) or [  ](  ) */
6191       if (*vr0type == VR_ANTI_RANGE
6192 	  && vr1type == VR_ANTI_RANGE)
6193 	*vr0max = vr1max;
6194       else if (*vr0type == VR_RANGE
6195 	       && vr1type == VR_RANGE)
6196 	*vr0min = vr1min;
6197       else if (*vr0type == VR_RANGE
6198 	       && vr1type == VR_ANTI_RANGE)
6199 	{
6200 	  if (TREE_CODE (vr1min) == INTEGER_CST)
6201 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
6202 				       build_int_cst (TREE_TYPE (vr1min), 1));
6203 	  else
6204 	    *vr0max = vr1min;
6205 	}
6206       else if (*vr0type == VR_ANTI_RANGE
6207 	       && vr1type == VR_RANGE)
6208 	{
6209 	  *vr0type = VR_RANGE;
6210 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
6211 	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
6212 				       build_int_cst (TREE_TYPE (*vr0max), 1));
6213 	  else
6214 	    *vr0min = *vr0max;
6215 	  *vr0max = vr1max;
6216 	}
6217       else
6218 	gcc_unreachable ();
6219     }
6220   else if ((operand_less_p (*vr0min, vr1max) == 1
6221 	    || operand_equal_p (*vr0min, vr1max, 0))
6222 	   && operand_less_p (vr1min, *vr0min) == 1)
6223     {
6224       /* (  [  )  ] or (  )[  ] */
6225       if (*vr0type == VR_ANTI_RANGE
6226 	  && vr1type == VR_ANTI_RANGE)
6227 	*vr0min = vr1min;
6228       else if (*vr0type == VR_RANGE
6229 	       && vr1type == VR_RANGE)
6230 	*vr0max = vr1max;
6231       else if (*vr0type == VR_RANGE
6232 	       && vr1type == VR_ANTI_RANGE)
6233 	{
6234 	  if (TREE_CODE (vr1max) == INTEGER_CST)
6235 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6236 				       build_int_cst (TREE_TYPE (vr1max), 1));
6237 	  else
6238 	    *vr0min = vr1max;
6239 	}
6240       else if (*vr0type == VR_ANTI_RANGE
6241 	       && vr1type == VR_RANGE)
6242 	{
6243 	  *vr0type = VR_RANGE;
6244 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
6245 	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6246 				       build_int_cst (TREE_TYPE (*vr0min), 1));
6247 	  else
6248 	    *vr0max = *vr0min;
6249 	  *vr0min = vr1min;
6250 	}
6251       else
6252 	gcc_unreachable ();
6253     }
6254 
6255   /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
6256      result for the intersection.  That's always a conservative
6257      correct estimate unless VR1 is a constant singleton range
6258      in which case we choose that.  */
6259   if (vr1type == VR_RANGE
6260       && is_gimple_min_invariant (vr1min)
6261       && vrp_operand_equal_p (vr1min, vr1max))
6262     {
6263       *vr0type = vr1type;
6264       *vr0min = vr1min;
6265       *vr0max = vr1max;
6266     }
6267 
6268   return;
6269 }
6270 
6271 
6272 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
6273    in *VR0.  This may not be the smallest possible such range.  */
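/* E.g., VARYING intersect [1, 5] yields [1, 5], while UNDEFINED
   intersect anything stays UNDEFINED, matching the lattice handling
   at the top of the function.  */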
6274 
6275 static void
6276 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
6277 {
6278   value_range saved;
6279 
6280   /* If either range is VR_VARYING the other one wins.  */
6281   if (vr1->type == VR_VARYING)
6282     return;
6283   if (vr0->type == VR_VARYING)
6284     {
6285       copy_value_range (vr0, vr1);
6286       return;
6287     }
6288 
6289   /* When either range is VR_UNDEFINED the resulting range is
6290      VR_UNDEFINED, too.  */
6291   if (vr0->type == VR_UNDEFINED)
6292     return;
6293   if (vr1->type == VR_UNDEFINED)
6294     {
6295       set_value_range_to_undefined (vr0);
6296       return;
6297     }
6298 
6299   /* Save the original vr0 so we can return it as conservative intersection
6300      result when our worker turns things to varying.  */
6301   saved = *vr0;
6302   intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
6303 		    vr1->type, vr1->min, vr1->max);
6304   /* Make sure to canonicalize the result though as the inversion of a
6305      VR_RANGE can still be a VR_RANGE.  */
6306   set_and_canonicalize_value_range (vr0, vr0->type,
6307 				    vr0->min, vr0->max, vr0->equiv);
6308   /* If that failed, use the saved original VR0.  */
6309   if (vr0->type == VR_VARYING)
6310     {
6311       *vr0 = saved;
6312       return;
6313     }
6314   /* If the result is VR_UNDEFINED there is no need to mess with
6315      the equivalencies.  */
6316   if (vr0->type == VR_UNDEFINED)
6317     return;
6318 
6319   /* The resulting set of equivalences for range intersection is the union of
6320      the two sets.  */
6321   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6322     bitmap_ior_into (vr0->equiv, vr1->equiv);
6323   else if (vr1->equiv && !vr0->equiv)
6324     {
6325       /* All equivalence bitmaps are allocated from the same obstack.  So
6326 	 we can use the obstack associated with VR1 to allocate vr0->equiv.  */
6327       vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
6328       bitmap_copy (vr0->equiv, vr1->equiv);
6329     }
6330 }
6331 
6332 void
6333 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
6334 {
6335   if (dump_file && (dump_flags & TDF_DETAILS))
6336     {
6337       fprintf (dump_file, "Intersecting\n  ");
6338       dump_value_range (dump_file, vr0);
6339       fprintf (dump_file, "\nand\n  ");
6340       dump_value_range (dump_file, vr1);
6341       fprintf (dump_file, "\n");
6342     }
6343   vrp_intersect_ranges_1 (vr0, vr1);
6344   if (dump_file && (dump_flags & TDF_DETAILS))
6345     {
6346       fprintf (dump_file, "to\n  ");
6347       dump_value_range (dump_file, vr0);
6348       fprintf (dump_file, "\n");
6349     }
6350 }
6351 
6352 /* Meet operation for value ranges.  Given two value ranges VR0 and
6353    VR1, store in VR0 a range that contains both VR0 and VR1.  This
6354    may not be the smallest possible such range.  */
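/* For example, meeting [1, 5] with [8, 10] widens VR0 to the convex
   hull [1, 10] via union_ranges; when union_ranges gives up, the code
   below still tries to salvage a nonnull anti-range before settling
   for VR_VARYING.  */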
6355 
6356 static void
6357 vrp_meet_1 (value_range *vr0, const value_range *vr1)
6358 {
6359   value_range saved;
6360 
6361   if (vr0->type == VR_UNDEFINED)
6362     {
6363       set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
6364       return;
6365     }
6366 
6367   if (vr1->type == VR_UNDEFINED)
6368     {
6369       /* VR0 already has the resulting range.  */
6370       return;
6371     }
6372 
6373   if (vr0->type == VR_VARYING)
6374     {
6375       /* Nothing to do.  VR0 already has the resulting range.  */
6376       return;
6377     }
6378 
6379   if (vr1->type == VR_VARYING)
6380     {
6381       set_value_range_to_varying (vr0);
6382       return;
6383     }
6384 
6385   saved = *vr0;
6386   union_ranges (&vr0->type, &vr0->min, &vr0->max,
6387 		vr1->type, vr1->min, vr1->max);
6388   if (vr0->type == VR_VARYING)
6389     {
6390       /* Failed to find an efficient meet.  Before giving up and setting
6391 	 the result to VARYING, see if we can at least derive a useful
6392 	 anti-range.  FIXME, all this nonsense about distinguishing
6393 	 anti-ranges from ranges is necessary because of the odd
6394 	 semantics of range_includes_zero_p and friends.  */
6395       if (((saved.type == VR_RANGE
6396 	    && range_includes_zero_p (saved.min, saved.max) == 0)
6397 	   || (saved.type == VR_ANTI_RANGE
6398 	       && range_includes_zero_p (saved.min, saved.max) == 1))
6399 	  && ((vr1->type == VR_RANGE
6400 	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
6401 	      || (vr1->type == VR_ANTI_RANGE
6402 		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
6403 	{
6404 	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
6405 
6406 	  /* Since this meet operation did not result from the meeting of
6407 	     two equivalent names, VR0 cannot have any equivalences.  */
6408 	  if (vr0->equiv)
6409 	    bitmap_clear (vr0->equiv);
6410 	  return;
6411 	}
6412 
6413       set_value_range_to_varying (vr0);
6414       return;
6415     }
6416   set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
6417 				    vr0->equiv);
6418   if (vr0->type == VR_VARYING)
6419     return;
6420 
6421   /* The resulting set of equivalences is always the intersection of
6422      the two sets.  */
6423   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6424     bitmap_and_into (vr0->equiv, vr1->equiv);
6425   else if (vr0->equiv && !vr1->equiv)
6426     bitmap_clear (vr0->equiv);
6427 }
6428 
6429 void
6430 vrp_meet (value_range *vr0, const value_range *vr1)
6431 {
6432   if (dump_file && (dump_flags & TDF_DETAILS))
6433     {
6434       fprintf (dump_file, "Meeting\n  ");
6435       dump_value_range (dump_file, vr0);
6436       fprintf (dump_file, "\nand\n  ");
6437       dump_value_range (dump_file, vr1);
6438       fprintf (dump_file, "\n");
6439     }
6440   vrp_meet_1 (vr0, vr1);
6441   if (dump_file && (dump_flags & TDF_DETAILS))
6442     {
6443       fprintf (dump_file, "to\n  ");
6444       dump_value_range (dump_file, vr0);
6445       fprintf (dump_file, "\n");
6446     }
6447 }
6448 
6449 
6450 /* Visit all arguments for PHI node PHI that flow through executable
6451    edges.  If a valid value range can be derived from all the incoming
6452    value ranges, set a new range for the LHS of PHI.  */
6453 
6454 enum ssa_prop_result
6455 vrp_prop::visit_phi (gphi *phi)
6456 {
6457   tree lhs = PHI_RESULT (phi);
6458   value_range vr_result = VR_INITIALIZER;
6459   extract_range_from_phi_node (phi, &vr_result);
6460   if (update_value_range (lhs, &vr_result))
6461     {
6462       if (dump_file && (dump_flags & TDF_DETAILS))
6463 	{
6464 	  fprintf (dump_file, "Found new range for ");
6465 	  print_generic_expr (dump_file, lhs);
6466 	  fprintf (dump_file, ": ");
6467 	  dump_value_range (dump_file, &vr_result);
6468 	  fprintf (dump_file, "\n");
6469 	}
6470 
6471       if (vr_result.type == VR_VARYING)
6472 	return SSA_PROP_VARYING;
6473 
6474       return SSA_PROP_INTERESTING;
6475     }
6476 
6477   /* Nothing changed, don't add outgoing edges.  */
6478   return SSA_PROP_NOT_INTERESTING;
6479 }
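
/* For instance (illustration only), for

     x_4 = PHI <x_2(5), x_3(6)>

   with x_2 in [0, 10] and x_3 in [20, 30] on executable edges, the
   meet of the incoming ranges gives x_4 the range [0, 30].  */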

class vrp_folder : public substitute_and_fold_engine
{
 public:
  tree get_value (tree) FINAL OVERRIDE;
  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
  bool fold_predicate_in (gimple_stmt_iterator *);

  class vr_values *vr_values;

  /* Delegators.  */
  tree vrp_evaluate_conditional (tree_code code, tree op0,
				 tree op1, gimple *stmt)
    { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
    { return vr_values->simplify_stmt_using_ranges (gsi); }
  tree op_with_constant_singleton_value_range (tree op)
    { return vr_values->op_with_constant_singleton_value_range (op); }
};

/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

bool
vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
				    gimple_cond_lhs (cond_stmt),
				    gimple_cond_rhs (cond_stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  gcond *cond_stmt = as_a <gcond *> (stmt);
	  if (integer_zerop (val))
	    gimple_cond_make_false (cond_stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
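
/* As an example (illustration only), if i_2 is known to be in
   [0, 5], the predicate in

     if (i_2 > 10)

   evaluates to false, so the GIMPLE_COND is rewritten with
   gimple_cond_make_false and its true arm becomes unreachable.  */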

/* Callback for substitute_and_fold folding the stmt at *SI.  */

bool
vrp_folder::fold_stmt (gimple_stmt_iterator *si)
{
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.

   Implemented as a pure wrapper right now, but this will change.  */

tree
vrp_folder::get_value (tree op)
{
  return op_with_constant_singleton_value_range (op);
}
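
/* For example (illustration only), if x_3 ended propagation with the
   singleton range [42, 42], get_value returns the constant 42 and
   the folder substitutes it for uses of x_3.  */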

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  if (TREE_CODE (op) == SSA_NAME)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
	{
	  use_stmt = USE_STMT (use_p);
	  if (use_stmt != stmt
	      && gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	    return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}
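
/* For instance (illustration only), given

     x_2 = ASSERT_EXPR <x_1, x_1 > 0>;

   in a block dominating BB, a use of x_1 in BB can be rewritten as
   x_2, whose value range reflects the asserted predicate.  */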

/* A hack.  */
static class vr_values *x_vr_values;

/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
				  class avail_exprs_stack *avail_exprs_stack,
				  basic_block bb)
{
  /* First see if the conditional is in the hash table.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs && is_gimple_min_invariant (cached_lhs))
    return cached_lhs;

  vr_values *vr_values = x_vr_values;
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree op0 = gimple_cond_lhs (cond_stmt);
      op0 = lhs_of_dominating_assert (op0, bb, stmt);

      tree op1 = gimple_cond_rhs (cond_stmt);
      op1 = lhs_of_dominating_assert (op1, bb, stmt);

      return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
						  op0, op1, within_stmt);
    }

  /* We simplify a switch statement by trying to determine which case label
     will be taken.  If we are successful then we return the corresponding
     CASE_LABEL_EXPR.  */
  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
	return NULL_TREE;

      op = lhs_of_dominating_assert (op, bb, stmt);

      value_range *vr = vr_values->get_value_range (op);
      if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return NULL_TREE;

      if (vr->type == VR_RANGE)
	{
	  size_t i, j;
	  /* Get the range of labels that contain a part of the operand's
	     value range.  */
	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);

	  /* Is there only one such label?  */
	  if (i == j)
	    {
	      tree label = gimple_switch_label (switch_stmt, i);

	      /* The i'th label will be taken only if the value range of the
		 operand is entirely within the bounds of this label.  */
	      if (CASE_HIGH (label) != NULL_TREE
		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
		     && tree_int_cst_equal (vr->min, vr->max)))
		return label;
	    }

	  /* If there are no such labels then the default label will be
	     taken.  */
	  if (i > j)
	    return gimple_switch_label (switch_stmt, 0);
	}

      if (vr->type == VR_ANTI_RANGE)
	{
	  unsigned n = gimple_switch_num_labels (switch_stmt);
	  tree min_label = gimple_switch_label (switch_stmt, 1);
	  tree max_label = gimple_switch_label (switch_stmt, n - 1);

	  /* The default label will be taken only if the anti-range of the
	     operand is entirely outside the bounds of all the (non-default)
	     case labels.  */
	  if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
	      && (CASE_HIGH (max_label) != NULL_TREE
		  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
		  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
	    return gimple_switch_label (switch_stmt, 0);
	}

      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && stmt_interesting_for_vrp (stmt))
	{
	  edge dummy_e;
	  tree dummy_tree;
	  value_range new_vr = VR_INITIALIZER;
	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
					      &dummy_tree, &new_vr);
	  if (range_int_cst_singleton_p (&new_vr))
	    return new_vr.min;
	}
    }

  return NULL_TREE;
}
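
/* For instance (illustration only), if the switch index i_3 has
   range [3, 4] and a single case label covers 3 ... 4, that
   CASE_LABEL_EXPR is returned and the taken edge is known; if
   instead i_3 has range [100, 200] and no label intersects it, the
   default label is returned.  */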

class vrp_dom_walker : public dom_walker
{
public:
  vrp_dom_walker (cdi_direction direction,
		  class const_and_copies *const_and_copies,
		  class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

  class vr_values *vr_values;

private:
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  gcond *m_dummy_cond;
};

/* Called before processing dominator children of BB.  We want to look
   at ASSERT_EXPRs and record information from them in the appropriate
   tables.

   We could look at other statements here, but doing so is not expected
   to significantly increase the number of jump threads we discover.  */

edge
vrp_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree cond = TREE_OPERAND (rhs1, 1);
	  tree inverted = invert_truthvalue (cond);
	  vec<cond_equivalence> p;
	  p.create (3);
	  record_conditions (&p, cond, inverted);
	  for (unsigned int i = 0; i < p.length (); i++)
	    m_avail_exprs_stack->record_cond (&p[i]);

	  tree lhs = gimple_assign_lhs (stmt);
	  m_const_and_copies->record_const_or_copy (lhs,
						    TREE_OPERAND (rhs1, 0));
	  p.release ();
	  continue;
	}
      break;
    }
  return NULL;
}
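
/* For instance (illustration only), from

     x_2 = ASSERT_EXPR <x_1, x_1 != 0>;

   the walker records the conditions derived from x_1 != 0 (and its
   inverse) in the available-expression table and the copy x_2 = x_1
   in the const/copies table; both are unwound again in
   after_dom_children.  */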

/* Called after processing dominator children of BB.  This is where we
   actually call into the threader.  */

void
vrp_dom_walker::after_dom_children (basic_block bb)
{
  if (!m_dummy_cond)
    m_dummy_cond = gimple_build_cond (NE_EXPR,
				      integer_zero_node, integer_zero_node,
				      NULL, NULL);

  x_vr_values = vr_values;
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
			 m_avail_exprs_stack, NULL,
			 simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}

/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */
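
/* For instance (illustration only), in

     if (a_1 > 10)
       ...
     if (a_1 > 5)

   when the second test is reached along the first test's true edge,
   a_1's range there is [11, +INF], so the second conditional is
   known true and the path can be threaded past it.  */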

static void
identify_jump_threads (class vr_values *vr_values)
{
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_IGNORE will do.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags |= EDGE_IGNORE;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  const_and_copies *equiv_stack = new const_and_copies ();

  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);

  vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
  walker.vr_values = vr_values;
  walker.walk (cfun->cfg->x_entry_block_ptr);

  /* Clear EDGE_IGNORE.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags &= ~EDGE_IGNORE;

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
  delete equiv_stack;
  delete avail_exprs;
  delete avail_exprs_stack;
}

/* Traverse all the blocks folding conditionals with known ranges.  */

void
vrp_prop::vrp_finalize (bool warn_array_bounds_p)
{
  size_t i;

  /* We have completed propagating through the lattice.  */
  vr_values.set_lattice_propagation_complete ();

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      vr_values.dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Export the value range information: mark pointers that cannot be
     NULL and record integer ranges on non-pointer SSA_NAMEs.  */
  for (i = 0; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (!name)
	continue;

      value_range *vr = get_value_range (name);
      if ((vr->type == VR_VARYING)
	  || (vr->type == VR_UNDEFINED)
	  || (TREE_CODE (vr->min) != INTEGER_CST)
	  || (TREE_CODE (vr->max) != INTEGER_CST))
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name))
	  && ((vr->type == VR_RANGE
	       && range_includes_zero_p (vr->min, vr->max) == 0)
	      || (vr->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr->min, vr->max) == 1)))
	set_ptr_nonnull (name);
      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
	set_range_info (name, vr->type,
			wi::to_wide (vr->min),
			wi::to_wide (vr->max));
    }

  /* If we're checking array refs, we want to merge information on
     the executability of each edge between vrp_folder and the
     check_array_bounds_dom_walker: each can clear the
     EDGE_EXECUTABLE flag on edges, in different ways.

     Hence, if we're going to call check_all_array_refs, set
     the flag on every edge now, rather than in
     check_array_bounds_dom_walker's ctor; vrp_folder may clear
     it from some edges.  */
  if (warn_array_bounds && warn_array_bounds_p)
    set_all_edges_as_executable (cfun);

  class vrp_folder vrp_folder;
  vrp_folder.vr_values = &vr_values;
  vrp_folder.substitute_and_fold ();

  if (warn_array_bounds && warn_array_bounds_p)
    check_all_array_refs ();
}
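
/* For example (illustration only), after

     _5 = x_3 & 7;

   propagation gives _5 the range [0, 7]; vrp_finalize records that
   with set_range_info so later passes can use it once the VRP
   lattice is gone, and a pointer proven to be ~[0, 0] is marked
   non-NULL with set_ptr_nonnull.  */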

/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.
   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  int i;
  edge e;
  switch_update *su;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the data structures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

 private:
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}