1 /* Support routines for Value Range Propagation (VRP).
2    Copyright (C) 2005-2021 Free Software Foundation, Inc.
3    Contributed by Diego Novillo <dnovillo@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "flags.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "calls.h"
38 #include "cfganal.h"
39 #include "gimple-fold.h"
40 #include "tree-eh.h"
41 #include "gimple-iterator.h"
42 #include "gimple-walk.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "tree-ssa-loop-niter.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-into-ssa.h"
48 #include "tree-ssa.h"
49 #include "cfgloop.h"
50 #include "tree-scalar-evolution.h"
51 #include "tree-ssa-propagate.h"
52 #include "tree-chrec.h"
53 #include "tree-ssa-threadupdate.h"
54 #include "tree-ssa-scopedtables.h"
55 #include "tree-ssa-threadedge.h"
56 #include "omp-general.h"
57 #include "target.h"
58 #include "case-cfn-macros.h"
59 #include "alloc-pool.h"
60 #include "domwalk.h"
61 #include "tree-cfgcleanup.h"
62 #include "stringpool.h"
63 #include "attribs.h"
64 #include "vr-values.h"
65 #include "builtins.h"
66 #include "range-op.h"
67 #include "value-range-equiv.h"
68 #include "gimple-array-bounds.h"
69 
70 /* Set of SSA names found live during the RPO traversal of the function,
71    tracked per still-active basic block.  */
72 class live_names
73 {
74 public:
75   live_names ();
76   ~live_names ();
77   void set (tree, basic_block);
78   void clear (tree, basic_block);
79   void merge (basic_block dest, basic_block src);
80   bool live_on_block_p (tree, basic_block);
81   bool live_on_edge_p (tree, edge);
82   bool block_has_live_names_p (basic_block);
83   void clear_block (basic_block);
84 
85 private:
86   sbitmap *live;
87   unsigned num_blocks;
88   void init_bitmap_if_needed (basic_block);
89 };
90 
91 void
92 live_names::init_bitmap_if_needed (basic_block bb)
93 {
94   unsigned i = bb->index;
95   if (!live[i])
96     {
97       live[i] = sbitmap_alloc (num_ssa_names);
98       bitmap_clear (live[i]);
99     }
100 }
101 
102 bool
103 live_names::block_has_live_names_p (basic_block bb)
104 {
105   unsigned i = bb->index;
106   return live[i] && bitmap_empty_p (live[i]);
107 }
108 
109 void
110 live_names::clear_block (basic_block bb)
111 {
112   unsigned i = bb->index;
113   if (live[i])
114     {
115       sbitmap_free (live[i]);
116       live[i] = NULL;
117     }
118 }
119 
120 void
121 live_names::merge (basic_block dest, basic_block src)
122 {
123   init_bitmap_if_needed (dest);
124   init_bitmap_if_needed (src);
125   bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
126 }
127 
128 void
129 live_names::set (tree name, basic_block bb)
130 {
131   init_bitmap_if_needed (bb);
132   bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
133 }
134 
135 void
136 live_names::clear (tree name, basic_block bb)
137 {
138   unsigned i = bb->index;
139   if (live[i])
140     bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
141 }
142 
143 live_names::live_names ()
144 {
145   num_blocks = last_basic_block_for_fn (cfun);
146   live = XCNEWVEC (sbitmap, num_blocks);
147 }
148 
149 live_names::~live_names ()
150 {
151   for (unsigned i = 0; i < num_blocks; ++i)
152     if (live[i])
153       sbitmap_free (live[i]);
154   XDELETEVEC (live);
155 }
156 
157 bool
158 live_names::live_on_block_p (tree name, basic_block bb)
159 {
160   return (live[bb->index]
161 	  && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
162 }
163 
164 /* Return true if the SSA name NAME is live on the edge E.  */
165 
166 bool
167 live_names::live_on_edge_p (tree name, edge e)
168 {
169   return live_on_block_p (name, e->dest);
170 }
171 
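/* A rough usage sketch (OP, DEF, BB and E below are hypothetical, for
   illustration only):

     live_names live;
     live.set (op, bb);                    OP is used in BB
     live.clear (def, bb);                 DEF is defined in BB
     if (live.live_on_edge_p (op, e))      is OP live entering E->dest?
       ...
     live.merge (e->src, bb);              merge BB's live set into a pred
     live.clear_block (bb);                done with BB.  */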
172 
173 /* VR_TYPE describes a range with minimum value *MIN and maximum
174    value *MAX.  Restrict the range to the set of values that have
175    no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
176    return the new range type.
177 
178    SGN gives the sign of the values described by the range.  */
179 
180 enum value_range_kind
181 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
182 				   wide_int *min, wide_int *max,
183 				   const wide_int &nonzero_bits,
184 				   signop sgn)
185 {
186   if (vr_type == VR_ANTI_RANGE)
187     {
188       /* The VR_ANTI_RANGE is equivalent to the union of the ranges
189 	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
190 	 to create an inclusive upper bound for A and an inclusive lower
191 	 bound for B.  */
192       wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
193       wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
194 
195       /* If the calculation of A_MAX wrapped, A is effectively empty
196 	 and A_MAX is the highest value that satisfies NONZERO_BITS.
197 	 Likewise if the calculation of B_MIN wrapped, B is effectively
198 	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
199       bool a_empty = wi::ge_p (a_max, *min, sgn);
200       bool b_empty = wi::le_p (b_min, *max, sgn);
201 
202       /* If both A and B are empty, there are no valid values.  */
203       if (a_empty && b_empty)
204 	return VR_UNDEFINED;
205 
206       /* If exactly one of A or B is empty, return a VR_RANGE for the
207 	 other one.  */
208       if (a_empty || b_empty)
209 	{
210 	  *min = b_min;
211 	  *max = a_max;
212 	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
213 	  return VR_RANGE;
214 	}
215 
216       /* Update the VR_ANTI_RANGE bounds.  */
217       *min = a_max + 1;
218       *max = b_min - 1;
219       gcc_checking_assert (wi::le_p (*min, *max, sgn));
220 
221       /* Now check whether the excluded range includes any values that
222 	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
223       if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
224 	{
225 	  unsigned int precision = min->get_precision ();
226 	  *min = wi::min_value (precision, sgn);
227 	  *max = wi::max_value (precision, sgn);
228 	  vr_type = VR_RANGE;
229 	}
230     }
231   if (vr_type == VR_RANGE)
232     {
233       *max = wi::round_down_for_mask (*max, nonzero_bits);
234 
235       /* Check that the range contains at least one valid value.  */
236       if (wi::gt_p (*min, *max, sgn))
237 	return VR_UNDEFINED;
238 
239       *min = wi::round_up_for_mask (*min, nonzero_bits);
240       gcc_checking_assert (wi::le_p (*min, *max, sgn));
241     }
242   return vr_type;
243 }
244 
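/* A rough worked example of the VR_RANGE case (8-bit unsigned values
   chosen for illustration): with *MIN = 3, *MAX = 28 and
   NONZERO_BITS = 0b1100, the only values with no bits outside the mask
   are 0, 4, 8 and 12, so *MAX is rounded down to 12, *MIN is rounded up
   to 4, and the result is the VR_RANGE [4, 12].  */
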
245 /* Return true if the max and min of VR are INTEGER_CST.  It's not
246    necessarily a singleton.  */
247 
248 bool
249 range_int_cst_p (const value_range *vr)
250 {
251   return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
252 }
253 
254 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
255    otherwise.  We only handle additive operations and set NEG to true if the
256    symbol is negated and INV to the invariant part, if any.  */
257 
258 tree
259 get_single_symbol (tree t, bool *neg, tree *inv)
260 {
261   bool neg_;
262   tree inv_;
263 
264   *inv = NULL_TREE;
265   *neg = false;
266 
267   if (TREE_CODE (t) == PLUS_EXPR
268       || TREE_CODE (t) == POINTER_PLUS_EXPR
269       || TREE_CODE (t) == MINUS_EXPR)
270     {
271       if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
272 	{
273 	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
274 	  inv_ = TREE_OPERAND (t, 0);
275 	  t = TREE_OPERAND (t, 1);
276 	}
277       else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
278 	{
279 	  neg_ = false;
280 	  inv_ = TREE_OPERAND (t, 1);
281 	  t = TREE_OPERAND (t, 0);
282 	}
283       else
284         return NULL_TREE;
285     }
286   else
287     {
288       neg_ = false;
289       inv_ = NULL_TREE;
290     }
291 
292   if (TREE_CODE (t) == NEGATE_EXPR)
293     {
294       t = TREE_OPERAND (t, 0);
295       neg_ = !neg_;
296     }
297 
298   if (TREE_CODE (t) != SSA_NAME)
299     return NULL_TREE;
300 
301   if (inv_ && TREE_OVERFLOW_P (inv_))
302     inv_ = drop_tree_overflow (inv_);
303 
304   *neg = neg_;
305   *inv = inv_;
306   return t;
307 }
308 
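/* Some rough examples (hypothetical SSA names, for illustration only):

     T = x_7 + 16   returns x_7 with *NEG = false and *INV = 16
     T = 10 - y_3   returns y_3 with *NEG = true and *INV = 10
     T = -z_5       returns z_5 with *NEG = true and *INV = NULL_TREE
     T = a_1 + b_2  returns NULL_TREE (no invariant operand).  */
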
309 /* The reverse operation: build a symbolic expression with TYPE
310    from symbol SYM, negated according to NEG, and invariant INV.  */
311 
312 static tree
313 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
314 {
315   const bool pointer_p = POINTER_TYPE_P (type);
316   tree t = sym;
317 
318   if (neg)
319     t = build1 (NEGATE_EXPR, type, t);
320 
321   if (integer_zerop (inv))
322     return t;
323 
324   return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
325 }
326 
327 /* Return
328    1 if VAL < VAL2
329    0 if !(VAL < VAL2)
330    -2 if those are incomparable.  */
331 int
332 operand_less_p (tree val, tree val2)
333 {
334   /* LT is folded faster than GE and others.  Inline the common case.  */
335   if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
336     return tree_int_cst_lt (val, val2);
337   else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
338     return val == val2 ? 0 : -2;
339   else
340     {
341       int cmp = compare_values (val, val2);
342       if (cmp == -1)
343 	return 1;
344       else if (cmp == 0 || cmp == 1)
345 	return 0;
346       else
347 	return -2;
348     }
349 
350   return 0;
351 }
352 
353 /* Compare two values VAL1 and VAL2.  Return
354 
355    	-2 if VAL1 and VAL2 cannot be compared at compile-time,
356    	-1 if VAL1 < VAL2,
357    	 0 if VAL1 == VAL2,
358 	+1 if VAL1 > VAL2, and
359 	+2 if VAL1 != VAL2
360 
361    This is similar to tree_int_cst_compare but supports pointer values
362    and values that cannot be compared at compile time.
363 
364    If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
365    true if the return value is only valid if we assume that signed
366    overflow is undefined.  */
367 
368 int
369 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
370 {
371   if (val1 == val2)
372     return 0;
373 
374   /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
375      both integers.  */
376   gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
377 	      == POINTER_TYPE_P (TREE_TYPE (val2)));
378 
379   /* Convert the two values into the same type.  This is needed because
380      sizetype causes sign extension even for unsigned types.  */
381   if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
382     val2 = fold_convert (TREE_TYPE (val1), val2);
383 
384   const bool overflow_undefined
385     = INTEGRAL_TYPE_P (TREE_TYPE (val1))
386       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
387   tree inv1, inv2;
388   bool neg1, neg2;
389   tree sym1 = get_single_symbol (val1, &neg1, &inv1);
390   tree sym2 = get_single_symbol (val2, &neg2, &inv2);
391 
392   /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
393      accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
394   if (sym1 && sym2)
395     {
396       /* Both values must use the same name with the same sign.  */
397       if (sym1 != sym2 || neg1 != neg2)
398 	return -2;
399 
400       /* [-]NAME + CST == [-]NAME + CST.  */
401       if (inv1 == inv2)
402 	return 0;
403 
404       /* If overflow is defined we cannot simplify more.  */
405       if (!overflow_undefined)
406 	return -2;
407 
408       if (strict_overflow_p != NULL
409 	  /* Symbolic range building sets TREE_NO_WARNING to declare
410 	     that overflow doesn't happen.  */
411 	  && (!inv1 || !TREE_NO_WARNING (val1))
412 	  && (!inv2 || !TREE_NO_WARNING (val2)))
413 	*strict_overflow_p = true;
414 
415       if (!inv1)
416 	inv1 = build_int_cst (TREE_TYPE (val1), 0);
417       if (!inv2)
418 	inv2 = build_int_cst (TREE_TYPE (val2), 0);
419 
420       return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
421 		      TYPE_SIGN (TREE_TYPE (val1)));
422     }
423 
424   const bool cst1 = is_gimple_min_invariant (val1);
425   const bool cst2 = is_gimple_min_invariant (val2);
426 
427   /* If one is of the form '[-]NAME + CST' and the other is constant, then
428      it might be possible to say something depending on the constants.  */
429   if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
430     {
431       if (!overflow_undefined)
432 	return -2;
433 
434       if (strict_overflow_p != NULL
435 	  /* Symbolic range building sets TREE_NO_WARNING to declare
436 	     that overflow doesn't happen.  */
437 	  && (!sym1 || !TREE_NO_WARNING (val1))
438 	  && (!sym2 || !TREE_NO_WARNING (val2)))
439 	*strict_overflow_p = true;
440 
441       const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
442       tree cst = cst1 ? val1 : val2;
443       tree inv = cst1 ? inv2 : inv1;
444 
445       /* Compute the difference between the constants.  If it overflows or
446 	 underflows, this means that we can trivially compare the NAME with
447 	 it and, consequently, the two values with each other.  */
448       wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
449       if (wi::cmp (0, wi::to_wide (inv), sgn)
450 	  != wi::cmp (diff, wi::to_wide (cst), sgn))
451 	{
452 	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
453 	  return cst1 ? res : -res;
454 	}
455 
456       return -2;
457     }
458 
459   /* We cannot say anything more for non-constants.  */
460   if (!cst1 || !cst2)
461     return -2;
462 
463   if (!POINTER_TYPE_P (TREE_TYPE (val1)))
464     {
465       /* We cannot compare overflowed values.  */
466       if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
467 	return -2;
468 
469       if (TREE_CODE (val1) == INTEGER_CST
470 	  && TREE_CODE (val2) == INTEGER_CST)
471 	return tree_int_cst_compare (val1, val2);
472 
473       if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
474 	{
475 	  if (known_eq (wi::to_poly_widest (val1),
476 			wi::to_poly_widest (val2)))
477 	    return 0;
478 	  if (known_lt (wi::to_poly_widest (val1),
479 			wi::to_poly_widest (val2)))
480 	    return -1;
481 	  if (known_gt (wi::to_poly_widest (val1),
482 			wi::to_poly_widest (val2)))
483 	    return 1;
484 	}
485 
486       return -2;
487     }
488   else
489     {
490       if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
491 	{
492 	  /* We cannot compare overflowed values.  */
493 	  if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
494 	    return -2;
495 
496 	  return tree_int_cst_compare (val1, val2);
497 	}
498 
499       /* First see if VAL1 and VAL2 are the same.  */
500       if (operand_equal_p (val1, val2, 0))
501 	return 0;
502 
503       fold_defer_overflow_warnings ();
504 
505       /* If VAL1 is a lower address than VAL2, return -1.  */
506       tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
507       if (t && integer_onep (t))
508 	{
509 	  fold_undefer_and_ignore_overflow_warnings ();
510 	  return -1;
511 	}
512 
513       /* If VAL1 is a higher address than VAL2, return +1.  */
514       t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
515       if (t && integer_onep (t))
516 	{
517 	  fold_undefer_and_ignore_overflow_warnings ();
518 	  return 1;
519 	}
520 
521       /* If VAL1 is different than VAL2, return +2.  */
522       t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
523       fold_undefer_and_ignore_overflow_warnings ();
524       if (t && integer_onep (t))
525 	return 2;
526 
527       return -2;
528     }
529 }
530 
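/* A rough example of the symbolic case (hypothetical SSA name, for
   illustration only): for VAL1 = n_1 + 3 and VAL2 = n_1 + 7 in a signed
   type with undefined overflow, both values use the same symbol with the
   same sign, so the result is the comparison of the invariants 3 and 7,
   i.e. -1 (VAL1 < VAL2), and *STRICT_OVERFLOW_P is set because the
   answer relies on n_1 + CST not wrapping.  */
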
531 /* Compare values like compare_values_warnv.  */
532 
533 int
534 compare_values (tree val1, tree val2)
535 {
536   bool sop;
537   return compare_values_warnv (val1, val2, &sop);
538 }
539 
540 /* If BOUND will include a symbolic bound, adjust it accordingly,
541    otherwise leave it as is.
542 
543    CODE is the original operation that combined the bounds (PLUS_EXPR
544    or MINUS_EXPR).
545 
546    TYPE is the type of the original operation.
547 
548    SYM_OPn is the symbolic part of OPn, if OPn has one.
549 
550    NEG_OPn is TRUE if the OPn was negated.  */
551 
552 static void
553 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
554 		       tree sym_op0, tree sym_op1,
555 		       bool neg_op0, bool neg_op1)
556 {
557   bool minus_p = (code == MINUS_EXPR);
558   /* If the result bound is constant, we're done; otherwise, build the
559      symbolic bound.  */
560   if (sym_op0 == sym_op1)
561     ;
562   else if (sym_op0)
563     bound = build_symbolic_expr (type, sym_op0,
564 				 neg_op0, bound);
565   else if (sym_op1)
566     {
567       /* We may not negate if that might introduce
568 	 undefined overflow.  */
569       if (!minus_p
570 	  || neg_op1
571 	  || TYPE_OVERFLOW_WRAPS (type))
572 	bound = build_symbolic_expr (type, sym_op1,
573 				     neg_op1 ^ minus_p, bound);
574       else
575 	bound = NULL_TREE;
576     }
577 }
578 
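/* Rough examples (hypothetical values, for illustration only): with
   BOUND = 3, CODE = PLUS_EXPR, SYM_OP0 = x_1 and SYM_OP1 = NULL_TREE,
   BOUND becomes the symbolic expression x_1 + 3.  With BOUND = 3,
   CODE = MINUS_EXPR, SYM_OP1 = y_2, NEG_OP1 = false and a type whose
   overflow is undefined, BOUND is cleared to NULL_TREE because building
   -y_2 + 3 could introduce undefined overflow.  */
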
579 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
580    int bound according to CODE.  CODE is the operation combining the
581    bound (either a PLUS_EXPR or a MINUS_EXPR).
582 
583    TYPE is the type of the combine operation.
584 
585    WI is the wide int to store the result.
586 
587    OVF is -1 if an underflow occurred, +1 if an overflow occurred and 0
588    if neither occurred.  */
589 
590 static void
591 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
592 	       tree type, tree op0, tree op1)
593 {
594   bool minus_p = (code == MINUS_EXPR);
595   const signop sgn = TYPE_SIGN (type);
596   const unsigned int prec = TYPE_PRECISION (type);
597 
598   /* Combine the bounds, if any.  */
599   if (op0 && op1)
600     {
601       if (minus_p)
602 	wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
603       else
604 	wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
605     }
606   else if (op0)
607     wi = wi::to_wide (op0);
608   else if (op1)
609     {
610       if (minus_p)
611 	wi = wi::neg (wi::to_wide (op1), &ovf);
612       else
613 	wi = wi::to_wide (op1);
614     }
615   else
616     wi = wi::shwi (0, prec);
617 }
618 
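/* A rough example (values chosen for illustration): for an 8-bit
   unsigned TYPE with CODE = MINUS_EXPR, OP0 = 10 and OP1 = 20, the
   subtraction wraps, so WI is set to the truncated result 246 and OVF
   records the underflow.  */
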
619 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
620    store the resulting range in KIND, MIN and MAX.
621 
622    TYPE is the type of the range.
623 
624    MIN_OVF and MAX_OVF indicate what type of overflow, if any,
625    occurred while originally calculating WMIN or WMAX.  -1 indicates
626    underflow.  +1 indicates overflow.  0 indicates neither.  */
627 
628 static void
629 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
630 			       tree type,
631 			       const wide_int &wmin, const wide_int &wmax,
632 			       wi::overflow_type min_ovf,
633 			       wi::overflow_type max_ovf)
634 {
635   const signop sgn = TYPE_SIGN (type);
636   const unsigned int prec = TYPE_PRECISION (type);
637 
638   /* For one bit precision if max < min, then the swapped
639      range covers all values.  */
640   if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
641     {
642       kind = VR_VARYING;
643       return;
644     }
645 
646   if (TYPE_OVERFLOW_WRAPS (type))
647     {
648       /* If overflow wraps, truncate the values and adjust the
649 	 range kind and bounds appropriately.  */
650       wide_int tmin = wide_int::from (wmin, prec, sgn);
651       wide_int tmax = wide_int::from (wmax, prec, sgn);
652       if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
653 	{
654 	  /* If the limits are swapped, we wrapped around and cover
655 	     the entire range.  */
656 	  if (wi::gt_p (tmin, tmax, sgn))
657 	    kind = VR_VARYING;
658 	  else
659 	    {
660 	      kind = VR_RANGE;
661 	      /* No overflow or both overflow or underflow.  The
662 		 range kind stays VR_RANGE.  */
663 	      min = wide_int_to_tree (type, tmin);
664 	      max = wide_int_to_tree (type, tmax);
665 	    }
666 	  return;
667 	}
668       else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
669 	       || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
670 	{
671 	  /* Min underflow or max overflow.  The range kind
672 	     changes to VR_ANTI_RANGE.  */
673 	  bool covers = false;
674 	  wide_int tem = tmin;
675 	  tmin = tmax + 1;
676 	  if (wi::cmp (tmin, tmax, sgn) < 0)
677 	    covers = true;
678 	  tmax = tem - 1;
679 	  if (wi::cmp (tmax, tem, sgn) > 0)
680 	    covers = true;
681 	  /* If the anti-range would cover nothing, drop to varying.
682 	     Likewise if the anti-range bounds are outside of the
683 	     type's values.  */
684 	  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
685 	    {
686 	      kind = VR_VARYING;
687 	      return;
688 	    }
689 	  kind = VR_ANTI_RANGE;
690 	  min = wide_int_to_tree (type, tmin);
691 	  max = wide_int_to_tree (type, tmax);
692 	  return;
693 	}
694       else
695 	{
696 	  /* Other underflow and/or overflow, drop to VR_VARYING.  */
697 	  kind = VR_VARYING;
698 	  return;
699 	}
700     }
701   else
702     {
703       /* If overflow does not wrap, saturate to the types min/max
704 	 value.  */
705       wide_int type_min = wi::min_value (prec, sgn);
706       wide_int type_max = wi::max_value (prec, sgn);
707       kind = VR_RANGE;
708       if (min_ovf == wi::OVF_UNDERFLOW)
709 	min = wide_int_to_tree (type, type_min);
710       else if (min_ovf == wi::OVF_OVERFLOW)
711 	min = wide_int_to_tree (type, type_max);
712       else
713 	min = wide_int_to_tree (type, wmin);
714 
715       if (max_ovf == wi::OVF_UNDERFLOW)
716 	max = wide_int_to_tree (type, type_min);
717       else if (max_ovf == wi::OVF_OVERFLOW)
718 	max = wide_int_to_tree (type, type_max);
719       else
720 	max = wide_int_to_tree (type, wmax);
721     }
722 }
723 
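/* A rough worked example (8-bit unsigned TYPE, which wraps; values
   chosen for illustration): if the true bounds were [250, 260], then
   WMIN is 250 with MIN_OVF = wi::OVF_NONE and WMAX has wrapped to 4
   with MAX_OVF = wi::OVF_OVERFLOW.  Only the upper bound overflowed, so
   the result becomes the anti-range ~[5, 249], i.e. the values
   250 ... 255 and 0 ... 4.  */
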
724 /* Fold two value ranges of a POINTER_PLUS_EXPR into VR.  */
725 
726 static void
727 extract_range_from_pointer_plus_expr (value_range *vr,
728 				      enum tree_code code,
729 				      tree expr_type,
730 				      const value_range *vr0,
731 				      const value_range *vr1)
732 {
733   gcc_checking_assert (POINTER_TYPE_P (expr_type)
734 		       && code == POINTER_PLUS_EXPR);
735   /* For pointer types, we are really only interested in asserting
736      whether the expression evaluates to non-NULL.
737      With -fno-delete-null-pointer-checks we need to be more
738      conservative.  Since some object might reside at address 0,
739      some offset could be added to it and the same offset
740      subtracted again, leaving the result NULL.
741      E.g.
742      static int a[12]; where &a[0] is NULL and
743      ptr = &a[6];
744      ptr -= 6;
745      ptr will be NULL here, even when there is POINTER_PLUS_EXPR
746      where the first range doesn't include zero and the second one
747      doesn't either.  As the second operand is sizetype (unsigned),
748      consider all ranges where the MSB could be set as possible
749      subtractions where the result might be NULL.  */
750   if ((!range_includes_zero_p (vr0)
751        || !range_includes_zero_p (vr1))
752       && !TYPE_OVERFLOW_WRAPS (expr_type)
753       && (flag_delete_null_pointer_checks
754 	  || (range_int_cst_p (vr1)
755 	      && !tree_int_cst_sign_bit (vr1->max ()))))
756     vr->set_nonzero (expr_type);
757   else if (vr0->zero_p () && vr1->zero_p ())
758     vr->set_zero (expr_type);
759   else
760     vr->set_varying (expr_type);
761 }
762 
763 /* Extract range information from a PLUS/MINUS_EXPR and store the
764    result in *VR.  */
765 
766 static void
767 extract_range_from_plus_minus_expr (value_range *vr,
768 				    enum tree_code code,
769 				    tree expr_type,
770 				    const value_range *vr0_,
771 				    const value_range *vr1_)
772 {
773   gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
774 
775   value_range vr0 = *vr0_, vr1 = *vr1_;
776   value_range vrtem0, vrtem1;
777 
778   /* Now canonicalize anti-ranges to ranges when they are not symbolic
779      and express ~[] op X as ([]' op X) U ([]'' op X).  */
780   if (vr0.kind () == VR_ANTI_RANGE
781       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
782     {
783       extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
784       if (!vrtem1.undefined_p ())
785 	{
786 	  value_range vrres;
787 	  extract_range_from_plus_minus_expr (&vrres, code, expr_type,
788 					      &vrtem1, vr1_);
789 	  vr->union_ (&vrres);
790 	}
791       return;
792     }
793   /* Likewise for X op ~[].  */
794   if (vr1.kind () == VR_ANTI_RANGE
795       && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
796     {
797       extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
798       if (!vrtem1.undefined_p ())
799 	{
800 	  value_range vrres;
801 	  extract_range_from_plus_minus_expr (&vrres, code, expr_type,
802 					      vr0_, &vrtem1);
803 	  vr->union_ (&vrres);
804 	}
805       return;
806     }
807 
808   value_range_kind kind;
809   value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
810   tree vr0_min = vr0.min (), vr0_max = vr0.max ();
811   tree vr1_min = vr1.min (), vr1_max = vr1.max ();
812   tree min = NULL_TREE, max = NULL_TREE;
813 
814   /* This will normalize things such that calculating
815      [0,0] - VR_VARYING is not dropped to varying, but is
816      calculated as [MIN+1, MAX].  */
817   if (vr0.varying_p ())
818     {
819       vr0_kind = VR_RANGE;
820       vr0_min = vrp_val_min (expr_type);
821       vr0_max = vrp_val_max (expr_type);
822     }
823   if (vr1.varying_p ())
824     {
825       vr1_kind = VR_RANGE;
826       vr1_min = vrp_val_min (expr_type);
827       vr1_max = vrp_val_max (expr_type);
828     }
829 
830   const bool minus_p = (code == MINUS_EXPR);
831   tree min_op0 = vr0_min;
832   tree min_op1 = minus_p ? vr1_max : vr1_min;
833   tree max_op0 = vr0_max;
834   tree max_op1 = minus_p ? vr1_min : vr1_max;
835   tree sym_min_op0 = NULL_TREE;
836   tree sym_min_op1 = NULL_TREE;
837   tree sym_max_op0 = NULL_TREE;
838   tree sym_max_op1 = NULL_TREE;
839   bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
840 
841   neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
842 
843   /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
844      single-symbolic ranges, try to compute the precise resulting range,
845      but only if we know that this resulting range will also be constant
846      or single-symbolic.  */
847   if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
848       && (TREE_CODE (min_op0) == INTEGER_CST
849 	  || (sym_min_op0
850 	      = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
851       && (TREE_CODE (min_op1) == INTEGER_CST
852 	  || (sym_min_op1
853 	      = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
854       && (!(sym_min_op0 && sym_min_op1)
855 	  || (sym_min_op0 == sym_min_op1
856 	      && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
857       && (TREE_CODE (max_op0) == INTEGER_CST
858 	  || (sym_max_op0
859 	      = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
860       && (TREE_CODE (max_op1) == INTEGER_CST
861 	  || (sym_max_op1
862 	      = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
863       && (!(sym_max_op0 && sym_max_op1)
864 	  || (sym_max_op0 == sym_max_op1
865 	      && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
866     {
867       wide_int wmin, wmax;
868       wi::overflow_type min_ovf = wi::OVF_NONE;
869       wi::overflow_type max_ovf = wi::OVF_NONE;
870 
871       /* Build the bounds.  */
872       combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
873       combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
874 
875       /* If the resulting range will be symbolic, we need to eliminate any
876 	 explicit or implicit overflow introduced in the above computation
877 	 because compare_values could make an incorrect use of it.  That's
878 	 why we require one of the ranges to be a singleton.  */
879       if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
880 	  && ((bool)min_ovf || (bool)max_ovf
881 	      || (min_op0 != max_op0 && min_op1 != max_op1)))
882 	{
883 	  vr->set_varying (expr_type);
884 	  return;
885 	}
886 
887       /* Adjust the range for possible overflow.  */
888       set_value_range_with_overflow (kind, min, max, expr_type,
889 				     wmin, wmax, min_ovf, max_ovf);
890       if (kind == VR_VARYING)
891 	{
892 	  vr->set_varying (expr_type);
893 	  return;
894 	}
895 
896       /* Build the symbolic bounds if needed.  */
897       adjust_symbolic_bound (min, code, expr_type,
898 			     sym_min_op0, sym_min_op1,
899 			     neg_min_op0, neg_min_op1);
900       adjust_symbolic_bound (max, code, expr_type,
901 			     sym_max_op0, sym_max_op1,
902 			     neg_max_op0, neg_max_op1);
903     }
904   else
905     {
906       /* For other cases, for example if we have a PLUS_EXPR with two
907 	 VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
908 	 to compute a precise range for such a case.
909 	 ???  Even general mixed range kind operations can be expressed,
910 	 for example by transforming ~[3, 5] + [1, 2] into range-only
911 	 operations and a union primitive:
912 	 [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
913 	 [-INF+1, 4]     U    [6, +INF(OVF)]
914 	 though usually the union is not exactly representable with
915 	 a single range or anti-range as the above is
916 	 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
917 	 but one could use a scheme similar to equivalences for this. */
918       vr->set_varying (expr_type);
919       return;
920     }
921 
922   /* If either MIN or MAX overflowed, then set the resulting range to
923      VARYING.  */
924   if (min == NULL_TREE
925       || TREE_OVERFLOW_P (min)
926       || max == NULL_TREE
927       || TREE_OVERFLOW_P (max))
928     {
929       vr->set_varying (expr_type);
930       return;
931     }
932 
933   int cmp = compare_values (min, max);
934   if (cmp == -2 || cmp == 1)
935     {
936       /* If the new range has its limits swapped around (MIN > MAX),
937 	 then the operation caused one of them to wrap around, mark
938 	 the new range VARYING.  */
939       vr->set_varying (expr_type);
940     }
941   else
942     vr->set (min, max, kind);
943 }
944 
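/* Rough examples (hypothetical SSA name, for illustration only):
   [1, 5] + [10, 20] yields [11, 25].  For the symbolic case,
   [x_1 + 1, x_1 + 1] + [2, 5] yields [x_1 + 3, x_1 + 6], whereas
   [x_1 + 1, x_1 + 2] + [2, 5] is dropped to VR_VARYING because neither
   operand range is a singleton, so the symbolic result cannot be
   checked for overflow.  */
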
945 /* Return the range-ops handler for CODE and EXPR_TYPE.  If no
946    suitable operator is found, return NULL and set VR to VARYING.  */
947 
948 static const range_operator *
949 get_range_op_handler (value_range *vr,
950 		      enum tree_code code,
951 		      tree expr_type)
952 {
953   const range_operator *op = range_op_handler (code, expr_type);
954   if (!op)
955     vr->set_varying (expr_type);
956   return op;
957 }
958 
959 /* If the types passed are supported, return TRUE, otherwise set VR to
960    VARYING and return FALSE.  */
961 
962 static bool
963 supported_types_p (value_range *vr,
964 		   tree type0,
965 		   tree type1 = NULL)
966 {
967   if (!value_range::supports_type_p (type0)
968       || (type1 && !value_range::supports_type_p (type1)))
969     {
970       vr->set_varying (type0);
971       return false;
972     }
973   return true;
974 }
975 
976 /* If any of the ranges passed are defined, return TRUE, otherwise set
977    VR to UNDEFINED and return FALSE.  */
978 
979 static bool
980 defined_ranges_p (value_range *vr,
981 		  const value_range *vr0, const value_range *vr1 = NULL)
982 {
983   if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
984     {
985       vr->set_undefined ();
986       return false;
987     }
988   return true;
989 }
990 
991 static value_range
992 drop_undefines_to_varying (const value_range *vr, tree expr_type)
993 {
994   if (vr->undefined_p ())
995     return value_range (expr_type);
996   else
997     return *vr;
998 }
999 
1000 /* If any operand is symbolic, perform a binary operation on them and
1001    return TRUE, otherwise return FALSE.  */
1002 
1003 static bool
1004 range_fold_binary_symbolics_p (value_range *vr,
1005 			       tree_code code,
1006 			       tree expr_type,
1007 			       const value_range *vr0_,
1008 			       const value_range *vr1_)
1009 {
1010   if (vr0_->symbolic_p () || vr1_->symbolic_p ())
1011     {
1012       value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
1013       value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
1014       if ((code == PLUS_EXPR || code == MINUS_EXPR))
1015 	{
1016 	  extract_range_from_plus_minus_expr (vr, code, expr_type,
1017 					      &vr0, &vr1);
1018 	  return true;
1019 	}
1020       if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
1021 	{
1022 	  extract_range_from_pointer_plus_expr (vr, code, expr_type,
1023 						&vr0, &vr1);
1024 	  return true;
1025 	}
1026       const range_operator *op = get_range_op_handler (vr, code, expr_type);
1027       vr0.normalize_symbolics ();
1028       vr1.normalize_symbolics ();
1029       return op->fold_range (*vr, expr_type, vr0, vr1);
1030     }
1031   return false;
1032 }
1033 
1034 /* If operand is symbolic, perform a unary operation on it and return
1035    TRUE, otherwise return FALSE.  */
1036 
1037 static bool
1038 range_fold_unary_symbolics_p (value_range *vr,
1039 			      tree_code code,
1040 			      tree expr_type,
1041 			      const value_range *vr0)
1042 {
1043   if (vr0->symbolic_p ())
1044     {
1045       if (code == NEGATE_EXPR)
1046 	{
1047 	  /* -X is simply 0 - X.  */
1048 	  value_range zero;
1049 	  zero.set_zero (vr0->type ());
1050 	  range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1051 	  return true;
1052 	}
1053       if (code == BIT_NOT_EXPR)
1054 	{
1055 	  /* ~X is simply -1 - X.  */
1056 	  value_range minusone;
1057 	  minusone.set (build_int_cst (vr0->type (), -1));
1058 	  range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1059 	  return true;
1060 	}
1061       const range_operator *op = get_range_op_handler (vr, code, expr_type);
1062       value_range vr0_cst (*vr0);
1063       vr0_cst.normalize_symbolics ();
1064       return op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1065     }
1066   return false;
1067 }
1068 
1069 /* Perform a binary operation on a pair of ranges.  */
1070 
1071 void
1072 range_fold_binary_expr (value_range *vr,
1073 			enum tree_code code,
1074 			tree expr_type,
1075 			const value_range *vr0_,
1076 			const value_range *vr1_)
1077 {
1078   if (!supported_types_p (vr, expr_type)
1079       || !defined_ranges_p (vr, vr0_, vr1_))
1080     return;
1081   const range_operator *op = get_range_op_handler (vr, code, expr_type);
1082   if (!op)
1083     return;
1084 
1085   if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
1086     return;
1087 
1088   value_range vr0 (*vr0_);
1089   value_range vr1 (*vr1_);
1090   if (vr0.undefined_p ())
1091     vr0.set_varying (expr_type);
1092   if (vr1.undefined_p ())
1093     vr1.set_varying (expr_type);
1094   vr0.normalize_addresses ();
1095   vr1.normalize_addresses ();
1096   op->fold_range (*vr, expr_type, vr0, vr1);
1097 }
1098 
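/* A minimal usage sketch (hypothetical ranges; this assumes the usual
   value_range constructors from value-range.h):

     value_range r0 (build_int_cst (integer_type_node, 1),
                     build_int_cst (integer_type_node, 5));
     value_range r1 (build_int_cst (integer_type_node, 10),
                     build_int_cst (integer_type_node, 20));
     value_range res;
     range_fold_binary_expr (&res, PLUS_EXPR, integer_type_node, &r0, &r1);

   after which RES holds the range [11, 25].  */
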
1099 /* Perform a unary operation on a range.  */
1100 
1101 void
1102 range_fold_unary_expr (value_range *vr,
1103 		       enum tree_code code, tree expr_type,
1104 		       const value_range *vr0,
1105 		       tree vr0_type)
1106 {
1107   if (!supported_types_p (vr, expr_type, vr0_type)
1108       || !defined_ranges_p (vr, vr0))
1109     return;
1110   const range_operator *op = get_range_op_handler (vr, code, expr_type);
1111   if (!op)
1112     return;
1113 
1114   if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1115     return;
1116 
1117   value_range vr0_cst (*vr0);
1118   vr0_cst.normalize_addresses ();
1119   op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1120 }
1121 
1122 /* If the range of values taken by OP can be inferred after STMT executes,
1123    return the comparison code (COMP_CODE_P) and value (VAL_P) that
1124    describes the inferred range.  Return true if a range could be
1125    inferred.  */
1126 
1127 bool
1128 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1129 {
1130   *val_p = NULL_TREE;
1131   *comp_code_p = ERROR_MARK;
1132 
1133   /* Do not attempt to infer anything in names that flow through
1134      abnormal edges.  */
1135   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1136     return false;
1137 
1138   /* If STMT is the last statement of a basic block with no normal
1139      successors, there is no point inferring anything about any of its
1140      operands.  We would not be able to find a proper insertion point
1141      for the assertion, anyway.  */
1142   if (stmt_ends_bb_p (stmt))
1143     {
1144       edge_iterator ei;
1145       edge e;
1146 
1147       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1148 	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1149 	  break;
1150       if (e == NULL)
1151 	return false;
1152     }
1153 
1154   if (infer_nonnull_range (stmt, op))
1155     {
1156       *val_p = build_int_cst (TREE_TYPE (op), 0);
1157       *comp_code_p = NE_EXPR;
1158       return true;
1159     }
1160 
1161   return false;
1162 }
1163 
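/* A rough example (hypothetical SSA name, for illustration only): for a
   dereferencing statement such as *p_1 = 0, infer_nonnull_range holds
   for p_1, so this returns true with *COMP_CODE_P = NE_EXPR and
   *VAL_P = 0, i.e. p_1 != 0 is known once the statement has executed.  */
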
1164 /* Dump assert_info structure.  */
1165 
1166 void
1167 dump_assert_info (FILE *file, const assert_info &assert)
1168 {
1169   fprintf (file, "Assert for: ");
1170   print_generic_expr (file, assert.name);
1171   fprintf (file, "\n\tPREDICATE: expr=[");
1172   print_generic_expr (file, assert.expr);
1173   fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
1174   fprintf (file, "val=[");
1175   print_generic_expr (file, assert.val);
1176   fprintf (file, "]\n\n");
1177 }
1178 
1179 DEBUG_FUNCTION void
1180 debug (const assert_info &assert)
1181 {
1182   dump_assert_info (stderr, assert);
1183 }
1184 
1185 /* Dump a vector of assert_info's.  */
1186 
1187 void
1188 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
1189 {
1190   for (unsigned i = 0; i < asserts.length (); ++i)
1191     {
1192       dump_assert_info (file, asserts[i]);
1193       fprintf (file, "\n");
1194     }
1195 }
1196 
1197 DEBUG_FUNCTION void
1198 debug (const vec<assert_info> &asserts)
1199 {
1200   dump_asserts_info (stderr, asserts);
1201 }
1202 
1203 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
1204 
1205 static void
1206 add_assert_info (vec<assert_info> &asserts,
1207 		 tree name, tree expr, enum tree_code comp_code, tree val)
1208 {
1209   assert_info info;
1210   info.comp_code = comp_code;
1211   info.name = name;
1212   if (TREE_OVERFLOW_P (val))
1213     val = drop_tree_overflow (val);
1214   info.val = val;
1215   info.expr = expr;
1216   asserts.safe_push (info);
1217   if (dump_enabled_p ())
1218     dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1219 		 "Adding assert for %T from %T %s %T\n",
1220 		 name, expr, op_symbol_code (comp_code), val);
1221 }
1222 
1223 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
1224    Extract a suitable test code and value and store them into *CODE_P and
1225    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
1226 
1227    If no extraction was possible, return FALSE, otherwise return TRUE.
1228 
1229    If INVERT is true, then we invert the result stored into *CODE_P.  */
1230 
1231 static bool
1232 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
1233 					 tree cond_op0, tree cond_op1,
1234 					 bool invert, enum tree_code *code_p,
1235 					 tree *val_p)
1236 {
1237   enum tree_code comp_code;
1238   tree val;
1239 
1240   /* Otherwise, we have a comparison of the form NAME COMP VAL
1241      or VAL COMP NAME.  */
1242   if (name == cond_op1)
1243     {
1244       /* If the predicate is of the form VAL COMP NAME, flip
1245 	 COMP around because we need to register NAME as the
1246 	 first operand in the predicate.  */
1247       comp_code = swap_tree_comparison (cond_code);
1248       val = cond_op0;
1249     }
1250   else if (name == cond_op0)
1251     {
1252       /* The comparison is of the form NAME COMP VAL, so the
1253 	 comparison code remains unchanged.  */
1254       comp_code = cond_code;
1255       val = cond_op1;
1256     }
1257   else
1258     gcc_unreachable ();
1259 
1260   /* Invert the comparison code as necessary.  */
1261   if (invert)
1262     comp_code = invert_tree_comparison (comp_code, 0);
1263 
1264   /* VRP only handles integral and pointer types.  */
1265   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
1266       && ! POINTER_TYPE_P (TREE_TYPE (val)))
1267     return false;
1268 
1269   /* Do not register always-false predicates.
1270      FIXME:  this works around a limitation in fold() when dealing with
1271      enumerations.  Given 'enum { N1, N2 } x;', fold will not
1272      fold 'if (x > N2)' to 'if (0)'.  */
1273   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
1274       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
1275     {
1276       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
1277       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
1278 
1279       if (comp_code == GT_EXPR
1280 	  && (!max
1281 	      || compare_values (val, max) == 0))
1282 	return false;
1283 
1284       if (comp_code == LT_EXPR
1285 	  && (!min
1286 	      || compare_values (val, min) == 0))
1287 	return false;
1288     }
1289   *code_p = comp_code;
1290   *val_p = val;
1291   return true;
1292 }
1293 
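/* A rough example (hypothetical SSA name, for illustration only): for
   NAME = x_3 and the condition 10 < x_3 (COND_CODE = LT_EXPR,
   COND_OP0 = 10, COND_OP1 = x_3), the comparison is flipped to
   x_3 > 10, so *CODE_P = GT_EXPR and *VAL_P = 10; with INVERT true
   (e.g. on the false edge) the result is instead x_3 <= 10.  */
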
1294 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
1295    (otherwise return VAL).  VAL and MASK must be zero-extended for
1296    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
1297    (to transform signed values into unsigned) and at the end xor
1298    SGNBIT back.  */
1299 
1300 wide_int
1301 masked_increment (const wide_int &val_in, const wide_int &mask,
1302 		  const wide_int &sgnbit, unsigned int prec)
1303 {
1304   wide_int bit = wi::one (prec), res;
1305   unsigned int i;
1306 
1307   wide_int val = val_in ^ sgnbit;
1308   for (i = 0; i < prec; i++, bit += bit)
1309     {
1310       res = mask;
1311       if ((res & bit) == 0)
1312 	continue;
1313       res = bit - 1;
1314       res = wi::bit_and_not (val + bit, res);
1315       res &= mask;
1316       if (wi::gtu_p (res, val))
1317 	return res ^ sgnbit;
1318     }
1319   return val ^ sgnbit;
1320 }
1321 
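/* A rough worked example (PREC = 8, values chosen for illustration):
   with VAL = 5, MASK = 0b1100 and SGNBIT = 0, the values RES satisfying
   (RES & MASK) == RES are 0, 4, 8 and 12, so the smallest one greater
   than VAL is 8, which is what the loop above returns.  */
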
1322 /* Helper for overflow_comparison_p
1323 
1324    OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
1325    OP1's defining statement to see if it ultimately has the form
1326    OP0 CODE (OP0 PLUS INTEGER_CST)
1327 
1328    If so, return TRUE indicating this is an overflow test and store into
1329    *NEW_CST an updated constant that can be used in a narrowed range test.
1330 
1331    REVERSED indicates if the comparison was originally:
1332 
1333    OP1 CODE' OP0.
1334 
1335    This affects how we build the updated constant.  */
1336 
1337 static bool
1338 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1339 		         bool follow_assert_exprs, bool reversed, tree *new_cst)
1340 {
1341   /* See if this is a relational operation between two SSA_NAMES with
1342      unsigned, overflow wrapping values.  If so, check it more deeply.  */
1343   if ((code == LT_EXPR || code == LE_EXPR
1344        || code == GE_EXPR || code == GT_EXPR)
1345       && TREE_CODE (op0) == SSA_NAME
1346       && TREE_CODE (op1) == SSA_NAME
1347       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1348       && TYPE_UNSIGNED (TREE_TYPE (op0))
1349       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1350     {
1351       gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1352 
1353       /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
1354       if (follow_assert_exprs)
1355 	{
1356 	  while (gimple_assign_single_p (op1_def)
1357 		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1358 	    {
1359 	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1360 	      if (TREE_CODE (op1) != SSA_NAME)
1361 		break;
1362 	      op1_def = SSA_NAME_DEF_STMT (op1);
1363 	    }
1364 	}
1365 
1366       /* Now look at the defining statement of OP1 to see if it adds
1367 	 or subtracts a nonzero constant from another operand.  */
1368       if (op1_def
1369 	  && is_gimple_assign (op1_def)
1370 	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1371 	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1372 	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1373 	{
1374 	  tree target = gimple_assign_rhs1 (op1_def);
1375 
1376 	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1377 	     for one where TARGET appears on the RHS.  */
1378 	  if (follow_assert_exprs)
1379 	    {
1380 	      /* Now see if that "other operand" is op0, following the chain
1381 		 of ASSERT_EXPRs if necessary.  */
1382 	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1383 	      while (op0 != target
1384 		     && gimple_assign_single_p (op0_def)
1385 		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1386 		{
1387 		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1388 		  if (TREE_CODE (op0) != SSA_NAME)
1389 		    break;
1390 		  op0_def = SSA_NAME_DEF_STMT (op0);
1391 		}
1392 	    }
1393 
1394 	  /* If we did not find our target SSA_NAME, then this is not
1395 	     an overflow test.  */
1396 	  if (op0 != target)
1397 	    return false;
1398 
1399 	  tree type = TREE_TYPE (op0);
1400 	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1401 	  tree inc = gimple_assign_rhs2 (op1_def);
1402 	  if (reversed)
1403 	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1404 	  else
1405 	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1406 	  return true;
1407 	}
1408     }
1409   return false;
1410 }
1411 
1412 /* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
1413    OP1's defining statement to see if it ultimately has the form
1414    OP0 CODE (OP0 PLUS INTEGER_CST)
1415 
1416    If so, return TRUE indicating this is an overflow test and store into
1417    *NEW_CST an updated constant that can be used in a narrowed range test.
1418 
1419    These statements are left as-is in the IL to facilitate discovery of
1420    {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
1421    the alternate range representation is often useful within VRP.  */
1422 
1423 bool
1424 overflow_comparison_p (tree_code code, tree name, tree val,
1425 		       bool use_equiv_p, tree *new_cst)
1426 {
1427   if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1428     return true;
1429   return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1430 				  use_equiv_p, true, new_cst);
1431 }
1432 
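/* A rough example (hypothetical SSA names, unsigned int, for
   illustration only): given y_2 = x_1 + 1, the test x_1 > y_2 is an
   overflow check; it is true only when x_1 + 1 wrapped.  This returns
   true with *NEW_CST = UINT_MAX - 1, so the test can be restated as the
   range test x_1 > UINT_MAX - 1, i.e. x_1 == UINT_MAX.  */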
1433 
1434 /* Try to register an edge assertion for SSA name NAME on edge E for
1435    the condition COND contributing to the conditional jump pointed to by BSI.
1436    Invert the condition COND if INVERT is true.  */
1437 
1438 static void
1439 register_edge_assert_for_2 (tree name, edge e,
1440 			    enum tree_code cond_code,
1441 			    tree cond_op0, tree cond_op1, bool invert,
1442 			    vec<assert_info> &asserts)
1443 {
1444   tree val;
1445   enum tree_code comp_code;
1446 
1447   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1448 						cond_op0,
1449 						cond_op1,
1450 						invert, &comp_code, &val))
1451     return;
1452 
1453   /* Queue the assert.  */
1454   tree x;
1455   if (overflow_comparison_p (comp_code, name, val, false, &x))
1456     {
1457       enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1458 				 ? GT_EXPR : LE_EXPR);
1459       add_assert_info (asserts, name, name, new_code, x);
1460     }
1461   add_assert_info (asserts, name, name, comp_code, val);
1462 
1463   /* In the case of NAME <= CST and NAME being defined as
1464      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1465      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
1466      This catches range and anti-range tests.  */
1467   if ((comp_code == LE_EXPR
1468        || comp_code == GT_EXPR)
1469       && TREE_CODE (val) == INTEGER_CST
1470       && TYPE_UNSIGNED (TREE_TYPE (val)))
1471     {
1472       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1473       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1474 
1475       /* Extract CST2 from the (optional) addition.  */
1476       if (is_gimple_assign (def_stmt)
1477 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1478 	{
1479 	  name2 = gimple_assign_rhs1 (def_stmt);
1480 	  cst2 = gimple_assign_rhs2 (def_stmt);
1481 	  if (TREE_CODE (name2) == SSA_NAME
1482 	      && TREE_CODE (cst2) == INTEGER_CST)
1483 	    def_stmt = SSA_NAME_DEF_STMT (name2);
1484 	}
1485 
1486       /* Extract NAME2 from the (optional) sign-changing cast.  */
1487       if (gimple_assign_cast_p (def_stmt))
1488 	{
1489 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
1490 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1491 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
1492 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
1493 	    name3 = gimple_assign_rhs1 (def_stmt);
1494 	}
1495 
1496       /* If name3 is used later, create an ASSERT_EXPR for it.  */
1497       if (name3 != NULL_TREE
1498       	  && TREE_CODE (name3) == SSA_NAME
1499 	  && (cst2 == NULL_TREE
1500 	      || TREE_CODE (cst2) == INTEGER_CST)
1501 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1502 	{
1503 	  tree tmp;
1504 
1505 	  /* Build an expression for the range test.  */
1506 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1507 	  if (cst2 != NULL_TREE)
1508 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1509 	  add_assert_info (asserts, name3, tmp, comp_code, val);
1510 	}
1511 
1512       /* If name2 is used later, create an ASSERT_EXPR for it.  */
1513       if (name2 != NULL_TREE
1514       	  && TREE_CODE (name2) == SSA_NAME
1515 	  && TREE_CODE (cst2) == INTEGER_CST
1516 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1517 	{
1518 	  tree tmp;
1519 
1520 	  /* Build an expression for the range test.  */
1521 	  tmp = name2;
1522 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
1523 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1524 	  if (cst2 != NULL_TREE)
1525 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1526 	  add_assert_info (asserts, name2, tmp, comp_code, val);
1527 	}
1528     }
1529 
1530   /* In the case of post-in/decrement tests like if (i++) ... and uses
1531      of the in/decremented value on the edge the extra name we want to
1532      assert for is not on the def chain of the name compared.  Instead
1533      it is in the set of use stmts.
1534      Similar cases happen for conversions that were simplified through
1535      fold_{sign_changed,widened}_comparison.  */
1536   if ((comp_code == NE_EXPR
1537        || comp_code == EQ_EXPR)
1538       && TREE_CODE (val) == INTEGER_CST)
1539     {
1540       imm_use_iterator ui;
1541       gimple *use_stmt;
1542       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1543 	{
1544 	  if (!is_gimple_assign (use_stmt))
1545 	    continue;
1546 
1547 	  /* Cut off to use-stmts that are dominating the predecessor.  */
1548 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1549 	    continue;
1550 
1551 	  tree name2 = gimple_assign_lhs (use_stmt);
1552 	  if (TREE_CODE (name2) != SSA_NAME)
1553 	    continue;
1554 
1555 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
1556 	  tree cst;
1557 	  if (code == PLUS_EXPR
1558 	      || code == MINUS_EXPR)
1559 	    {
1560 	      cst = gimple_assign_rhs2 (use_stmt);
1561 	      if (TREE_CODE (cst) != INTEGER_CST)
1562 		continue;
1563 	      cst = int_const_binop (code, val, cst);
1564 	    }
1565 	  else if (CONVERT_EXPR_CODE_P (code))
1566 	    {
1567 	      /* For truncating conversions we cannot record
1568 		 an inequality.  */
1569 	      if (comp_code == NE_EXPR
1570 		  && (TYPE_PRECISION (TREE_TYPE (name2))
1571 		      < TYPE_PRECISION (TREE_TYPE (name))))
1572 		continue;
1573 	      cst = fold_convert (TREE_TYPE (name2), val);
1574 	    }
1575 	  else
1576 	    continue;
1577 
1578 	  if (TREE_OVERFLOW_P (cst))
1579 	    cst = drop_tree_overflow (cst);
1580 	  add_assert_info (asserts, name2, name2, comp_code, cst);
1581 	}
1582     }
1583 
1584   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1585       && TREE_CODE (val) == INTEGER_CST)
1586     {
1587       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1588       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1589       tree val2 = NULL_TREE;
1590       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1591       wide_int mask = wi::zero (prec);
1592       unsigned int nprec = prec;
1593       enum tree_code rhs_code = ERROR_MARK;
1594 
1595       if (is_gimple_assign (def_stmt))
1596 	rhs_code = gimple_assign_rhs_code (def_stmt);
1597 
1598       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1599          assert that A != CST1 -+ CST2.  */
1600       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1601 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1602 	{
1603 	  tree op0 = gimple_assign_rhs1 (def_stmt);
1604 	  tree op1 = gimple_assign_rhs2 (def_stmt);
1605 	  if (TREE_CODE (op0) == SSA_NAME
1606 	      && TREE_CODE (op1) == INTEGER_CST)
1607 	    {
1608 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1609 					   ? MINUS_EXPR : PLUS_EXPR);
1610 	      op1 = int_const_binop (reverse_op, val, op1);
1611 	      if (TREE_OVERFLOW (op1))
1612 		op1 = drop_tree_overflow (op1);
1613 	      add_assert_info (asserts, op0, op0, comp_code, op1);
1614 	    }
1615 	}
1616 
1617       /* Add asserts for NAME cmp CST and NAME being defined
1618 	 as NAME = (int) NAME2.  */
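      /* For instance (hypothetical names, assuming 32-bit int), given an
         unsigned int u_2 and
           n_1 = (int) u_2;
           if (n_1 < 0)
         this records u_2 + 0x80000000 <= 0x7fffffff, i.e. u_2 is in
         [0x80000000, 0xffffffff].  */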
1619       if (!TYPE_UNSIGNED (TREE_TYPE (val))
1620 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
1621 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
1622 	  && gimple_assign_cast_p (def_stmt))
1623 	{
1624 	  name2 = gimple_assign_rhs1 (def_stmt);
1625 	  if (CONVERT_EXPR_CODE_P (rhs_code)
1626 	      && TREE_CODE (name2) == SSA_NAME
1627 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1628 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
1629 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
1630 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
1631 		  || !tree_int_cst_equal (val,
1632 					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
1633 	    {
1634 	      tree tmp, cst;
1635 	      enum tree_code new_comp_code = comp_code;
1636 
1637 	      cst = fold_convert (TREE_TYPE (name2),
1638 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
1639 	      /* Build an expression for the range test.  */
1640 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
1641 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
1642 				 fold_convert (TREE_TYPE (name2), val));
1643 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1644 		{
1645 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
1646 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
1647 				     build_int_cst (TREE_TYPE (name2), 1));
1648 		}
1649 	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
1650 	    }
1651 	}
1652 
1653       /* Add asserts for NAME cmp CST and NAME being defined as
1654 	 NAME = NAME2 >> CST2.
1655 
1656 	 Extract CST2 from the right shift.  */
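      /* For instance (hypothetical names, x_2 a signed 32-bit int), given
           n_1 = x_2 >> 4;
           if (n_1 == 3)
         this records (unsigned) x_2 - 48 <= 15, i.e. x_2 is in [48, 63].  */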
1657       if (rhs_code == RSHIFT_EXPR)
1658 	{
1659 	  name2 = gimple_assign_rhs1 (def_stmt);
1660 	  cst2 = gimple_assign_rhs2 (def_stmt);
1661 	  if (TREE_CODE (name2) == SSA_NAME
1662 	      && tree_fits_uhwi_p (cst2)
1663 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1664 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
1665 	      && type_has_mode_precision_p (TREE_TYPE (val)))
1666 	    {
1667 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
1668 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
1669 	    }
1670 	}
1671       if (val2 != NULL_TREE
1672 	  && TREE_CODE (val2) == INTEGER_CST
1673 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
1674 					    TREE_TYPE (val),
1675 					    val2, cst2), val))
1676 	{
1677 	  enum tree_code new_comp_code = comp_code;
1678 	  tree tmp, new_val;
1679 
1680 	  tmp = name2;
1681 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
1682 	    {
1683 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
1684 		{
1685 		  tree type = build_nonstandard_integer_type (prec, 1);
1686 		  tmp = build1 (NOP_EXPR, type, name2);
1687 		  val2 = fold_convert (type, val2);
1688 		}
1689 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
1690 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
1691 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
1692 	    }
1693 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1694 	    {
1695 	      wide_int minval
1696 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1697 	      new_val = val2;
1698 	      if (minval == wi::to_wide (new_val))
1699 		new_val = NULL_TREE;
1700 	    }
1701 	  else
1702 	    {
1703 	      wide_int maxval
1704 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1705 	      mask |= wi::to_wide (val2);
1706 	      if (wi::eq_p (mask, maxval))
1707 		new_val = NULL_TREE;
1708 	      else
1709 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
1710 	    }
1711 
1712 	  if (new_val)
1713 	    add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
1714 	}
1715 
1716       /* If we have a conversion that doesn't change the value of the source
1717          simply register the same assert for it.  */
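      /* For instance (hypothetical names, assuming long is wider than int),
         given an int m_2 and
           n_1 = (long) m_2;
           if (n_1 > 9)
         the widening conversion preserves the value, so m_2 > 9 is
         registered for m_2 as well.  */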
1718       if (CONVERT_EXPR_CODE_P (rhs_code))
1719 	{
1720 	  wide_int rmin, rmax;
1721 	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
1722 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1723 	      && TREE_CODE (rhs1) == SSA_NAME
1724 	      /* Make sure the relation preserves the upper/lower boundary of
1725 	         the range conservatively.  */
1726 	      && (comp_code == NE_EXPR
1727 		  || comp_code == EQ_EXPR
1728 		  || (TYPE_SIGN (TREE_TYPE (name))
1729 		      == TYPE_SIGN (TREE_TYPE (rhs1)))
1730 		  || ((comp_code == LE_EXPR
1731 		       || comp_code == LT_EXPR)
1732 		      && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
1733 		  || ((comp_code == GE_EXPR
1734 		       || comp_code == GT_EXPR)
1735 		      && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
1736 	      /* And the conversion does not alter the value we compare
1737 	         against and all values in rhs1 can be represented in
1738 		 the converted to type.  */
1739 	      && int_fits_type_p (val, TREE_TYPE (rhs1))
1740 	      && ((TYPE_PRECISION (TREE_TYPE (name))
1741 		   > TYPE_PRECISION (TREE_TYPE (rhs1)))
1742 		  || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
1743 		      && wi::fits_to_tree_p
1744 			   (widest_int::from (rmin,
1745 					      TYPE_SIGN (TREE_TYPE (rhs1))),
1746 			    TREE_TYPE (name))
1747 		      && wi::fits_to_tree_p
1748 			   (widest_int::from (rmax,
1749 					      TYPE_SIGN (TREE_TYPE (rhs1))),
1750 			    TREE_TYPE (name)))))
1751 	    add_assert_info (asserts, rhs1, rhs1,
1752 		 	     comp_code, fold_convert (TREE_TYPE (rhs1), val));
1753 	}
1754 
1755       /* Add asserts for NAME cmp CST and NAME being defined as
1756 	 NAME = NAME2 & CST2.
1757 
1758 	 Extract CST2 from the and.
1759 
1760 	 Also handle
1761 	 NAME = (unsigned) NAME2;
1762 	 casts where NAME's type is unsigned and has smaller precision
1763 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
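      /* For instance (hypothetical names, x_2 a 32-bit unsigned int), given
           n_1 = x_2 & 0xfffffffc;
           if (n_1 == 0x40)
         the code below records x_2 - 0x40 <= 3, i.e. x_2 is in
         [0x40, 0x43].  */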
1764       names[0] = NULL_TREE;
1765       names[1] = NULL_TREE;
1766       cst2 = NULL_TREE;
1767       if (rhs_code == BIT_AND_EXPR
1768 	  || (CONVERT_EXPR_CODE_P (rhs_code)
1769 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
1770 	      && TYPE_UNSIGNED (TREE_TYPE (val))
1771 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1772 		 > prec))
1773 	{
1774 	  name2 = gimple_assign_rhs1 (def_stmt);
1775 	  if (rhs_code == BIT_AND_EXPR)
1776 	    cst2 = gimple_assign_rhs2 (def_stmt);
1777 	  else
1778 	    {
1779 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
1780 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
1781 	    }
1782 	  if (TREE_CODE (name2) == SSA_NAME
1783 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1784 	      && TREE_CODE (cst2) == INTEGER_CST
1785 	      && !integer_zerop (cst2)
1786 	      && (nprec > 1
1787 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
1788 	    {
1789 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
1790 	      if (gimple_assign_cast_p (def_stmt2))
1791 		{
1792 		  names[1] = gimple_assign_rhs1 (def_stmt2);
1793 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
1794 		      || TREE_CODE (names[1]) != SSA_NAME
1795 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
1796 		      || (TYPE_PRECISION (TREE_TYPE (name2))
1797 			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
1798 		    names[1] = NULL_TREE;
1799 		}
1800 	      names[0] = name2;
1801 	    }
1802 	}
1803       if (names[0] || names[1])
1804 	{
1805 	  wide_int minv, maxv, valv, cst2v;
1806 	  wide_int tem, sgnbit;
1807 	  bool valid_p = false, valn, cst2n;
1808 	  enum tree_code ccode = comp_code;
1809 
1810 	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
1811 	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
1812 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
1813 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
1814 	  /* If CST2 doesn't have most significant bit set,
1815 	     but VAL is negative, we have comparison like
1816 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
1817 	  if (!cst2n && valn)
1818 	    ccode = ERROR_MARK;
1819 	  if (cst2n)
1820 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1821 	  else
1822 	    sgnbit = wi::zero (nprec);
1823 	  minv = valv & cst2v;
1824 	  switch (ccode)
1825 	    {
1826 	    case EQ_EXPR:
1827 	      /* Minimum unsigned value for equality is VAL & CST2
1828 		 (should be equal to VAL, otherwise we probably should
1829 		 have folded the comparison into false) and
1830 		 maximum unsigned value is VAL | ~CST2.  */
1831 	      maxv = valv | ~cst2v;
1832 	      valid_p = true;
1833 	      break;
1834 
1835 	    case NE_EXPR:
1836 	      tem = valv | ~cst2v;
1837 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
1838 	      if (valv == 0)
1839 		{
1840 		  cst2n = false;
1841 		  sgnbit = wi::zero (nprec);
1842 		  goto gt_expr;
1843 		}
1844 	      /* If (VAL | ~CST2) is all ones, handle it as
1845 		 (X & CST2) < VAL.  */
1846 	      if (tem == -1)
1847 		{
1848 		  cst2n = false;
1849 		  valn = false;
1850 		  sgnbit = wi::zero (nprec);
1851 		  goto lt_expr;
1852 		}
1853 	      if (!cst2n && wi::neg_p (cst2v))
1854 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1855 	      if (sgnbit != 0)
1856 		{
1857 		  if (valv == sgnbit)
1858 		    {
1859 		      cst2n = true;
1860 		      valn = true;
1861 		      goto gt_expr;
1862 		    }
1863 		  if (tem == wi::mask (nprec - 1, false, nprec))
1864 		    {
1865 		      cst2n = true;
1866 		      goto lt_expr;
1867 		    }
1868 		  if (!cst2n)
1869 		    sgnbit = wi::zero (nprec);
1870 		}
1871 	      break;
1872 
1873 	    case GE_EXPR:
1874 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
1875 		 is VAL and maximum unsigned value is ~0.  For signed
1876 		 comparison, if CST2 doesn't have most significant bit
1877 		 set, handle it similarly.  If CST2 has MSB set,
1878 		 the minimum is the same, and maximum is ~0U/2.  */
1879 	      if (minv != valv)
1880 		{
1881 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
1882 		     VAL.  */
1883 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
1884 		  if (minv == valv)
1885 		    break;
1886 		}
1887 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1888 	      valid_p = true;
1889 	      break;
1890 
1891 	    case GT_EXPR:
1892 	    gt_expr:
1893 	      /* Find out smallest MINV where MINV > VAL
1894 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
1895 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
1896 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
1897 	      if (minv == valv)
1898 		break;
1899 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1900 	      valid_p = true;
1901 	      break;
1902 
1903 	    case LE_EXPR:
1904 	      /* Minimum unsigned value for <= is 0 and maximum
1905 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
1906 		 Otherwise, find smallest VAL2 where VAL2 > VAL
1907 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1908 		 as maximum.
1909 		 For signed comparison, if CST2 doesn't have most
1910 		 significant bit set, handle it similarly.  If CST2 has
1911 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
1912 	      if (minv == valv)
1913 		maxv = valv;
1914 	      else
1915 		{
1916 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1917 		  if (maxv == valv)
1918 		    break;
1919 		  maxv -= 1;
1920 		}
1921 	      maxv |= ~cst2v;
1922 	      minv = sgnbit;
1923 	      valid_p = true;
1924 	      break;
1925 
1926 	    case LT_EXPR:
1927 	    lt_expr:
1928 	      /* Minimum unsigned value for < is 0 and maximum
1929 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
1930 		 Otherwise, find smallest VAL2 where VAL2 > VAL
1931 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1932 		 as maximum.
1933 		 For signed comparison, if CST2 doesn't have most
1934 		 significant bit set, handle it similarly.  If CST2 has
1935 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
1936 	      if (minv == valv)
1937 		{
1938 		  if (valv == sgnbit)
1939 		    break;
1940 		  maxv = valv;
1941 		}
1942 	      else
1943 		{
1944 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1945 		  if (maxv == valv)
1946 		    break;
1947 		}
1948 	      maxv -= 1;
1949 	      maxv |= ~cst2v;
1950 	      minv = sgnbit;
1951 	      valid_p = true;
1952 	      break;
1953 
1954 	    default:
1955 	      break;
1956 	    }
1957 	  if (valid_p
1958 	      && (maxv - minv) != -1)
1959 	    {
1960 	      tree tmp, new_val, type;
1961 	      int i;
1962 
1963 	      for (i = 0; i < 2; i++)
1964 		if (names[i])
1965 		  {
1966 		    wide_int maxv2 = maxv;
1967 		    tmp = names[i];
1968 		    type = TREE_TYPE (names[i]);
1969 		    if (!TYPE_UNSIGNED (type))
1970 		      {
1971 			type = build_nonstandard_integer_type (nprec, 1);
1972 			tmp = build1 (NOP_EXPR, type, names[i]);
1973 		      }
1974 		    if (minv != 0)
1975 		      {
1976 			tmp = build2 (PLUS_EXPR, type, tmp,
1977 				      wide_int_to_tree (type, -minv));
1978 			maxv2 = maxv - minv;
1979 		      }
1980 		    new_val = wide_int_to_tree (type, maxv2);
1981 		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
1982 		  }
1983 	    }
1984 	}
1985     }
1986 }
1987 
1988 /* OP is an operand of a truth value expression which is known to have
1989    a particular value.  Register any asserts for OP and for any
1990    operands in OP's defining statement.
1991 
1992    If CODE is EQ_EXPR, then we want to register OP is zero (false),
1993    if CODE is NE_EXPR, then we want to register OP is nonzero (true).   */
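/* For instance (hypothetical SSA names), if OP is _1 defined as
     _1 = a_2 > 5;
   and CODE is NE_EXPR, then in addition to the assertion _1 != 0 the
   defining comparison is examined and a_2 > 5 is registered on edge E
   as well.  */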
1994 
1995 static void
1996 register_edge_assert_for_1 (tree op, enum tree_code code,
1997 			    edge e, vec<assert_info> &asserts)
1998 {
1999   gimple *op_def;
2000   tree val;
2001   enum tree_code rhs_code;
2002 
2003   /* We only care about SSA_NAMEs.  */
2004   if (TREE_CODE (op) != SSA_NAME)
2005     return;
2006 
2007   /* We know that OP will have a zero or nonzero value.  */
2008   val = build_int_cst (TREE_TYPE (op), 0);
2009   add_assert_info (asserts, op, op, code, val);
2010 
2011   /* Now look at how OP is set.  If it's set from a comparison,
2012      a truth operation or some bit operations, then we may be able
2013      to register information about the operands of that assignment.  */
2014   op_def = SSA_NAME_DEF_STMT (op);
2015   if (gimple_code (op_def) != GIMPLE_ASSIGN)
2016     return;
2017 
2018   rhs_code = gimple_assign_rhs_code (op_def);
2019 
2020   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2021     {
2022       bool invert = (code == EQ_EXPR ? true : false);
2023       tree op0 = gimple_assign_rhs1 (op_def);
2024       tree op1 = gimple_assign_rhs2 (op_def);
2025 
2026       if (TREE_CODE (op0) == SSA_NAME)
2027         register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2028       if (TREE_CODE (op1) == SSA_NAME)
2029         register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2030     }
2031   else if ((code == NE_EXPR
2032 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2033 	   || (code == EQ_EXPR
2034 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2035     {
2036       /* Recurse on each operand.  */
2037       tree op0 = gimple_assign_rhs1 (op_def);
2038       tree op1 = gimple_assign_rhs2 (op_def);
2039       if (TREE_CODE (op0) == SSA_NAME
2040 	  && has_single_use (op0))
2041 	register_edge_assert_for_1 (op0, code, e, asserts);
2042       if (TREE_CODE (op1) == SSA_NAME
2043 	  && has_single_use (op1))
2044 	register_edge_assert_for_1 (op1, code, e, asserts);
2045     }
2046   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2047 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2048     {
2049       /* Recurse, flipping CODE.  */
2050       code = invert_tree_comparison (code, false);
2051       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2052     }
2053   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2054     {
2055       /* Recurse through the copy.  */
2056       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2057     }
2058   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2059     {
2060       /* Recurse through the type conversion, unless it is a narrowing
2061 	 conversion or conversion from non-integral type.  */
2062       tree rhs = gimple_assign_rhs1 (op_def);
2063       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2064 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
2065 	      <= TYPE_PRECISION (TREE_TYPE (op))))
2066 	register_edge_assert_for_1 (rhs, code, e, asserts);
2067     }
2068 }
2069 
2070 /* Check if comparison
2071      NAME COND_OP INTEGER_CST
2072    has a form of
2073      (X & 11...100..0) COND_OP XX...X00...0
2074    Such comparison can yield assertions like
2075      X >= XX...X00...0
2076      X <= XX...X11...1
2077    in case of COND_OP being EQ_EXPR or
2078      X < XX...X00...0
2079      X > XX...X11...1
2080    in case of NE_EXPR.  */
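/* For instance (hypothetical values, X a 32-bit unsigned int),
     (X & 0xffffff00) == 0x1200
   yields the assertions X >= 0x1200 and X <= 0x12ff.  */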
2081 
2082 static bool
2083 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2084 		      tree *new_name, tree *low, enum tree_code *low_code,
2085 		      tree *high, enum tree_code *high_code)
2086 {
2087   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2088 
2089   if (!is_gimple_assign (def_stmt)
2090       || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2091     return false;
2092 
2093   tree t = gimple_assign_rhs1 (def_stmt);
2094   tree maskt = gimple_assign_rhs2 (def_stmt);
2095   if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2096     return false;
2097 
2098   wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2099   wide_int inv_mask = ~mask;
2100   /* Must have been removed by now so don't bother optimizing.  */
2101   if (mask == 0 || inv_mask == 0)
2102     return false;
2103 
2104   /* Assume VALT is INTEGER_CST.  */
2105   wi::tree_to_wide_ref val = wi::to_wide (valt);
2106 
2107   if ((inv_mask & (inv_mask + 1)) != 0
2108       || (val & mask) != val)
2109     return false;
2110 
2111   bool is_range = cond_code == EQ_EXPR;
2112 
2113   tree type = TREE_TYPE (t);
2114   wide_int min = wi::min_value (type),
2115     max = wi::max_value (type);
2116 
2117   if (is_range)
2118     {
2119       *low_code = val == min ? ERROR_MARK : GE_EXPR;
2120       *high_code = val == max ? ERROR_MARK : LE_EXPR;
2121     }
2122   else
2123     {
2124       /* We can still generate assertion if one of alternatives
2125 	 is known to always be false.  */
2126       if (val == min)
2127 	{
2128 	  *low_code = (enum tree_code) 0;
2129 	  *high_code = GT_EXPR;
2130 	}
2131       else if ((val | inv_mask) == max)
2132 	{
2133 	  *low_code = LT_EXPR;
2134 	  *high_code = (enum tree_code) 0;
2135 	}
2136       else
2137 	return false;
2138     }
2139 
2140   *new_name = t;
2141   *low = wide_int_to_tree (type, val);
2142   *high = wide_int_to_tree (type, val | inv_mask);
2143 
2144   return true;
2145 }
2146 
2147 /* Try to register an edge assertion for SSA name NAME on edge E for
2148    the condition described by COND_CODE, COND_OP0 and COND_OP1 that
2149    contributes to the conditional jump at the end of E->src.  */
2150 
2151 void
2152 register_edge_assert_for (tree name, edge e,
2153 			  enum tree_code cond_code, tree cond_op0,
2154 			  tree cond_op1, vec<assert_info> &asserts)
2155 {
2156   tree val;
2157   enum tree_code comp_code;
2158   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2159 
2160   /* Do not attempt to infer anything in names that flow through
2161      abnormal edges.  */
2162   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2163     return;
2164 
2165   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2166 						cond_op0, cond_op1,
2167 						is_else_edge,
2168 						&comp_code, &val))
2169     return;
2170 
2171   /* Register ASSERT_EXPRs for name.  */
2172   register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2173 			      cond_op1, is_else_edge, asserts);
2174 
2175 
2176   /* If COND is effectively an equality test of an SSA_NAME against
2177      the value zero or one, then we may be able to assert values
2178      for SSA_NAMEs which flow into COND.  */
2179 
2180   /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
2181      statement of NAME we can assert both operands of the BIT_AND_EXPR
2182      have nonzero value.  */
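  /* For instance (hypothetical SSA names), given
       _1 = a_2 & b_3;
       if (_1 != 0)
     both a_2 != 0 and b_3 != 0 are registered on the true edge.  */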
2183   if ((comp_code == EQ_EXPR && integer_onep (val))
2184       || (comp_code == NE_EXPR && integer_zerop (val)))
2185     {
2186       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2187 
2188       if (is_gimple_assign (def_stmt)
2189 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2190 	{
2191 	  tree op0 = gimple_assign_rhs1 (def_stmt);
2192 	  tree op1 = gimple_assign_rhs2 (def_stmt);
2193 	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2194 	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2195 	}
2196       else if (is_gimple_assign (def_stmt)
2197 	       && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2198 		   == tcc_comparison))
2199 	register_edge_assert_for_1 (name, NE_EXPR, e, asserts);
2200     }
2201 
2202   /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
2203      statement of NAME we can assert both operands of the BIT_IOR_EXPR
2204      have zero value.  */
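  /* For instance (hypothetical SSA names), given
       _1 = a_2 | b_3;
       if (_1 == 0)
     both a_2 == 0 and b_3 == 0 are registered on the true edge.  */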
2205   if ((comp_code == EQ_EXPR && integer_zerop (val))
2206       || (comp_code == NE_EXPR
2207 	  && integer_onep (val)
2208 	  && TYPE_PRECISION (TREE_TYPE (name)) == 1))
2209     {
2210       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2211 
2212       /* For BIT_IOR_EXPR both operands are necessarily zero only
2213 	 when NAME == 0, or when the type precision is one.  */
2214       if (is_gimple_assign (def_stmt)
2215 	  && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
2216 	{
2217 	  tree op0 = gimple_assign_rhs1 (def_stmt);
2218 	  tree op1 = gimple_assign_rhs2 (def_stmt);
2219 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2220 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2221 	}
2222       else if (is_gimple_assign (def_stmt)
2223 	       && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2224 		   == tcc_comparison))
2225 	register_edge_assert_for_1 (name, EQ_EXPR, e, asserts);
2226     }
2227 
2228   /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
2229   if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2230       && TREE_CODE (val) == INTEGER_CST)
2231     {
2232       enum tree_code low_code, high_code;
2233       tree low, high;
2234       if (is_masked_range_test (name, val, comp_code, &name, &low,
2235 				&low_code, &high, &high_code))
2236 	{
2237 	  if (low_code != ERROR_MARK)
2238 	    register_edge_assert_for_2 (name, e, low_code, name,
2239 					low, /*invert*/false, asserts);
2240 	  if (high_code != ERROR_MARK)
2241 	    register_edge_assert_for_2 (name, e, high_code, name,
2242 					high, /*invert*/false, asserts);
2243 	}
2244     }
2245 }
2246 
2247 /* Handle
2248    _4 = x_3 & 31;
2249    if (_4 != 0)
2250      goto <bb 6>;
2251    else
2252      goto <bb 7>;
2253    <bb 6>:
2254    __builtin_unreachable ();
2255    <bb 7>:
2256    x_5 = ASSERT_EXPR <x_3, ...>;
2257    If x_3 has no other immediate uses (checked by caller),
2258    var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
2259    from the non-zero bitmask.  */
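/* For the fragment above this clears bits 0-4 in x_3's nonzero-bits mask,
   i.e. it performs set_nonzero_bits (x_3, get_nonzero_bits (x_3) & ~31).  */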
2260 
2261 void
2262 maybe_set_nonzero_bits (edge e, tree var)
2263 {
2264   basic_block cond_bb = e->src;
2265   gimple *stmt = last_stmt (cond_bb);
2266   tree cst;
2267 
2268   if (stmt == NULL
2269       || gimple_code (stmt) != GIMPLE_COND
2270       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
2271 				     ? EQ_EXPR : NE_EXPR)
2272       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
2273       || !integer_zerop (gimple_cond_rhs (stmt)))
2274     return;
2275 
2276   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2277   if (!is_gimple_assign (stmt)
2278       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
2279       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
2280     return;
2281   if (gimple_assign_rhs1 (stmt) != var)
2282     {
2283       gimple *stmt2;
2284 
2285       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
2286 	return;
2287       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2288       if (!gimple_assign_cast_p (stmt2)
2289 	  || gimple_assign_rhs1 (stmt2) != var
2290 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
2291 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
2292 			      != TYPE_PRECISION (TREE_TYPE (var))))
2293 	return;
2294     }
2295   cst = gimple_assign_rhs2 (stmt);
2296   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
2297 					  wi::to_wide (cst)));
2298 }
2299 
2300 /* Return true if STMT is interesting for VRP.  */
2301 
2302 bool
2303 stmt_interesting_for_vrp (gimple *stmt)
2304 {
2305   if (gimple_code (stmt) == GIMPLE_PHI)
2306     {
2307       tree res = gimple_phi_result (stmt);
2308       return (!virtual_operand_p (res)
2309 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
2310 		  || POINTER_TYPE_P (TREE_TYPE (res))));
2311     }
2312   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
2313     {
2314       tree lhs = gimple_get_lhs (stmt);
2315 
2316       /* In general, assignments with virtual operands are not useful
2317 	 for deriving ranges, with the obvious exception of calls to
2318 	 builtin functions.  */
2319       if (lhs && TREE_CODE (lhs) == SSA_NAME
2320 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2321 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
2322 	  && (is_gimple_call (stmt)
2323 	      || !gimple_vuse (stmt)))
2324 	return true;
2325       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
2326 	switch (gimple_call_internal_fn (stmt))
2327 	  {
2328 	  case IFN_ADD_OVERFLOW:
2329 	  case IFN_SUB_OVERFLOW:
2330 	  case IFN_MUL_OVERFLOW:
2331 	  case IFN_ATOMIC_COMPARE_EXCHANGE:
2332 	    /* These internal calls return _Complex integer type,
2333 	       but are interesting to VRP nevertheless.  */
2334 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
2335 	      return true;
2336 	    break;
2337 	  default:
2338 	    break;
2339 	  }
2340     }
2341   else if (gimple_code (stmt) == GIMPLE_COND
2342 	   || gimple_code (stmt) == GIMPLE_SWITCH)
2343     return true;
2344 
2345   return false;
2346 }
2347 
2348 
2349 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
2350    argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
2351    BB.  If no such ASSERT_EXPR is found, return OP.  */
2352 
2353 static tree
2354 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
2355 {
2356   imm_use_iterator imm_iter;
2357   gimple *use_stmt;
2358   use_operand_p use_p;
2359 
2360   if (TREE_CODE (op) == SSA_NAME)
2361     {
2362       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
2363 	{
2364 	  use_stmt = USE_STMT (use_p);
2365 	  if (use_stmt != stmt
2366 	      && gimple_assign_single_p (use_stmt)
2367 	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
2368 	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
2369 	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
2370 	    return gimple_assign_lhs (use_stmt);
2371 	}
2372     }
2373   return op;
2374 }
2375 
2376 /* A hack.  */
2377 static class vr_values *x_vr_values;
2378 
2379 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
2380    that includes the value VAL.  The search is restricted to the range
2381    [START_IDX, n - 1] where n is the size of VEC.
2382 
2383    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
2384    returned.
2385 
2386    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
2387    it is placed in IDX and false is returned.
2388 
2389    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
2390    returned.  */
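/* For instance (a hypothetical switch with no case ranges), with
   non-default CASE_LOWs {2, 5, 10} at indices 1..3 and START_IDX 1:
   VAL 5 returns true with *IDX 2; VAL 7 returns false with *IDX 3;
   VAL 11 returns false with *IDX 4 (== n).  */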
2391 
2392 bool
2393 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
2394 {
2395   size_t n = gimple_switch_num_labels (stmt);
2396   size_t low, high;
2397 
2398   /* Find case label for minimum of the value range or the next one.
2399      At each iteration we are searching in [low, high - 1]. */
2400 
2401   for (low = start_idx, high = n; high != low; )
2402     {
2403       tree t;
2404       int cmp;
2405       /* Note that i != high, so we never ask for n. */
2406       size_t i = (high + low) / 2;
2407       t = gimple_switch_label (stmt, i);
2408 
2409       /* Cache the result of comparing CASE_LOW and val.  */
2410       cmp = tree_int_cst_compare (CASE_LOW (t), val);
2411 
2412       if (cmp == 0)
2413 	{
2414 	  /* Ranges cannot be empty. */
2415 	  *idx = i;
2416 	  return true;
2417 	}
2418       else if (cmp > 0)
2419         high = i;
2420       else
2421 	{
2422 	  low = i + 1;
2423 	  if (CASE_HIGH (t) != NULL
2424 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2425 	    {
2426 	      *idx = i;
2427 	      return true;
2428 	    }
2429         }
2430     }
2431 
2432   *idx = high;
2433   return false;
2434 }
2435 
2436 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
2437    for values between MIN and MAX. The first index is placed in MIN_IDX. The
2438    last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
2439    then MAX_IDX < MIN_IDX.
2440    Returns true if the default label is not needed. */
2441 
2442 bool
2443 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
2444 		       size_t *max_idx)
2445 {
2446   size_t i, j;
2447   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
2448   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
2449 
2450   if (i == j
2451       && min_take_default
2452       && max_take_default)
2453     {
2454       /* Only the default case label reached.
2455          Return an empty range. */
2456       *min_idx = 1;
2457       *max_idx = 0;
2458       return false;
2459     }
2460   else
2461     {
2462       bool take_default = min_take_default || max_take_default;
2463       tree low, high;
2464       size_t k;
2465 
2466       if (max_take_default)
2467 	j--;
2468 
2469       /* If the case label range is continuous, we do not need
2470 	 the default case label.  Verify that.  */
2471       high = CASE_LOW (gimple_switch_label (stmt, i));
2472       if (CASE_HIGH (gimple_switch_label (stmt, i)))
2473 	high = CASE_HIGH (gimple_switch_label (stmt, i));
2474       for (k = i + 1; k <= j; ++k)
2475 	{
2476 	  low = CASE_LOW (gimple_switch_label (stmt, k));
2477 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
2478 	    {
2479 	      take_default = true;
2480 	      break;
2481 	    }
2482 	  high = low;
2483 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
2484 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
2485 	}
2486 
2487       *min_idx = i;
2488       *max_idx = j;
2489       return !take_default;
2490     }
2491 }
2492 
2493 /* Given a SWITCH_STMT, return the case label that encompasses the
2494    known possible values for the switch operand.  RANGE_OF_OP is a
2495    range for the known values of the switch operand.  */
2496 
2497 tree
2498 find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
2499 {
2500   if (range_of_op->undefined_p ()
2501       || range_of_op->varying_p ()
2502       || range_of_op->symbolic_p ())
2503     return NULL_TREE;
2504 
2505   size_t i, j;
2506   tree op = gimple_switch_index (switch_stmt);
2507   tree type = TREE_TYPE (op);
2508   tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
2509   tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
2510   find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
2511   if (i == j)
2512     {
2513       /* Look for exactly one label that encompasses the range of
2514 	 the operand.  */
2515       tree label = gimple_switch_label (switch_stmt, i);
2516       tree case_high
2517 	= CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
2518       int_range_max label_range (CASE_LOW (label), case_high);
2519       if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2520 	range_cast (label_range, range_of_op->type ());
2521       label_range.intersect (range_of_op);
2522       if (label_range == *range_of_op)
2523 	return label;
2524     }
2525   else if (i > j)
2526     {
2527       /* If there are no labels at all, take the default.  */
2528       return gimple_switch_label (switch_stmt, 0);
2529     }
2530   else
2531     {
2532       /* Otherwise, there are various labels that can encompass
2533 	 the range of operand.  In which case, see if the range of
2534 	 the operand is entirely *outside* the bounds of all the
2535 	 (non-default) case labels.  If so, take the default.  */
2536       unsigned n = gimple_switch_num_labels (switch_stmt);
2537       tree min_label = gimple_switch_label (switch_stmt, 1);
2538       tree max_label = gimple_switch_label (switch_stmt, n - 1);
2539       tree case_high = CASE_HIGH (max_label);
2540       if (!case_high)
2541 	case_high = CASE_LOW (max_label);
2542       int_range_max label_range (CASE_LOW (min_label), case_high);
2543       if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2544 	range_cast (label_range, range_of_op->type ());
2545       label_range.intersect (range_of_op);
2546       if (label_range.undefined_p ())
2547 	return gimple_switch_label (switch_stmt, 0);
2548     }
2549   return NULL_TREE;
2550 }
2551 
2552 struct case_info
2553 {
2554   tree expr;
2555   basic_block bb;
2556 };
2557 
2558 /* Location information for ASSERT_EXPRs.  Each instance of this
2559    structure describes an ASSERT_EXPR for an SSA name.  Since a single
2560    SSA name may have more than one assertion associated with it, these
2561    locations are kept in a linked list attached to the corresponding
2562    SSA name.  */
2563 struct assert_locus
2564 {
2565   /* Basic block where the assertion would be inserted.  */
2566   basic_block bb;
2567 
2568   /* Some assertions need to be inserted on an edge (e.g., assertions
2569      generated by COND_EXPRs).  In those cases, BB will be NULL.  */
2570   edge e;
2571 
2572   /* Pointer to the statement that generated this assertion.  */
2573   gimple_stmt_iterator si;
2574 
2575   /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
2576   enum tree_code comp_code;
2577 
2578   /* Value being compared against.  */
2579   tree val;
2580 
2581   /* Expression to compare.  */
2582   tree expr;
2583 
2584   /* Next node in the linked list.  */
2585   assert_locus *next;
2586 };
2587 
2588 /* Class to traverse the flowgraph looking for conditional jumps to
2589    insert ASSERT_EXPR range expressions.  These range expressions are
2590    meant to provide information to optimizations that need to reason
2591    in terms of value ranges.  They will not be expanded into RTL.  */
2592 
2593 class vrp_asserts
2594 {
2595 public:
2596   vrp_asserts (struct function *fn) : fun (fn) { }
2597 
2598   void insert_range_assertions ();
2599 
2600   /* Convert range assertion expressions into the implied copies and
2601      copy propagate away the copies.  */
2602   void remove_range_assertions ();
2603 
2604   /* Dump all the registered assertions for all the names to FILE.  */
2605   void dump (FILE *);
2606 
2607   /* Dump all the registered assertions for NAME to FILE.  */
2608   void dump (FILE *file, tree name);
2609 
2610   /* Dump all the registered assertions for NAME to stderr.  */
2611   void debug (tree name)
2612   {
2613     dump (stderr, name);
2614   }
2615 
2616   /* Dump all the registered assertions for all the names to stderr.  */
2617   void debug ()
2618   {
2619     dump (stderr);
2620   }
2621 
2622 private:
2623   /* Set of SSA names found live during the RPO traversal of the function
2624      for still active basic-blocks.  */
2625   live_names live;
2626 
2627   /* Function to work on.  */
2628   struct function *fun;
2629 
2630   /* If bit I is present, it means that SSA name N_i has a list of
2631      assertions that should be inserted in the IL.  */
2632   bitmap need_assert_for;
2633 
2634   /* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
2635      holds a list of ASSERT_LOCUS_T nodes that describe where
2636      ASSERT_EXPRs for SSA name N_I should be inserted.  */
2637   assert_locus **asserts_for;
2638 
2639   /* Finish found ASSERTS for E and register them at GSI.  */
2640   void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2641 					vec<assert_info> &asserts);
2642 
2643   /* Determine whether the outgoing edges of BB should receive an
2644      ASSERT_EXPR for each of the operands of BB's LAST statement.  The
2645      last statement of BB must be a SWITCH_EXPR.
2646 
2647      If any of the sub-graphs rooted at BB have an interesting use of
2648      the predicate operands, an assert location node is added to the
2649      list of assertions for the corresponding operands.  */
2650   void find_switch_asserts (basic_block bb, gswitch *last);
2651 
2652   /* Do an RPO walk over the function computing SSA name liveness
2653      on-the-fly and deciding on assert expressions to insert.  */
2654   void find_assert_locations ();
2655 
2656   /* Traverse all the statements in block BB looking for statements that
2657      may generate useful assertions for the SSA names in their operand.
2658      See method implementation commentary for more information.  */
2659   void find_assert_locations_in_bb (basic_block bb);
2660 
2661   /* Determine whether the outgoing edges of BB should receive an
2662      ASSERT_EXPR for each of the operands of BB's LAST statement.
2663      The last statement of BB must be a COND_EXPR.
2664 
2665      If any of the sub-graphs rooted at BB have an interesting use of
2666      the predicate operands, an assert location node is added to the
2667      list of assertions for the corresponding operands.  */
2668   void find_conditional_asserts (basic_block bb, gcond *last);
2669 
2670   /* Process all the insertions registered for every name N_i registered
2671      in NEED_ASSERT_FOR.  The list of assertions to be inserted are
2672      found in ASSERTS_FOR[i].  */
2673   void process_assert_insertions ();
2674 
2675   /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2676      'EXPR COMP_CODE VAL' at a location that dominates block BB or
2677      E->DEST, then register this location as a possible insertion point
2678      for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2679 
2680      BB, E and SI provide the exact insertion point for the new
2681      ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2682      on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2683      BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2684      must not be NULL.  */
2685   void register_new_assert_for (tree name, tree expr,
2686 				enum tree_code comp_code,
2687 				tree val, basic_block bb,
2688 				edge e, gimple_stmt_iterator si);
2689 
2690   /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2691      create a new SSA name N and return the assertion assignment
2692      'N = ASSERT_EXPR <V, V OP W>'.  */
2693   gimple *build_assert_expr_for (tree cond, tree v);
2694 
2695   /* Create an ASSERT_EXPR for NAME and insert it in the location
2696      indicated by LOC.  Return true if we made any edge insertions.  */
2697   bool process_assert_insertions_for (tree name, assert_locus *loc);
2698 
2699   /* Qsort callback for sorting assert locations.  */
2700   template <bool stable> static int compare_assert_loc (const void *,
2701 							const void *);
2702 
2703   /* Return true if STMT is a predicate expression involving floating
2704      point values.  */
2705   bool fp_predicate (gimple *stmt)
2706   {
2707     GIMPLE_CHECK (stmt, GIMPLE_COND);
2708     return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2709   }
2710 
2711   bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
2712 					  basic_block cond_bb);
2713 
2714   static int compare_case_labels (const void *, const void *);
2715 };
2716 
2717 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2718    create a new SSA name N and return the assertion assignment
2719    'N = ASSERT_EXPR <V, V OP W>'.  */
2720 
2721 gimple *
2722 vrp_asserts::build_assert_expr_for (tree cond, tree v)
2723 {
2724   tree a;
2725   gassign *assertion;
2726 
2727   gcc_assert (TREE_CODE (v) == SSA_NAME
2728 	      && COMPARISON_CLASS_P (cond));
2729 
2730   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2731   assertion = gimple_build_assign (NULL_TREE, a);
2732 
2733   /* The new ASSERT_EXPR, creates a new SSA name that replaces the
2734      operand of the ASSERT_EXPR.  Create it so the new name and the old one
2735      are registered in the replacement table so that we can fix the SSA web
2736      after adding all the ASSERT_EXPRs.  */
2737   tree new_def = create_new_def_for (v, assertion, NULL);
2738   /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2739      given we have to be able to fully propagate those out to re-create
2740      valid SSA when removing the asserts.  */
2741   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2742     SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2743 
2744   return assertion;
2745 }
2746 
2747 /* Dump all the registered assertions for NAME to FILE.  */
2748 
2749 void
2750 vrp_asserts::dump (FILE *file, tree name)
2751 {
2752   assert_locus *loc;
2753 
2754   fprintf (file, "Assertions to be inserted for ");
2755   print_generic_expr (file, name);
2756   fprintf (file, "\n");
2757 
2758   loc = asserts_for[SSA_NAME_VERSION (name)];
2759   while (loc)
2760     {
2761       fprintf (file, "\t");
2762       print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2763       fprintf (file, "\n\tBB #%d", loc->bb->index);
2764       if (loc->e)
2765 	{
2766 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2767 	           loc->e->dest->index);
2768 	  dump_edge_info (file, loc->e, dump_flags, 0);
2769 	}
2770       fprintf (file, "\n\tPREDICATE: ");
2771       print_generic_expr (file, loc->expr);
2772       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2773       print_generic_expr (file, loc->val);
2774       fprintf (file, "\n\n");
2775       loc = loc->next;
2776     }
2777 
2778   fprintf (file, "\n");
2779 }
2780 
2781 /* Dump all the registered assertions for all the names to FILE.  */
2782 
2783 void
2784 vrp_asserts::dump (FILE *file)
2785 {
2786   unsigned i;
2787   bitmap_iterator bi;
2788 
2789   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2790   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2791     dump (file, ssa_name (i));
2792   fprintf (file, "\n");
2793 }
2794 
2795 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2796    'EXPR COMP_CODE VAL' at a location that dominates block BB or
2797    E->DEST, then register this location as a possible insertion point
2798    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2799 
2800    BB, E and SI provide the exact insertion point for the new
2801    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2802    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2803    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2804    must not be NULL.  */
2805 
2806 void
2807 vrp_asserts::register_new_assert_for (tree name, tree expr,
2808 				      enum tree_code comp_code,
2809 				      tree val,
2810 				      basic_block bb,
2811 				      edge e,
2812 				      gimple_stmt_iterator si)
2813 {
2814   assert_locus *n, *loc, *last_loc;
2815   basic_block dest_bb;
2816 
2817   gcc_checking_assert (bb == NULL || e == NULL);
2818 
2819   if (e == NULL)
2820     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2821 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2822 
2823   /* Never build an assert comparing against an integer constant with
2824      TREE_OVERFLOW set.  This confuses our undefined overflow warning
2825      machinery.  */
2826   if (TREE_OVERFLOW_P (val))
2827     val = drop_tree_overflow (val);
2828 
2829   /* The new assertion A will be inserted at BB or E.  We need to
2830      determine if the new location is dominated by a previously
2831      registered location for A.  If we are doing an edge insertion,
2832      assume that A will be inserted at E->DEST.  Note that this is not
2833      necessarily true.
2834 
2835      If E is a critical edge, it will be split.  But even if E is
2836      split, the new block will dominate the same set of blocks that
2837      E->DEST dominates.
2838 
2839      The reverse, however, is not true, blocks dominated by E->DEST
2840      will not be dominated by the new block created to split E.  So,
2841      if the insertion location is on a critical edge, we will not use
2842      the new location to move another assertion previously registered
2843      at a block dominated by E->DEST.  */
2844   dest_bb = (bb) ? bb : e->dest;
2845 
2846   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2847      VAL at a block dominating DEST_BB, then we don't need to insert a new
2848      one.  Similarly, if the same assertion already exists at a block
2849      dominated by DEST_BB and the new location is not on a critical
2850      edge, then update the existing location for the assertion (i.e.,
2851      move the assertion up in the dominance tree).
2852 
2853      Note, this is implemented as a simple linked list because there
2854      should not be more than a handful of assertions registered per
2855      name.  If this becomes a performance problem, a table hashed by
2856      COMP_CODE and VAL could be implemented.  */
2857   loc = asserts_for[SSA_NAME_VERSION (name)];
2858   last_loc = loc;
2859   while (loc)
2860     {
2861       if (loc->comp_code == comp_code
2862 	  && (loc->val == val
2863 	      || operand_equal_p (loc->val, val, 0))
2864 	  && (loc->expr == expr
2865 	      || operand_equal_p (loc->expr, expr, 0)))
2866 	{
2867 	  /* If E is not a critical edge and DEST_BB
2868 	     dominates the existing location for the assertion, move
2869 	     the assertion up in the dominance tree by updating its
2870 	     location information.  */
2871 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
2872 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2873 	    {
2874 	      loc->bb = dest_bb;
2875 	      loc->e = e;
2876 	      loc->si = si;
2877 	      return;
2878 	    }
2879 	}
2880 
2881       /* Update the last node of the list and move to the next one.  */
2882       last_loc = loc;
2883       loc = loc->next;
2884     }
2885 
2886   /* If we didn't find an assertion already registered for
2887      NAME COMP_CODE VAL, add a new one at the end of the list of
2888      assertions associated with NAME.  */
2889   n = XNEW (struct assert_locus);
2890   n->bb = dest_bb;
2891   n->e = e;
2892   n->si = si;
2893   n->comp_code = comp_code;
2894   n->val = val;
2895   n->expr = expr;
2896   n->next = NULL;
2897 
2898   if (last_loc)
2899     last_loc->next = n;
2900   else
2901     asserts_for[SSA_NAME_VERSION (name)] = n;
2902 
2903   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2904 }
2905 
2906 /* Finish found ASSERTS for E and register them at GSI.  */
2907 
2908 void
2909 vrp_asserts::finish_register_edge_assert_for (edge e,
2910 					      gimple_stmt_iterator gsi,
2911 					      vec<assert_info> &asserts)
2912 {
2913   for (unsigned i = 0; i < asserts.length (); ++i)
2914     /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2915        reachable from E.  */
2916     if (live.live_on_edge_p (asserts[i].name, e))
2917       register_new_assert_for (asserts[i].name, asserts[i].expr,
2918 			       asserts[i].comp_code, asserts[i].val,
2919 			       NULL, e, gsi);
2920 }
2921 
2922 /* Determine whether the outgoing edges of BB should receive an
2923    ASSERT_EXPR for each of the operands of BB's LAST statement.
2924    The last statement of BB must be a COND_EXPR.
2925 
2926    If any of the sub-graphs rooted at BB have an interesting use of
2927    the predicate operands, an assert location node is added to the
2928    list of assertions for the corresponding operands.  */
2929 
2930 void
2931 vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
2932 {
2933   gimple_stmt_iterator bsi;
2934   tree op;
2935   edge_iterator ei;
2936   edge e;
2937   ssa_op_iter iter;
2938 
2939   bsi = gsi_for_stmt (last);
2940 
2941   /* Look for uses of the operands in each of the sub-graphs
2942      rooted at BB.  We need to check each of the outgoing edges
2943      separately, so that we know what kind of ASSERT_EXPR to
2944      insert.  */
2945   FOR_EACH_EDGE (e, ei, bb->succs)
2946     {
2947       if (e->dest == bb)
2948 	continue;
2949 
2950       /* Register the necessary assertions for each operand in the
2951 	 conditional predicate.  */
2952       auto_vec<assert_info, 8> asserts;
2953       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2954 	register_edge_assert_for (op, e,
2955 				  gimple_cond_code (last),
2956 				  gimple_cond_lhs (last),
2957 				  gimple_cond_rhs (last), asserts);
2958       finish_register_edge_assert_for (e, bsi, asserts);
2959     }
2960 }
2961 
2962 /* Compare two case labels sorting first by the destination bb index
2963    and then by the case value.  */
2964 
2965 int
2966 vrp_asserts::compare_case_labels (const void *p1, const void *p2)
2967 {
2968   const struct case_info *ci1 = (const struct case_info *) p1;
2969   const struct case_info *ci2 = (const struct case_info *) p2;
2970   int idx1 = ci1->bb->index;
2971   int idx2 = ci2->bb->index;
2972 
2973   if (idx1 < idx2)
2974     return -1;
2975   else if (idx1 == idx2)
2976     {
2977       /* Make sure the default label is first in a group.  */
2978       if (!CASE_LOW (ci1->expr))
2979 	return -1;
2980       else if (!CASE_LOW (ci2->expr))
2981 	return 1;
2982       else
2983 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
2984 				     CASE_LOW (ci2->expr));
2985     }
2986   else
2987     return 1;
2988 }
2989 
2990 /* Determine whether the outgoing edges of BB should receive an
2991    ASSERT_EXPR for each of the operands of BB's LAST statement.
2992    The last statement of BB must be a SWITCH_EXPR.
2993 
2994    If any of the sub-graphs rooted at BB have an interesting use of
2995    the predicate operands, an assert location node is added to the
2996    list of assertions for the corresponding operands.  */
2997 
2998 void
2999 vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
3000 {
3001   gimple_stmt_iterator bsi;
3002   tree op;
3003   edge e;
3004   struct case_info *ci;
3005   size_t n = gimple_switch_num_labels (last);
3006 #if GCC_VERSION >= 4000
3007   unsigned int idx;
3008 #else
3009   /* Work around GCC 3.4 bug (PR 37086).  */
3010   volatile unsigned int idx;
3011 #endif
3012 
3013   bsi = gsi_for_stmt (last);
3014   op = gimple_switch_index (last);
3015   if (TREE_CODE (op) != SSA_NAME)
3016     return;
3017 
3018   /* Build a vector of case labels sorted by destination label.  */
3019   ci = XNEWVEC (struct case_info, n);
3020   for (idx = 0; idx < n; ++idx)
3021     {
3022       ci[idx].expr = gimple_switch_label (last, idx);
3023       ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
3024     }
3025   edge default_edge = find_edge (bb, ci[0].bb);
3026   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3027 
3028   for (idx = 0; idx < n; ++idx)
3029     {
3030       tree min, max;
3031       tree cl = ci[idx].expr;
3032       basic_block cbb = ci[idx].bb;
3033 
3034       min = CASE_LOW (cl);
3035       max = CASE_HIGH (cl);
3036 
3037       /* If there are multiple case labels with the same destination
3038 	 we need to combine them to a single value range for the edge.  */
3039       if (idx + 1 < n && cbb == ci[idx + 1].bb)
3040 	{
3041 	  /* Skip labels until the last of the group.  */
3042 	  do {
3043 	    ++idx;
3044 	  } while (idx < n && cbb == ci[idx].bb);
3045 	  --idx;
3046 
3047 	  /* Pick up the maximum of the case label range.  */
3048 	  if (CASE_HIGH (ci[idx].expr))
3049 	    max = CASE_HIGH (ci[idx].expr);
3050 	  else
3051 	    max = CASE_LOW (ci[idx].expr);
3052 	}
3053 
3054       /* Can't extract a useful assertion out of a range that includes the
3055 	 default label.  */
3056       if (min == NULL_TREE)
3057 	continue;
3058 
3059       /* Find the edge to register the assert expr on.  */
3060       e = find_edge (bb, cbb);
3061 
3062       /* Register the necessary assertions for the operand in the
3063 	 SWITCH_EXPR.  */
3064       auto_vec<assert_info, 8> asserts;
3065       register_edge_assert_for (op, e,
3066 				max ? GE_EXPR : EQ_EXPR,
3067 				op, fold_convert (TREE_TYPE (op), min),
3068 				asserts);
3069       if (max)
3070 	register_edge_assert_for (op, e, LE_EXPR, op,
3071 				  fold_convert (TREE_TYPE (op), max),
3072 				  asserts);
3073       finish_register_edge_assert_for (e, bsi, asserts);
3074     }
3075 
3076   XDELETEVEC (ci);
3077 
3078   if (!live.live_on_edge_p (op, default_edge))
3079     return;
3080 
3081   /* Now register along the default label assertions that correspond to the
3082      anti-range of each label.  */
3083   int insertion_limit = param_max_vrp_switch_assertions;
3084   if (insertion_limit == 0)
3085     return;
3086 
3087   /* We can't do this if the default case shares a label with another case.  */
3088   tree default_cl = gimple_switch_default_label (last);
3089   for (idx = 1; idx < n; idx++)
3090     {
3091       tree min, max;
3092       tree cl = gimple_switch_label (last, idx);
3093       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3094 	continue;
3095 
3096       min = CASE_LOW (cl);
3097       max = CASE_HIGH (cl);
3098 
3099       /* Combine contiguous case ranges to reduce the number of assertions
3100 	 to insert.  */
3101       for (idx = idx + 1; idx < n; idx++)
3102 	{
3103 	  tree next_min, next_max;
3104 	  tree next_cl = gimple_switch_label (last, idx);
3105 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3106 	    break;
3107 
3108 	  next_min = CASE_LOW (next_cl);
3109 	  next_max = CASE_HIGH (next_cl);
3110 
3111 	  wide_int difference = (wi::to_wide (next_min)
3112 				 - wi::to_wide (max ? max : min));
3113 	  if (wi::eq_p (difference, 1))
3114 	    max = next_max ? next_max : next_min;
3115 	  else
3116 	    break;
3117 	}
3118       idx--;
3119 
3120       if (max == NULL_TREE)
3121 	{
3122 	  /* Register the assertion OP != MIN.  */
3123 	  auto_vec<assert_info, 8> asserts;
3124 	  min = fold_convert (TREE_TYPE (op), min);
3125 	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3126 				    asserts);
3127 	  finish_register_edge_assert_for (default_edge, bsi, asserts);
3128 	}
3129       else
3130 	{
3131 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3132 	     which will give OP the anti-range ~[MIN,MAX].  */
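	  /* For example, with MIN == 3 and MAX == 7 this registers
	     (unsigned)OP - 3 > 4, which thanks to unsigned wrap-around
	     holds exactly when OP < 3 or OP > 7.  */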
3133 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3134 	  min = fold_convert (TREE_TYPE (uop), min);
3135 	  max = fold_convert (TREE_TYPE (uop), max);
3136 
3137 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3138 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
3139 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
3140 				   NULL, default_edge, bsi);
3141 	}
3142 
3143       if (--insertion_limit == 0)
3144 	break;
3145     }
3146 }
3147 
3148 /* Traverse all the statements in block BB looking for statements that
3149    may generate useful assertions for the SSA names in their operands.
3150    If a statement produces a useful assertion A for name N_i, then the
3151    list of assertions already generated for N_i is scanned to
3152    determine if A is actually needed.
3153 
3154    If N_i already had the assertion A at a location dominating the
3155    current location, then nothing needs to be done.  Otherwise, the
3156    new location for A is recorded instead.
3157 
3158    1- For every statement S in BB, all the variables used by S are
3159       added to bitmap FOUND_IN_SUBGRAPH.
3160 
3161    2- If statement S uses an operand N in a way that exposes a known
3162       value range for N, then if N was not already generated by an
3163       ASSERT_EXPR, create a new assert location for N.  For instance,
3164       if N is a pointer and the statement dereferences it, we can
3165       assume that N is not NULL.
3166 
3167    3- COND_EXPRs are a special case of #2.  We can derive range
3168       information from the predicate but need to insert different
3169       ASSERT_EXPRs for each of the sub-graphs rooted at the
3170       conditional block.  If the last statement of BB is a conditional
3171       expression of the form 'X op Y', then
3172 
3173       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3174 
3175       b) If the conditional is the only entry point to the sub-graph
3176 	 corresponding to the THEN_CLAUSE, recurse into it.  On
3177 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3178 	 an ASSERT_EXPR is added for the corresponding variable.
3179 
3180       c) Repeat step (b) on the ELSE_CLAUSE.
3181 
3182       d) Mark X and Y in FOUND_IN_SUBGRAPH.
3183 
3184       For instance,
3185 
3186 	    if (a == 9)
3187 	      b = a;
3188 	    else
3189 	      b = c + 1;
3190 
3191       In this case, an assertion on the THEN clause is useful to
3192       determine that 'a' is always 9 on that edge.  However, an assertion
3193       on the ELSE clause would be unnecessary.
3194 
3195    4- If BB does not end in a conditional expression, then we recurse
3196       into BB's dominator children.
3197 
3198    At the end of the recursive traversal, every SSA name will have a
3199    list of locations where ASSERT_EXPRs should be added.  When a new
3200    location for name N is found, it is registered by calling
3201    register_new_assert_for.  That function keeps track of all the
3202    registered assertions to prevent adding unnecessary assertions.
3203    For instance, if a pointer P_4 is dereferenced more than once in a
3204    dominator tree, only the location dominating all the dereferences of
3205    P_4 will receive an ASSERT_EXPR.  */
3206 
3207 void
3208 vrp_asserts::find_assert_locations_in_bb (basic_block bb)
3209 {
3210   gimple *last;
3211 
3212   last = last_stmt (bb);
3213 
3214   /* If BB's last statement is a conditional statement involving integer
3215      operands, determine if we need to add ASSERT_EXPRs.  */
3216   if (last
3217       && gimple_code (last) == GIMPLE_COND
3218       && !fp_predicate (last)
3219       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3220     find_conditional_asserts (bb, as_a <gcond *> (last));
3221 
3222   /* If BB's last statement is a switch statement involving integer
3223      operands, determine if we need to add ASSERT_EXPRs.  */
3224   if (last
3225       && gimple_code (last) == GIMPLE_SWITCH
3226       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3227     find_switch_asserts (bb, as_a <gswitch *> (last));
3228 
3229   /* Traverse all the statements in BB marking used names and looking
3230      for statements that may infer assertions for their used operands.  */
3231   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3232        gsi_prev (&si))
3233     {
3234       gimple *stmt;
3235       tree op;
3236       ssa_op_iter i;
3237 
3238       stmt = gsi_stmt (si);
3239 
3240       if (is_gimple_debug (stmt))
3241 	continue;
3242 
3243       /* See if we can derive an assertion for any of STMT's operands.  */
3244       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3245 	{
3246 	  tree value;
3247 	  enum tree_code comp_code;
3248 
3249 	  /* If op is not live beyond this stmt, do not bother to insert
3250 	     asserts for it.  */
3251 	  if (!live.live_on_block_p (op, bb))
3252 	    continue;
3253 
3254 	  /* If OP is used in such a way that we can infer a value
3255 	     range for it, and we don't find a previous assertion for
3256 	     it, create a new assertion location node for OP.  */
3257 	  if (infer_value_range (stmt, op, &comp_code, &value))
3258 	    {
3259 	      /* If we are able to infer a nonzero value range for OP,
3260 		 then walk backwards through the use-def chain to see if OP
3261 		 was set via a typecast.
3262 
3263 		 If so, then we can also infer a nonzero value range
3264 		 for the operand of the NOP_EXPR.  */
3265 	      if (comp_code == NE_EXPR && integer_zerop (value))
3266 		{
3267 		  tree t = op;
3268 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3269 
3270 		  while (is_gimple_assign (def_stmt)
3271 			 && CONVERT_EXPR_CODE_P
3272 			     (gimple_assign_rhs_code (def_stmt))
3273 			 && TREE_CODE
3274 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3275 			 && POINTER_TYPE_P
3276 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3277 		    {
3278 		      t = gimple_assign_rhs1 (def_stmt);
3279 		      def_stmt = SSA_NAME_DEF_STMT (t);
3280 
3281 		      /* Note we want to register the assert for the
3282 			 operand of the NOP_EXPR after SI, not after the
3283 			 conversion.  */
3284 		      if (live.live_on_block_p (t, bb))
3285 			register_new_assert_for (t, t, comp_code, value,
3286 						 bb, NULL, si);
3287 		    }
3288 		}
3289 
3290 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3291 	    }
3292 	}
3293 
3294       /* Update live.  */
3295       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3296 	live.set (op, bb);
3297       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3298 	live.clear (op, bb);
3299     }
3300 
3301   /* Traverse all PHI nodes in BB, updating live.  */
3302   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3303        gsi_next (&si))
3304     {
3305       use_operand_p arg_p;
3306       ssa_op_iter i;
3307       gphi *phi = si.phi ();
3308       tree res = gimple_phi_result (phi);
3309 
3310       if (virtual_operand_p (res))
3311 	continue;
3312 
3313       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3314 	{
3315 	  tree arg = USE_FROM_PTR (arg_p);
3316 	  if (TREE_CODE (arg) == SSA_NAME)
3317 	    live.set (arg, bb);
3318 	}
3319 
3320       live.clear (res, bb);
3321     }
3322 }
3323 
3324 /* Do an RPO walk over the function computing SSA name liveness
3325    on-the-fly and deciding on assert expressions to insert.  */
3326 
3327 void
3328 vrp_asserts::find_assert_locations (void)
3329 {
3330   int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3331   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3332   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
3333   int rpo_cnt, i;
3334 
3335   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3336   for (i = 0; i < rpo_cnt; ++i)
3337     bb_rpo[rpo[i]] = i;
3338 
3339   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
3340      the order in which we compute liveness and insert asserts, we would
3341      otherwise fail to insert asserts into the loop latch.  */
3342   loop_p loop;
3343   FOR_EACH_LOOP (loop, 0)
3344     {
3345       i = loop->latch->index;
3346       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3347       for (gphi_iterator gsi = gsi_start_phis (loop->header);
3348 	   !gsi_end_p (gsi); gsi_next (&gsi))
3349 	{
3350 	  gphi *phi = gsi.phi ();
3351 	  if (virtual_operand_p (gimple_phi_result (phi)))
3352 	    continue;
3353 	  tree arg = gimple_phi_arg_def (phi, j);
3354 	  if (TREE_CODE (arg) == SSA_NAME)
3355 	    live.set (arg, loop->latch);
3356 	}
3357     }
3358 
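  /* Liveness is a backward problem, so visit the blocks in the reverse
     of the RPO order (roughly from the exits toward the entry), merging
     each block's live names into its predecessors as we go.  */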
3359   for (i = rpo_cnt - 1; i >= 0; --i)
3360     {
3361       basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3362       edge e;
3363       edge_iterator ei;
3364 
3365       /* Process BB and update the live information with uses in
3366          this block.  */
3367       find_assert_locations_in_bb (bb);
3368 
3369       /* Merge liveness into the predecessor blocks and free it.  */
3370       if (!live.block_has_live_names_p (bb))
3371 	{
3372 	  int pred_rpo = i;
3373 	  FOR_EACH_EDGE (e, ei, bb->preds)
3374 	    {
3375 	      int pred = e->src->index;
3376 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3377 		continue;
3378 
3379 	      live.merge (e->src, bb);
3380 
3381 	      if (bb_rpo[pred] < pred_rpo)
3382 		pred_rpo = bb_rpo[pred];
3383 	    }
3384 
3385 	  /* Record the RPO number of the last visited block that needs
3386 	     live information from this block.  */
3387 	  last_rpo[rpo[i]] = pred_rpo;
3388 	}
3389       else
3390 	live.clear_block (bb);
3391 
3392       /* We can free all successors' live bitmaps if all their
3393          predecessors have been visited already.  */
3394       FOR_EACH_EDGE (e, ei, bb->succs)
3395 	if (last_rpo[e->dest->index] == i)
3396 	  live.clear_block (e->dest);
3397     }
3398 
3399   XDELETEVEC (rpo);
3400   XDELETEVEC (bb_rpo);
3401   XDELETEVEC (last_rpo);
3402 }
3403 
3404 /* Create an ASSERT_EXPR for NAME and insert it in the location
3405    indicated by LOC.  Return true if we made any edge insertions.  */
3406 
3407 bool
3408 vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
3409 {
3410   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
3411   gimple *stmt;
3412   tree cond;
3413   gimple *assert_stmt;
3414   edge_iterator ei;
3415   edge e;
3416 
3417   /* If we have X <=> X do not insert an assert expr for that.  */
3418   if (loc->expr == loc->val)
3419     return false;
3420 
3421   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3422   assert_stmt = build_assert_expr_for (cond, name);
3423   if (loc->e)
3424     {
3425       /* We have been asked to insert the assertion on an edge.  This
3426 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
3427       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3428 			   || (gimple_code (gsi_stmt (loc->si))
3429 			       == GIMPLE_SWITCH));
3430 
3431       gsi_insert_on_edge (loc->e, assert_stmt);
3432       return true;
3433     }
3434 
3435   /* If the stmt iterator points at the end then this is an insertion
3436      at the beginning of a block.  */
3437   if (gsi_end_p (loc->si))
3438     {
3439       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3440       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3441       return false;
3442 
3443     }
3444   /* Otherwise, we can insert right after LOC->SI provided the
3445      statement is not the last statement in the block.  */
3446   stmt = gsi_stmt (loc->si);
3447   if (!stmt_ends_bb_p (stmt))
3448     {
3449       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3450       return false;
3451     }
3452 
3453   /* If STMT must be the last statement in BB, we can only insert new
3454      assertions on the non-abnormal edge out of BB.  Note that since
3455      STMT is not control flow, there may only be one non-abnormal/eh edge
3456      out of BB.  */
3457   FOR_EACH_EDGE (e, ei, loc->bb->succs)
3458     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3459       {
3460 	gsi_insert_on_edge (e, assert_stmt);
3461 	return true;
3462       }
3463 
3464   gcc_unreachable ();
3465 }
3466 
3467 /* Qsort helper for sorting assert locations.  If STABLE is true, don't
3468    use iterative_hash_expr because it can be unstable for -fcompare-debug;
3469    on the other hand, some pointers might be NULL.  */
3470 
3471 template <bool stable>
3472 int
3473 vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
3474 {
3475   assert_locus * const a = *(assert_locus * const *)pa;
3476   assert_locus * const b = *(assert_locus * const *)pb;
3477 
3478   /* If stable, some asserts might be optimized away already, sort
3479      them last.  */
3480   if (stable)
3481     {
3482       if (a == NULL)
3483 	return b != NULL;
3484       else if (b == NULL)
3485 	return -1;
3486     }
3487 
3488   if (a->e == NULL && b->e != NULL)
3489     return 1;
3490   else if (a->e != NULL && b->e == NULL)
3491     return -1;
3492 
3493   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3494      no need to test both a->e and b->e.  */
3495 
3496   /* Sort by destination index.  */
3497   if (a->e == NULL)
3498     ;
3499   else if (a->e->dest->index > b->e->dest->index)
3500     return 1;
3501   else if (a->e->dest->index < b->e->dest->index)
3502     return -1;
3503 
3504   /* Sort by comp_code.  */
3505   if (a->comp_code > b->comp_code)
3506     return 1;
3507   else if (a->comp_code < b->comp_code)
3508     return -1;
3509 
3510   hashval_t ha, hb;
3511 
3512   /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3513      uses DECL_UID of the VAR_DECL, so sorting might differ between
3514      -g and -g0.  When doing the removal of redundant assert exprs
3515      and commonization to successors, this does not matter, but the
3516      final sort needs to be stable.  */
3517   if (stable)
3518     {
3519       ha = 0;
3520       hb = 0;
3521     }
3522   else
3523     {
3524       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3525       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3526     }
3527 
3528   /* Break the tie using hashing and source/bb index.  */
3529   if (ha == hb)
3530     return (a->e != NULL
3531 	    ? a->e->src->index - b->e->src->index
3532 	    : a->bb->index - b->bb->index);
3533   return ha > hb ? 1 : -1;
3534 }
3535 
3536 /* Process all the insertions registered for every name N_i registered
3537    in NEED_ASSERT_FOR.  The list of assertions to be inserted are
3538    found in ASSERTS_FOR[i].  */
3539 
3540 void
3541 vrp_asserts::process_assert_insertions ()
3542 {
3543   unsigned i;
3544   bitmap_iterator bi;
3545   bool update_edges_p = false;
3546   int num_asserts = 0;
3547 
3548   if (dump_file && (dump_flags & TDF_DETAILS))
3549     dump (dump_file);
3550 
3551   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3552     {
3553       assert_locus *loc = asserts_for[i];
3554       gcc_assert (loc);
3555 
3556       auto_vec<assert_locus *, 16> asserts;
3557       for (; loc; loc = loc->next)
3558 	asserts.safe_push (loc);
3559       asserts.qsort (compare_assert_loc<false>);
3560 
3561       /* Push down common asserts to successors and remove redundant ones.  */
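      /* If every incoming edge of a destination block ends up carrying
	 the same assertion, the per-edge copies are dropped and a single
	 assert is placed at the start of that block instead.  */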
3562       unsigned ecnt = 0;
3563       assert_locus *common = NULL;
3564       unsigned commonj = 0;
3565       for (unsigned j = 0; j < asserts.length (); ++j)
3566 	{
3567 	  loc = asserts[j];
3568 	  if (! loc->e)
3569 	    common = NULL;
3570 	  else if (! common
3571 		   || loc->e->dest != common->e->dest
3572 		   || loc->comp_code != common->comp_code
3573 		   || ! operand_equal_p (loc->val, common->val, 0)
3574 		   || ! operand_equal_p (loc->expr, common->expr, 0))
3575 	    {
3576 	      commonj = j;
3577 	      common = loc;
3578 	      ecnt = 1;
3579 	    }
3580 	  else if (loc->e == asserts[j-1]->e)
3581 	    {
3582 	      /* Remove duplicate asserts.  */
3583 	      if (commonj == j - 1)
3584 		{
3585 		  commonj = j;
3586 		  common = loc;
3587 		}
3588 	      free (asserts[j-1]);
3589 	      asserts[j-1] = NULL;
3590 	    }
3591 	  else
3592 	    {
3593 	      ecnt++;
3594 	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3595 		{
3596 		  /* We have the same assertion on all incoming edges of a BB.
3597 		     Insert it at the beginning of that block.  */
3598 		  loc->bb = loc->e->dest;
3599 		  loc->e = NULL;
3600 		  loc->si = gsi_none ();
3601 		  common = NULL;
3602 		  /* Clear asserts commoned.  */
3603 		  for (; commonj != j; ++commonj)
3604 		    if (asserts[commonj])
3605 		      {
3606 			free (asserts[commonj]);
3607 			asserts[commonj] = NULL;
3608 		      }
3609 		}
3610 	    }
3611 	}
3612 
3613       /* The asserts vector sorting above might be unstable for
3614 	 -fcompare-debug, sort again to ensure a stable sort.  */
3615       asserts.qsort (compare_assert_loc<true>);
3616       for (unsigned j = 0; j < asserts.length (); ++j)
3617 	{
3618 	  loc = asserts[j];
3619 	  if (! loc)
3620 	    break;
3621 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3622 	  num_asserts++;
3623 	  free (loc);
3624 	}
3625     }
3626 
3627   if (update_edges_p)
3628     gsi_commit_edge_inserts ();
3629 
3630   statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
3631 			    num_asserts);
3632 }
3633 
3634 /* Traverse the flowgraph looking for conditional jumps to insert range
3635    expressions.  These range expressions are meant to provide information
3636    to optimizations that need to reason in terms of value ranges.  They
3637    will not be expanded into RTL.  For instance, given:
3638 
3639    x = ...
3640    y = ...
3641    if (x < y)
3642      y = x - 2;
3643    else
3644      x = y + 3;
3645 
3646    this pass will transform the code into:
3647 
3648    x = ...
3649    y = ...
3650    if (x < y)
3651     {
3652       x = ASSERT_EXPR <x, x < y>
3653       y = x - 2
3654     }
3655    else
3656     {
3657       y = ASSERT_EXPR <y, x >= y>
3658       x = y + 3
3659     }
3660 
3661    The idea is that once copy and constant propagation have run, other
3662    optimizations will be able to determine what ranges of values can 'x'
3663    take in different paths of the code, simply by checking the reaching
3664    definition of 'x'.  */
3665 
3666 void
3667 vrp_asserts::insert_range_assertions (void)
3668 {
3669   need_assert_for = BITMAP_ALLOC (NULL);
3670   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3671 
3672   calculate_dominance_info (CDI_DOMINATORS);
3673 
3674   find_assert_locations ();
3675   if (!bitmap_empty_p (need_assert_for))
3676     {
3677       process_assert_insertions ();
3678       update_ssa (TODO_update_ssa_no_phi);
3679     }
3680 
3681   if (dump_file && (dump_flags & TDF_DETAILS))
3682     {
3683       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3684       dump_function_to_file (current_function_decl, dump_file, dump_flags);
3685     }
3686 
3687   free (asserts_for);
3688   BITMAP_FREE (need_assert_for);
3689 }
3690 
3691 /* Return true if all imm uses of VAR are either in STMT, or
3692    feed (optionally through a chain of single imm uses) GIMPLE_COND
3693    in basic block COND_BB.  */
3694 
3695 bool
3696 vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
3697 						gimple *stmt,
3698 						basic_block cond_bb)
3699 {
3700   use_operand_p use_p, use2_p;
3701   imm_use_iterator iter;
3702 
3703   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3704     if (USE_STMT (use_p) != stmt)
3705       {
3706 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
3707 	if (is_gimple_debug (use_stmt))
3708 	  continue;
3709 	while (is_gimple_assign (use_stmt)
3710 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
3711 	       && single_imm_use (gimple_assign_lhs (use_stmt),
3712 				  &use2_p, &use_stmt2))
3713 	  use_stmt = use_stmt2;
3714 	if (gimple_code (use_stmt) != GIMPLE_COND
3715 	    || gimple_bb (use_stmt) != cond_bb)
3716 	  return false;
3717       }
3718   return true;
3719 }
3720 
3721 /* Convert range assertion expressions into the implied copies and
3722    copy propagate away the copies.  Doing the trivial copy propagation
3723    here avoids the need to run the full copy propagation pass after
3724    VRP.
3725 
3726    FIXME, this will eventually lead to copy propagation removing the
3727    names that had useful range information attached to them.  For
3728    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
3729    then N_i will have the range [3, +INF].
3730 
3731    However, by converting the assertion into the implied copy
3732    operation N_i = N_j, we will then copy-propagate N_j into the uses
3733    of N_i and lose the range information.  We may want to hold on to
3734    ASSERT_EXPRs a little while longer as the ranges could be used in
3735    things like jump threading.
3736 
3737    The problem with keeping ASSERT_EXPRs around is that passes after
3738    VRP need to handle them appropriately.
3739 
3740    Another approach would be to make the range information a first
3741    class property of the SSA_NAME so that it can be queried from
3742    any pass.  This is made somewhat more complex by the need for
3743    multiple ranges to be associated with one SSA_NAME.  */
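
/* For instance, x_8 = ASSERT_EXPR <x_7, x_7 > 3> is removed by replacing
   every use of x_8 with x_7 and then deleting the statement.  */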
3744 
3745 void
3746 vrp_asserts::remove_range_assertions ()
3747 {
3748   basic_block bb;
3749   gimple_stmt_iterator si;
3750   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
3751      a basic block preceded by a GIMPLE_COND branching to it and
3752      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
3753   int is_unreachable;
3754 
3755   /* Note that the BSI iterator bump happens at the bottom of the
3756      loop and no bump is necessary if we're removing the statement
3757      referenced by the current BSI.  */
3758   FOR_EACH_BB_FN (bb, fun)
3759     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
3760       {
3761 	gimple *stmt = gsi_stmt (si);
3762 
3763 	if (is_gimple_assign (stmt)
3764 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
3765 	  {
3766 	    tree lhs = gimple_assign_lhs (stmt);
3767 	    tree rhs = gimple_assign_rhs1 (stmt);
3768 	    tree var;
3769 
3770 	    var = ASSERT_EXPR_VAR (rhs);
3771 
3772 	    if (TREE_CODE (var) == SSA_NAME
3773 		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
3774 		&& SSA_NAME_RANGE_INFO (lhs))
3775 	      {
3776 		if (is_unreachable == -1)
3777 		  {
3778 		    is_unreachable = 0;
3779 		    if (single_pred_p (bb)
3780 			&& assert_unreachable_fallthru_edge_p
3781 						    (single_pred_edge (bb)))
3782 		      is_unreachable = 1;
3783 		  }
3784 		/* Handle
3785 		   if (x_7 >= 10 && x_7 < 20)
3786 		     __builtin_unreachable ();
3787 		   x_8 = ASSERT_EXPR <x_7, ...>;
3788 		   if the only uses of x_7 are in the ASSERT_EXPR and
3789 		   in the condition.  In that case, we can copy the
3790 		   range info from x_8 computed in this pass also
3791 		   for x_7.  */
3792 		if (is_unreachable
3793 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
3794 							  single_pred (bb)))
3795 		  {
3796 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
3797 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
3798 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
3799 		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
3800 		  }
3801 	      }
3802 
3803 	    /* Propagate the RHS into every use of the LHS.  For SSA names
3804 	       also propagate abnormals as it merely restores the original
3805 	       IL in this case (a replace_uses_by would assert).  */
3806 	    if (TREE_CODE (var) == SSA_NAME)
3807 	      {
3808 		imm_use_iterator iter;
3809 		use_operand_p use_p;
3810 		gimple *use_stmt;
3811 		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3812 		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3813 		    SET_USE (use_p, var);
3814 	      }
3815 	    else
3816 	      replace_uses_by (lhs, var);
3817 
3818 	    /* And finally, remove the copy, it is not needed.  */
3819 	    gsi_remove (&si, true);
3820 	    release_defs (stmt);
3821 	  }
3822 	else
3823 	  {
3824 	    if (!is_gimple_debug (gsi_stmt (si)))
3825 	      is_unreachable = 0;
3826 	    gsi_next (&si);
3827 	  }
3828       }
3829 }
3830 
3831 class vrp_prop : public ssa_propagation_engine
3832 {
3833 public:
3834   vrp_prop (vr_values *v)
3835     : ssa_propagation_engine (),
3836       m_vr_values (v) { }
3837 
3838   void initialize (struct function *);
3839   void finalize ();
3840 
3841 private:
3842   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
3843   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
3844 
3845   struct function *fun;
3846   vr_values *m_vr_values;
3847 };
3848 
3849 /* Initialization required by ssa_propagate engine.  */
3850 
3851 void
3852 vrp_prop::initialize (struct function *fn)
3853 {
3854   basic_block bb;
3855   fun = fn;
3856 
3857   FOR_EACH_BB_FN (bb, fun)
3858     {
3859       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3860 	   gsi_next (&si))
3861 	{
3862 	  gphi *phi = si.phi ();
3863 	  if (!stmt_interesting_for_vrp (phi))
3864 	    {
3865 	      tree lhs = PHI_RESULT (phi);
3866 	      m_vr_values->set_def_to_varying (lhs);
3867 	      prop_set_simulate_again (phi, false);
3868 	    }
3869 	  else
3870 	    prop_set_simulate_again (phi, true);
3871 	}
3872 
3873       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
3874 	   gsi_next (&si))
3875         {
3876 	  gimple *stmt = gsi_stmt (si);
3877 
3878 	  /* If the statement is a control insn, then we must simulate
3879 	     it at least once.  Failure to do so means that its outgoing
3880 	     edges will never get added.  */
3881 	  if (stmt_ends_bb_p (stmt))
3882 	    prop_set_simulate_again (stmt, true);
3883 	  else if (!stmt_interesting_for_vrp (stmt))
3884 	    {
3885 	      m_vr_values->set_defs_to_varying (stmt);
3886 	      prop_set_simulate_again (stmt, false);
3887 	    }
3888 	  else
3889 	    prop_set_simulate_again (stmt, true);
3890 	}
3891     }
3892 }
3893 
3894 /* Evaluate statement STMT.  If the statement produces a useful range,
3895    return SSA_PROP_INTERESTING and record the SSA name with the
3896    interesting range into *OUTPUT_P.
3897 
3898    If STMT is a conditional branch and we can determine its truth
3899    value, the taken edge is recorded in *TAKEN_EDGE_P.
3900 
3901    If STMT produces a varying value, return SSA_PROP_VARYING.  */
3902 
3903 enum ssa_prop_result
3904 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
3905 {
3906   tree lhs = gimple_get_lhs (stmt);
3907   value_range_equiv vr;
3908   m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
3909 
3910   if (*output_p)
3911     {
3912       if (m_vr_values->update_value_range (*output_p, &vr))
3913 	{
3914 	  if (dump_file && (dump_flags & TDF_DETAILS))
3915 	    {
3916 	      fprintf (dump_file, "Found new range for ");
3917 	      print_generic_expr (dump_file, *output_p);
3918 	      fprintf (dump_file, ": ");
3919 	      dump_value_range (dump_file, &vr);
3920 	      fprintf (dump_file, "\n");
3921 	    }
3922 
3923 	  if (vr.varying_p ())
3924 	    return SSA_PROP_VARYING;
3925 
3926 	  return SSA_PROP_INTERESTING;
3927 	}
3928       return SSA_PROP_NOT_INTERESTING;
3929     }
3930 
3931   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
3932     switch (gimple_call_internal_fn (stmt))
3933       {
3934       case IFN_ADD_OVERFLOW:
3935       case IFN_SUB_OVERFLOW:
3936       case IFN_MUL_OVERFLOW:
3937       case IFN_ATOMIC_COMPARE_EXCHANGE:
3938 	/* These internal calls return _Complex integer type,
3939 	   which VRP does not track, but the immediate uses
3940 	   thereof might be interesting.  */
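	/* E.g. for _3 = .ADD_OVERFLOW (a_1, b_2) the interesting uses are
	   sum_4 = REALPART_EXPR <_3> and ovf_5 = IMAGPART_EXPR <_3>.  */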
3941 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
3942 	  {
3943 	    imm_use_iterator iter;
3944 	    use_operand_p use_p;
3945 	    enum ssa_prop_result res = SSA_PROP_VARYING;
3946 
3947 	    m_vr_values->set_def_to_varying (lhs);
3948 
3949 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3950 	      {
3951 		gimple *use_stmt = USE_STMT (use_p);
3952 		if (!is_gimple_assign (use_stmt))
3953 		  continue;
3954 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
3955 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
3956 		  continue;
3957 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
3958 		tree use_lhs = gimple_assign_lhs (use_stmt);
3959 		if (TREE_CODE (rhs1) != rhs_code
3960 		    || TREE_OPERAND (rhs1, 0) != lhs
3961 		    || TREE_CODE (use_lhs) != SSA_NAME
3962 		    || !stmt_interesting_for_vrp (use_stmt)
3963 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
3964 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
3965 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
3966 		  continue;
3967 
3968 		/* If there is a change in the value range for any of the
3969 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
3970 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
3971 		   or IMAGPART_EXPR immediate uses, but none of them have
3972 		   a change in their value ranges, return
3973 		   SSA_PROP_NOT_INTERESTING.  If there are no
3974 		   {REAL,IMAG}PART_EXPR uses at all,
3975 		   return SSA_PROP_VARYING.  */
3976 		value_range_equiv new_vr;
3977 		m_vr_values->extract_range_basic (&new_vr, use_stmt);
3978 		const value_range_equiv *old_vr
3979 		  = m_vr_values->get_value_range (use_lhs);
3980 		if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
3981 		  res = SSA_PROP_INTERESTING;
3982 		else
3983 		  res = SSA_PROP_NOT_INTERESTING;
3984 		new_vr.equiv_clear ();
3985 		if (res == SSA_PROP_INTERESTING)
3986 		  {
3987 		    *output_p = lhs;
3988 		    return res;
3989 		  }
3990 	      }
3991 
3992 	    return res;
3993 	  }
3994 	break;
3995       default:
3996 	break;
3997       }
3998 
3999   /* All other statements produce nothing of interest for VRP, so mark
4000      their outputs varying and prevent further simulation.  */
4001   m_vr_values->set_defs_to_varying (stmt);
4002 
4003   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
4004 }
4005 
4006 /* Visit all arguments for PHI node PHI that flow through executable
4007    edges.  If a valid value range can be derived from all the incoming
4008    value ranges, set a new range for the LHS of PHI.  */
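
/* For instance, a PHI whose arguments have ranges [0, 5] and [3, 9] on
   its executable incoming edges typically gives its result the union
   range [0, 9].  */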
4009 
4010 enum ssa_prop_result
4011 vrp_prop::visit_phi (gphi *phi)
4012 {
4013   tree lhs = PHI_RESULT (phi);
4014   value_range_equiv vr_result;
4015   m_vr_values->extract_range_from_phi_node (phi, &vr_result);
4016   if (m_vr_values->update_value_range (lhs, &vr_result))
4017     {
4018       if (dump_file && (dump_flags & TDF_DETAILS))
4019 	{
4020 	  fprintf (dump_file, "Found new range for ");
4021 	  print_generic_expr (dump_file, lhs);
4022 	  fprintf (dump_file, ": ");
4023 	  dump_value_range (dump_file, &vr_result);
4024 	  fprintf (dump_file, "\n");
4025 	}
4026 
4027       if (vr_result.varying_p ())
4028 	return SSA_PROP_VARYING;
4029 
4030       return SSA_PROP_INTERESTING;
4031     }
4032 
4033   /* Nothing changed, don't add outgoing edges.  */
4034   return SSA_PROP_NOT_INTERESTING;
4035 }
4036 
4037 /* Traverse all the blocks folding conditionals with known ranges.  */
4038 
4039 void
4040 vrp_prop::finalize ()
4041 {
4042   size_t i;
4043 
4044   /* We have completed propagating through the lattice.  */
4045   m_vr_values->set_lattice_propagation_complete ();
4046 
4047   if (dump_file)
4048     {
4049       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
4050       m_vr_values->dump_all_value_ranges (dump_file);
4051       fprintf (dump_file, "\n");
4052     }
4053 
4054   /* Record the computed value ranges on the SSA_NAMEs.  */
4055   for (i = 0; i < num_ssa_names; i++)
4056     {
4057       tree name = ssa_name (i);
4058       if (!name)
4059 	continue;
4060 
4061       const value_range_equiv *vr = m_vr_values->get_value_range (name);
4062       if (!name || !vr->constant_p ())
4063 	continue;
4064 
4065       if (POINTER_TYPE_P (TREE_TYPE (name))
4066 	  && range_includes_zero_p (vr) == 0)
4067 	set_ptr_nonnull (name);
4068       else if (!POINTER_TYPE_P (TREE_TYPE (name)))
4069 	set_range_info (name, *vr);
4070     }
4071 }
4072 
4073 class vrp_folder : public substitute_and_fold_engine
4074 {
4075  public:
4076   vrp_folder (vr_values *v)
4077     : substitute_and_fold_engine (/* Fold all stmts.  */ true),
4078       m_vr_values (v), simplifier (v)
4079     {  }
4080 
4081 private:
4082   tree value_of_expr (tree name, gimple *stmt) OVERRIDE
4083     {
4084       return m_vr_values->value_of_expr (name, stmt);
4085     }
4086   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
4087   bool fold_predicate_in (gimple_stmt_iterator *);
4088 
4089   vr_values *m_vr_values;
4090   simplify_using_ranges simplifier;
4091 };
4092 
4093 /* If the statement pointed by SI has a predicate whose value can be
4094    computed using the value range information computed by VRP, compute
4095    its value and return true.  Otherwise, return false.  */
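
/* For instance, if x_3 is known to be in the range [1, 10], the
   predicate x_3 > 0 folds to true and a GIMPLE_COND testing it is
   made unconditionally true.  */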
4096 
4097 bool
4098 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4099 {
4100   bool assignment_p = false;
4101   tree val;
4102   gimple *stmt = gsi_stmt (*si);
4103 
4104   if (is_gimple_assign (stmt)
4105       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4106     {
4107       assignment_p = true;
4108       val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4109 						 gimple_assign_rhs1 (stmt),
4110 						 gimple_assign_rhs2 (stmt),
4111 						 stmt);
4112     }
4113   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4114     val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4115 					       gimple_cond_lhs (cond_stmt),
4116 					       gimple_cond_rhs (cond_stmt),
4117 					       stmt);
4118   else
4119     return false;
4120 
4121   if (val)
4122     {
4123       if (assignment_p)
4124         val = fold_convert (gimple_expr_type (stmt), val);
4125 
4126       if (dump_file)
4127 	{
4128 	  fprintf (dump_file, "Folding predicate ");
4129 	  print_gimple_expr (dump_file, stmt, 0);
4130 	  fprintf (dump_file, " to ");
4131 	  print_generic_expr (dump_file, val);
4132 	  fprintf (dump_file, "\n");
4133 	}
4134 
4135       if (is_gimple_assign (stmt))
4136 	gimple_assign_set_rhs_from_tree (si, val);
4137       else
4138 	{
4139 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4140 	  gcond *cond_stmt = as_a <gcond *> (stmt);
4141 	  if (integer_zerop (val))
4142 	    gimple_cond_make_false (cond_stmt);
4143 	  else if (integer_onep (val))
4144 	    gimple_cond_make_true (cond_stmt);
4145 	  else
4146 	    gcc_unreachable ();
4147 	}
4148 
4149       return true;
4150     }
4151 
4152   return false;
4153 }
4154 
4155 /* Callback for substitute_and_fold folding the stmt at *SI.  */
4156 
4157 bool
4158 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4159 {
4160   if (fold_predicate_in (si))
4161     return true;
4162 
4163   return simplifier.simplify (si);
4164 }
4165 
4166 /* Blocks which have more than one predecessor and more than
4167    one successor present jump threading opportunities, i.e.,
4168    when the block is reached from a specific predecessor, we
4169    may be able to determine which of the outgoing edges will
4170    be traversed.  When this optimization applies, we are able
4171    to avoid conditionals at runtime and we may expose secondary
4172    optimization opportunities.
4173 
4174    This class is effectively a driver for the generic jump
4175    threading code.  It basically just presents the generic code
4176    with edges that may be suitable for jump threading.
4177 
4178    Unlike DOM, we do not iterate VRP if jump threading was successful.
4179    While iterating may expose new opportunities for VRP, it is expected
4180    those opportunities would be very limited and the compile time cost
4181    to expose those opportunities would be significant.
4182 
4183    As jump threading opportunities are discovered, they are registered
4184    for later realization.  */
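
/* A typical opportunity: if block B2 tests a_1 > 5 and one of its
   predecessors is reached only when a_1 > 10, then along that incoming
   edge the test is known to be true and the edge can be threaded
   directly to B2's THEN successor.  */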
4185 
4186 class vrp_jump_threader : public dom_walker
4187 {
4188 public:
4189   vrp_jump_threader (struct function *, vr_values *);
4190   ~vrp_jump_threader ();
4191 
4192   void thread_jumps ()
4193   {
4194     walk (m_fun->cfg->x_entry_block_ptr);
4195   }
4196 
4197 private:
4198   static tree simplify_stmt (gimple *stmt, gimple *within_stmt,
4199 			     avail_exprs_stack *, basic_block);
4200   virtual edge before_dom_children (basic_block);
4201   virtual void after_dom_children (basic_block);
4202 
4203   function *m_fun;
4204   vr_values *m_vr_values;
4205   const_and_copies *m_const_and_copies;
4206   avail_exprs_stack *m_avail_exprs_stack;
4207   hash_table<expr_elt_hasher> *m_avail_exprs;
4208   gcond *m_dummy_cond;
4209 };
4210 
4211 vrp_jump_threader::vrp_jump_threader (struct function *fun, vr_values *v)
4212   : dom_walker (CDI_DOMINATORS, REACHABLE_BLOCKS)
4213 {
4214   /* Ugh.  When substituting values earlier in this pass we can wipe
4215      the dominance information.  So rebuild the dominator information
4216      as we need it within the jump threading code.  */
4217   calculate_dominance_info (CDI_DOMINATORS);
4218 
4219   /* We do not allow VRP information to be used for jump threading
4220      across a back edge in the CFG.  Otherwise it becomes too
4221      difficult to avoid eliminating loop exit tests.  Of course
4222      EDGE_DFS_BACK is not accurate at this time so we have to
4223      recompute it.  */
4224   mark_dfs_back_edges ();
4225 
4226   /* Allocate our unwinder stack to unwind any temporary equivalences
4227      that might be recorded.  */
4228   m_const_and_copies = new const_and_copies ();
4229 
4230   m_dummy_cond = NULL;
4231   m_fun = fun;
4232   m_vr_values = v;
4233   m_avail_exprs = new hash_table<expr_elt_hasher> (1024);
4234   m_avail_exprs_stack = new avail_exprs_stack (m_avail_exprs);
4235 }
4236 
4237 vrp_jump_threader::~vrp_jump_threader ()
4238 {
4239   /* We do not actually update the CFG or SSA graphs at this point as
4240      ASSERT_EXPRs are still in the IL and cfg cleanup code does not
4241      yet handle ASSERT_EXPRs gracefully.  */
4242   delete m_const_and_copies;
4243   delete m_avail_exprs;
4244   delete m_avail_exprs_stack;
4245 }
4246 
4247 /* Called before processing dominator children of BB.  We want to look
4248    at ASSERT_EXPRs and record information from them in the appropriate
4249    tables.
4250 
4251    We could look at other statements here, but that is unlikely to
4252    significantly increase the number of jump threads we discover.  */
4253 
4254 edge
4255 vrp_jump_threader::before_dom_children (basic_block bb)
4256 {
4257   gimple_stmt_iterator gsi;
4258 
4259   m_avail_exprs_stack->push_marker ();
4260   m_const_and_copies->push_marker ();
4261   for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4262     {
4263       gimple *stmt = gsi_stmt (gsi);
4264       if (gimple_assign_single_p (stmt)
4265          && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
4266 	{
4267 	  tree rhs1 = gimple_assign_rhs1 (stmt);
4268 	  tree cond = TREE_OPERAND (rhs1, 1);
4269 	  tree inverted = invert_truthvalue (cond);
4270 	  vec<cond_equivalence> p;
4271 	  p.create (3);
4272 	  record_conditions (&p, cond, inverted);
4273 	  for (unsigned int i = 0; i < p.length (); i++)
4274 	    m_avail_exprs_stack->record_cond (&p[i]);
4275 
4276 	  tree lhs = gimple_assign_lhs (stmt);
4277 	  m_const_and_copies->record_const_or_copy (lhs,
4278 						    TREE_OPERAND (rhs1, 0));
4279 	  p.release ();
4280 	  continue;
4281 	}
4282       break;
4283     }
4284   return NULL;
4285 }
4286 
4287 /* A trivial wrapper so that we can present the generic jump threading
4288    code with a simple API for simplifying statements.  STMT is the
4289    statement we want to simplify, WITHIN_STMT provides the location
4290    for any overflow warnings.
4291 
4292    ?? This should be cleaned up.  There's a virtually identical copy
4293    of this function in tree-ssa-dom.c.  */
4294 
4295 tree
4296 vrp_jump_threader::simplify_stmt (gimple *stmt,
4297 				  gimple *within_stmt,
4298 				  avail_exprs_stack *avail_exprs_stack,
4299 				  basic_block bb)
4300 {
4301   /* First see if the conditional is in the hash table.  */
4302   tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
4303   if (cached_lhs && is_gimple_min_invariant (cached_lhs))
4304     return cached_lhs;
4305 
4306   class vr_values *vr_values = x_vr_values;
4307   if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4308     {
4309       tree op0 = gimple_cond_lhs (cond_stmt);
4310       op0 = lhs_of_dominating_assert (op0, bb, stmt);
4311 
4312       tree op1 = gimple_cond_rhs (cond_stmt);
4313       op1 = lhs_of_dominating_assert (op1, bb, stmt);
4314 
4315       simplify_using_ranges simplifier (vr_values);
4316       return simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4317 						  op0, op1, within_stmt);
4318     }
4319 
4320   if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
4321     {
4322       tree op = gimple_switch_index (switch_stmt);
4323       if (TREE_CODE (op) != SSA_NAME)
4324 	return NULL_TREE;
4325 
4326       op = lhs_of_dominating_assert (op, bb, stmt);
4327 
4328       const value_range_equiv *vr = vr_values->get_value_range (op);
4329       return find_case_label_range (switch_stmt, vr);
4330     }
4331 
4332   if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
4333     {
4334       tree lhs = gimple_assign_lhs (assign_stmt);
4335       if (TREE_CODE (lhs) == SSA_NAME
4336 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4337 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
4338 	  && stmt_interesting_for_vrp (stmt))
4339 	{
4340 	  edge dummy_e;
4341 	  tree dummy_tree;
4342 	  value_range_equiv new_vr;
4343 	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
4344 					      &dummy_tree, &new_vr);
4345 	  tree singleton;
4346 	  if (new_vr.singleton_p (&singleton))
4347 	    return singleton;
4348 	}
4349     }
4350 
4351   return NULL_TREE;
4352 }
4353 
4354 /* Called after processing dominator children of BB.  This is where we
4355    actually call into the threader.  */
4356 void
4357 vrp_jump_threader::after_dom_children (basic_block bb)
4358 {
4359   if (!m_dummy_cond)
4360     m_dummy_cond = gimple_build_cond (NE_EXPR,
4361 				      integer_zero_node, integer_zero_node,
4362 				      NULL, NULL);
4363 
4364   x_vr_values = m_vr_values;
4365   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
4366 			 m_avail_exprs_stack, NULL,
4367 			 simplify_stmt);
4368   x_vr_values = NULL;
4369 
4370   m_avail_exprs_stack->pop_to_marker ();
4371   m_const_and_copies->pop_to_marker ();
4372 }
4373 
4374 /* STMT is a conditional at the end of a basic block.
4375 
4376    If the conditional is of the form SSA_NAME op constant and the SSA_NAME
4377    was set via a type conversion, try to replace the SSA_NAME with the RHS
4378    of the type conversion.  Doing so makes the conversion dead which helps
4379    subsequent passes.  */
4380 
4381 static void
4382 vrp_simplify_cond_using_ranges (vr_values *query, gcond *stmt)
4383 {
4384   tree op0 = gimple_cond_lhs (stmt);
4385   tree op1 = gimple_cond_rhs (stmt);
4386 
4387   /* If we have a comparison of an SSA_NAME (OP0) against a constant,
4388      see if OP0 was set by a type conversion where the source of
4389      the conversion is another SSA_NAME with a range that fits
4390      into the range of OP0's type.
4391 
4392      If so, the conversion is redundant as the earlier SSA_NAME can be
4393      used for the comparison directly if we just massage the constant in the
4394      comparison.  */
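
  /* For instance, if c_5 is a char with a known constant range, the test

       _1 = (int) c_5;
       if (_1 == 42)

     can be rewritten as if (c_5 == 42), making the conversion dead.  */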
4395   if (TREE_CODE (op0) == SSA_NAME
4396       && TREE_CODE (op1) == INTEGER_CST)
4397     {
4398       gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
4399       tree innerop;
4400 
4401       if (!is_gimple_assign (def_stmt))
4402 	return;
4403 
4404       switch (gimple_assign_rhs_code (def_stmt))
4405 	{
4406 	CASE_CONVERT:
4407 	  innerop = gimple_assign_rhs1 (def_stmt);
4408 	  break;
4409 	case VIEW_CONVERT_EXPR:
4410 	  innerop = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
4411 	  if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop)))
4412 	    return;
4413 	  break;
4414 	default:
4415 	  return;
4416 	}
4417 
4418       if (TREE_CODE (innerop) == SSA_NAME
4419 	  && !POINTER_TYPE_P (TREE_TYPE (innerop))
4420 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
4421 	  && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
4422 	{
4423 	  const value_range *vr = query->get_value_range (innerop);
4424 
4425 	  if (range_int_cst_p (vr)
4426 	      && range_fits_type_p (vr,
4427 				    TYPE_PRECISION (TREE_TYPE (op0)),
4428 				    TYPE_SIGN (TREE_TYPE (op0)))
4429 	      && int_fits_type_p (op1, TREE_TYPE (innerop)))
4430 	    {
4431 	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
4432 	      gimple_cond_set_lhs (stmt, innerop);
4433 	      gimple_cond_set_rhs (stmt, newconst);
4434 	      update_stmt (stmt);
4435 	      if (dump_file && (dump_flags & TDF_DETAILS))
4436 		{
4437 		  fprintf (dump_file, "Folded into: ");
4438 		  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
4439 		  fprintf (dump_file, "\n");
4440 		}
4441 	    }
4442 	}
4443     }
4444 }
4445 
4446 /* Main entry point to VRP (Value Range Propagation).  This pass is
4447    loosely based on J. R. C. Patterson, ``Accurate Static Branch
4448    Prediction by Value Range Propagation,'' in SIGPLAN Conference on
4449    Programming Language Design and Implementation, pp. 67-78, 1995.
4450    Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
4451 
4452    This is essentially an SSA-CCP pass modified to deal with ranges
4453    instead of constants.
4454 
4455    While propagating ranges, we may find that two or more SSA names
4456    have equivalent, though distinct, ranges.  For instance,
4457 
4458      1	x_9 = p_3->a;
4459      2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
4460      3	if (p_4 == q_2)
4461      4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
4462      5	endif
4463      6	if (q_2)
4464 
4465    In the code above, pointer p_5 has range [q_2, q_2], but from the
4466    code we can also determine that p_5 cannot be NULL and, if q_2 had
4467    a non-varying range, p_5's range should also be compatible with it.
4468 
4469    These equivalences are created by two expressions: ASSERT_EXPR and
4470    copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
4471    result of another assertion, then we can use the fact that p_5 and
4472    p_4 are equivalent when evaluating p_5's range.
4473 
4474    Together with value ranges, we also propagate these equivalences
4475    between names so that we can take advantage of information from
4476    multiple ranges when doing final replacement.  Note that this
4477    equivalency relation is transitive but not symmetric.
4478 
4479    In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
4480    cannot assert that q_2 is equivalent to p_5 because q_2 may be used
4481    in contexts where that assertion does not hold (e.g., in line 6).
4482 
4483    TODO, the main difference between this pass and Patterson's is that
4484    we do not propagate edge probabilities.  We only compute whether
4485    edges can be taken or not.  That is, instead of having a spectrum
4486    of jump probabilities between 0 and 1, we only deal with 0, 1 and
4487    DON'T KNOW.  In the future, it may be worthwhile to propagate
4488    probabilities to aid branch prediction.  */
4489 
4490 static unsigned int
4491 execute_vrp (struct function *fun, bool warn_array_bounds_p)
4492 {
4493   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4494   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4495   scev_initialize ();
4496 
4497   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
4498      Inserting assertions may split edges which will invalidate
4499      EDGE_DFS_BACK.  */
4500   vrp_asserts assert_engine (fun);
4501   assert_engine.insert_range_assertions ();
4502 
4503   threadedge_initialize_values ();
4504 
4505   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
4506   mark_dfs_back_edges ();
4507 
4508   vr_values vrp_vr_values;
4509 
4510   class vrp_prop vrp_prop (&vrp_vr_values);
4511   vrp_prop.initialize (fun);
4512   vrp_prop.ssa_propagate ();
4513 
4514   /* Instantiate the folder here, so that edge cleanups happen at the
4515      end of this function.  */
4516   vrp_folder folder (&vrp_vr_values);
4517   vrp_prop.finalize ();
4518 
4519   /* If we're checking array refs, we want to merge information on
4520      the executability of each edge between vrp_folder and the
4521      check_array_bounds_dom_walker: each can clear the
4522      EDGE_EXECUTABLE flag on edges, in different ways.
4523 
4524      Hence, if we're going to call check_all_array_refs, set
4525      the flag on every edge now, rather than in
4526      check_array_bounds_dom_walker's ctor; vrp_folder may clear
4527      it from some edges.  */
4528   if (warn_array_bounds && warn_array_bounds_p)
4529     set_all_edges_as_executable (fun);
4530 
4531   folder.substitute_and_fold ();
4532 
4533   if (warn_array_bounds && warn_array_bounds_p)
4534     {
4535       array_bounds_checker array_checker (fun, &vrp_vr_values);
4536       array_checker.check ();
4537     }
4538 
4539   /* We must identify jump threading opportunities before we release
4540      the datastructures built by VRP.  */
4541   vrp_jump_threader threader (fun, &vrp_vr_values);
4542   threader.thread_jumps ();
4543 
4544   /* A comparison of an SSA_NAME against a constant where the SSA_NAME
4545      was set by a type conversion can often be rewritten to use the
4546      RHS of the type conversion.
4547 
4548      However, doing so inhibits jump threading through the comparison.
4549      So that transformation is not performed until after jump threading
4550      is complete.  */
4551   basic_block bb;
4552   FOR_EACH_BB_FN (bb, fun)
4553     {
4554       gimple *last = last_stmt (bb);
4555       if (last && gimple_code (last) == GIMPLE_COND)
4556 	vrp_simplify_cond_using_ranges (&vrp_vr_values,
4557 					as_a <gcond *> (last));
4558     }
4559 
4560   free_numbers_of_iterations_estimates (fun);
4561 
4562   /* ASSERT_EXPRs must be removed before finalizing jump threads
4563      as finalizing jump threads calls the CFG cleanup code which
4564      does not properly handle ASSERT_EXPRs.  */
4565   assert_engine.remove_range_assertions ();
4566 
4567   /* If we exposed any new variables, go ahead and put them into
4568      SSA form now, before we handle jump threading.  This simplifies
4569      interactions between rewriting of _DECL nodes into SSA form
4570      and rewriting SSA_NAME nodes into SSA form after block
4571      duplication and CFG manipulation.  */
4572   update_ssa (TODO_update_ssa);
4573 
4574   /* We identified all the jump threading opportunities earlier, but could
4575      not transform the CFG at that time.  This routine transforms the
4576      CFG and arranges for the dominator tree to be rebuilt if necessary.
4577 
4578      Note the SSA graph update will occur during the normal TODO
4579      processing by the pass manager.  */
4580   thread_through_all_blocks (false);
4581 
4582   threadedge_finalize_values ();
4583 
4584   scev_finalize ();
4585   loop_optimizer_finalize ();
4586   return 0;
4587 }
4588 
4589 namespace {
4590 
4591 const pass_data pass_data_vrp =
4592 {
4593   GIMPLE_PASS, /* type */
4594   "vrp", /* name */
4595   OPTGROUP_NONE, /* optinfo_flags */
4596   TV_TREE_VRP, /* tv_id */
4597   PROP_ssa, /* properties_required */
4598   0, /* properties_provided */
4599   0, /* properties_destroyed */
4600   0, /* todo_flags_start */
4601   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
4602 };
4603 
4604 class pass_vrp : public gimple_opt_pass
4605 {
4606 public:
4607   pass_vrp (gcc::context *ctxt)
4608     : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
4609   {}
4610 
4611   /* opt_pass methods: */
4612   opt_pass * clone () { return new pass_vrp (m_ctxt); }
4613   void set_pass_param (unsigned int n, bool param)
4614     {
4615       gcc_assert (n == 0);
4616       warn_array_bounds_p = param;
4617     }
4618   virtual bool gate (function *) { return flag_tree_vrp != 0; }
4619   virtual unsigned int execute (function *fun)
4620     { return execute_vrp (fun, warn_array_bounds_p); }
4621 
4622  private:
4623   bool warn_array_bounds_p;
4624 }; // class pass_vrp
4625 
4626 } // anon namespace
4627 
4628 gimple_opt_pass *
4629 make_pass_vrp (gcc::context *ctxt)
4630 {
4631   return new pass_vrp (ctxt);
4632 }
4633 
4634 
4635 /* Worker for determine_value_range.  */
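/* For instance, with a_1 known to be in [0, 10], the expression a_1 + 7
   is given the range [7, 17]; unhandled trees are set to VARYING.  */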
4636 
4637 static void
4638 determine_value_range_1 (value_range *vr, tree expr)
4639 {
4640   if (BINARY_CLASS_P (expr))
4641     {
4642       value_range vr0, vr1;
4643       determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
4644       determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
4645       range_fold_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
4646 			      &vr0, &vr1);
4647     }
4648   else if (UNARY_CLASS_P (expr))
4649     {
4650       value_range vr0;
4651       determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
4652       range_fold_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
4653 			     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
4654     }
4655   else if (TREE_CODE (expr) == INTEGER_CST)
4656     vr->set (expr);
4657   else
4658     {
4659       value_range_kind kind;
4660       wide_int min, max;
4661       /* For SSA names try to extract range info computed by VRP.  Otherwise
4662 	 fall back to varying.  */
4663       if (TREE_CODE (expr) == SSA_NAME
4664 	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
4665 	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
4666 	vr->set (wide_int_to_tree (TREE_TYPE (expr), min),
4667 		 wide_int_to_tree (TREE_TYPE (expr), max),
4668 		 kind);
4669       else
4670 	vr->set_varying (TREE_TYPE (expr));
4671     }
4672 }
4673 
4674 /* Compute a value-range for EXPR and set it in *MIN and *MAX.  Return
4675    the determined range kind.  */
4676 
4677 value_range_kind
4678 determine_value_range (tree expr, wide_int *min, wide_int *max)
4679 {
4680   value_range vr;
4681   determine_value_range_1 (&vr, expr);
4682   if (vr.constant_p ())
4683     {
4684       *min = wi::to_wide (vr.min ());
4685       *max = wi::to_wide (vr.max ());
4686       return vr.kind ();
4687     }
4688 
4689   return VR_VARYING;
4690 }
4691