/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"
#include "wide-int-range.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

void
value_range_base::set (enum value_range_kind kind, tree min, tree max)
{
  m_kind = kind;
  m_min = min;
  m_max = max;
  if (flag_checking)
    check ();
}

void
value_range::set_equiv (bitmap equiv)
{
  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (m_equiv == NULL
      && equiv != NULL)
    m_equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != m_equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (m_equiv, equiv);
      else
	bitmap_clear (m_equiv);
    }
}

/* Initialize value_range.  */

void
value_range::set (enum value_range_kind kind, tree min, tree max,
		  bitmap equiv)
{
  value_range_base::set (kind, min, max);
  set_equiv (equiv);
  if (flag_checking)
    check ();
}

value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max);
}

value_range::value_range (value_range_kind kind, tree min, tree max,
			  bitmap equiv)
{
  m_equiv = NULL;
  set (kind, min, max, equiv);
}

value_range::value_range (const value_range_base &other)
{
  m_equiv = NULL;
  set (other.kind (), other.min (), other.max (), NULL);
}

/* Like set, but keep the equivalences in place.  */

void
value_range::update (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max,
       (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
}

/* Copy value_range in FROM into THIS while avoiding bitmap sharing.

   Note: The code that avoids the bitmap sharing looks at the existing
   this->m_equiv, so this function cannot be used to initialize an
   object.  Use the constructors for initialization.  */

void
value_range::deep_copy (const value_range *from)
{
  set (from->m_kind, from->min (), from->max (), from->m_equiv);
}

void
value_range::move (value_range *from)
{
  set (from->m_kind, from->min (), from->max ());
  m_equiv = from->m_equiv;
  from->m_equiv = NULL;
}

/* Check the validity of the range.  */

void
value_range_base::check ()
{
  switch (m_kind)
    {
    case VR_RANGE:
    case VR_ANTI_RANGE:
      {
	int cmp;

	gcc_assert (m_min && m_max);

	gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));

	/* Creating ~[-MIN, +MAX] is stupid because that would be
	   the empty set.  */
	if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
	  gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));

	cmp = compare_values (m_min, m_max);
	gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
	break;
      }
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!min () && !max ());
      break;
    default:
      gcc_unreachable ();
    }
}

void
value_range::check ()
{
  value_range_base::check ();
  switch (m_kind)
    {
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
    default:;
    }
}

/* Equality operator.  We purposely do not overload ==, to avoid
   confusion with the equivalence bitmap in the derived value_range
   class.  */

bool
value_range_base::equal_p (const value_range_base &other) const
{
  return (m_kind == other.m_kind
	  && vrp_operand_equal_p (m_min, other.m_min)
	  && vrp_operand_equal_p (m_max, other.m_max));
}

/* Returns TRUE if THIS == OTHER.  Ignores the equivalence bitmap if
   IGNORE_EQUIVS is TRUE.  */

bool
value_range::equal_p (const value_range &other, bool ignore_equivs) const
{
  return (value_range_base::equal_p (other)
	  && (ignore_equivs
	      || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
}

/* Return TRUE if this is a symbolic range.  */

bool
value_range_base::symbolic_p () const
{
  return (!varying_p ()
	  && !undefined_p ()
	  && (!is_gimple_min_invariant (m_min)
	      || !is_gimple_min_invariant (m_max)));
}

/* NOTE: This is not the inverse of symbolic_p because the range
   could also be varying or undefined.  Ideally they should be inverse
   of each other, with varying only applying to symbolics.  Varying of
   constants would be represented as [-MIN, +MAX].  */

bool
value_range_base::constant_p () const
{
  return (!varying_p ()
	  && !undefined_p ()
	  && TREE_CODE (m_min) == INTEGER_CST
	  && TREE_CODE (m_max) == INTEGER_CST);
}

void
value_range_base::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL);
}

void
value_range::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL, NULL);
}

void
value_range_base::set_varying ()
{
  set (VR_VARYING, NULL, NULL);
}

void
value_range::set_varying ()
{
  set (VR_VARYING, NULL, NULL, NULL);
}

/* Return TRUE if it is possible that the range contains VAL.  */

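/* For instance: for the anti-range ~[0, 0] this returns false for 0,
   while for the range [1, 10] it returns true for 5 and false for 11
   (illustrative examples, traced through the code below).  */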
bool
value_range_base::may_contain_p (tree val) const
{
  if (varying_p ())
    return true;

  if (undefined_p ())
    return true;

  if (m_kind == VR_ANTI_RANGE)
    {
      int res = value_inside_range (val, min (), max ());
      return res == 0 || res == -2;
    }
  return value_inside_range (val, min (), max ()) != 0;
}

void
value_range::equiv_clear ()
{
  if (m_equiv)
    bitmap_clear (m_equiv);
}

/* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
   bitmap.  If no equivalence table has been created, OBSTACK is the
   obstack to use (NULL for the default obstack).

   This is the central point where equivalence processing can be
   turned on/off.  */

void
value_range::equiv_add (const_tree var,
			const value_range *var_vr,
			bitmap_obstack *obstack)
{
  if (!m_equiv)
    m_equiv = BITMAP_ALLOC (obstack);
  unsigned ver = SSA_NAME_VERSION (var);
  bitmap_set_bit (m_equiv, ver);
  if (var_vr && var_vr->m_equiv)
    bitmap_ior_into (m_equiv, var_vr->m_equiv);
}

/* If range is a singleton, place it in RESULT and return TRUE.
   Note: A singleton can be any gimple invariant, not just constants.
   So, [&x, &x] counts as a singleton.  */

bool
value_range_base::singleton_p (tree *result) const
{
  if (m_kind == VR_RANGE
      && vrp_operand_equal_p (min (), max ())
      && is_gimple_min_invariant (min ()))
    {
      if (result)
        *result = min ();
      return true;
    }
  return false;
}

tree
value_range_base::type () const
{
  /* Types are only valid for VR_RANGE and VR_ANTI_RANGE, which are
     known to have non-null min/max.  */
  gcc_assert (min ());
  return TREE_TYPE (min ());
}

void
value_range_base::dump (FILE *file) const
{
  if (undefined_p ())
    fprintf (file, "UNDEFINED");
  else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
    {
      tree ttype = type ();

      print_generic_expr (file, ttype);
      fprintf (file, " ");

      fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (ttype)
	  && !TYPE_UNSIGNED (ttype)
	  && vrp_val_is_min (min ())
	  && TYPE_PRECISION (ttype) != 1)
	fprintf (file, "-INF");
      else
	print_generic_expr (file, min ());

      fprintf (file, ", ");

      if (INTEGRAL_TYPE_P (ttype)
	  && vrp_val_is_max (max ())
	  && TYPE_PRECISION (ttype) != 1)
	fprintf (file, "+INF");
      else
	print_generic_expr (file, max ());

      fprintf (file, "]");
    }
  else if (varying_p ())
    fprintf (file, "VARYING");
  else
    gcc_unreachable ();
}

void
value_range::dump (FILE *file) const
{
  value_range_base::dump (file);
  if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
      && m_equiv)
    {
      bitmap_iterator bi;
      unsigned i, c = 0;

      fprintf (file, "  EQUIVALENCES: { ");

      EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
	{
	  print_generic_expr (file, ssa_name (i));
	  fprintf (file, " ");
	  c++;
	}

      fprintf (file, "} (%u elements)", c);
    }
}

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

void
dump_value_range (FILE *file, const value_range_base *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

DEBUG_FUNCTION void
debug (const value_range_base *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range_base &vr)
{
  dump_value_range (stderr, &vr);
}

DEBUG_FUNCTION void
debug (const value_range *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range &vr)
{
  dump_value_range (stderr, &vr);
}

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of location lists indicating where to insert assertions.
   ASSERTS_FOR[I] holds a list of ASSERT_LOCUS_T nodes that describe
   where ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
	  || (type_max != NULL_TREE
	      && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
	  || (type_min != NULL_TREE
	      && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

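/* A worked example (illustrative): for the unsigned VR_RANGE [3, 18]
   with NONZERO_BITS 0b1100, the only representable values are
   {0, 4, 8, 12}, so *MIN is rounded up to 4, *MAX is rounded down to
   12, and VR_RANGE [4, 12] is returned.  */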
enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}


/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

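/* A worked example (illustrative): for unsigned char, the wrapping
   range [200, 10] covers [200, 255] U [0, 10] and is canonicalized
   to the anti-range ~[11, 199]; conversely, the anti-range ~[0, 10]
   is canonicalized to the range [11, 255].  */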
void
value_range_base::set_and_canonicalize (enum value_range_kind kind,
					tree min, tree max)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  else if (kind == VR_VARYING)
    {
      set_varying ();
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set (kind, min, max);
      return;
    }

  /* If min and max are in the wrong order, swap them and adjust
     the VR kind accordingly.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_varying ();
	  return;
	}

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_varying ();
	  return;
	}

      kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (kind == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ???  This could be VR_UNDEFINED instead.  */
	  set_varying ();
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  kind = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
        {
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  kind = VR_RANGE;
        }
      else if (is_max)
        {
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  kind = VR_RANGE;
        }
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set (kind, min, max);
}

void
value_range::set_and_canonicalize (enum value_range_kind kind,
				   tree min, tree max, bitmap equiv)
{
  value_range_base::set_and_canonicalize (kind, min, max);
  if (this->kind () == VR_RANGE || this->kind () == VR_ANTI_RANGE)
    set_equiv (equiv);
  else
    equiv_clear ();
}

void
value_range_base::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val);
}

void
value_range::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val, NULL);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
value_range_base::set_nonnull (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero);
}

void
value_range::set_nonnull (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero, NULL);
}

/* Set value range VR to a NULL range of type TYPE.  */

void
value_range_base::set_null (tree type)
{
  set (build_int_cst (type, 0));
}

void
value_range::set_null (tree type)
{
  set (build_int_cst (type, 0));
}

/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
	  || ((!b1 || bitmap_empty_p (b1))
	      && (!b2 || bitmap_empty_p (b2)))
	  || (b1 && b2
	      && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (const value_range_base *vr)
{
  return vr->zero_p ();
}

static inline bool
range_is_nonnull (const value_range_base *vr)
{
  return (vr->kind () == VR_ANTI_RANGE
	  && vr->min () == vr->max ()
	  && integer_zerop (vr->min ()));
}

/* Return true if the max and min of VR are INTEGER_CST.  The range
   need not be a singleton.  */

bool
range_int_cst_p (const value_range_base *vr)
{
  return (vr->kind () == VR_RANGE
	  && TREE_CODE (vr->min ()) == INTEGER_CST
	  && TREE_CODE (vr->max ()) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range_base *vr)
{
  return (range_int_cst_p (vr)
	  && tree_int_cst_equal (vr->min (), vr->max ()));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

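/* For instance (illustrative): for T = X_1 + 7 this returns X_1 with
   *NEG = false and *INV = 7; for T = 7 - X_1 it returns X_1 with
   *NEG = true and *INV = 7; for T = X_1 + X_2 it returns NULL_TREE.  */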
tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

	-2 if VAL1 and VAL2 cannot be compared at compile-time,
	-1 if VAL1 < VAL2,
	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

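/* For instance (illustrative): with undefined signed overflow,
   comparing X_1 + 8 against X_1 + 3 returns +1 and sets
   *STRICT_OVERFLOW_P, while comparing X_1 against an unrelated Y_2
   returns -2.  */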
int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}


/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
          0 if VAL is not inside [MIN, MAX],
	 -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

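/* For instance (illustrative): value_inside_range (5, 1, 10) returns
   1, value_inside_range (11, 1, 10) returns 0, and the result is -2
   as soon as VAL cannot be ordered against one of the bounds.  */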
int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}


/* Return TRUE if *VR includes the value X.  */

bool
range_includes_p (const value_range_base *vr, HOST_WIDE_INT x)
{
  if (vr->varying_p () || vr->undefined_p ())
    return true;
  return vr->may_contain_p (build_int_cst (vr->type (), x));
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.

   ???  This actually returns &x for a singleton like [&x, &x], so
   perhaps "constant" is not the best name.  */

tree
value_range_constant_singleton (const value_range_base *vr)
{
  tree result = NULL;
  if (vr->singleton_p (&result))
    return result;
  return NULL;
}

/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */

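/* A worked example (illustrative): for the unsigned range [4, 7],
   i.e. 0b100 through 0b111, bit 2 is set in every value, so
   *MUST_BE_NONZERO becomes 0b100 and *MAY_BE_NONZERO becomes 0b111.  */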
bool
vrp_set_zero_nonzero_bits (const tree expr_type,
			   const value_range_base *vr,
			   wide_int *may_be_nonzero,
			   wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
					wi::to_wide (vr->min ()),
					wi::to_wide (vr->max ()),
					*may_be_nonzero, *must_be_nonzero);
  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

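/* For instance (illustrative): for a 32-bit int, ~[5, 10] becomes
   *VR0 = [INT_MIN, 4] and *VR1 = [11, INT_MAX], while ~[INT_MIN, 10]
   is representable as the single range *VR0 = [11, INT_MAX], with
   *VR1 left VR_UNDEFINED.  */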
static bool
ranges_from_anti_range (const value_range_base *ar,
			value_range_base *vr0, value_range_base *vr1)
{
  tree type = ar->type ();

  vr0->set_undefined ();
  vr1->set_undefined ();

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->kind () != VR_ANTI_RANGE
      || TREE_CODE (ar->min ()) != INTEGER_CST
      || TREE_CODE (ar->max ()) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (tree_int_cst_lt (vrp_val_min (type), ar->min ()))
    vr0->set (VR_RANGE,
	      vrp_val_min (type),
	      wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
  if (tree_int_cst_lt (ar->max (), vrp_val_max (type)))
    vr1->set (VR_RANGE,
	      wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
	      vrp_val_max (type));
  if (vr0->undefined_p ())
    {
      *vr0 = *vr1;
      vr1->set_undefined ();
    }

  return !vr0->undefined_p ();
}

/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX].

   If the value range is anything but a VR_*RANGE of constants, the
   resulting wide ints are set to [-MIN, +MAX] for the type.  */

static void inline
extract_range_into_wide_ints (const value_range_base *vr,
			      signop sign, unsigned prec,
			      wide_int &wmin, wide_int &wmax)
{
  gcc_assert (vr->kind () != VR_ANTI_RANGE || vr->symbolic_p ());
  if (range_int_cst_p (vr))
    {
      wmin = wi::to_wide (vr->min ());
      wmax = wi::to_wide (vr->max ());
    }
  else
    {
      wmin = wi::min_value (prec, sign);
      wmax = wi::max_value (prec, sign);
    }
}

/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */

static void
extract_range_from_multiplicative_op (value_range_base *vr,
				      enum tree_code code,
				      const value_range_base *vr0,
				      const value_range_base *vr1)
{
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert (vr0->kind () == VR_RANGE
	      && vr0->kind () == vr1->kind ());

  tree type = vr0->type ();
  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0->min ());
  wide_int vr0_ub = wi::to_wide (vr0->max ());
  wide_int vr1_lb = wi::to_wide (vr1->min ());
  wide_int vr1_ub = wi::to_wide (vr1->max ());
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
					code, TYPE_SIGN (type), prec,
					vr0_lb, vr0_ub, vr1_lb, vr1_ub,
					overflow_undefined))
    vr->set_and_canonicalize (VR_RANGE,
			      wide_int_to_tree (type, res_lb),
			      wide_int_to_tree (type, res_ub));
  else
    vr->set_varying ();
}

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic part of OPn, if it has one.

   NEG_OPn is TRUE if OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
		       tree sym_op0, tree sym_op1,
		       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
				 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
	 undefined overflow.  */
      if (!minus_p
	  || neg_op1
	  || TYPE_OVERFLOW_WRAPS (type))
	bound = build_symbolic_expr (type, sym_op1,
				     neg_op1 ^ minus_p, bound);
      else
	bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is -1 if an underflow occurred, +1 if an overflow occurred and
   0 if neither occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
	       tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
	wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
	wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
	wi = wi::neg (wi::to_wide (op1), &ovf);
      else
	wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  -1 indicates
   underflow.  +1 indicates overflow.  0 indicates neither.  */

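/* A worked example (illustrative): for unsigned char, combining the
   bounds of [250, 250] + [0, 10] gives WMIN = 250 and WMAX wrapped
   to 4 with MAX_OVF = wi::OVF_OVERFLOW, so the result becomes the
   anti-range ~[5, 249], i.e. [250, 255] U [0, 4].  Had the type's
   overflow not wrapped, the overflowed bound would instead saturate
   to the type maximum.  */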
static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
			       tree type,
			       const wide_int &wmin, const wide_int &wmax,
			       wi::overflow_type min_ovf,
			       wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
	 range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
	{
	  /* If the limits are swapped, we wrapped around and cover
	     the entire range.  We have a similar check at the end of
	     extract_range_from_binary_expr.  */
	  if (wi::gt_p (tmin, tmax, sgn))
	    kind = VR_VARYING;
	  else
	    {
	      kind = VR_RANGE;
	      /* No overflow or both overflow or underflow.  The
		 range kind stays VR_RANGE.  */
	      min = wide_int_to_tree (type, tmin);
	      max = wide_int_to_tree (type, tmax);
	    }
	  return;
	}
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
	       || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
	{
	  /* Min underflow or max overflow.  The range kind
	     changes to VR_ANTI_RANGE.  */
	  bool covers = false;
	  wide_int tem = tmin;
	  tmin = tmax + 1;
	  if (wi::cmp (tmin, tmax, sgn) < 0)
	    covers = true;
	  tmax = tem - 1;
	  if (wi::cmp (tmax, tem, sgn) > 0)
	    covers = true;
	  /* If the anti-range would cover nothing, drop to varying.
	     Likewise if the anti-range bounds are outside of the
	     types values.  */
	  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
	    {
	      kind = VR_VARYING;
	      return;
	    }
	  kind = VR_ANTI_RANGE;
	  min = wide_int_to_tree (type, tmin);
	  max = wide_int_to_tree (type, tmax);
	  return;
	}
      else
	{
	  /* Other underflow and/or overflow, drop to VR_VARYING.  */
	  kind = VR_VARYING;
	  return;
	}
    }
  else
    {
      /* If overflow does not wrap, saturate to the types min/max
	 value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      kind = VR_RANGE;
      if (min_ovf == wi::OVF_UNDERFLOW)
	min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
	min = wide_int_to_tree (type, type_max);
      else
	min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
	max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
	max = wide_int_to_tree (type, type_max);
      else
	max = wide_int_to_tree (type, wmax);
    }
}

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

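/* For instance (illustrative): [1, 5] + [10, 20] in a 32-bit int
   yields [11, 25], while ~[0, 0] op X is first split by
   ranges_from_anti_range into two ordinary ranges whose partial
   results are unioned.  */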
1522 void
extract_range_from_binary_expr(value_range_base * vr,enum tree_code code,tree expr_type,const value_range_base * vr0_,const value_range_base * vr1_)1523 extract_range_from_binary_expr (value_range_base *vr,
1524 				enum tree_code code, tree expr_type,
1525 				const value_range_base *vr0_,
1526 				const value_range_base *vr1_)
1527 {
1528   signop sign = TYPE_SIGN (expr_type);
1529   unsigned int prec = TYPE_PRECISION (expr_type);
1530   value_range_base vr0 = *vr0_, vr1 = *vr1_;
1531   value_range_base vrtem0, vrtem1;
1532   enum value_range_kind type;
1533   tree min = NULL_TREE, max = NULL_TREE;
1534   int cmp;
1535 
1536   if (!INTEGRAL_TYPE_P (expr_type)
1537       && !POINTER_TYPE_P (expr_type))
1538     {
1539       vr->set_varying ();
1540       return;
1541     }
1542 
1543   /* Not all binary expressions can be applied to ranges in a
1544      meaningful way.  Handle only arithmetic operations.  */
1545   if (code != PLUS_EXPR
1546       && code != MINUS_EXPR
1547       && code != POINTER_PLUS_EXPR
1548       && code != MULT_EXPR
1549       && code != TRUNC_DIV_EXPR
1550       && code != FLOOR_DIV_EXPR
1551       && code != CEIL_DIV_EXPR
1552       && code != EXACT_DIV_EXPR
1553       && code != ROUND_DIV_EXPR
1554       && code != TRUNC_MOD_EXPR
1555       && code != RSHIFT_EXPR
1556       && code != LSHIFT_EXPR
1557       && code != MIN_EXPR
1558       && code != MAX_EXPR
1559       && code != BIT_AND_EXPR
1560       && code != BIT_IOR_EXPR
1561       && code != BIT_XOR_EXPR)
1562     {
1563       vr->set_varying ();
1564       return;
1565     }
1566 
1567   /* If both ranges are UNDEFINED, so is the result.  */
1568   if (vr0.undefined_p () && vr1.undefined_p ())
1569     {
1570       vr->set_undefined ();
1571       return;
1572     }
1573   /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1574      code.  At some point we may want to special-case operations that
1575      have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1576      operand.  */
1577   else if (vr0.undefined_p ())
1578     vr0.set_varying ();
1579   else if (vr1.undefined_p ())
1580     vr1.set_varying ();
1581 
1582   /* We get imprecise results from ranges_from_anti_range when
1583      code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
1584      range, but then we also need to hack up vrp_union.  It's just
1585      easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
1586   if (code == EXACT_DIV_EXPR && range_is_nonnull (&vr0))
1587     {
1588       vr->set_nonnull (expr_type);
1589       return;
1590     }
1591 
1592   /* Now canonicalize anti-ranges to ranges when they are not symbolic
1593      and express ~[] op X as ([]' op X) U ([]'' op X).  */
1594   if (vr0.kind () == VR_ANTI_RANGE
1595       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1596     {
1597       extract_range_from_binary_expr (vr, code, expr_type, &vrtem0, vr1_);
1598       if (!vrtem1.undefined_p ())
1599 	{
1600 	  value_range_base vrres;
1601 	  extract_range_from_binary_expr (&vrres, code, expr_type,
1602 					  &vrtem1, vr1_);
1603 	  vr->union_ (&vrres);
1604 	}
1605       return;
1606     }
1607   /* Likewise for X op ~[].  */
1608   if (vr1.kind () == VR_ANTI_RANGE
1609       && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1610     {
1611       extract_range_from_binary_expr (vr, code, expr_type, vr0_, &vrtem0);
1612       if (!vrtem1.undefined_p ())
1613 	{
1614 	  value_range_base vrres;
1615 	  extract_range_from_binary_expr (&vrres, code, expr_type,
1616 					  vr0_, &vrtem1);
1617 	  vr->union_ (&vrres);
1618 	}
1619       return;
1620     }
1621 
1622   /* The type of the resulting value range defaults to VR0.TYPE.  */
1623   type = vr0.kind ();
1624 
1625   /* Refuse to operate on VARYING ranges, ranges of different kinds
1626      and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
1627      because we may be able to derive a useful range even if one of
1628      the operands is VR_VARYING or symbolic range.  Similarly for
1629      divisions, MIN/MAX and PLUS/MINUS.
1630 
1631      TODO, we may be able to derive anti-ranges in some cases.  */
1632   if (code != BIT_AND_EXPR
1633       && code != BIT_IOR_EXPR
1634       && code != TRUNC_DIV_EXPR
1635       && code != FLOOR_DIV_EXPR
1636       && code != CEIL_DIV_EXPR
1637       && code != EXACT_DIV_EXPR
1638       && code != ROUND_DIV_EXPR
1639       && code != TRUNC_MOD_EXPR
1640       && code != MIN_EXPR
1641       && code != MAX_EXPR
1642       && code != PLUS_EXPR
1643       && code != MINUS_EXPR
1644       && code != RSHIFT_EXPR
1645       && code != POINTER_PLUS_EXPR
1646       && (vr0.varying_p ()
1647 	  || vr1.varying_p ()
1648 	  || vr0.kind () != vr1.kind ()
1649 	  || vr0.symbolic_p ()
1650 	  || vr1.symbolic_p ()))
1651     {
1652       vr->set_varying ();
1653       return;
1654     }
1655 
1656   /* Now evaluate the expression to determine the new range.  */
1657   if (POINTER_TYPE_P (expr_type))
1658     {
1659       if (code == MIN_EXPR || code == MAX_EXPR)
1660 	{
1661 	  /* For MIN/MAX expressions with pointers, we only care about
1662 	     nullness, if both are non null, then the result is nonnull.
1663 	     If both are null, then the result is null. Otherwise they
1664 	     are varying.  */
1665 	  if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1666 	    vr->set_nonnull (expr_type);
1667 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
1668 	    vr->set_null (expr_type);
1669 	  else
1670 	    vr->set_varying ();
1671 	}
1672       else if (code == POINTER_PLUS_EXPR)
1673 	{
1674 	  /* For pointer types, we are really only interested in asserting
1675 	     whether the expression evaluates to non-NULL.
1676 	     With -fno-delete-null-pointer-checks we need to be more
1677 	     conservative.  As some object might reside at address 0,
1678 	     then some offset could be added to it and the same offset
1679 	     subtracted again and the result would be NULL.
1680 	     E.g.
1681 	     static int a[12]; where &a[0] is NULL and
1682 	     ptr = &a[6];
1683 	     ptr -= 6;
1684 	     ptr will be NULL here, even when there is POINTER_PLUS_EXPR
1685 	     where the first range doesn't include zero and the second one
1686 	     doesn't either.  As the second operand is sizetype (unsigned),
1687 	     consider all ranges where the MSB could be set as possible
1688 	     subtractions where the result might be NULL.  */
1689 	  if ((!range_includes_zero_p (&vr0)
1690 	       || !range_includes_zero_p (&vr1))
1691 	      && !TYPE_OVERFLOW_WRAPS (expr_type)
1692 	      && (flag_delete_null_pointer_checks
1693 		  || (range_int_cst_p (&vr1)
1694 		      && !tree_int_cst_sign_bit (vr1.max ()))))
1695 	    vr->set_nonnull (expr_type);
1696 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
1697 	    vr->set_null (expr_type);
1698 	  else
1699 	    vr->set_varying ();
1700 	}
1701       else if (code == BIT_AND_EXPR)
1702 	{
1703 	  /* For pointer types, we are really only interested in asserting
1704 	     whether the expression evaluates to non-NULL.  */
1705 	  if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1706 	    vr->set_nonnull (expr_type);
1707 	  else if (range_is_null (&vr0) || range_is_null (&vr1))
1708 	    vr->set_null (expr_type);
1709 	  else
1710 	    vr->set_varying ();
1711 	}
1712       else
1713 	vr->set_varying ();
1714 
1715       return;
1716     }
1717 
1718   /* For integer ranges, apply the operation to each end of the
1719      range and see what we end up with.  */
1720   if (code == PLUS_EXPR || code == MINUS_EXPR)
1721     {
1722       /* This will normalize things such that calculating
1723 	 [0,0] - VR_VARYING is not dropped to varying, but is
1724 	 calculated as [MIN+1, MAX].  */
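      /* E.g. (illustrative) for a signed 8-bit type:
	   [0, 0] - VARYING = [0, 0] - [-128, 127] = [-127, 128]
	 where the overflowing upper bound saturates to +INF (127),
	 giving [-127, 127], i.e. [MIN+1, MAX].  */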
1725       if (vr0.varying_p ())
1726 	vr0.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));
1727       if (vr1.varying_p ())
1728 	vr1.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));
1729 
1730       const bool minus_p = (code == MINUS_EXPR);
1731       tree min_op0 = vr0.min ();
1732       tree min_op1 = minus_p ? vr1.max () : vr1.min ();
1733       tree max_op0 = vr0.max ();
1734       tree max_op1 = minus_p ? vr1.min () : vr1.max ();
1735       tree sym_min_op0 = NULL_TREE;
1736       tree sym_min_op1 = NULL_TREE;
1737       tree sym_max_op0 = NULL_TREE;
1738       tree sym_max_op1 = NULL_TREE;
1739       bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1740 
1741       neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1742 
1743       /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1744 	 single-symbolic ranges, try to compute the precise resulting range,
1745 	 but only if we know that this resulting range will also be constant
1746 	 or single-symbolic.  */
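      /* E.g. (illustrative, with A a single symbol):
	   [A + 2, A + 5] + [1, 3]  -->  [A + 3, A + 8]
	 where get_single_symbol splits each bound into its symbolic
	 part A and a constant part for combine_bound below.  */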
1747       if (vr0.kind () == VR_RANGE && vr1.kind () == VR_RANGE
1748 	  && (TREE_CODE (min_op0) == INTEGER_CST
1749 	      || (sym_min_op0
1750 		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1751 	  && (TREE_CODE (min_op1) == INTEGER_CST
1752 	      || (sym_min_op1
1753 		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1754 	  && (!(sym_min_op0 && sym_min_op1)
1755 	      || (sym_min_op0 == sym_min_op1
1756 		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1757 	  && (TREE_CODE (max_op0) == INTEGER_CST
1758 	      || (sym_max_op0
1759 		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1760 	  && (TREE_CODE (max_op1) == INTEGER_CST
1761 	      || (sym_max_op1
1762 		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1763 	  && (!(sym_max_op0 && sym_max_op1)
1764 	      || (sym_max_op0 == sym_max_op1
1765 		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1766 	{
1767 	  wide_int wmin, wmax;
1768 	  wi::overflow_type min_ovf = wi::OVF_NONE;
1769 	  wi::overflow_type max_ovf = wi::OVF_NONE;
1770 
1771 	  /* Build the bounds.  */
1772 	  combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1773 	  combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1774 
1775 	  /* If we have overflow for the constant part and the resulting
1776 	     range will be symbolic, drop to VR_VARYING.  */
1777 	  if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
1778 	      || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
1779 	    {
1780 	      vr->set_varying ();
1781 	      return;
1782 	    }
1783 
1784 	  /* Adjust the range for possible overflow.  */
1785 	  min = NULL_TREE;
1786 	  max = NULL_TREE;
1787 	  set_value_range_with_overflow (type, min, max, expr_type,
1788 					 wmin, wmax, min_ovf, max_ovf);
1789 	  if (type == VR_VARYING)
1790 	    {
1791 	      vr->set_varying ();
1792 	      return;
1793 	    }
1794 
1795 	  /* Build the symbolic bounds if needed.  */
1796 	  adjust_symbolic_bound (min, code, expr_type,
1797 				 sym_min_op0, sym_min_op1,
1798 				 neg_min_op0, neg_min_op1);
1799 	  adjust_symbolic_bound (max, code, expr_type,
1800 				 sym_max_op0, sym_max_op1,
1801 				 neg_max_op0, neg_max_op1);
1802 	}
1803       else
1804 	{
1805 	  /* For other cases, for example if we have a PLUS_EXPR with two
1806 	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
1807 	     to compute a precise range for such a case.
1808 	     ???  In general, even mixed range kind operations can be
1809 	     expressed by, for example, transforming ~[3, 5] + [1, 2] into
1810 	     range-only operations and a union primitive:
1811 	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
1812 	           [-INF+1, 4]     U    [6, +INF(OVF)]
1813 	     though usually the union is not exactly representable with
1814 	     a single range or anti-range as the above is
1815 		 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1816 	     but one could use a scheme similar to equivalences for this. */
1817 	  vr->set_varying ();
1818 	  return;
1819 	}
1820     }
1821   else if (code == MIN_EXPR
1822 	   || code == MAX_EXPR)
1823     {
1824       wide_int wmin, wmax;
1825       wide_int vr0_min, vr0_max;
1826       wide_int vr1_min, vr1_max;
1827       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1828       extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
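      /* The result bounds are the pointwise MIN/MAX of the operand
	 bounds, e.g. (illustrative):
	   MIN ([1, 5], [3, 10])  -->  [1, 5]
	   MAX ([1, 5], [3, 10])  -->  [3, 10]  */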
1829       if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
1830 				  vr0_min, vr0_max, vr1_min, vr1_max))
1831 	vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1832 		 wide_int_to_tree (expr_type, wmax));
1833       else
1834 	vr->set_varying ();
1835       return;
1836     }
1837   else if (code == MULT_EXPR)
1838     {
1839       if (!range_int_cst_p (&vr0)
1840 	  || !range_int_cst_p (&vr1))
1841 	{
1842 	  vr->set_varying ();
1843 	  return;
1844 	}
1845       extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1846       return;
1847     }
1848   else if (code == RSHIFT_EXPR
1849 	   || code == LSHIFT_EXPR)
1850     {
1851       if (range_int_cst_p (&vr1)
1852 	  && !wide_int_range_shift_undefined_p
1853 		(TYPE_SIGN (TREE_TYPE (vr1.min ())),
1854 		 prec,
1855 		 wi::to_wide (vr1.min ()),
1856 		 wi::to_wide (vr1.max ())))
1857 	{
1858 	  if (code == RSHIFT_EXPR)
1859 	    {
1860 	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
1861 		 useful ranges just from the shift count.  E.g.
1862 		 x >> 63 for signed 64-bit x is always [-1, 0].  */
1863 	      if (vr0.kind () != VR_RANGE || vr0.symbolic_p ())
1864 		vr0.set (VR_RANGE, vrp_val_min (expr_type),
1865 			 vrp_val_max (expr_type));
1866 	      extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1867 	      return;
1868 	    }
1869 	  else if (code == LSHIFT_EXPR
1870 		   && range_int_cst_p (&vr0))
1871 	    {
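	      /* E.g. (illustrative) for an unsigned 8-bit type:
		   [1, 3] << [2, 4]  -->  [4, 48]
		 provided wide_int_range_lshift can show no set bits
		 are shifted out for any values in the ranges.  */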
1872 	      wide_int res_lb, res_ub;
1873 	      if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
1874 					 wi::to_wide (vr0.min ()),
1875 					 wi::to_wide (vr0.max ()),
1876 					 wi::to_wide (vr1.min ()),
1877 					 wi::to_wide (vr1.max ()),
1878 					 TYPE_OVERFLOW_UNDEFINED (expr_type)))
1879 		{
1880 		  min = wide_int_to_tree (expr_type, res_lb);
1881 		  max = wide_int_to_tree (expr_type, res_ub);
1882 		  vr->set_and_canonicalize (VR_RANGE, min, max);
1883 		  return;
1884 		}
1885 	    }
1886 	}
1887       vr->set_varying ();
1888       return;
1889     }
1890   else if (code == TRUNC_DIV_EXPR
1891 	   || code == FLOOR_DIV_EXPR
1892 	   || code == CEIL_DIV_EXPR
1893 	   || code == EXACT_DIV_EXPR
1894 	   || code == ROUND_DIV_EXPR)
1895     {
1896       wide_int dividend_min, dividend_max, divisor_min, divisor_max;
1897       wide_int wmin, wmax, extra_min, extra_max;
1898       bool extra_range_p;
1899 
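      /* When the divisor range spans zero, the result can come back in
	 two pieces, e.g. (illustrative):
	   [10, 20] / [-2, 2]  -->  [5, 20]  U  [-20, -5]
	 with the second piece returned in EXTRA_MIN/EXTRA_MAX and
	 merged in via union_ below.  */
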
1900       /* Special case explicit division by zero as undefined.  */
1901       if (range_is_null (&vr1))
1902 	{
1903 	  vr->set_undefined ();
1904 	  return;
1905 	}
1906 
1907       /* First, normalize ranges into constants we can handle.  Note
1908 	 that VR_ANTI_RANGE's of constants were already normalized
1909 	 before arriving here.
1910 
1911 	 NOTE: As a future improvement, we may be able to do better
1912 	 with mixed symbolic (anti-)ranges like [0, A].  See note in
1913 	 ranges_from_anti_range.  */
1914       extract_range_into_wide_ints (&vr0, sign, prec,
1915 				    dividend_min, dividend_max);
1916       extract_range_into_wide_ints (&vr1, sign, prec,
1917 				    divisor_min, divisor_max);
1918       if (!wide_int_range_div (wmin, wmax, code, sign, prec,
1919 			       dividend_min, dividend_max,
1920 			       divisor_min, divisor_max,
1921 			       TYPE_OVERFLOW_UNDEFINED (expr_type),
1922 			       extra_range_p, extra_min, extra_max))
1923 	{
1924 	  vr->set_varying ();
1925 	  return;
1926 	}
1927       vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1928 	       wide_int_to_tree (expr_type, wmax));
1929       if (extra_range_p)
1930 	{
1931 	  value_range_base
1932 	    extra_range (VR_RANGE, wide_int_to_tree (expr_type, extra_min),
1933 			 wide_int_to_tree (expr_type, extra_max));
1934 	  vr->union_ (&extra_range);
1935 	}
1936       return;
1937     }
1938   else if (code == TRUNC_MOD_EXPR)
1939     {
1940       if (range_is_null (&vr1))
1941 	{
1942 	  vr->set_undefined ();
1943 	  return;
1944 	}
1945       wide_int wmin, wmax, tmp;
1946       wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1947       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1948       extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
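      /* E.g. (illustrative) for unsigned operands:
	   VARYING % [3, 5]  -->  [0, 4]
	 since the remainder is always smaller than the largest
	 possible divisor.  */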
1949       wide_int_range_trunc_mod (wmin, wmax, sign, prec,
1950 				vr0_min, vr0_max, vr1_min, vr1_max);
1951       min = wide_int_to_tree (expr_type, wmin);
1952       max = wide_int_to_tree (expr_type, wmax);
1953       vr->set (VR_RANGE, min, max);
1954       return;
1955     }
1956   else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
1957     {
1958       wide_int may_be_nonzero0, may_be_nonzero1;
1959       wide_int must_be_nonzero0, must_be_nonzero1;
1960       wide_int wmin, wmax;
1961       wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1962       vrp_set_zero_nonzero_bits (expr_type, &vr0,
1963 				 &may_be_nonzero0, &must_be_nonzero0);
1964       vrp_set_zero_nonzero_bits (expr_type, &vr1,
1965 				 &may_be_nonzero1, &must_be_nonzero1);
1966       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1967       extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
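      /* E.g. (illustrative, 8-bit values):
	   [0x10, 0x10] | [0x00, 0x0f]  -->  [0x10, 0x1f]
	   [0x01, 0x01] ^ [0x01, 0x01]  -->  [0x00, 0x00]
	 derived from the may-be-nonzero and must-be-nonzero bit
	 masks computed above.  */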
1968       if (code == BIT_AND_EXPR)
1969 	{
1970 	  if (wide_int_range_bit_and (wmin, wmax, sign, prec,
1971 				      vr0_min, vr0_max,
1972 				      vr1_min, vr1_max,
1973 				      must_be_nonzero0,
1974 				      may_be_nonzero0,
1975 				      must_be_nonzero1,
1976 				      may_be_nonzero1))
1977 	    {
1978 	      min = wide_int_to_tree (expr_type, wmin);
1979 	      max = wide_int_to_tree (expr_type, wmax);
1980 	      vr->set (VR_RANGE, min, max);
1981 	    }
1982 	  else
1983 	    vr->set_varying ();
1984 	  return;
1985 	}
1986       else if (code == BIT_IOR_EXPR)
1987 	{
1988 	  if (wide_int_range_bit_ior (wmin, wmax, sign,
1989 				      vr0_min, vr0_max,
1990 				      vr1_min, vr1_max,
1991 				      must_be_nonzero0,
1992 				      may_be_nonzero0,
1993 				      must_be_nonzero1,
1994 				      may_be_nonzero1))
1995 	    {
1996 	      min = wide_int_to_tree (expr_type, wmin);
1997 	      max = wide_int_to_tree (expr_type, wmax);
1998 	      vr->set (VR_RANGE, min, max);
1999 	    }
2000 	  else
2001 	    vr->set_varying ();
2002 	  return;
2003 	}
2004       else if (code == BIT_XOR_EXPR)
2005 	{
2006 	  if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
2007 				      must_be_nonzero0,
2008 				      may_be_nonzero0,
2009 				      must_be_nonzero1,
2010 				      may_be_nonzero1))
2011 	    {
2012 	      min = wide_int_to_tree (expr_type, wmin);
2013 	      max = wide_int_to_tree (expr_type, wmax);
2014 	      vr->set (VR_RANGE, min, max);
2015 	    }
2016 	  else
2017 	    vr->set_varying ();
2018 	  return;
2019 	}
2020     }
2021   else
2022     gcc_unreachable ();
2023 
2024   /* If either MIN or MAX overflowed, then set the resulting range to
2025      VARYING.  */
2026   if (min == NULL_TREE
2027       || TREE_OVERFLOW_P (min)
2028       || max == NULL_TREE
2029       || TREE_OVERFLOW_P (max))
2030     {
2031       vr->set_varying ();
2032       return;
2033     }
2034 
2035   /* We punt for [-INF, +INF].
2036      We learn nothing when we have INF on both sides.
2037      Note that we do accept [-INF, -INF] and [+INF, +INF].  */
2038   if (vrp_val_is_min (min) && vrp_val_is_max (max))
2039     {
2040       vr->set_varying ();
2041       return;
2042     }
2043 
2044   cmp = compare_values (min, max);
2045   if (cmp == -2 || cmp == 1)
2046     {
2047       /* If the new range has its limits swapped around (MIN > MAX),
2048 	 then the operation caused one of them to wrap around, mark
2049 	 the new range VARYING.  */
2050       vr->set_varying ();
2051     }
2052   else
2053     vr->set (type, min, max);
2054 }
2055 
2056 /* Extract range information from a unary operation CODE based on
2057    the range of its operand *VR0, whose type is OP0_TYPE, producing a
2058    result of type TYPE.  The resulting range is stored in *VR.  */
2059 
2060 void
2061 extract_range_from_unary_expr (value_range_base *vr,
2062 			       enum tree_code code, tree type,
2063 			       const value_range_base *vr0_, tree op0_type)
2064 {
2065   signop sign = TYPE_SIGN (type);
2066   unsigned int prec = TYPE_PRECISION (type);
2067   value_range_base vr0 = *vr0_;
2068   value_range_base vrtem0, vrtem1;
2069 
2070   /* VRP only operates on integral and pointer types.  */
2071   if (!(INTEGRAL_TYPE_P (op0_type)
2072 	|| POINTER_TYPE_P (op0_type))
2073       || !(INTEGRAL_TYPE_P (type)
2074 	   || POINTER_TYPE_P (type)))
2075     {
2076       vr->set_varying ();
2077       return;
2078     }
2079 
2080   /* If VR0 is UNDEFINED, so is the result.  */
2081   if (vr0.undefined_p ())
2082     {
2083       vr->set_undefined ();
2084       return;
2085     }
2086 
2087   /* Handle operations that we express in terms of others.  */
2088   if (code == PAREN_EXPR)
2089     {
2090       /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
2091       *vr = vr0;
2092       return;
2093     }
2094   else if (code == NEGATE_EXPR)
2095     {
2096       /* -X is simply 0 - X, so re-use existing code that also handles
2097          anti-ranges fine.  */
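      /* E.g. (illustrative):  - [1, 5] = [0, 0] - [1, 5] = [-5, -1].  */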
2098       value_range_base zero;
2099       zero.set (build_int_cst (type, 0));
2100       extract_range_from_binary_expr (vr, MINUS_EXPR, type, &zero, &vr0);
2101       return;
2102     }
2103   else if (code == BIT_NOT_EXPR)
2104     {
2105       /* ~X is simply -1 - X, so re-use existing code that also handles
2106          anti-ranges fine.  */
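      /* E.g. (illustrative):  ~ [0, 3] = [-1, -1] - [0, 3] = [-4, -1].  */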
2107       value_range_base minusone;
2108       minusone.set (build_int_cst (type, -1));
2109       extract_range_from_binary_expr (vr, MINUS_EXPR, type, &minusone, &vr0);
2110       return;
2111     }
2112 
2113   /* Now canonicalize anti-ranges to ranges when they are not symbolic
2114      and express op ~[]  as (op []') U (op []'').  */
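  /* E.g. (illustrative):  op ~[3, 5] is evaluated as
       (op [-INF, 2])  U  (op [6, +INF]).  */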
2115   if (vr0.kind () == VR_ANTI_RANGE
2116       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2117     {
2118       extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2119       if (!vrtem1.undefined_p ())
2120 	{
2121 	  value_range_base vrres;
2122 	  extract_range_from_unary_expr (&vrres, code, type,
2123 					 &vrtem1, op0_type);
2124 	  vr->union_ (&vrres);
2125 	}
2126       return;
2127     }
2128 
2129   if (CONVERT_EXPR_CODE_P (code))
2130     {
2131       tree inner_type = op0_type;
2132       tree outer_type = type;
2133 
2134       /* If the expression involves a pointer, we are only interested in
2135 	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).
2136 
2137 	 This may lose precision when converting (char *)~[0,2] to
2138 	 int, because we'll forget that the pointer can also not be 1
2139 	 or 2.  In practice we don't care, as this is some idiot
2140 	 storing a magic constant to a pointer.  */
2141       if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type))
2142 	{
2143 	  if (!range_includes_zero_p (&vr0))
2144 	    vr->set_nonnull (type);
2145 	  else if (range_is_null (&vr0))
2146 	    vr->set_null (type);
2147 	  else
2148 	    vr->set_varying ();
2149 	  return;
2150 	}
2151 
2152       /* The POINTER_TYPE_P code above will have dealt with all
2153 	 pointer anti-ranges.  Any remaining anti-ranges at this point
2154 	 will be integer conversions from SSA names that will be
2155 	 normalized into VARYING.  For instance: ~[x_55, x_55].  */
2156       gcc_assert (vr0.kind () != VR_ANTI_RANGE
2157 		  || TREE_CODE (vr0.min ()) != INTEGER_CST);
2158 
2159       /* NOTES: Previously we were returning VARYING for all symbolics, but
2160 	 we can do better by treating them as [MIN, MAX] of their type.  For
2161 	 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
2162 	 we can return: ~[0x80000000, 0xffffffff7fffffff].
2163 
2164 	 We were also failing to convert ~[0,0] from char* to unsigned,
2165 	 instead choosing to return VR_VARYING.  Now we return ~[0,0].  */
2166       wide_int vr0_min, vr0_max, wmin, wmax;
2167       signop inner_sign = TYPE_SIGN (inner_type);
2168       signop outer_sign = TYPE_SIGN (outer_type);
2169       unsigned inner_prec = TYPE_PRECISION (inner_type);
2170       unsigned outer_prec = TYPE_PRECISION (outer_type);
2171       extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
2172 				    vr0_min, vr0_max);
2173       if (wide_int_range_convert (wmin, wmax,
2174 				  inner_sign, inner_prec,
2175 				  outer_sign, outer_prec,
2176 				  vr0_min, vr0_max))
2177 	{
2178 	  tree min = wide_int_to_tree (outer_type, wmin);
2179 	  tree max = wide_int_to_tree (outer_type, wmax);
2180 	  vr->set_and_canonicalize (VR_RANGE, min, max);
2181 	}
2182       else
2183 	vr->set_varying ();
2184       return;
2185     }
2186   else if (code == ABS_EXPR)
2187     {
2188       wide_int wmin, wmax;
2189       wide_int vr0_min, vr0_max;
2190       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
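      /* E.g. (illustrative):  ABS ([-3, 2])  -->  [0, 3].  The
	 TYPE_OVERFLOW_UNDEFINED argument matters for ranges containing
	 the type minimum, whose absolute value is not representable.  */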
2191       if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
2192 			      TYPE_OVERFLOW_UNDEFINED (type)))
2193 	vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
2194 		 wide_int_to_tree (type, wmax));
2195       else
2196 	vr->set_varying ();
2197       return;
2198     }
2199   else if (code == ABSU_EXPR)
2200     {
2201       wide_int wmin, wmax;
2202       wide_int vr0_min, vr0_max;
2203       extract_range_into_wide_ints (&vr0, SIGNED, prec, vr0_min, vr0_max);
2204       wide_int_range_absu (wmin, wmax, prec, vr0_min, vr0_max);
2205       vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
2206 	       wide_int_to_tree (type, wmax));
2207       return;
2208     }
2209 
2210   /* For unhandled operations fall back to varying.  */
2211   vr->set_varying ();
2212   return;
2213 }
2214 
2215 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2216    create a new SSA name N and return the assertion assignment
2217    'N = ASSERT_EXPR <V, V OP W>'.  */
2218 
2219 static gimple *
2220 build_assert_expr_for (tree cond, tree v)
2221 {
2222   tree a;
2223   gassign *assertion;
2224 
2225   gcc_assert (TREE_CODE (v) == SSA_NAME
2226 	      && COMPARISON_CLASS_P (cond));
2227 
2228   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2229   assertion = gimple_build_assign (NULL_TREE, a);
2230 
2231   /* The new ASSERT_EXPR creates a new SSA name that replaces the
2232      operand of the ASSERT_EXPR.  Create it so that the new name and the
2233      old one are registered in the replacement table and the SSA web can
2234      be fixed up after all the ASSERT_EXPRs have been added.  */
2235   tree new_def = create_new_def_for (v, assertion, NULL);
2236   /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2237      given we have to be able to fully propagate those out to re-create
2238      valid SSA when removing the asserts.  */
2239   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2240     SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2241 
2242   return assertion;
2243 }
2244 
2245 
2246 /* Return true if the predicate of GIMPLE_COND statement STMT involves
2247    floating point values.  */
2248 
2249 static inline bool
2250 fp_predicate (gimple *stmt)
2251 {
2252   GIMPLE_CHECK (stmt, GIMPLE_COND);
2253 
2254   return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2255 }
2256 
2257 /* If the range of values taken by OP can be inferred after STMT executes,
2258    return the comparison code (COMP_CODE_P) and value (VAL_P) that
2259    describes the inferred range.  Return true if a range could be
2260    inferred.  */
2261 
2262 bool
2263 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2264 {
2265   *val_p = NULL_TREE;
2266   *comp_code_p = ERROR_MARK;
2267 
2268   /* Do not attempt to infer anything about names that flow through
2269      abnormal edges.  */
2270   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2271     return false;
2272 
2273   /* If STMT is the last statement of a basic block with no normal
2274      successors, there is no point inferring anything about any of its
2275      operands.  We would not be able to find a proper insertion point
2276      for the assertion, anyway.  */
2277   if (stmt_ends_bb_p (stmt))
2278     {
2279       edge_iterator ei;
2280       edge e;
2281 
2282       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2283 	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2284 	  break;
2285       if (e == NULL)
2286 	return false;
2287     }
2288 
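  /* E.g. (illustrative): after  tmp_1 = *ptr_2;  executes we know
     ptr_2 != 0, returned as *COMP_CODE_P = NE_EXPR and *VAL_P = 0.  */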
2289   if (infer_nonnull_range (stmt, op))
2290     {
2291       *val_p = build_int_cst (TREE_TYPE (op), 0);
2292       *comp_code_p = NE_EXPR;
2293       return true;
2294     }
2295 
2296   return false;
2297 }
2298 
2299 
2300 void dump_asserts_for (FILE *, tree);
2301 void debug_asserts_for (tree);
2302 void dump_all_asserts (FILE *);
2303 void debug_all_asserts (void);
2304 
2305 /* Dump all the registered assertions for NAME to FILE.  */
2306 
2307 void
2308 dump_asserts_for (FILE *file, tree name)
2309 {
2310   assert_locus *loc;
2311 
2312   fprintf (file, "Assertions to be inserted for ");
2313   print_generic_expr (file, name);
2314   fprintf (file, "\n");
2315 
2316   loc = asserts_for[SSA_NAME_VERSION (name)];
2317   while (loc)
2318     {
2319       fprintf (file, "\t");
2320       print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2321       fprintf (file, "\n\tBB #%d", loc->bb->index);
2322       if (loc->e)
2323 	{
2324 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2325 	           loc->e->dest->index);
2326 	  dump_edge_info (file, loc->e, dump_flags, 0);
2327 	}
2328       fprintf (file, "\n\tPREDICATE: ");
2329       print_generic_expr (file, loc->expr);
2330       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2331       print_generic_expr (file, loc->val);
2332       fprintf (file, "\n\n");
2333       loc = loc->next;
2334     }
2335 
2336   fprintf (file, "\n");
2337 }
2338 
2339 
2340 /* Dump all the registered assertions for NAME to stderr.  */
2341 
2342 DEBUG_FUNCTION void
2343 debug_asserts_for (tree name)
2344 {
2345   dump_asserts_for (stderr, name);
2346 }
2347 
2348 
2349 /* Dump all the registered assertions for all the names to FILE.  */
2350 
2351 void
2352 dump_all_asserts (FILE *file)
2353 {
2354   unsigned i;
2355   bitmap_iterator bi;
2356 
2357   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2358   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2359     dump_asserts_for (file, ssa_name (i));
2360   fprintf (file, "\n");
2361 }
2362 
2363 
2364 /* Dump all the registered assertions for all the names to stderr.  */
2365 
2366 DEBUG_FUNCTION void
2367 debug_all_asserts (void)
2368 {
2369   dump_all_asserts (stderr);
2370 }
2371 
2372 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
2373 
2374 static void
2375 add_assert_info (vec<assert_info> &asserts,
2376 		 tree name, tree expr, enum tree_code comp_code, tree val)
2377 {
2378   assert_info info;
2379   info.comp_code = comp_code;
2380   info.name = name;
2381   if (TREE_OVERFLOW_P (val))
2382     val = drop_tree_overflow (val);
2383   info.val = val;
2384   info.expr = expr;
2385   asserts.safe_push (info);
2386   if (dump_enabled_p ())
2387     dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
2388 		 "Adding assert for %T from %T %s %T\n",
2389 		 name, expr, op_symbol_code (comp_code), val);
2390 }
2391 
2392 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2393    'EXPR COMP_CODE VAL' at a location that dominates block BB or
2394    E->DEST, then register this location as a possible insertion point
2395    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2396 
2397    BB, E and SI provide the exact insertion point for the new
2398    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2399    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2400    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2401    must not be NULL.  */
2402 
2403 static void
2404 register_new_assert_for (tree name, tree expr,
2405 			 enum tree_code comp_code,
2406 			 tree val,
2407 			 basic_block bb,
2408 			 edge e,
2409 			 gimple_stmt_iterator si)
2410 {
2411   assert_locus *n, *loc, *last_loc;
2412   basic_block dest_bb;
2413 
2414   gcc_checking_assert (bb == NULL || e == NULL);
2415 
2416   if (e == NULL)
2417     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2418 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2419 
2420   /* Never build an assert comparing against an integer constant with
2421      TREE_OVERFLOW set.  This confuses our undefined overflow warning
2422      machinery.  */
2423   if (TREE_OVERFLOW_P (val))
2424     val = drop_tree_overflow (val);
2425 
2426   /* The new assertion A will be inserted at BB or E.  We need to
2427      determine if the new location is dominated by a previously
2428      registered location for A.  If we are doing an edge insertion,
2429      assume that A will be inserted at E->DEST.  Note that this is not
2430      necessarily true.
2431 
2432      If E is a critical edge, it will be split.  But even if E is
2433      split, the new block will dominate the same set of blocks that
2434      E->DEST dominates.
2435 
2436      The reverse, however, is not true: blocks dominated by E->DEST
2437      will not be dominated by the new block created to split E.  So,
2438      if the insertion location is on a critical edge, we will not use
2439      the new location to move another assertion previously registered
2440      at a block dominated by E->DEST.  */
2441   dest_bb = (bb) ? bb : e->dest;
2442 
2443   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2444      VAL at a block dominating DEST_BB, then we don't need to insert a new
2445      one.  Similarly, if the same assertion already exists at a block
2446      dominated by DEST_BB and the new location is not on a critical
2447      edge, then update the existing location for the assertion (i.e.,
2448      move the assertion up in the dominance tree).
2449 
2450      Note, this is implemented as a simple linked list because there
2451      should not be more than a handful of assertions registered per
2452      name.  If this becomes a performance problem, a table hashed by
2453      COMP_CODE and VAL could be implemented.  */
2454   loc = asserts_for[SSA_NAME_VERSION (name)];
2455   last_loc = loc;
2456   while (loc)
2457     {
2458       if (loc->comp_code == comp_code
2459 	  && (loc->val == val
2460 	      || operand_equal_p (loc->val, val, 0))
2461 	  && (loc->expr == expr
2462 	      || operand_equal_p (loc->expr, expr, 0)))
2463 	{
2464 	  /* If E is not a critical edge and DEST_BB
2465 	     dominates the existing location for the assertion, move
2466 	     the assertion up in the dominance tree by updating its
2467 	     location information.  */
2468 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
2469 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2470 	    {
2471 	      loc->bb = dest_bb;
2472 	      loc->e = e;
2473 	      loc->si = si;
2474 	      return;
2475 	    }
2476 	}
2477 
2478       /* Update the last node of the list and move to the next one.  */
2479       last_loc = loc;
2480       loc = loc->next;
2481     }
2482 
2483   /* If we didn't find an assertion already registered for
2484      NAME COMP_CODE VAL, add a new one at the end of the list of
2485      assertions associated with NAME.  */
2486   n = XNEW (struct assert_locus);
2487   n->bb = dest_bb;
2488   n->e = e;
2489   n->si = si;
2490   n->comp_code = comp_code;
2491   n->val = val;
2492   n->expr = expr;
2493   n->next = NULL;
2494 
2495   if (last_loc)
2496     last_loc->next = n;
2497   else
2498     asserts_for[SSA_NAME_VERSION (name)] = n;
2499 
2500   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2501 }
2502 
2503 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2504    Extract a suitable test code and value and store them into *CODE_P and
2505    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2506 
2507    If no extraction was possible, return FALSE, otherwise return TRUE.
2508 
2509    If INVERT is true, then we invert the result stored into *CODE_P.  */
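
/* E.g. (illustrative): for NAME x_1 and the predicate  5 < x_1,
   the result is normalized to  x_1 > 5; with INVERT it becomes
   x_1 <= 5.  */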
2510 
2511 static bool
2512 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2513 					 tree cond_op0, tree cond_op1,
2514 					 bool invert, enum tree_code *code_p,
2515 					 tree *val_p)
2516 {
2517   enum tree_code comp_code;
2518   tree val;
2519 
2520   /* We have a comparison of the form NAME COMP VAL
2521      or VAL COMP NAME.  */
2522   if (name == cond_op1)
2523     {
2524       /* If the predicate is of the form VAL COMP NAME, flip
2525 	 COMP around because we need to register NAME as the
2526 	 first operand in the predicate.  */
2527       comp_code = swap_tree_comparison (cond_code);
2528       val = cond_op0;
2529     }
2530   else if (name == cond_op0)
2531     {
2532       /* The comparison is of the form NAME COMP VAL, so the
2533 	 comparison code remains unchanged.  */
2534       comp_code = cond_code;
2535       val = cond_op1;
2536     }
2537   else
2538     gcc_unreachable ();
2539 
2540   /* Invert the comparison code as necessary.  */
2541   if (invert)
2542     comp_code = invert_tree_comparison (comp_code, 0);
2543 
2544   /* VRP only handles integral and pointer types.  */
2545   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2546       && ! POINTER_TYPE_P (TREE_TYPE (val)))
2547     return false;
2548 
2549   /* Do not register always-false predicates.
2550      FIXME:  this works around a limitation in fold() when dealing with
2551      enumerations.  Given 'enum { N1, N2 } x;', fold will not
2552      fold 'if (x > N2)' to 'if (0)'.  */
2553   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2554       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2555     {
2556       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2557       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2558 
2559       if (comp_code == GT_EXPR
2560 	  && (!max
2561 	      || compare_values (val, max) == 0))
2562 	return false;
2563 
2564       if (comp_code == LT_EXPR
2565 	  && (!min
2566 	      || compare_values (val, min) == 0))
2567 	return false;
2568     }
2569   *code_p = comp_code;
2570   *val_p = val;
2571   return true;
2572 }
2573 
2574 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES,
2575    if any (otherwise return VAL).  VAL and MASK must be zero-extended for
2576    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
2577    (to transform signed values into unsigned) and at the end xor
2578    SGNBIT back.  */
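
/* E.g. (illustrative): for PREC 4, VAL 5 (0b0101), MASK 0b1010 and
   SGNBIT 0, the result is 8 (0b1000), the smallest value above 5 that
   uses only bits of MASK.  */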
2579 
2580 static wide_int
2581 masked_increment (const wide_int &val_in, const wide_int &mask,
2582 		  const wide_int &sgnbit, unsigned int prec)
2583 {
2584   wide_int bit = wi::one (prec), res;
2585   unsigned int i;
2586 
2587   wide_int val = val_in ^ sgnbit;
2588   for (i = 0; i < prec; i++, bit += bit)
2589     {
2590       res = mask;
2591       if ((res & bit) == 0)
2592 	continue;
2593       res = bit - 1;
2594       res = wi::bit_and_not (val + bit, res);
2595       res &= mask;
2596       if (wi::gtu_p (res, val))
2597 	return res ^ sgnbit;
2598     }
2599   return val ^ sgnbit;
2600 }
2601 
2602 /* Helper for overflow_comparison_p
2603 
2604    OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
2605    OP1's defining statement to see if it ultimately has the form
2606    OP0 CODE (OP0 PLUS INTEGER_CST)
2607 
2608    If so, return TRUE indicating this is an overflow test and store into
2609    *NEW_CST an updated constant that can be used in a narrowed range test.
2610 
2611    REVERSED indicates if the comparison was originally:
2612 
2613    OP1 CODE' OP0.
2614 
2615    This affects how we build the updated constant.  */
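
/* E.g. (illustrative): for unsigned int OP0 and the test
   OP0 > OP0 + 1, the addition overflows only for OP0 == UINT_MAX, so
   *NEW_CST becomes UINT_MAX - 1 and the test narrows to
   OP0 > UINT_MAX - 1.  */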
2616 
2617 static bool
2618 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2619 		         bool follow_assert_exprs, bool reversed, tree *new_cst)
2620 {
2621   /* See if this is a relational operation between two SSA_NAMES with
2622      unsigned, overflow wrapping values.  If so, check it more deeply.  */
2623   if ((code == LT_EXPR || code == LE_EXPR
2624        || code == GE_EXPR || code == GT_EXPR)
2625       && TREE_CODE (op0) == SSA_NAME
2626       && TREE_CODE (op1) == SSA_NAME
2627       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2628       && TYPE_UNSIGNED (TREE_TYPE (op0))
2629       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2630     {
2631       gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2632 
2633       /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
2634       if (follow_assert_exprs)
2635 	{
2636 	  while (gimple_assign_single_p (op1_def)
2637 		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2638 	    {
2639 	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2640 	      if (TREE_CODE (op1) != SSA_NAME)
2641 		break;
2642 	      op1_def = SSA_NAME_DEF_STMT (op1);
2643 	    }
2644 	}
2645 
2646       /* Now look at the defining statement of OP1 to see if it adds
2647 	 or subtracts a nonzero constant from another operand.  */
2648       if (op1_def
2649 	  && is_gimple_assign (op1_def)
2650 	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2651 	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2652 	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2653 	{
2654 	  tree target = gimple_assign_rhs1 (op1_def);
2655 
2656 	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2657 	     for one where TARGET appears on the RHS.  */
2658 	  if (follow_assert_exprs)
2659 	    {
2660 	      /* Now see if that "other operand" is op0, following the chain
2661 		 of ASSERT_EXPRs if necessary.  */
2662 	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2663 	      while (op0 != target
2664 		     && gimple_assign_single_p (op0_def)
2665 		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2666 		{
2667 		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2668 		  if (TREE_CODE (op0) != SSA_NAME)
2669 		    break;
2670 		  op0_def = SSA_NAME_DEF_STMT (op0);
2671 		}
2672 	    }
2673 
2674 	  /* If we did not find our target SSA_NAME, then this is not
2675 	     an overflow test.  */
2676 	  if (op0 != target)
2677 	    return false;
2678 
2679 	  tree type = TREE_TYPE (op0);
2680 	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2681 	  tree inc = gimple_assign_rhs2 (op1_def);
2682 	  if (reversed)
2683 	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2684 	  else
2685 	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2686 	  return true;
2687 	}
2688     }
2689   return false;
2690 }
2691 
2692 /* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
2693    OP1's defining statement to see if it ultimately has the form
2694    OP0 CODE (OP0 PLUS INTEGER_CST)
2695 
2696    If so, return TRUE indicating this is an overflow test and store into
2697    *NEW_CST an updated constant that can be used in a narrowed range test.
2698 
2699    These statements are left as-is in the IL to facilitate discovery of
2700    {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
2701    the alternate range representation is often useful within VRP.  */
2702 
2703 bool
2704 overflow_comparison_p (tree_code code, tree name, tree val,
2705 		       bool use_equiv_p, tree *new_cst)
2706 {
2707   if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2708     return true;
2709   return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2710 				  use_equiv_p, true, new_cst);
2711 }
2712 
2713 
2714 /* Try to register an edge assertion for SSA name NAME on edge E for
2715    the condition COND contributing to the conditional jump pointed to by BSI.
2716    Invert the condition COND if INVERT is true.  */
2717 
2718 static void
2719 register_edge_assert_for_2 (tree name, edge e,
2720 			    enum tree_code cond_code,
2721 			    tree cond_op0, tree cond_op1, bool invert,
2722 			    vec<assert_info> &asserts)
2723 {
2724   tree val;
2725   enum tree_code comp_code;
2726 
2727   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2728 						cond_op0,
2729 						cond_op1,
2730 						invert, &comp_code, &val))
2731     return;
2732 
2733   /* Queue the assert.  */
2734   tree x;
2735   if (overflow_comparison_p (comp_code, name, val, false, &x))
2736     {
2737       enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2738 				 ? GT_EXPR : LE_EXPR);
2739       add_assert_info (asserts, name, name, new_code, x);
2740     }
2741   add_assert_info (asserts, name, name, comp_code, val);
2742 
2743   /* In the case of NAME <= CST and NAME being defined as
2744      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2745      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
2746      This catches range and anti-range tests.  */
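      /* E.g. (illustrative): given  name = name2 + 10  (unsigned) and
	 the test  name <= 15, we record the assert  name2 + 10 <= 15
	 for name2, an unsigned range test that VRP can later turn into
	 a range or anti-range for name2.  */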
2747   if ((comp_code == LE_EXPR
2748        || comp_code == GT_EXPR)
2749       && TREE_CODE (val) == INTEGER_CST
2750       && TYPE_UNSIGNED (TREE_TYPE (val)))
2751     {
2752       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2753       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2754 
2755       /* Extract CST2 from the (optional) addition.  */
2756       if (is_gimple_assign (def_stmt)
2757 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2758 	{
2759 	  name2 = gimple_assign_rhs1 (def_stmt);
2760 	  cst2 = gimple_assign_rhs2 (def_stmt);
2761 	  if (TREE_CODE (name2) == SSA_NAME
2762 	      && TREE_CODE (cst2) == INTEGER_CST)
2763 	    def_stmt = SSA_NAME_DEF_STMT (name2);
2764 	}
2765 
2766       /* Extract NAME2 from the (optional) sign-changing cast.  */
2767       if (gimple_assign_cast_p (def_stmt))
2768 	{
2769 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2770 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2771 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2772 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2773 	    name3 = gimple_assign_rhs1 (def_stmt);
2774 	}
2775 
2776       /* If name3 is used later, create an ASSERT_EXPR for it.  */
2777       if (name3 != NULL_TREE
2778       	  && TREE_CODE (name3) == SSA_NAME
2779 	  && (cst2 == NULL_TREE
2780 	      || TREE_CODE (cst2) == INTEGER_CST)
2781 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2782 	{
2783 	  tree tmp;
2784 
2785 	  /* Build an expression for the range test.  */
2786 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2787 	  if (cst2 != NULL_TREE)
2788 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2789 	  add_assert_info (asserts, name3, tmp, comp_code, val);
2790 	}
2791 
2792       /* If name2 is used later, create an ASSERT_EXPR for it.  */
2793       if (name2 != NULL_TREE
2794       	  && TREE_CODE (name2) == SSA_NAME
2795 	  && TREE_CODE (cst2) == INTEGER_CST
2796 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2797 	{
2798 	  tree tmp;
2799 
2800 	  /* Build an expression for the range test.  */
2801 	  tmp = name2;
2802 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
2803 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2804 	  if (cst2 != NULL_TREE)
2805 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2806 	  add_assert_info (asserts, name2, tmp, comp_code, val);
2807 	}
2808     }
2809 
2810   /* In the case of post-in/decrement tests like if (i++) ... and uses
2811      of the in/decremented value on the edge, the extra name we want to
2812      assert for is not on the def chain of the name compared.  Instead
2813      it is in the set of use stmts.
2814      Similar cases happen for conversions that were simplified through
2815      fold_{sign_changed,widened}_comparison.  */
2816   if ((comp_code == NE_EXPR
2817        || comp_code == EQ_EXPR)
2818       && TREE_CODE (val) == INTEGER_CST)
2819     {
2820       imm_use_iterator ui;
2821       gimple *use_stmt;
2822       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2823 	{
2824 	  if (!is_gimple_assign (use_stmt))
2825 	    continue;
2826 
2827 	  /* Only consider use-stmts whose block dominates E->SRC.  */
2828 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2829 	    continue;
2830 
2831 	  tree name2 = gimple_assign_lhs (use_stmt);
2832 	  if (TREE_CODE (name2) != SSA_NAME)
2833 	    continue;
2834 
2835 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
2836 	  tree cst;
2837 	  if (code == PLUS_EXPR
2838 	      || code == MINUS_EXPR)
2839 	    {
2840 	      cst = gimple_assign_rhs2 (use_stmt);
2841 	      if (TREE_CODE (cst) != INTEGER_CST)
2842 		continue;
2843 	      cst = int_const_binop (code, val, cst);
2844 	    }
2845 	  else if (CONVERT_EXPR_CODE_P (code))
2846 	    {
2847 	      /* For truncating conversions we cannot record
2848 		 an inequality.  */
2849 	      if (comp_code == NE_EXPR
2850 		  && (TYPE_PRECISION (TREE_TYPE (name2))
2851 		      < TYPE_PRECISION (TREE_TYPE (name))))
2852 		continue;
2853 	      cst = fold_convert (TREE_TYPE (name2), val);
2854 	    }
2855 	  else
2856 	    continue;
2857 
2858 	  if (TREE_OVERFLOW_P (cst))
2859 	    cst = drop_tree_overflow (cst);
2860 	  add_assert_info (asserts, name2, name2, comp_code, cst);
2861 	}
2862     }
2863 
2864   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2865       && TREE_CODE (val) == INTEGER_CST)
2866     {
2867       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2868       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2869       tree val2 = NULL_TREE;
2870       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2871       wide_int mask = wi::zero (prec);
2872       unsigned int nprec = prec;
2873       enum tree_code rhs_code = ERROR_MARK;
2874 
2875       if (is_gimple_assign (def_stmt))
2876 	rhs_code = gimple_assign_rhs_code (def_stmt);
2877 
2878       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2879          assert that A != CST1 -+ CST2.  */
2880       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2881 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2882 	{
2883 	  tree op0 = gimple_assign_rhs1 (def_stmt);
2884 	  tree op1 = gimple_assign_rhs2 (def_stmt);
2885 	  if (TREE_CODE (op0) == SSA_NAME
2886 	      && TREE_CODE (op1) == INTEGER_CST)
2887 	    {
2888 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2889 					   ? MINUS_EXPR : PLUS_EXPR);
2890 	      op1 = int_const_binop (reverse_op, val, op1);
2891 	      if (TREE_OVERFLOW (op1))
2892 		op1 = drop_tree_overflow (op1);
2893 	      add_assert_info (asserts, op0, op0, comp_code, op1);
2894 	    }
2895 	}
2896 
2897       /* Add asserts for NAME cmp CST and NAME being defined
2898 	 as NAME = (int) NAME2.  */
2899       if (!TYPE_UNSIGNED (TREE_TYPE (val))
2900 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
2901 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
2902 	  && gimple_assign_cast_p (def_stmt))
2903 	{
2904 	  name2 = gimple_assign_rhs1 (def_stmt);
2905 	  if (CONVERT_EXPR_CODE_P (rhs_code)
2906 	      && TREE_CODE (name2) == SSA_NAME
2907 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2908 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
2909 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
2910 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
2911 		  || !tree_int_cst_equal (val,
2912 					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
2913 	    {
2914 	      tree tmp, cst;
2915 	      enum tree_code new_comp_code = comp_code;
2916 
2917 	      cst = fold_convert (TREE_TYPE (name2),
2918 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
2919 	      /* Build an expression for the range test.  */
2920 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2921 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2922 				 fold_convert (TREE_TYPE (name2), val));
2923 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2924 		{
2925 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2926 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2927 				     build_int_cst (TREE_TYPE (name2), 1));
2928 		}
2929 	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2930 	    }
2931 	}
2932 
2933       /* Add asserts for NAME cmp CST and NAME being defined as
2934 	 NAME = NAME2 >> CST2.
2935 
2936 	 Extract CST2 from the right shift.  */
2937       if (rhs_code == RSHIFT_EXPR)
2938 	{
2939 	  name2 = gimple_assign_rhs1 (def_stmt);
2940 	  cst2 = gimple_assign_rhs2 (def_stmt);
2941 	  if (TREE_CODE (name2) == SSA_NAME
2942 	      && tree_fits_uhwi_p (cst2)
2943 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2944 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2945 	      && type_has_mode_precision_p (TREE_TYPE (val)))
2946 	    {
2947 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2948 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2949 	    }
2950 	}
2951       if (val2 != NULL_TREE
2952 	  && TREE_CODE (val2) == INTEGER_CST
2953 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2954 					    TREE_TYPE (val),
2955 					    val2, cst2), val))
2956 	{
2957 	  enum tree_code new_comp_code = comp_code;
2958 	  tree tmp, new_val;
2959 
2960 	  tmp = name2;
2961 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2962 	    {
2963 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2964 		{
2965 		  tree type = build_nonstandard_integer_type (prec, 1);
2966 		  tmp = build1 (NOP_EXPR, type, name2);
2967 		  val2 = fold_convert (type, val2);
2968 		}
2969 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2970 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2971 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2972 	    }
2973 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2974 	    {
2975 	      wide_int minval
2976 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2977 	      new_val = val2;
2978 	      if (minval == wi::to_wide (new_val))
2979 		new_val = NULL_TREE;
2980 	    }
2981 	  else
2982 	    {
2983 	      wide_int maxval
2984 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2985 	      mask |= wi::to_wide (val2);
2986 	      if (wi::eq_p (mask, maxval))
2987 		new_val = NULL_TREE;
2988 	      else
2989 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2990 	    }
2991 
2992 	  if (new_val)
2993 	    add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2994 	}
2995 
2996       /* If we have a conversion that doesn't change the value of the
2997          source, simply register the same assert for it.  */
2998       if (CONVERT_EXPR_CODE_P (rhs_code))
2999 	{
3000 	  wide_int rmin, rmax;
3001 	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
3002 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
3003 	      && TREE_CODE (rhs1) == SSA_NAME
3004 	      /* Make sure the relation preserves the upper/lower boundary of
3005 	         the range conservatively.  */
3006 	      && (comp_code == NE_EXPR
3007 		  || comp_code == EQ_EXPR
3008 		  || (TYPE_SIGN (TREE_TYPE (name))
3009 		      == TYPE_SIGN (TREE_TYPE (rhs1)))
3010 		  || ((comp_code == LE_EXPR
3011 		       || comp_code == LT_EXPR)
3012 		      && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
3013 		  || ((comp_code == GE_EXPR
3014 		       || comp_code == GT_EXPR)
3015 		      && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
3016 	      /* And the conversion does not alter the value we compare
3017 	         against, and all values in rhs1 can be represented in
3018 		 the converted-to type.  */
3019 	      && int_fits_type_p (val, TREE_TYPE (rhs1))
3020 	      && ((TYPE_PRECISION (TREE_TYPE (name))
3021 		   > TYPE_PRECISION (TREE_TYPE (rhs1)))
3022 		  || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
3023 		      && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
3024 		      && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
3025 	    add_assert_info (asserts, rhs1, rhs1,
3026 		 	     comp_code, fold_convert (TREE_TYPE (rhs1), val));
3027 	}
3028 
3029       /* Add asserts for NAME cmp CST and NAME being defined as
3030 	 NAME = NAME2 & CST2.
3031 
3032 	 Extract CST2 from the and.
3033 
3034 	 Also handle
3035 	 NAME = (unsigned) NAME2;
3036 	 casts where NAME's type is unsigned and has smaller precision
3037 	 than NAME2's type, as if it were NAME = NAME2 & MASK.  */
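      /* E.g. (illustrative): given  name = name2 & 0xfc  and the test
	 name == 0x10, the EQ_EXPR case below derives
	 name2 - 0x10 <= 0x03  (unsigned), i.e. name2 in [0x10, 0x13].  */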
3038       names[0] = NULL_TREE;
3039       names[1] = NULL_TREE;
3040       cst2 = NULL_TREE;
3041       if (rhs_code == BIT_AND_EXPR
3042 	  || (CONVERT_EXPR_CODE_P (rhs_code)
3043 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
3044 	      && TYPE_UNSIGNED (TREE_TYPE (val))
3045 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3046 		 > prec))
3047 	{
3048 	  name2 = gimple_assign_rhs1 (def_stmt);
3049 	  if (rhs_code == BIT_AND_EXPR)
3050 	    cst2 = gimple_assign_rhs2 (def_stmt);
3051 	  else
3052 	    {
3053 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3054 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
3055 	    }
3056 	  if (TREE_CODE (name2) == SSA_NAME
3057 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3058 	      && TREE_CODE (cst2) == INTEGER_CST
3059 	      && !integer_zerop (cst2)
3060 	      && (nprec > 1
3061 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
3062 	    {
3063 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3064 	      if (gimple_assign_cast_p (def_stmt2))
3065 		{
3066 		  names[1] = gimple_assign_rhs1 (def_stmt2);
3067 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3068 		      || TREE_CODE (names[1]) != SSA_NAME
3069 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3070 		      || (TYPE_PRECISION (TREE_TYPE (name2))
3071 			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
3072 		    names[1] = NULL_TREE;
3073 		}
3074 	      names[0] = name2;
3075 	    }
3076 	}
3077       if (names[0] || names[1])
3078 	{
3079 	  wide_int minv, maxv, valv, cst2v;
3080 	  wide_int tem, sgnbit;
3081 	  bool valid_p = false, valn, cst2n;
3082 	  enum tree_code ccode = comp_code;
3083 
3084 	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3085 	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3086 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3087 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3088 	  /* If CST2 doesn't have its most significant bit set,
3089 	     but VAL is negative, we have a comparison like
3090 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
3091 	  if (!cst2n && valn)
3092 	    ccode = ERROR_MARK;
3093 	  if (cst2n)
3094 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3095 	  else
3096 	    sgnbit = wi::zero (nprec);
3097 	  minv = valv & cst2v;
3098 	  switch (ccode)
3099 	    {
3100 	    case EQ_EXPR:
3101 	      /* Minimum unsigned value for equality is VAL & CST2
3102 		 (should be equal to VAL, otherwise we probably should
3103 		 have folded the comparison into false) and
3104 		 maximum unsigned value is VAL | ~CST2.  */
3105 	      maxv = valv | ~cst2v;
3106 	      valid_p = true;
3107 	      break;
3108 
3109 	    case NE_EXPR:
3110 	      tem = valv | ~cst2v;
3111 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
3112 	      if (valv == 0)
3113 		{
3114 		  cst2n = false;
3115 		  sgnbit = wi::zero (nprec);
3116 		  goto gt_expr;
3117 		}
3118 	      /* If (VAL | ~CST2) is all ones, handle it as
3119 		 (X & CST2) < VAL.  */
3120 	      if (tem == -1)
3121 		{
3122 		  cst2n = false;
3123 		  valn = false;
3124 		  sgnbit = wi::zero (nprec);
3125 		  goto lt_expr;
3126 		}
3127 	      if (!cst2n && wi::neg_p (cst2v))
3128 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3129 	      if (sgnbit != 0)
3130 		{
3131 		  if (valv == sgnbit)
3132 		    {
3133 		      cst2n = true;
3134 		      valn = true;
3135 		      goto gt_expr;
3136 		    }
3137 		  if (tem == wi::mask (nprec - 1, false, nprec))
3138 		    {
3139 		      cst2n = true;
3140 		      goto lt_expr;
3141 		    }
3142 		  if (!cst2n)
3143 		    sgnbit = wi::zero (nprec);
3144 		}
3145 	      break;
3146 
3147 	    case GE_EXPR:
3148 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3149 		 is VAL and maximum unsigned value is ~0.  For signed
3150 		 comparison, if CST2 doesn't have most significant bit
3151 		 set, handle it similarly.  If CST2 has MSB set,
3152 		 the minimum is the same, and maximum is ~0U/2.  */
3153 	      if (minv != valv)
3154 		{
3155 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3156 		     VAL.  */
3157 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
3158 		  if (minv == valv)
3159 		    break;
3160 		}
3161 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3162 	      valid_p = true;
3163 	      break;
3164 
3165 	    case GT_EXPR:
3166 	    gt_expr:
3167 	      /* Find out smallest MINV where MINV > VAL
3168 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
3169 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
3170 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
3171 	      if (minv == valv)
3172 		break;
3173 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3174 	      valid_p = true;
3175 	      break;
3176 
3177 	    case LE_EXPR:
3178 	      /* Minimum unsigned value for <= is 0 and maximum
3179 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3180 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3181 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3182 		 as maximum.
3183 		 For signed comparison, if CST2 doesn't have most
3184 		 significant bit set, handle it similarly.  If CST2 has
3185 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3186 	      if (minv == valv)
3187 		maxv = valv;
3188 	      else
3189 		{
3190 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3191 		  if (maxv == valv)
3192 		    break;
3193 		  maxv -= 1;
3194 		}
3195 	      maxv |= ~cst2v;
3196 	      minv = sgnbit;
3197 	      valid_p = true;
3198 	      break;
3199 
3200 	    case LT_EXPR:
3201 	    lt_expr:
3202 	      /* Minimum unsigned value for < is 0 and maximum
3203 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3204 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3205 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3206 		 as maximum.
3207 		 For signed comparison, if CST2 doesn't have most
3208 		 significant bit set, handle it similarly.  If CST2 has
3209 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3210 	      if (minv == valv)
3211 		{
3212 		  if (valv == sgnbit)
3213 		    break;
3214 		  maxv = valv;
3215 		}
3216 	      else
3217 		{
3218 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3219 		  if (maxv == valv)
3220 		    break;
3221 		}
3222 	      maxv -= 1;
3223 	      maxv |= ~cst2v;
3224 	      minv = sgnbit;
3225 	      valid_p = true;
3226 	      break;
3227 
3228 	    default:
3229 	      break;
3230 	    }
3231 	  if (valid_p
3232 	      && (maxv - minv) != -1)
3233 	    {
3234 	      tree tmp, new_val, type;
3235 	      int i;
3236 
3237 	      for (i = 0; i < 2; i++)
3238 		if (names[i])
3239 		  {
3240 		    wide_int maxv2 = maxv;
3241 		    tmp = names[i];
3242 		    type = TREE_TYPE (names[i]);
3243 		    if (!TYPE_UNSIGNED (type))
3244 		      {
3245 			type = build_nonstandard_integer_type (nprec, 1);
3246 			tmp = build1 (NOP_EXPR, type, names[i]);
3247 		      }
3248 		    if (minv != 0)
3249 		      {
3250 			tmp = build2 (PLUS_EXPR, type, tmp,
3251 				      wide_int_to_tree (type, -minv));
3252 			maxv2 = maxv - minv;
3253 		      }
3254 		    new_val = wide_int_to_tree (type, maxv2);
3255 		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3256 		  }
3257 	    }
3258 	}
3259     }
3260 }
3261 
3262 /* OP is an operand of a truth value expression which is known to have
3263    a particular value.  Register any asserts for OP and for any
3264    operands in OP's defining statement.
3265 
3266    If CODE is EQ_EXPR, then we want to register that OP is zero (false);
3267    if CODE is NE_EXPR, then we want to register that OP is nonzero (true).  */
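/* For example (illustrative GIMPLE with hypothetical SSA names), given

     b_2 = x_1 < 10;
     if (b_2 != 0)
       ...

   calling this function for b_2 with CODE == NE_EXPR registers the
   assertion b_2 != 0 and, because b_2 is defined by a comparison,
   also allows x_1 < 10 to be recorded on the true edge.  */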
3268 
3269 static void
3270 register_edge_assert_for_1 (tree op, enum tree_code code,
3271 			    edge e, vec<assert_info> &asserts)
3272 {
3273   gimple *op_def;
3274   tree val;
3275   enum tree_code rhs_code;
3276 
3277   /* We only care about SSA_NAMEs.  */
3278   if (TREE_CODE (op) != SSA_NAME)
3279     return;
3280 
3281   /* We know that OP will have a zero or nonzero value.  */
3282   val = build_int_cst (TREE_TYPE (op), 0);
3283   add_assert_info (asserts, op, op, code, val);
3284 
3285   /* Now look at how OP is set.  If it's set from a comparison,
3286      a truth operation or some bit operations, then we may be able
3287      to register information about the operands of that assignment.  */
3288   op_def = SSA_NAME_DEF_STMT (op);
3289   if (gimple_code (op_def) != GIMPLE_ASSIGN)
3290     return;
3291 
3292   rhs_code = gimple_assign_rhs_code (op_def);
3293 
3294   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3295     {
3296       bool invert = (code == EQ_EXPR);
3297       tree op0 = gimple_assign_rhs1 (op_def);
3298       tree op1 = gimple_assign_rhs2 (op_def);
3299 
3300       if (TREE_CODE (op0) == SSA_NAME)
3301         register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3302       if (TREE_CODE (op1) == SSA_NAME)
3303         register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3304     }
3305   else if ((code == NE_EXPR
3306 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3307 	   || (code == EQ_EXPR
3308 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3309     {
3310       /* Recurse on each operand.  */
3311       tree op0 = gimple_assign_rhs1 (op_def);
3312       tree op1 = gimple_assign_rhs2 (op_def);
3313       if (TREE_CODE (op0) == SSA_NAME
3314 	  && has_single_use (op0))
3315 	register_edge_assert_for_1 (op0, code, e, asserts);
3316       if (TREE_CODE (op1) == SSA_NAME
3317 	  && has_single_use (op1))
3318 	register_edge_assert_for_1 (op1, code, e, asserts);
3319     }
3320   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3321 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3322     {
3323       /* Recurse, flipping CODE.  */
3324       code = invert_tree_comparison (code, false);
3325       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3326     }
3327   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3328     {
3329       /* Recurse through the copy.  */
3330       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3331     }
3332   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3333     {
3334       /* Recurse through the type conversion, unless it is a narrowing
3335 	 conversion or conversion from non-integral type.  */
3336       tree rhs = gimple_assign_rhs1 (op_def);
3337       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3338 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
3339 	      <= TYPE_PRECISION (TREE_TYPE (op))))
3340 	register_edge_assert_for_1 (rhs, code, e, asserts);
3341     }
3342 }
3343 
3344 /* Check if comparison
3345      NAME COND_OP INTEGER_CST
3346    has the form
3347      (X & 11...100..0) COND_OP XX...X00...0
3348    Such comparison can yield assertions like
3349      X >= XX...X00...0
3350      X <= XX...X11...1
3351    in case of COND_OP being EQ_EXPR or
3352      X < XX...X00...0
3353      X > XX...X11...1
3354    in case of NE_EXPR.  */
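/* A worked example (illustrative values): for the test
   (x & 0xf8) == 0x10, MASK is 0xf8 and ~MASK is 0x07, so the
   comparison matches the pattern above and yields

     x >= 0x10
     x <= 0x17   (0x10 | 0x07)

   while (x & 0xf8) != 0x10 yields the disjunction
   x < 0x10 || x > 0x17 instead.  */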
3355 
3356 static bool
3357 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3358 		      tree *new_name, tree *low, enum tree_code *low_code,
3359 		      tree *high, enum tree_code *high_code)
3360 {
3361   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3362 
3363   if (!is_gimple_assign (def_stmt)
3364       || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3365     return false;
3366 
3367   tree t = gimple_assign_rhs1 (def_stmt);
3368   tree maskt = gimple_assign_rhs2 (def_stmt);
3369   if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3370     return false;
3371 
3372   wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3373   wide_int inv_mask = ~mask;
3374   /* Must have been removed by now so don't bother optimizing.  */
3375   if (mask == 0 || inv_mask == 0)
3376     return false;
3377 
3378   /* Assume VALT is INTEGER_CST.  */
3379   wi::tree_to_wide_ref val = wi::to_wide (valt);
3380 
3381   if ((inv_mask & (inv_mask + 1)) != 0
3382       || (val & mask) != val)
3383     return false;
3384 
3385   bool is_range = cond_code == EQ_EXPR;
3386 
3387   tree type = TREE_TYPE (t);
3388   wide_int min = wi::min_value (type),
3389     max = wi::max_value (type);
3390 
3391   if (is_range)
3392     {
3393       *low_code = val == min ? ERROR_MARK : GE_EXPR;
3394       *high_code = val == max ? ERROR_MARK : LE_EXPR;
3395     }
3396   else
3397     {
3398       /* We can still generate an assertion if one of the alternatives
3399 	 is known to always be false.  */
3400       if (val == min)
3401 	{
3402 	  *low_code = ERROR_MARK;
3403 	  *high_code = GT_EXPR;
3404 	}
3405       else if ((val | inv_mask) == max)
3406 	{
3407 	  *low_code = LT_EXPR;
3408 	  *high_code = ERROR_MARK;
3409 	}
3410       else
3411 	return false;
3412     }
3413 
3414   *new_name = t;
3415   *low = wide_int_to_tree (type, val);
3416   *high = wide_int_to_tree (type, val | inv_mask);
3417 
3418   return true;
3419 }
3420 
3421 /* Try to register an edge assertion for SSA name NAME on edge E for
3422    the condition COND_CODE (COND_OP0, COND_OP1) contributing to the
3423    conditional jump guarding E, appending the assertions to ASSERTS.  */
3424 
3425 void
3426 register_edge_assert_for (tree name, edge e,
3427 			  enum tree_code cond_code, tree cond_op0,
3428 			  tree cond_op1, vec<assert_info> &asserts)
3429 {
3430   tree val;
3431   enum tree_code comp_code;
3432   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3433 
3434   /* Do not attempt to infer anything for names that flow through
3435      abnormal edges.  */
3436   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3437     return;
3438 
3439   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3440 						cond_op0, cond_op1,
3441 						is_else_edge,
3442 						&comp_code, &val))
3443     return;
3444 
3445   /* Register ASSERT_EXPRs for name.  */
3446   register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3447 			      cond_op1, is_else_edge, asserts);
3448 
3449 
3450   /* If COND is effectively an equality test of an SSA_NAME against
3451      the value zero or one, then we may be able to assert values
3452      for SSA_NAMEs which flow into COND.  */
3453 
3454   /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3455      statement of NAME we can assert both operands of the BIT_AND_EXPR
3456      have nonzero value.  */
3457   if (((comp_code == EQ_EXPR && integer_onep (val))
3458        || (comp_code == NE_EXPR && integer_zerop (val))))
3459     {
3460       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3461 
3462       if (is_gimple_assign (def_stmt)
3463 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3464 	{
3465 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3466 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3467 	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3468 	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3469 	}
3470     }
3471 
3472   /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3473      statement of NAME we can assert both operands of the BIT_IOR_EXPR
3474      have zero value.  */
3475   if (((comp_code == EQ_EXPR && integer_zerop (val))
3476        || (comp_code == NE_EXPR && integer_onep (val))))
3477     {
3478       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3479 
3480       /* For BIT_IOR_EXPR both operands necessarily have a zero value
3481 	 only if NAME == 0, or if the type precision is one.  */
3482       if (is_gimple_assign (def_stmt)
3483 	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3484 	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3485 	          || comp_code == EQ_EXPR)))
3486 	{
3487 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3488 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3489 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3490 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3491 	}
3492     }
3493 
3494   /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
3495   if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3496       && TREE_CODE (val) == INTEGER_CST)
3497     {
3498       enum tree_code low_code, high_code;
3499       tree low, high;
3500       if (is_masked_range_test (name, val, comp_code, &name, &low,
3501 				&low_code, &high, &high_code))
3502 	{
3503 	  if (low_code != ERROR_MARK)
3504 	    register_edge_assert_for_2 (name, e, low_code, name,
3505 					low, /*invert*/false, asserts);
3506 	  if (high_code != ERROR_MARK)
3507 	    register_edge_assert_for_2 (name, e, high_code, name,
3508 					high, /*invert*/false, asserts);
3509 	}
3510     }
3511 }
3512 
3513 /* Finish the ASSERTS found for E and register them at GSI.  */
3514 
3515 static void
3516 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3517 				 vec<assert_info> &asserts)
3518 {
3519   for (unsigned i = 0; i < asserts.length (); ++i)
3520     /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3521        reachable from E.  */
3522     if (live_on_edge (e, asserts[i].name))
3523       register_new_assert_for (asserts[i].name, asserts[i].expr,
3524 			       asserts[i].comp_code, asserts[i].val,
3525 			       NULL, e, gsi);
3526 }
3527 
3528 
3529 
3530 /* Determine whether the outgoing edges of BB should receive an
3531    ASSERT_EXPR for each of the operands of BB's LAST statement.
3532    The last statement of BB must be a COND_EXPR.
3533 
3534    If any of the sub-graphs rooted at BB have an interesting use of
3535    the predicate operands, an assert location node is added to the
3536    list of assertions for the corresponding operands.  */
3537 
3538 static void
3539 find_conditional_asserts (basic_block bb, gcond *last)
3540 {
3541   gimple_stmt_iterator bsi;
3542   tree op;
3543   edge_iterator ei;
3544   edge e;
3545   ssa_op_iter iter;
3546 
3547   bsi = gsi_for_stmt (last);
3548 
3549   /* Look for uses of the operands in each of the sub-graphs
3550      rooted at BB.  We need to check each of the outgoing edges
3551      separately, so that we know what kind of ASSERT_EXPR to
3552      insert.  */
3553   FOR_EACH_EDGE (e, ei, bb->succs)
3554     {
3555       if (e->dest == bb)
3556 	continue;
3557 
3558       /* Register the necessary assertions for each operand in the
3559 	 conditional predicate.  */
3560       auto_vec<assert_info, 8> asserts;
3561       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3562 	register_edge_assert_for (op, e,
3563 				  gimple_cond_code (last),
3564 				  gimple_cond_lhs (last),
3565 				  gimple_cond_rhs (last), asserts);
3566       finish_register_edge_assert_for (e, bsi, asserts);
3567     }
3568 }
3569 
3570 struct case_info
3571 {
3572   tree expr;
3573   basic_block bb;
3574 };
3575 
3576 /* Compare two case labels, sorting first by the destination bb index
3577    and then by the case value.  */
3578 
3579 static int
3580 compare_case_labels (const void *p1, const void *p2)
3581 {
3582   const struct case_info *ci1 = (const struct case_info *) p1;
3583   const struct case_info *ci2 = (const struct case_info *) p2;
3584   int idx1 = ci1->bb->index;
3585   int idx2 = ci2->bb->index;
3586 
3587   if (idx1 < idx2)
3588     return -1;
3589   else if (idx1 == idx2)
3590     {
3591       /* Make sure the default label is first in a group.  */
3592       if (!CASE_LOW (ci1->expr))
3593 	return -1;
3594       else if (!CASE_LOW (ci2->expr))
3595 	return 1;
3596       else
3597 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
3598 				     CASE_LOW (ci2->expr));
3599     }
3600   else
3601     return 1;
3602 }
3603 
3604 /* Determine whether the outgoing edges of BB should receive an
3605    ASSERT_EXPR for each of the operands of BB's LAST statement.
3606    The last statement of BB must be a SWITCH_EXPR.
3607 
3608    If any of the sub-graphs rooted at BB have an interesting use of
3609    the predicate operands, an assert location node is added to the
3610    list of assertions for the corresponding operands.  */
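/* For instance (illustrative source, not from any testcase): for

     switch (i_1) { case 3 ... 5: ... }

   the edge to the case label receives the assertions i_1 >= 3 and
   i_1 <= 5, registered via GE_EXPR and LE_EXPR below.  */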
3611 
3612 static void
3613 find_switch_asserts (basic_block bb, gswitch *last)
3614 {
3615   gimple_stmt_iterator bsi;
3616   tree op;
3617   edge e;
3618   struct case_info *ci;
3619   size_t n = gimple_switch_num_labels (last);
3620 #if GCC_VERSION >= 4000
3621   unsigned int idx;
3622 #else
3623   /* Work around GCC 3.4 bug (PR 37086).  */
3624   volatile unsigned int idx;
3625 #endif
3626 
3627   bsi = gsi_for_stmt (last);
3628   op = gimple_switch_index (last);
3629   if (TREE_CODE (op) != SSA_NAME)
3630     return;
3631 
3632   /* Build a vector of case labels sorted by destination label.  */
3633   ci = XNEWVEC (struct case_info, n);
3634   for (idx = 0; idx < n; ++idx)
3635     {
3636       ci[idx].expr = gimple_switch_label (last, idx);
3637       ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3638     }
3639   edge default_edge = find_edge (bb, ci[0].bb);
3640   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3641 
3642   for (idx = 0; idx < n; ++idx)
3643     {
3644       tree min, max;
3645       tree cl = ci[idx].expr;
3646       basic_block cbb = ci[idx].bb;
3647 
3648       min = CASE_LOW (cl);
3649       max = CASE_HIGH (cl);
3650 
3651       /* If there are multiple case labels with the same destination
3652 	 we need to combine them to a single value range for the edge.  */
3653       if (idx + 1 < n && cbb == ci[idx + 1].bb)
3654 	{
3655 	  /* Skip labels until the last of the group.  */
3656 	  do {
3657 	    ++idx;
3658 	  } while (idx < n && cbb == ci[idx].bb);
3659 	  --idx;
3660 
3661 	  /* Pick up the maximum of the case label range.  */
3662 	  if (CASE_HIGH (ci[idx].expr))
3663 	    max = CASE_HIGH (ci[idx].expr);
3664 	  else
3665 	    max = CASE_LOW (ci[idx].expr);
3666 	}
3667 
3668       /* Can't extract a useful assertion out of a range that includes the
3669 	 default label.  */
3670       if (min == NULL_TREE)
3671 	continue;
3672 
3673       /* Find the edge to register the assert expr on.  */
3674       e = find_edge (bb, cbb);
3675 
3676       /* Register the necessary assertions for the operand in the
3677 	 SWITCH_EXPR.  */
3678       auto_vec<assert_info, 8> asserts;
3679       register_edge_assert_for (op, e,
3680 				max ? GE_EXPR : EQ_EXPR,
3681 				op, fold_convert (TREE_TYPE (op), min),
3682 				asserts);
3683       if (max)
3684 	register_edge_assert_for (op, e, LE_EXPR, op,
3685 				  fold_convert (TREE_TYPE (op), max),
3686 				  asserts);
3687       finish_register_edge_assert_for (e, bsi, asserts);
3688     }
3689 
3690   XDELETEVEC (ci);
3691 
3692   if (!live_on_edge (default_edge, op))
3693     return;
3694 
3695   /* Now register along the default edge assertions that correspond to the
3696      anti-range of each case label.  */
3697   int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3698   if (insertion_limit == 0)
3699     return;
3700 
3701   /* We can't do this if the default case shares a label with another case.  */
3702   tree default_cl = gimple_switch_default_label (last);
3703   for (idx = 1; idx < n; idx++)
3704     {
3705       tree min, max;
3706       tree cl = gimple_switch_label (last, idx);
3707       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3708 	continue;
3709 
3710       min = CASE_LOW (cl);
3711       max = CASE_HIGH (cl);
3712 
3713       /* Combine contiguous case ranges to reduce the number of assertions
3714 	 to insert.  */
3715       for (idx = idx + 1; idx < n; idx++)
3716 	{
3717 	  tree next_min, next_max;
3718 	  tree next_cl = gimple_switch_label (last, idx);
3719 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3720 	    break;
3721 
3722 	  next_min = CASE_LOW (next_cl);
3723 	  next_max = CASE_HIGH (next_cl);
3724 
3725 	  wide_int difference = (wi::to_wide (next_min)
3726 				 - wi::to_wide (max ? max : min));
3727 	  if (wi::eq_p (difference, 1))
3728 	    max = next_max ? next_max : next_min;
3729 	  else
3730 	    break;
3731 	}
3732       idx--;
3733 
3734       if (max == NULL_TREE)
3735 	{
3736 	  /* Register the assertion OP != MIN.  */
3737 	  auto_vec<assert_info, 8> asserts;
3738 	  min = fold_convert (TREE_TYPE (op), min);
3739 	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3740 				    asserts);
3741 	  finish_register_edge_assert_for (default_edge, bsi, asserts);
3742 	}
3743       else
3744 	{
3745 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3746 	     which will give OP the anti-range ~[MIN,MAX].  */
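	  /* E.g. (hypothetical case values) with MIN == 3 and MAX == 5
	     this registers (unsigned)OP - 3 > 2 on the default edge,
	     which excludes OP in [3, 5] there.  */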
3747 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3748 	  min = fold_convert (TREE_TYPE (uop), min);
3749 	  max = fold_convert (TREE_TYPE (uop), max);
3750 
3751 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3752 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
3753 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
3754 				   NULL, default_edge, bsi);
3755 	}
3756 
3757       if (--insertion_limit == 0)
3758 	break;
3759     }
3760 }
3761 
3762 
3763 /* Traverse all the statements in block BB looking for statements that
3764    may generate useful assertions for the SSA names in their operands.
3765    If a statement produces a useful assertion A for name N_i, then the
3766    list of assertions already generated for N_i is scanned to
3767    determine if A is actually needed.
3768 
3769    If N_i already had the assertion A at a location dominating the
3770    current location, then nothing needs to be done.  Otherwise, the
3771    new location for A is recorded instead.
3772 
3773    1- For every statement S in BB, all the variables used by S are
3774       added to bitmap FOUND_IN_SUBGRAPH.
3775 
3776    2- If statement S uses an operand N in a way that exposes a known
3777       value range for N, then if N was not already generated by an
3778       ASSERT_EXPR, create a new assert location for N.  For instance,
3779       if N is a pointer and the statement dereferences it, we can
3780       assume that N is not NULL.
3781 
3782    3- COND_EXPRs are a special case of #2.  We can derive range
3783       information from the predicate but need to insert different
3784       ASSERT_EXPRs for each of the sub-graphs rooted at the
3785       conditional block.  If the last statement of BB is a conditional
3786       expression of the form 'X op Y', then
3787 
3788       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3789 
3790       b) If the conditional is the only entry point to the sub-graph
3791 	 corresponding to the THEN_CLAUSE, recurse into it.  On
3792 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3793 	 an ASSERT_EXPR is added for the corresponding variable.
3794 
3795       c) Repeat step (b) on the ELSE_CLAUSE.
3796 
3797       d) Mark X and Y in FOUND_IN_SUBGRAPH.
3798 
3799       For instance,
3800 
3801 	    if (a == 9)
3802 	      b = a;
3803 	    else
3804 	      b = c + 1;
3805 
3806       In this case, an assertion on the THEN clause is useful to
3807       determine that 'a' is always 9 on that edge.  However, an assertion
3808       on the ELSE clause would be unnecessary.
3809 
3810    4- If BB does not end in a conditional expression, then we recurse
3811       into BB's dominator children.
3812 
3813    At the end of the recursive traversal, every SSA name will have a
3814    list of locations where ASSERT_EXPRs should be added.  When a new
3815    location for name N is found, it is registered by calling
3816    register_new_assert_for.  That function keeps track of all the
3817    registered assertions to prevent adding unnecessary assertions.
3818    For instance, if a pointer P_4 is dereferenced more than once in a
3819    dominator tree, only the location dominating all the dereference of
3820    P_4 will receive an ASSERT_EXPR.  */
3821 
3822 static void
3823 find_assert_locations_1 (basic_block bb, sbitmap live)
3824 {
3825   gimple *last;
3826 
3827   last = last_stmt (bb);
3828 
3829   /* If BB's last statement is a conditional statement involving integer
3830      operands, determine if we need to add ASSERT_EXPRs.  */
3831   if (last
3832       && gimple_code (last) == GIMPLE_COND
3833       && !fp_predicate (last)
3834       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3835     find_conditional_asserts (bb, as_a <gcond *> (last));
3836 
3837   /* If BB's last statement is a switch statement involving integer
3838      operands, determine if we need to add ASSERT_EXPRs.  */
3839   if (last
3840       && gimple_code (last) == GIMPLE_SWITCH
3841       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3842     find_switch_asserts (bb, as_a <gswitch *> (last));
3843 
3844   /* Traverse all the statements in BB marking used names and looking
3845      for statements that may infer assertions for their used operands.  */
3846   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3847        gsi_prev (&si))
3848     {
3849       gimple *stmt;
3850       tree op;
3851       ssa_op_iter i;
3852 
3853       stmt = gsi_stmt (si);
3854 
3855       if (is_gimple_debug (stmt))
3856 	continue;
3857 
3858       /* See if we can derive an assertion for any of STMT's operands.  */
3859       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3860 	{
3861 	  tree value;
3862 	  enum tree_code comp_code;
3863 
3864 	  /* If op is not live beyond this stmt, do not bother to insert
3865 	     asserts for it.  */
3866 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3867 	    continue;
3868 
3869 	  /* If OP is used in such a way that we can infer a value
3870 	     range for it, and we don't find a previous assertion for
3871 	     it, create a new assertion location node for OP.  */
3872 	  if (infer_value_range (stmt, op, &comp_code, &value))
3873 	    {
3874 	      /* If we are able to infer a nonzero value range for OP,
3875 		 then walk backwards through the use-def chain to see if OP
3876 		 was set via a typecast.
3877 
3878 		 If so, then we can also infer a nonzero value range
3879 		 for the operand of the NOP_EXPR.  */
3880 	      if (comp_code == NE_EXPR && integer_zerop (value))
3881 		{
3882 		  tree t = op;
3883 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3884 
3885 		  while (is_gimple_assign (def_stmt)
3886 			 && CONVERT_EXPR_CODE_P
3887 			     (gimple_assign_rhs_code (def_stmt))
3888 			 && TREE_CODE
3889 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3890 			 && POINTER_TYPE_P
3891 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3892 		    {
3893 		      t = gimple_assign_rhs1 (def_stmt);
3894 		      def_stmt = SSA_NAME_DEF_STMT (t);
3895 
3896 		      /* Note we want to register the assert for the
3897 			 operand of the NOP_EXPR after SI, not after the
3898 			 conversion.  */
3899 		      if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3900 			register_new_assert_for (t, t, comp_code, value,
3901 						 bb, NULL, si);
3902 		    }
3903 		}
3904 
3905 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3906 	    }
3907 	}
3908 
3909       /* Update live.  */
3910       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3911 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
3912       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3913 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3914     }
3915 
3916   /* Traverse all PHI nodes in BB, updating live.  */
3917   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3918        gsi_next (&si))
3919     {
3920       use_operand_p arg_p;
3921       ssa_op_iter i;
3922       gphi *phi = si.phi ();
3923       tree res = gimple_phi_result (phi);
3924 
3925       if (virtual_operand_p (res))
3926 	continue;
3927 
3928       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3929 	{
3930 	  tree arg = USE_FROM_PTR (arg_p);
3931 	  if (TREE_CODE (arg) == SSA_NAME)
3932 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3933 	}
3934 
3935       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3936     }
3937 }
3938 
3939 /* Do an RPO walk over the function computing SSA name liveness
3940    on-the-fly and deciding on assert expressions to insert.  */
3941 
3942 static void
3943 find_assert_locations (void)
3944 {
3945   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3946   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3947   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3948   int rpo_cnt, i;
3949 
3950   live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3951   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3952   for (i = 0; i < rpo_cnt; ++i)
3953     bb_rpo[rpo[i]] = i;
3954 
3955   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
3956      the order in which we compute liveness and insert asserts, we would
3957      otherwise fail to insert asserts into the loop latch.  */
3958   loop_p loop;
3959   FOR_EACH_LOOP (loop, 0)
3960     {
3961       i = loop->latch->index;
3962       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3963       for (gphi_iterator gsi = gsi_start_phis (loop->header);
3964 	   !gsi_end_p (gsi); gsi_next (&gsi))
3965 	{
3966 	  gphi *phi = gsi.phi ();
3967 	  if (virtual_operand_p (gimple_phi_result (phi)))
3968 	    continue;
3969 	  tree arg = gimple_phi_arg_def (phi, j);
3970 	  if (TREE_CODE (arg) == SSA_NAME)
3971 	    {
3972 	      if (live[i] == NULL)
3973 		{
3974 		  live[i] = sbitmap_alloc (num_ssa_names);
3975 		  bitmap_clear (live[i]);
3976 		}
3977 	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3978 	    }
3979 	}
3980     }
3981 
3982   for (i = rpo_cnt - 1; i >= 0; --i)
3983     {
3984       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3985       edge e;
3986       edge_iterator ei;
3987 
3988       if (!live[rpo[i]])
3989 	{
3990 	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3991 	  bitmap_clear (live[rpo[i]]);
3992 	}
3993 
3994       /* Process BB and update the live information with uses in
3995          this block.  */
3996       find_assert_locations_1 (bb, live[rpo[i]]);
3997 
3998       /* Merge liveness into the predecessor blocks and free it.  */
3999       if (!bitmap_empty_p (live[rpo[i]]))
4000 	{
4001 	  int pred_rpo = i;
4002 	  FOR_EACH_EDGE (e, ei, bb->preds)
4003 	    {
4004 	      int pred = e->src->index;
4005 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
4006 		continue;
4007 
4008 	      if (!live[pred])
4009 		{
4010 		  live[pred] = sbitmap_alloc (num_ssa_names);
4011 		  bitmap_clear (live[pred]);
4012 		}
4013 	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4014 
4015 	      if (bb_rpo[pred] < pred_rpo)
4016 		pred_rpo = bb_rpo[pred];
4017 	    }
4018 
4019 	  /* Record the RPO number of the last visited block that needs
4020 	     live information from this block.  */
4021 	  last_rpo[rpo[i]] = pred_rpo;
4022 	}
4023       else
4024 	{
4025 	  sbitmap_free (live[rpo[i]]);
4026 	  live[rpo[i]] = NULL;
4027 	}
4028 
4029       /* We can free all successors' live bitmaps if all their
4030          predecessors have been visited already.  */
4031       FOR_EACH_EDGE (e, ei, bb->succs)
4032 	if (last_rpo[e->dest->index] == i
4033 	    && live[e->dest->index])
4034 	  {
4035 	    sbitmap_free (live[e->dest->index]);
4036 	    live[e->dest->index] = NULL;
4037 	  }
4038     }
4039 
4040   XDELETEVEC (rpo);
4041   XDELETEVEC (bb_rpo);
4042   XDELETEVEC (last_rpo);
4043   for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4044     if (live[i])
4045       sbitmap_free (live[i]);
4046   XDELETEVEC (live);
4047 }
4048 
4049 /* Create an ASSERT_EXPR for NAME and insert it in the location
4050    indicated by LOC.  Return true if we made any edge insertions.  */
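/* For instance (hypothetical SSA names), for NAME x_3 and a LOC
   recording the predicate x_3 > 10, the statement built below is

     x_7 = ASSERT_EXPR <x_3, x_3 > 10>;

   and it is inserted either on the edge LOC->E or next to LOC->SI.  */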
4051 
4052 static bool
4053 process_assert_insertions_for (tree name, assert_locus *loc)
4054 {
4055   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
4056   gimple *stmt;
4057   tree cond;
4058   gimple *assert_stmt;
4059   edge_iterator ei;
4060   edge e;
4061 
4062   /* If we have X <=> X do not insert an assert expr for that.  */
4063   if (loc->expr == loc->val)
4064     return false;
4065 
4066   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4067   assert_stmt = build_assert_expr_for (cond, name);
4068   if (loc->e)
4069     {
4070       /* We have been asked to insert the assertion on an edge.  This
4071 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
4072       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4073 			   || (gimple_code (gsi_stmt (loc->si))
4074 			       == GIMPLE_SWITCH));
4075 
4076       gsi_insert_on_edge (loc->e, assert_stmt);
4077       return true;
4078     }
4079 
4080   /* If the stmt iterator points at the end then this is an insertion
4081      at the beginning of a block.  */
4082   if (gsi_end_p (loc->si))
4083     {
4084       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4085       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4086       return false;
4087 
4088     }
4089   /* Otherwise, we can insert right after LOC->SI, provided the
4090      statement is not the last statement in the block.  */
4091   stmt = gsi_stmt (loc->si);
4092   if (!stmt_ends_bb_p (stmt))
4093     {
4094       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4095       return false;
4096     }
4097 
4098   /* If STMT must be the last statement in BB, we can only insert new
4099      assertions on the non-abnormal edge out of BB.  Note that since
4100      STMT is not control flow, there may only be one non-abnormal/eh edge
4101      out of BB.  */
4102   FOR_EACH_EDGE (e, ei, loc->bb->succs)
4103     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4104       {
4105 	gsi_insert_on_edge (e, assert_stmt);
4106 	return true;
4107       }
4108 
4109   gcc_unreachable ();
4110 }
4111 
4112 /* Qsort helper for sorting assert locations.  If STABLE is true, don't
4113    use iterative_hash_expr because it can be unstable for -fcompare-debug;
4114    on the other hand, some pointers might be NULL.  */
4115 
4116 template <bool stable>
4117 static int
4118 compare_assert_loc (const void *pa, const void *pb)
4119 {
4120   assert_locus * const a = *(assert_locus * const *)pa;
4121   assert_locus * const b = *(assert_locus * const *)pb;
4122 
4123   /* If stable, some asserts might be optimized away already, sort
4124      them last.  */
4125   if (stable)
4126     {
4127       if (a == NULL)
4128 	return b != NULL;
4129       else if (b == NULL)
4130 	return -1;
4131     }
4132 
4133   if (a->e == NULL && b->e != NULL)
4134     return 1;
4135   else if (a->e != NULL && b->e == NULL)
4136     return -1;
4137 
4138   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4139      no need to test both a->e and b->e.  */
4140 
4141   /* Sort by destination index.  */
4142   if (a->e == NULL)
4143     ;
4144   else if (a->e->dest->index > b->e->dest->index)
4145     return 1;
4146   else if (a->e->dest->index < b->e->dest->index)
4147     return -1;
4148 
4149   /* Sort by comp_code.  */
4150   if (a->comp_code > b->comp_code)
4151     return 1;
4152   else if (a->comp_code < b->comp_code)
4153     return -1;
4154 
4155   hashval_t ha, hb;
4156 
4157   /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4158      uses DECL_UID of the VAR_DECL, so sorting might differ between
4159      -g and -g0.  When doing the removal of redundant assert exprs
4160      and commonization to successors, this does not matter, but the
4161      final sort needs to be stable.  */
4162   if (stable)
4163     {
4164       ha = 0;
4165       hb = 0;
4166     }
4167   else
4168     {
4169       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4170       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4171     }
4172 
4173   /* Break the tie using hashing and source/bb index.  */
4174   if (ha == hb)
4175     return (a->e != NULL
4176 	    ? a->e->src->index - b->e->src->index
4177 	    : a->bb->index - b->bb->index);
4178   return ha > hb ? 1 : -1;
4179 }
4180 
4181 /* Process all the insertions registered for every name N_i recorded
4182    in NEED_ASSERT_FOR.  The list of assertions to be inserted is
4183    found in ASSERTS_FOR[i].  */
4184 
4185 static void
4186 process_assert_insertions (void)
4187 {
4188   unsigned i;
4189   bitmap_iterator bi;
4190   bool update_edges_p = false;
4191   int num_asserts = 0;
4192 
4193   if (dump_file && (dump_flags & TDF_DETAILS))
4194     dump_all_asserts (dump_file);
4195 
4196   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4197     {
4198       assert_locus *loc = asserts_for[i];
4199       gcc_assert (loc);
4200 
4201       auto_vec<assert_locus *, 16> asserts;
4202       for (; loc; loc = loc->next)
4203 	asserts.safe_push (loc);
4204       asserts.qsort (compare_assert_loc<false>);
4205 
4206       /* Push down common asserts to successors and remove redundant ones.  */
4207       unsigned ecnt = 0;
4208       assert_locus *common = NULL;
4209       unsigned commonj = 0;
4210       for (unsigned j = 0; j < asserts.length (); ++j)
4211 	{
4212 	  loc = asserts[j];
4213 	  if (! loc->e)
4214 	    common = NULL;
4215 	  else if (! common
4216 		   || loc->e->dest != common->e->dest
4217 		   || loc->comp_code != common->comp_code
4218 		   || ! operand_equal_p (loc->val, common->val, 0)
4219 		   || ! operand_equal_p (loc->expr, common->expr, 0))
4220 	    {
4221 	      commonj = j;
4222 	      common = loc;
4223 	      ecnt = 1;
4224 	    }
4225 	  else if (loc->e == asserts[j-1]->e)
4226 	    {
4227 	      /* Remove duplicate asserts.  */
4228 	      if (commonj == j - 1)
4229 		{
4230 		  commonj = j;
4231 		  common = loc;
4232 		}
4233 	      free (asserts[j-1]);
4234 	      asserts[j-1] = NULL;
4235 	    }
4236 	  else
4237 	    {
4238 	      ecnt++;
4239 	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4240 		{
4241 		  /* We have the same assertion on all incoming edges of a BB.
4242 		     Insert it at the beginning of that block.  */
4243 		  loc->bb = loc->e->dest;
4244 		  loc->e = NULL;
4245 		  loc->si = gsi_none ();
4246 		  common = NULL;
4247 		  /* Clear asserts commoned.  */
4248 		  for (; commonj != j; ++commonj)
4249 		    if (asserts[commonj])
4250 		      {
4251 			free (asserts[commonj]);
4252 			asserts[commonj] = NULL;
4253 		      }
4254 		}
4255 	    }
4256 	}
4257 
4258       /* The asserts vector sorting above might be unstable for
4259 	 -fcompare-debug, sort again to ensure a stable sort.  */
4260       asserts.qsort (compare_assert_loc<true>);
4261       for (unsigned j = 0; j < asserts.length (); ++j)
4262 	{
4263 	  loc = asserts[j];
4264 	  if (! loc)
4265 	    break;
4266 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4267 	  num_asserts++;
4268 	  free (loc);
4269 	}
4270     }
4271 
4272   if (update_edges_p)
4273     gsi_commit_edge_inserts ();
4274 
4275   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4276 			    num_asserts);
4277 }
4278 
4279 
4280 /* Traverse the flowgraph looking for conditional jumps to insert range
4281    expressions.  These range expressions are meant to provide information
4282    to optimizations that need to reason in terms of value ranges.  They
4283    will not be expanded into RTL.  For instance, given:
4284 
4285    x = ...
4286    y = ...
4287    if (x < y)
4288      y = x - 2;
4289    else
4290      x = y + 3;
4291 
4292    this pass will transform the code into:
4293 
4294    x = ...
4295    y = ...
4296    if (x < y)
4297     {
4298       x = ASSERT_EXPR <x, x < y>
4299       y = x - 2
4300     }
4301    else
4302     {
4303       y = ASSERT_EXPR <y, x >= y>
4304       x = y + 3
4305     }
4306 
4307    The idea is that once copy and constant propagation have run, other
4308    optimizations will be able to determine what ranges of values can 'x'
4309    take in different paths of the code, simply by checking the reaching
4310    definition of 'x'.  */
4311 
4312 static void
4313 insert_range_assertions (void)
4314 {
4315   need_assert_for = BITMAP_ALLOC (NULL);
4316   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4317 
4318   calculate_dominance_info (CDI_DOMINATORS);
4319 
4320   find_assert_locations ();
4321   if (!bitmap_empty_p (need_assert_for))
4322     {
4323       process_assert_insertions ();
4324       update_ssa (TODO_update_ssa_no_phi);
4325     }
4326 
4327   if (dump_file && (dump_flags & TDF_DETAILS))
4328     {
4329       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4330       dump_function_to_file (current_function_decl, dump_file, dump_flags);
4331     }
4332 
4333   free (asserts_for);
4334   BITMAP_FREE (need_assert_for);
4335 }
4336 
4337 class vrp_prop : public ssa_propagation_engine
4338 {
4339  public:
4340   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4341   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4342 
4343   void vrp_initialize (void);
4344   void vrp_finalize (bool);
4345   void check_all_array_refs (void);
4346   void check_array_ref (location_t, tree, bool);
4347   void check_mem_ref (location_t, tree, bool);
4348   void search_for_addr_array (tree, location_t);
4349 
4350   class vr_values vr_values;
4351   /* Temporary delegator to minimize code churn.  */
4352   value_range *get_value_range (const_tree op)
4353     { return vr_values.get_value_range (op); }
4354   void set_defs_to_varying (gimple *stmt)
4355     { return vr_values.set_defs_to_varying (stmt); }
4356   void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4357 				tree *output_p, value_range *vr)
4358     { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4359   bool update_value_range (const_tree op, value_range *vr)
4360     { return vr_values.update_value_range (op, vr); }
4361   void extract_range_basic (value_range *vr, gimple *stmt)
4362     { vr_values.extract_range_basic (vr, stmt); }
4363   void extract_range_from_phi_node (gphi *phi, value_range *vr)
4364     { vr_values.extract_range_from_phi_node (phi, vr); }
4365 };
4366 /* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
4367    arrays and "struct" hacks.  If VRP can determine that the
4368    array subscript is a constant, check if it is outside the valid
4369    range.  If the array subscript is a RANGE, warn if it is
4370    non-overlapping with the valid range.
4371    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
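/* For example (illustrative C, not from any testcase):

     int a[10];
     int f (void) { return a[10]; }

   triggers "array subscript 10 is above array bounds" here, whereas
   &a[10] is accepted because IGNORE_OFF_BY_ONE is true for
   ARRAY_REFs inside an ADDR_EXPR.  */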
4372 
4373 void
4374 vrp_prop::check_array_ref (location_t location, tree ref,
4375 			   bool ignore_off_by_one)
4376 {
4377   const value_range *vr = NULL;
4378   tree low_sub, up_sub;
4379   tree low_bound, up_bound, up_bound_p1;
4380 
4381   if (TREE_NO_WARNING (ref))
4382     return;
4383 
4384   low_sub = up_sub = TREE_OPERAND (ref, 1);
4385   up_bound = array_ref_up_bound (ref);
4386 
4387   if (!up_bound
4388       || TREE_CODE (up_bound) != INTEGER_CST
4389       || (warn_array_bounds < 2
4390 	  && array_at_struct_end_p (ref)))
4391     {
4392       /* Accesses to trailing arrays via pointers may access storage
4393 	 beyond the type's array bounds.  For such arrays, or for flexible
4394 	 array members, as well as for other arrays of an unknown size,
4395 	 replace the upper bound with a more permissive one that assumes
4396 	 the size of the largest object is PTRDIFF_MAX.  */
4397       tree eltsize = array_ref_element_size (ref);
4398 
4399       if (TREE_CODE (eltsize) != INTEGER_CST
4400 	  || integer_zerop (eltsize))
4401 	{
4402 	  up_bound = NULL_TREE;
4403 	  up_bound_p1 = NULL_TREE;
4404 	}
4405       else
4406 	{
4407 	  tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4408 	  tree arg = TREE_OPERAND (ref, 0);
4409 	  poly_int64 off;
4410 
4411 	  if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4412 	    maxbound = wide_int_to_tree (sizetype,
4413 					 wi::sub (wi::to_wide (maxbound),
4414 						  off));
4415 	  else
4416 	    maxbound = fold_convert (sizetype, maxbound);
4417 
4418 	  up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4419 
4420 	  up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4421 				      build_int_cst (ptrdiff_type_node, 1));
4422 	}
4423     }
4424   else
4425     up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4426 				   build_int_cst (TREE_TYPE (up_bound), 1));
4427 
4428   low_bound = array_ref_low_bound (ref);
4429 
4430   tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4431 
4432   bool warned = false;
4433 
4434   /* Empty array.  */
4435   if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4436     warned = warning_at (location, OPT_Warray_bounds,
4437 			 "array subscript %E is above array bounds of %qT",
4438 			 low_bound, artype);
4439 
4440   if (TREE_CODE (low_sub) == SSA_NAME)
4441     {
4442       vr = get_value_range (low_sub);
4443       if (!vr->undefined_p () && !vr->varying_p ())
4444         {
4445           low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
4446           up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
4447         }
4448     }
4449 
4450   if (vr && vr->kind () == VR_ANTI_RANGE)
4451     {
4452       if (up_bound
4453 	  && TREE_CODE (up_sub) == INTEGER_CST
4454           && (ignore_off_by_one
4455 	      ? tree_int_cst_lt (up_bound, up_sub)
4456 	      : tree_int_cst_le (up_bound, up_sub))
4457           && TREE_CODE (low_sub) == INTEGER_CST
4458           && tree_int_cst_le (low_sub, low_bound))
4459 	warned = warning_at (location, OPT_Warray_bounds,
4460 			     "array subscript [%E, %E] is outside "
4461 			     "array bounds of %qT",
4462 			     low_sub, up_sub, artype);
4463     }
4464   else if (up_bound
4465 	   && TREE_CODE (up_sub) == INTEGER_CST
4466 	   && (ignore_off_by_one
4467 	       ? !tree_int_cst_le (up_sub, up_bound_p1)
4468 	       : !tree_int_cst_le (up_sub, up_bound)))
4469     {
4470       if (dump_file && (dump_flags & TDF_DETAILS))
4471 	{
4472 	  fprintf (dump_file, "Array bound warning for ");
4473 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4474 	  fprintf (dump_file, "\n");
4475 	}
4476       warned = warning_at (location, OPT_Warray_bounds,
4477 			   "array subscript %E is above array bounds of %qT",
4478 			   up_sub, artype);
4479     }
4480   else if (TREE_CODE (low_sub) == INTEGER_CST
4481            && tree_int_cst_lt (low_sub, low_bound))
4482     {
4483       if (dump_file && (dump_flags & TDF_DETAILS))
4484 	{
4485 	  fprintf (dump_file, "Array bound warning for ");
4486 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4487 	  fprintf (dump_file, "\n");
4488 	}
4489       warned = warning_at (location, OPT_Warray_bounds,
4490 			   "array subscript %E is below array bounds of %qT",
4491 			   low_sub, artype);
4492     }
4493 
4494   if (warned)
4495     {
4496       ref = TREE_OPERAND (ref, 0);
4497 
4498       if (DECL_P (ref))
4499 	inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4500 
4501       TREE_NO_WARNING (ref) = 1;
4502     }
4503 }
4504 
4505 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4506    references to string constants.  If VRP can determine that the array
4507    subscript is a constant, check if it is outside valid range.
4508    If the array subscript is a RANGE, warn if it is non-overlapping
4509    with valid range.
4510    IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4511    (used to allow one-past-the-end indices for code that takes
4512    the address of the just-past-the-end element of an array).  */
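/* For instance (illustrative C, not from any testcase):

     char a[4];
     char f (void) { return *(a + 6); }

   is diagnosed here because the constant MEM_REF offset 6 falls
   outside the [0, 4] byte bounds of 'a'.  */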
4513 
4514 void
4515 vrp_prop::check_mem_ref (location_t location, tree ref,
4516 			 bool ignore_off_by_one)
4517 {
4518   if (TREE_NO_WARNING (ref))
4519     return;
4520 
4521   tree arg = TREE_OPERAND (ref, 0);
4522   /* The constant and variable offset of the reference.  */
4523   tree cstoff = TREE_OPERAND (ref, 1);
4524   tree varoff = NULL_TREE;
4525 
4526   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4527 
4528   /* The array or string constant bounds in bytes.  Initially set
4529      to [-MAXOBJSIZE - 1, MAXOBJSIZE]  until a tighter bound is
4530      determined.  */
4531   offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4532 
4533   /* The minimum and maximum intermediate offset.  For a reference
4534      to be valid, not only must the final offset/subscript be
4535      in bounds but all intermediate offsets must be as well.
4536      GCC may be able to deal gracefully with such out-of-bounds
4537      offsets so the checking is only enabled at -Warray-bounds=2
4538      where it may help detect bugs in uses of the intermediate
4539      offsets that could otherwise not be detectable.  */
4540   offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4541   offset_int extrema[2] = { 0, wi::abs (ioff) };
4542 
4543   /* The range of the byte offset into the reference.  */
4544   offset_int offrange[2] = { 0, 0 };
4545 
4546   const value_range *vr = NULL;
4547 
4548   /* Determine the offsets and increment OFFRANGE for the bounds of each.
4549      The loop computes the range of the final offset for expressions such
4550      as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
4551      some range.  */
4552   while (TREE_CODE (arg) == SSA_NAME)
4553     {
4554       gimple *def = SSA_NAME_DEF_STMT (arg);
4555       if (!is_gimple_assign (def))
4556 	break;
4557 
4558       tree_code code = gimple_assign_rhs_code (def);
4559       if (code == POINTER_PLUS_EXPR)
4560 	{
4561 	  arg = gimple_assign_rhs1 (def);
4562 	  varoff = gimple_assign_rhs2 (def);
4563 	}
4564       else if (code == ASSERT_EXPR)
4565 	{
4566 	  arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4567 	  continue;
4568 	}
4569       else
4570 	return;
4571 
4572       /* VAROFF should always be a SSA_NAME here (and not even
4573 	 INTEGER_CST) but there's no point in taking chances.  */
4574       if (TREE_CODE (varoff) != SSA_NAME)
4575 	break;
4576 
4577       vr = get_value_range (varoff);
4578       if (!vr || vr->undefined_p () || vr->varying_p ())
4579 	break;
4580 
4581       if (!vr->constant_p ())
4582         break;
4583 
4584       if (vr->kind () == VR_RANGE)
4585 	{
4586 	  offset_int min
4587 	    = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
4588 	  offset_int max
4589 	    = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
4590 	  if (min < max)
4591 	    {
4592 	      offrange[0] += min;
4593 	      offrange[1] += max;
4594 	    }
4595 	  else
4596 	    {
4597 	      /* When MIN >= MAX, the offset is effectively in a union
4598 		 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
4599 		 Since there is no way to represent such a range across
4600 		 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4601 		 to OFFRANGE.  */
4602 	      offrange[0] += arrbounds[0];
4603 	      offrange[1] += arrbounds[1];
4604 	    }
4605 	}
4606       else
4607 	{
4608 	  /* For an anti-range, analogously to the above, conservatively
4609 	     add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE.  */
4610 	  offrange[0] += arrbounds[0];
4611 	  offrange[1] += arrbounds[1];
4612 	}
4613 
4614       /* Keep track of the minimum and maximum offset.  */
4615       if (offrange[1] < 0 && offrange[1] < extrema[0])
4616 	extrema[0] = offrange[1];
4617       if (offrange[0] > 0 && offrange[0] > extrema[1])
4618 	extrema[1] = offrange[0];
4619 
4620       if (offrange[0] < arrbounds[0])
4621 	offrange[0] = arrbounds[0];
4622 
4623       if (offrange[1] > arrbounds[1])
4624 	offrange[1] = arrbounds[1];
4625     }
4626 
4627   if (TREE_CODE (arg) == ADDR_EXPR)
4628     {
4629       arg = TREE_OPERAND (arg, 0);
4630       if (TREE_CODE (arg) != STRING_CST
4631 	  && TREE_CODE (arg) != VAR_DECL)
4632 	return;
4633     }
4634   else
4635     return;
4636 
4637   /* The type of the object being referred to.  It can be an array,
4638      string literal, or a non-array type when the MEM_REF represents
4639      a reference/subscript via a pointer to an object that is not
4640      an element of an array.  References to members of structs and
4641      unions are excluded because MEM_REF doesn't make it possible
4642      to identify the member where the reference originated.
4643      Incomplete types are excluded as well because their size is
4644      not known.  */
4645   tree reftype = TREE_TYPE (arg);
4646   if (POINTER_TYPE_P (reftype)
4647       || !COMPLETE_TYPE_P (reftype)
4648       || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4649       || RECORD_OR_UNION_TYPE_P (reftype))
4650     return;
4651 
4652   offset_int eltsize;
4653   if (TREE_CODE (reftype) == ARRAY_TYPE)
4654     {
4655       eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4656 
4657       if (tree dom = TYPE_DOMAIN (reftype))
4658 	{
4659 	  tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4660 	  if (array_at_struct_end_p (arg)
4661 	      || !bnds[0] || !bnds[1])
4662 	    {
4663 	      arrbounds[0] = 0;
4664 	      arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4665 	    }
4666 	  else
4667 	    {
4668 	      arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4669 	      arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4670 	    }
4671 	}
4672       else
4673 	{
4674 	  arrbounds[0] = 0;
4675 	  arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4676 	}
4677 
4678       if (TREE_CODE (ref) == MEM_REF)
4679 	{
4680 	  /* For MEM_REF determine a tighter bound of the non-array
4681 	     element type.  */
4682 	  tree eltype = TREE_TYPE (reftype);
4683 	  while (TREE_CODE (eltype) == ARRAY_TYPE)
4684 	    eltype = TREE_TYPE (eltype);
4685 	  eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4686 	}
4687     }
4688   else
4689     {
4690       eltsize = 1;
4691       arrbounds[0] = 0;
4692       arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4693     }
4694 
4695   offrange[0] += ioff;
4696   offrange[1] += ioff;
4697 
4698   /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4699      is set (when taking the address of the one-past-last element
4700      of an array) but always use the stricter bound in diagnostics. */
4701   offset_int ubound = arrbounds[1];
4702   if (ignore_off_by_one)
4703     ubound += 1;
4704 
4705   if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4706     {
4707       /* Treat a reference to a non-array object as one to an array
4708 	 of a single element.  */
4709       if (TREE_CODE (reftype) != ARRAY_TYPE)
4710 	reftype = build_array_type_nelts (reftype, 1);
4711 
4712       if (TREE_CODE (ref) == MEM_REF)
4713 	{
4714 	  /* Extract the element type out of MEM_REF and use its size
4715 	     to compute the index to print in the diagnostic; arrays
4716 	     in MEM_REF don't mean anything.  A type with no size like
4717 	     void is as good as having a size of 1.  */
4718 	  tree type = TREE_TYPE (ref);
4719 	  while (TREE_CODE (type) == ARRAY_TYPE)
4720 	    type = TREE_TYPE (type);
4721 	  if (tree size = TYPE_SIZE_UNIT (type))
4722 	    {
4723 	      offrange[0] = offrange[0] / wi::to_offset (size);
4724 	      offrange[1] = offrange[1] / wi::to_offset (size);
4725 	    }
4726 	}
4727       else
4728 	{
4729 	  /* For anything other than MEM_REF, compute the index to
4730 	     print in the diagnostic as the offset over element size.  */
4731 	  offrange[0] = offrange[0] / eltsize;
4732 	  offrange[1] = offrange[1] / eltsize;
4733 	}
4734 
4735       bool warned;
4736       if (offrange[0] == offrange[1])
4737 	warned = warning_at (location, OPT_Warray_bounds,
4738 			     "array subscript %wi is outside array bounds "
4739 			     "of %qT",
4740 			     offrange[0].to_shwi (), reftype);
4741       else
4742 	warned = warning_at (location, OPT_Warray_bounds,
4743 			     "array subscript [%wi, %wi] is outside "
4744 			     "array bounds of %qT",
4745 			     offrange[0].to_shwi (),
4746 			     offrange[1].to_shwi (), reftype);
4747       if (warned && DECL_P (arg))
4748 	inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4749 
4750       if (warned)
4751 	TREE_NO_WARNING (ref) = 1;
4752       return;
4753     }
4754 
4755   if (warn_array_bounds < 2)
4756     return;
4757 
4758   /* At level 2, also check intermediate offsets.  */
4759   int i = 0;
4760   if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4761     {
4762       HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4763 
4764       if (warning_at (location, OPT_Warray_bounds,
4765 		      "intermediate array offset %wi is outside array bounds "
4766 		      "of %qT", tmpidx, reftype))
4767 	TREE_NO_WARNING (ref) = 1;
4768     }
4769 }
4770 
4771 /* Search the expression T, located at LOCATION, for ARRAY_REFs and
4772    MEM_REFs, and call check_array_ref or check_mem_ref on each.  */
4773 
4774 void
4775 vrp_prop::search_for_addr_array (tree t, location_t location)
4776 {
4777   /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4778   do
4779     {
4780       if (TREE_CODE (t) == ARRAY_REF)
4781 	check_array_ref (location, t, true /*ignore_off_by_one*/);
4782       else if (TREE_CODE (t) == MEM_REF)
4783 	check_mem_ref (location, t, true /*ignore_off_by_one*/);
4784 
4785       t = TREE_OPERAND (t, 0);
4786     }
4787   while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4788 
4789   if (TREE_CODE (t) != MEM_REF
4790       || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4791       || TREE_NO_WARNING (t))
4792     return;
4793 
4794   tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4795   tree low_bound, up_bound, el_sz;
4796   if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4797       || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4798       || !TYPE_DOMAIN (TREE_TYPE (tem)))
4799     return;
4800 
4801   low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4802   up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4803   el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4804   if (!low_bound
4805       || TREE_CODE (low_bound) != INTEGER_CST
4806       || !up_bound
4807       || TREE_CODE (up_bound) != INTEGER_CST
4808       || !el_sz
4809       || TREE_CODE (el_sz) != INTEGER_CST)
4810     return;
4811 
4812   offset_int idx;
4813   if (!mem_ref_offset (t).is_constant (&idx))
4814     return;
4815 
4816   bool warned = false;
4817   idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4818   if (idx < 0)
4819     {
4820       if (dump_file && (dump_flags & TDF_DETAILS))
4821 	{
4822 	  fprintf (dump_file, "Array bound warning for ");
4823 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4824 	  fprintf (dump_file, "\n");
4825 	}
4826       warned = warning_at (location, OPT_Warray_bounds,
4827 			   "array subscript %wi is below "
4828 			   "array bounds of %qT",
4829 			   idx.to_shwi (), TREE_TYPE (tem));
4830     }
4831   else if (idx > (wi::to_offset (up_bound)
4832 		  - wi::to_offset (low_bound) + 1))
4833     {
4834       if (dump_file && (dump_flags & TDF_DETAILS))
4835 	{
4836 	  fprintf (dump_file, "Array bound warning for ");
4837 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4838 	  fprintf (dump_file, "\n");
4839 	}
4840       warned = warning_at (location, OPT_Warray_bounds,
4841 			   "array subscript %wu is above "
4842 			   "array bounds of %qT",
4843 			   idx.to_uhwi (), TREE_TYPE (tem));
4844     }
4845 
4846   if (warned)
4847     {
4848       if (DECL_P (t))
4849 	inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4850 
4851       TREE_NO_WARNING (t) = 1;
4852     }
4853 }
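
/* For example (hypothetical user code), taking the address of an
   element far past the end is diagnosed here, while the
   one-past-the-end address stays valid because the checks above run
   with ignore_off_by_one set:

     int a[4];
     int *f (void) { return &a[10]; }   // warned: subscript above bounds
     int *g (void) { return &a[4]; }    // not warned: one past the end  */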
4854 
4855 /* walk_tree() callback that checks whether *TP is an ARRAY_REF
4856    inside an ADDR_EXPR (where a subscript one past the end of the
4857    array is allowed).  Call check_array_ref for each ARRAY_REF and
4858    check_mem_ref for each MEM_REF found.  The walk_stmt_info with
4859    the vrp_prop instance is passed in DATA.  */
4860 
4861 static tree
4862 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4863 {
4864   tree t = *tp;
4865   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4866   location_t location;
4867 
4868   if (EXPR_HAS_LOCATION (t))
4869     location = EXPR_LOCATION (t);
4870   else
4871     location = gimple_location (wi->stmt);
4872 
4873   *walk_subtree = TRUE;
4874 
4875   vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4876   if (TREE_CODE (t) == ARRAY_REF)
4877     vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4878   else if (TREE_CODE (t) == MEM_REF)
4879     vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4880   else if (TREE_CODE (t) == ADDR_EXPR)
4881     {
4882       vrp_prop->search_for_addr_array (t, location);
4883       *walk_subtree = FALSE;
4884     }
4885 
4886   return NULL_TREE;
4887 }
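
/* A minimal sketch of the walk_tree callback protocol used above
   (illustrative only, not called anywhere): a callback may clear
   *WALK_SUBTREES to prune the walk below the current node, and it
   stops the whole walk by returning a non-NULL tree, e.g. to find the
   first ARRAY_REF:

     static tree
     find_array_ref (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
                     void *data ATTRIBUTE_UNUSED)
     {
       return TREE_CODE (*tp) == ARRAY_REF ? *tp : NULL_TREE;
     }

   check_array_bounds instead always returns NULL_TREE so that every
   operand of the statement is visited.  */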
4888 
4889 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4890    to walk over all statements of all reachable BBs and call
4891    check_array_bounds on them.  */
4892 
4893 class check_array_bounds_dom_walker : public dom_walker
4894 {
4895  public:
4896   check_array_bounds_dom_walker (vrp_prop *prop)
4897     : dom_walker (CDI_DOMINATORS,
4898 		  /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4899 		     flags, so that we can merge in information on
4900 		     non-executable edges from vrp_folder.  */
4901 		  REACHABLE_BLOCKS_PRESERVING_FLAGS),
4902       m_prop (prop) {}
4903   ~check_array_bounds_dom_walker () {}
4904 
4905   edge before_dom_children (basic_block) FINAL OVERRIDE;
4906 
4907  private:
4908   vrp_prop *m_prop;
4909 };
4910 
4911 /* Implementation of dom_walker::before_dom_children.
4912 
4913    Walk over all statements of BB and call check_array_bounds on them,
4914    and determine if there's a unique successor edge.  */
4915 
4916 edge
4917 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4918 {
4919   gimple_stmt_iterator si;
4920   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4921     {
4922       gimple *stmt = gsi_stmt (si);
4923       struct walk_stmt_info wi;
4924       if (!gimple_has_location (stmt)
4925 	  || is_gimple_debug (stmt))
4926 	continue;
4927 
4928       memset (&wi, 0, sizeof (wi));
4929 
4930       wi.info = m_prop;
4931 
4932       walk_gimple_op (stmt, check_array_bounds, &wi);
4933     }
4934 
4935   /* Determine if there's a unique successor edge, and if so, return
4936      that back to dom_walker, ensuring that we don't visit blocks that
4937      became unreachable during the VRP propagation
4938      (PR tree-optimization/83312).  */
4939   return find_taken_edge (bb, NULL_TREE);
4940 }
4941 
4942 /* Walk over all statements of all reachable BBs and call check_array_bounds
4943    on them.  */
4944 
4945 void
4946 vrp_prop::check_all_array_refs ()
4947 {
4948   check_array_bounds_dom_walker w (this);
4949   w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4950 }
4951 
4952 /* Return true if all imm uses of VAR are either in STMT, or
4953    feed (optionally through a chain of single imm uses) GIMPLE_COND
4954    in basic block COND_BB.  */
4955 
4956 static bool
4957 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4958 {
4959   use_operand_p use_p, use2_p;
4960   imm_use_iterator iter;
4961 
4962   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4963     if (USE_STMT (use_p) != stmt)
4964       {
4965 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4966 	if (is_gimple_debug (use_stmt))
4967 	  continue;
4968 	while (is_gimple_assign (use_stmt)
4969 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4970 	       && single_imm_use (gimple_assign_lhs (use_stmt),
4971 				  &use2_p, &use_stmt2))
4972 	  use_stmt = use_stmt2;
4973 	if (gimple_code (use_stmt) != GIMPLE_COND
4974 	    || gimple_bb (use_stmt) != cond_bb)
4975 	  return false;
4976       }
4977   return true;
4978 }
4979 
4980 /* Handle
4981    _4 = x_3 & 31;
4982    if (_4 != 0)
4983      goto <bb 6>;
4984    else
4985      goto <bb 7>;
4986    <bb 6>:
4987    __builtin_unreachable ();
4988    <bb 7>:
4989    x_5 = ASSERT_EXPR <x_3, ...>;
4990    If x_3 has no other immediate uses (checked by caller),
4991    var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
4992    from the non-zero bitmask.  */
4993 
4994 void
4995 maybe_set_nonzero_bits (edge e, tree var)
4996 {
4997   basic_block cond_bb = e->src;
4998   gimple *stmt = last_stmt (cond_bb);
4999   tree cst;
5000 
5001   if (stmt == NULL
5002       || gimple_code (stmt) != GIMPLE_COND
5003       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
5004 				     ? EQ_EXPR : NE_EXPR)
5005       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
5006       || !integer_zerop (gimple_cond_rhs (stmt)))
5007     return;
5008 
5009   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5010   if (!is_gimple_assign (stmt)
5011       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5012       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5013     return;
5014   if (gimple_assign_rhs1 (stmt) != var)
5015     {
5016       gimple *stmt2;
5017 
5018       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5019 	return;
5020       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5021       if (!gimple_assign_cast_p (stmt2)
5022 	  || gimple_assign_rhs1 (stmt2) != var
5023 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5024 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5025 			      != TYPE_PRECISION (TREE_TYPE (var))))
5026 	return;
5027     }
5028   cst = gimple_assign_rhs2 (stmt);
5029   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5030 					  wi::to_wide (cst)));
5031 }
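
/* Worked example for the pattern above: on the edge where
   (x_3 & 31) == 0 holds, the low five bits of x_3 must be zero, so
   the mask of possibly-nonzero bits is narrowed with bit_and_not.
   With made-up numbers for illustration:

     get_nonzero_bits (x_3)        == 0xff
     cst                           == 31  (0x1f)
     wi::bit_and_not (0xff, 0x1f)  == 0xe0

   i.e. afterwards only bits 5..7 of x_3 may still be nonzero.  */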
5032 
5033 /* Convert range assertion expressions into the implied copies and
5034    copy propagate away the copies.  Doing the trivial copy propagation
5035    here avoids the need to run the full copy propagation pass after
5036    VRP.
5037 
5038    FIXME, this will eventually lead to copy propagation removing the
5039    names that had useful range information attached to them.  For
5040    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5041    then N_i will have the range [3, +INF].
5042 
5043    However, by converting the assertion into the implied copy
5044    operation N_i = N_j, we will then copy-propagate N_j into the uses
5045    of N_i and lose the range information.  We may want to hold on to
5046    ASSERT_EXPRs a little while longer as the ranges could be used in
5047    things like jump threading.
5048 
5049    The problem with keeping ASSERT_EXPRs around is that passes after
5050    VRP need to handle them appropriately.
5051 
5052    Another approach would be to make the range information a first
5053    class property of the SSA_NAME so that it can be queried from
5054    any pass.  This is made somewhat more complex by the need for
5055    multiple ranges to be associated with one SSA_NAME.  */
5056 
5057 static void
5058 remove_range_assertions (void)
5059 {
5060   basic_block bb;
5061   gimple_stmt_iterator si;
5062   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5063      a basic block preceded by a GIMPLE_COND branching to it and
5064      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
5065   int is_unreachable;
5066 
5067   /* Note that the GSI iterator bump happens at the bottom of the
5068      loop and no bump is necessary if we're removing the statement
5069      referenced by the current GSI.  */
5070   FOR_EACH_BB_FN (bb, cfun)
5071     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5072       {
5073 	gimple *stmt = gsi_stmt (si);
5074 
5075 	if (is_gimple_assign (stmt)
5076 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5077 	  {
5078 	    tree lhs = gimple_assign_lhs (stmt);
5079 	    tree rhs = gimple_assign_rhs1 (stmt);
5080 	    tree var;
5081 
5082 	    var = ASSERT_EXPR_VAR (rhs);
5083 
5084 	    if (TREE_CODE (var) == SSA_NAME
5085 		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
5086 		&& SSA_NAME_RANGE_INFO (lhs))
5087 	      {
5088 		if (is_unreachable == -1)
5089 		  {
5090 		    is_unreachable = 0;
5091 		    if (single_pred_p (bb)
5092 			&& assert_unreachable_fallthru_edge_p
5093 						    (single_pred_edge (bb)))
5094 		      is_unreachable = 1;
5095 		  }
5096 		/* Handle
5097 		   if (x_7 >= 10 && x_7 < 20)
5098 		     __builtin_unreachable ();
5099 		   x_8 = ASSERT_EXPR <x_7, ...>;
5100 		   if the only uses of x_7 are in the ASSERT_EXPR and
5101 		   in the condition.  In that case, we can copy the
5102 		   range info from x_8 computed in this pass also
5103 		   for x_7.  */
5104 		if (is_unreachable
5105 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5106 							  single_pred (bb)))
5107 		  {
5108 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5109 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
5110 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
5111 		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
5112 		  }
5113 	      }
5114 
5115 	    /* Propagate the RHS into every use of the LHS.  For SSA names
5116 	       also propagate abnormals as it merely restores the original
5117 	       IL in this case (replace_uses_by would assert).  */
5118 	    if (TREE_CODE (var) == SSA_NAME)
5119 	      {
5120 		imm_use_iterator iter;
5121 		use_operand_p use_p;
5122 		gimple *use_stmt;
5123 		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5124 		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5125 		    SET_USE (use_p, var);
5126 	      }
5127 	    else
5128 	      replace_uses_by (lhs, var);
5129 
5130 	    /* And finally, remove the copy, it is not needed.  */
5131 	    gsi_remove (&si, true);
5132 	    release_defs (stmt);
5133 	  }
5134 	else
5135 	  {
5136 	    if (!is_gimple_debug (gsi_stmt (si)))
5137 	      is_unreachable = 0;
5138 	    gsi_next (&si);
5139 	  }
5140       }
5141 }
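
/* Schematically, the loop above rewrites

     x_8 = ASSERT_EXPR <x_7, x_7 > 3>;
     ... uses of x_8 ...

   into

     ... uses of x_7 ...

   with the ASSERT_EXPR statement removed and, in the
   __builtin_unreachable case handled above, the range info computed
   for x_8 copied over to x_7.  */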
5142 
5143 /* Return true if STMT is interesting for VRP.  */
5144 
5145 bool
5146 stmt_interesting_for_vrp (gimple *stmt)
5147 {
5148   if (gimple_code (stmt) == GIMPLE_PHI)
5149     {
5150       tree res = gimple_phi_result (stmt);
5151       return (!virtual_operand_p (res)
5152 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5153 		  || POINTER_TYPE_P (TREE_TYPE (res))));
5154     }
5155   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5156     {
5157       tree lhs = gimple_get_lhs (stmt);
5158 
5159       /* In general, assignments with virtual operands are not useful
5160 	 for deriving ranges, with the obvious exception of calls to
5161 	 builtin functions.  */
5162       if (lhs && TREE_CODE (lhs) == SSA_NAME
5163 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5164 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
5165 	  && (is_gimple_call (stmt)
5166 	      || !gimple_vuse (stmt)))
5167 	return true;
5168       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5169 	switch (gimple_call_internal_fn (stmt))
5170 	  {
5171 	  case IFN_ADD_OVERFLOW:
5172 	  case IFN_SUB_OVERFLOW:
5173 	  case IFN_MUL_OVERFLOW:
5174 	  case IFN_ATOMIC_COMPARE_EXCHANGE:
5175 	    /* These internal calls return _Complex integer type,
5176 	       but are interesting to VRP nevertheless.  */
5177 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
5178 	      return true;
5179 	    break;
5180 	  default:
5181 	    break;
5182 	  }
5183     }
5184   else if (gimple_code (stmt) == GIMPLE_COND
5185 	   || gimple_code (stmt) == GIMPLE_SWITCH)
5186     return true;
5187 
5188   return false;
5189 }
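
/* A few examples of the classification above:

     x_1 = y_2 + 1;        interesting: integral SSA lhs, no virtual use
     x_3 = *p_4;           not interesting: the load carries a vuse
     x_5 = strlen (s_6);   interesting: calls may have a vuse
     if (x_1 > 10) ...     interesting: GIMPLE_COND  */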
5190 
5191 /* Initialization required by ssa_propagate engine.  */
5192 
5193 void
5194 vrp_prop::vrp_initialize ()
5195 {
5196   basic_block bb;
5197 
5198   FOR_EACH_BB_FN (bb, cfun)
5199     {
5200       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5201 	   gsi_next (&si))
5202 	{
5203 	  gphi *phi = si.phi ();
5204 	  if (!stmt_interesting_for_vrp (phi))
5205 	    {
5206 	      tree lhs = PHI_RESULT (phi);
5207 	      get_value_range (lhs)->set_varying ();
5208 	      prop_set_simulate_again (phi, false);
5209 	    }
5210 	  else
5211 	    prop_set_simulate_again (phi, true);
5212 	}
5213 
5214       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5215 	   gsi_next (&si))
5216         {
5217 	  gimple *stmt = gsi_stmt (si);
5218 
5219 	  /* If the statement is a control statement, we must simulate
5220 	     it at least once.  Failing to do so means its outgoing
5221 	     edges will never get added.  */
5222 	  if (stmt_ends_bb_p (stmt))
5223 	    prop_set_simulate_again (stmt, true);
5224 	  else if (!stmt_interesting_for_vrp (stmt))
5225 	    {
5226 	      set_defs_to_varying (stmt);
5227 	      prop_set_simulate_again (stmt, false);
5228 	    }
5229 	  else
5230 	    prop_set_simulate_again (stmt, true);
5231 	}
5232     }
5233 }
5234 
5235 /* Searches the case label vector of the switch statement STMT for the
5236    index *IDX of the CASE_LABEL that includes the value VAL.  The search
5237    is restricted to the range [START_IDX, n - 1], n being the label count.
5238 
5239    If there is a CASE_LABEL for VAL, its index is placed in *IDX and true
5240    is returned.
5241 
5242    If there is no CASE_LABEL for VAL and there is one that is larger than
5243    VAL, its index is placed in *IDX and false is returned.
5244 
5245    If VAL is larger than any CASE_LABEL, n is placed in *IDX and false is
5246    returned.  */
5247 
5248 bool
5249 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5250 {
5251   size_t n = gimple_switch_num_labels (stmt);
5252   size_t low, high;
5253 
5254   /* Find case label for minimum of the value range or the next one.
5255      At each iteration we are searching in [low, high - 1]. */
5256 
5257   for (low = start_idx, high = n; high != low; )
5258     {
5259       tree t;
5260       int cmp;
5261       /* Note that i != high, so we never ask for n. */
5262       size_t i = (high + low) / 2;
5263       t = gimple_switch_label (stmt, i);
5264 
5265       /* Cache the result of comparing CASE_LOW and val.  */
5266       cmp = tree_int_cst_compare (CASE_LOW (t), val);
5267 
5268       if (cmp == 0)
5269 	{
5270 	  /* Ranges cannot be empty. */
5271 	  *idx = i;
5272 	  return true;
5273 	}
5274       else if (cmp > 0)
5275         high = i;
5276       else
5277 	{
5278 	  low = i + 1;
5279 	  if (CASE_HIGH (t) != NULL
5280 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5281 	    {
5282 	      *idx = i;
5283 	      return true;
5284 	    }
5285         }
5286     }
5287 
5288   *idx = high;
5289   return false;
5290 }
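
/* For example, given

     switch (x) { case 1: ...; case 5 ... 7: ...; case 9: ...; }

   the sorted label vector is { default, 1, 5...7, 9 } with n == 4, and

     find_case_label_index (stmt, 1, 6, &i)   sets i = 2, returns true
     find_case_label_index (stmt, 1, 8, &i)   sets i = 3, returns false
     find_case_label_index (stmt, 1, 10, &i)  sets i = 4 (n), returns false  */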
5291 
5292 /* Searches the case label vector of the switch statement STMT for the
5293    range of CASE_LABELs that covers values between MIN and MAX.  The
5294    first index is placed in *MIN_IDX and the last in *MAX_IDX.  If the
5295    range of CASE_LABELs is empty then *MAX_IDX < *MIN_IDX.
5296    Returns true if the default label is not needed.  */
5297 
5298 bool
5299 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5300 		       size_t *max_idx)
5301 {
5302   size_t i, j;
5303   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5304   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5305 
5306   if (i == j
5307       && min_take_default
5308       && max_take_default)
5309     {
5310       /* Only the default case label reached.
5311          Return an empty range. */
5312       *min_idx = 1;
5313       *max_idx = 0;
5314       return false;
5315     }
5316   else
5317     {
5318       bool take_default = min_take_default || max_take_default;
5319       tree low, high;
5320       size_t k;
5321 
5322       if (max_take_default)
5323 	j--;
5324 
5325       /* If the case label range is continuous, we do not need
5326 	 the default case label.  Verify that.  */
5327       high = CASE_LOW (gimple_switch_label (stmt, i));
5328       if (CASE_HIGH (gimple_switch_label (stmt, i)))
5329 	high = CASE_HIGH (gimple_switch_label (stmt, i));
5330       for (k = i + 1; k <= j; ++k)
5331 	{
5332 	  low = CASE_LOW (gimple_switch_label (stmt, k));
5333 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5334 	    {
5335 	      take_default = true;
5336 	      break;
5337 	    }
5338 	  high = low;
5339 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
5340 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
5341 	}
5342 
5343       *min_idx = i;
5344       *max_idx = j;
5345       return !take_default;
5346     }
5347 }
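
/* Continuing the example above, find_case_label_range on [5, 7] sets
   *MIN_IDX = *MAX_IDX = 2 and returns true: the single label 5...7
   covers the whole range, so the default label cannot be reached.
   On [5, 9] it sets *MIN_IDX = 2 and *MAX_IDX = 3 but returns false:
   the value 8 falls in the gap between the labels 5...7 and 9 and
   would reach the default label.  */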
5348 
5349 /* Evaluate statement STMT.  If the statement produces a useful range,
5350    return SSA_PROP_INTERESTING and record the SSA name with the
5351    interesting range into *OUTPUT_P.
5352 
5353    If STMT is a conditional branch and we can determine its truth
5354    value, the taken edge is recorded in *TAKEN_EDGE_P.
5355 
5356    If STMT produces a varying value, return SSA_PROP_VARYING.  */
5357 
5358 enum ssa_prop_result
5359 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5360 {
5361   tree lhs = gimple_get_lhs (stmt);
5362   value_range vr;
5363   extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5364 
5365   if (*output_p)
5366     {
5367       if (update_value_range (*output_p, &vr))
5368 	{
5369 	  if (dump_file && (dump_flags & TDF_DETAILS))
5370 	    {
5371 	      fprintf (dump_file, "Found new range for ");
5372 	      print_generic_expr (dump_file, *output_p);
5373 	      fprintf (dump_file, ": ");
5374 	      dump_value_range (dump_file, &vr);
5375 	      fprintf (dump_file, "\n");
5376 	    }
5377 
5378 	  if (vr.varying_p ())
5379 	    return SSA_PROP_VARYING;
5380 
5381 	  return SSA_PROP_INTERESTING;
5382 	}
5383       return SSA_PROP_NOT_INTERESTING;
5384     }
5385 
5386   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5387     switch (gimple_call_internal_fn (stmt))
5388       {
5389       case IFN_ADD_OVERFLOW:
5390       case IFN_SUB_OVERFLOW:
5391       case IFN_MUL_OVERFLOW:
5392       case IFN_ATOMIC_COMPARE_EXCHANGE:
5393 	/* These internal calls return _Complex integer type,
5394 	   which VRP does not track, but the immediate uses
5395 	   thereof might be interesting.  */
5396 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
5397 	  {
5398 	    imm_use_iterator iter;
5399 	    use_operand_p use_p;
5400 	    enum ssa_prop_result res = SSA_PROP_VARYING;
5401 
5402 	    get_value_range (lhs)->set_varying ();
5403 
5404 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5405 	      {
5406 		gimple *use_stmt = USE_STMT (use_p);
5407 		if (!is_gimple_assign (use_stmt))
5408 		  continue;
5409 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5410 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5411 		  continue;
5412 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
5413 		tree use_lhs = gimple_assign_lhs (use_stmt);
5414 		if (TREE_CODE (rhs1) != rhs_code
5415 		    || TREE_OPERAND (rhs1, 0) != lhs
5416 		    || TREE_CODE (use_lhs) != SSA_NAME
5417 		    || !stmt_interesting_for_vrp (use_stmt)
5418 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5419 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5420 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5421 		  continue;
5422 
5423 		/* If there is a change in the value range for any of the
5424 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5425 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
5426 		   or IMAGPART_EXPR immediate uses, but none of them have
5427 		   a change in their value ranges, return
5428 		   SSA_PROP_NOT_INTERESTING.  If there are no
5429 		   {REAL,IMAG}PART_EXPR uses at all,
5430 		   return SSA_PROP_VARYING.  */
5431 		value_range new_vr;
5432 		extract_range_basic (&new_vr, use_stmt);
5433 		const value_range *old_vr = get_value_range (use_lhs);
5434 		if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
5435 		  res = SSA_PROP_INTERESTING;
5436 		else
5437 		  res = SSA_PROP_NOT_INTERESTING;
5438 		new_vr.equiv_clear ();
5439 		if (res == SSA_PROP_INTERESTING)
5440 		  {
5441 		    *output_p = lhs;
5442 		    return res;
5443 		  }
5444 	      }
5445 
5446 	    return res;
5447 	  }
5448 	break;
5449       default:
5450 	break;
5451       }
5452 
5453   /* All other statements produce nothing of interest for VRP, so mark
5454      their outputs varying and prevent further simulation.  */
5455   set_defs_to_varying (stmt);
5456 
5457   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5458 }
5459 
5460 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5461    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5462    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5463    possible such range.  The resulting range is not canonicalized.  */
5464 
5465 static void
5466 union_ranges (enum value_range_kind *vr0type,
5467 	      tree *vr0min, tree *vr0max,
5468 	      enum value_range_kind vr1type,
5469 	      tree vr1min, tree vr1max)
5470 {
5471   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5472   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5473 
5474   /* [] is vr0, () is vr1 in the following classification comments.  */
5475   if (mineq && maxeq)
5476     {
5477       /* [(  )] */
5478       if (*vr0type == vr1type)
5479 	/* Nothing to do for equal ranges.  */
5480 	;
5481       else if ((*vr0type == VR_RANGE
5482 		&& vr1type == VR_ANTI_RANGE)
5483 	       || (*vr0type == VR_ANTI_RANGE
5484 		   && vr1type == VR_RANGE))
5485 	{
5486 	  /* For anti-range with range union the result is varying.  */
5487 	  goto give_up;
5488 	}
5489       else
5490 	gcc_unreachable ();
5491     }
5492   else if (operand_less_p (*vr0max, vr1min) == 1
5493 	   || operand_less_p (vr1max, *vr0min) == 1)
5494     {
5495       /* [ ] ( ) or ( ) [ ]
5496 	 If the ranges have an empty intersection, result of the union
5497 	 operation is the anti-range or if both are anti-ranges
5498 	 it covers all.  */
5499       if (*vr0type == VR_ANTI_RANGE
5500 	  && vr1type == VR_ANTI_RANGE)
5501 	goto give_up;
5502       else if (*vr0type == VR_ANTI_RANGE
5503 	       && vr1type == VR_RANGE)
5504 	;
5505       else if (*vr0type == VR_RANGE
5506 	       && vr1type == VR_ANTI_RANGE)
5507 	{
5508 	  *vr0type = vr1type;
5509 	  *vr0min = vr1min;
5510 	  *vr0max = vr1max;
5511 	}
5512       else if (*vr0type == VR_RANGE
5513 	       && vr1type == VR_RANGE)
5514 	{
5515 	  /* The result is the convex hull of both ranges.  */
5516 	  if (operand_less_p (*vr0max, vr1min) == 1)
5517 	    {
5518 	      /* If the result can be an anti-range, create one.  */
5519 	      if (TREE_CODE (*vr0max) == INTEGER_CST
5520 		  && TREE_CODE (vr1min) == INTEGER_CST
5521 		  && vrp_val_is_min (*vr0min)
5522 		  && vrp_val_is_max (vr1max))
5523 		{
5524 		  tree min = int_const_binop (PLUS_EXPR,
5525 					      *vr0max,
5526 					      build_int_cst (TREE_TYPE (*vr0max), 1));
5527 		  tree max = int_const_binop (MINUS_EXPR,
5528 					      vr1min,
5529 					      build_int_cst (TREE_TYPE (vr1min), 1));
5530 		  if (!operand_less_p (max, min))
5531 		    {
5532 		      *vr0type = VR_ANTI_RANGE;
5533 		      *vr0min = min;
5534 		      *vr0max = max;
5535 		    }
5536 		  else
5537 		    *vr0max = vr1max;
5538 		}
5539 	      else
5540 		*vr0max = vr1max;
5541 	    }
5542 	  else
5543 	    {
5544 	      /* If the result can be an anti-range, create one.  */
5545 	      if (TREE_CODE (vr1max) == INTEGER_CST
5546 		  && TREE_CODE (*vr0min) == INTEGER_CST
5547 		  && vrp_val_is_min (vr1min)
5548 		  && vrp_val_is_max (*vr0max))
5549 		{
5550 		  tree min = int_const_binop (PLUS_EXPR,
5551 					      vr1max,
5552 					      build_int_cst (TREE_TYPE (vr1max), 1));
5553 		  tree max = int_const_binop (MINUS_EXPR,
5554 					      *vr0min,
5555 					      build_int_cst (TREE_TYPE (*vr0min), 1));
5556 		  if (!operand_less_p (max, min))
5557 		    {
5558 		      *vr0type = VR_ANTI_RANGE;
5559 		      *vr0min = min;
5560 		      *vr0max = max;
5561 		    }
5562 		  else
5563 		    *vr0min = vr1min;
5564 		}
5565 	      else
5566 		*vr0min = vr1min;
5567 	    }
5568 	}
5569       else
5570 	gcc_unreachable ();
5571     }
5572   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5573 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5574     {
5575       /* [ (  ) ] or [(  ) ] or [ (  )] */
5576       if (*vr0type == VR_RANGE
5577 	  && vr1type == VR_RANGE)
5578 	;
5579       else if (*vr0type == VR_ANTI_RANGE
5580 	       && vr1type == VR_ANTI_RANGE)
5581 	{
5582 	  *vr0type = vr1type;
5583 	  *vr0min = vr1min;
5584 	  *vr0max = vr1max;
5585 	}
5586       else if (*vr0type == VR_ANTI_RANGE
5587 	       && vr1type == VR_RANGE)
5588 	{
5589 	  /* Arbitrarily choose the right or left gap.  */
5590 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5591 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5592 				       build_int_cst (TREE_TYPE (vr1min), 1));
5593 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5594 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5595 				       build_int_cst (TREE_TYPE (vr1max), 1));
5596 	  else
5597 	    goto give_up;
5598 	}
5599       else if (*vr0type == VR_RANGE
5600 	       && vr1type == VR_ANTI_RANGE)
5601 	/* The result covers everything.  */
5602 	goto give_up;
5603       else
5604 	gcc_unreachable ();
5605     }
5606   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5607 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5608     {
5609       /* ( [  ] ) or ([  ] ) or ( [  ]) */
5610       if (*vr0type == VR_RANGE
5611 	  && vr1type == VR_RANGE)
5612 	{
5613 	  *vr0type = vr1type;
5614 	  *vr0min = vr1min;
5615 	  *vr0max = vr1max;
5616 	}
5617       else if (*vr0type == VR_ANTI_RANGE
5618 	       && vr1type == VR_ANTI_RANGE)
5619 	;
5620       else if (*vr0type == VR_RANGE
5621 	       && vr1type == VR_ANTI_RANGE)
5622 	{
5623 	  *vr0type = VR_ANTI_RANGE;
5624 	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5625 	    {
5626 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5627 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5628 	      *vr0min = vr1min;
5629 	    }
5630 	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5631 	    {
5632 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5633 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5634 	      *vr0max = vr1max;
5635 	    }
5636 	  else
5637 	    goto give_up;
5638 	}
5639       else if (*vr0type == VR_ANTI_RANGE
5640 	       && vr1type == VR_RANGE)
5641 	/* The result covers everything.  */
5642 	goto give_up;
5643       else
5644 	gcc_unreachable ();
5645     }
5646   else if ((operand_less_p (vr1min, *vr0max) == 1
5647 	    || operand_equal_p (vr1min, *vr0max, 0))
5648 	   && operand_less_p (*vr0min, vr1min) == 1
5649 	   && operand_less_p (*vr0max, vr1max) == 1)
5650     {
5651       /* [  (  ]  ) or [   ](   ) */
5652       if (*vr0type == VR_RANGE
5653 	  && vr1type == VR_RANGE)
5654 	*vr0max = vr1max;
5655       else if (*vr0type == VR_ANTI_RANGE
5656 	       && vr1type == VR_ANTI_RANGE)
5657 	*vr0min = vr1min;
5658       else if (*vr0type == VR_ANTI_RANGE
5659 	       && vr1type == VR_RANGE)
5660 	{
5661 	  if (TREE_CODE (vr1min) == INTEGER_CST)
5662 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5663 				       build_int_cst (TREE_TYPE (vr1min), 1));
5664 	  else
5665 	    goto give_up;
5666 	}
5667       else if (*vr0type == VR_RANGE
5668 	       && vr1type == VR_ANTI_RANGE)
5669 	{
5670 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
5671 	    {
5672 	      *vr0type = vr1type;
5673 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5674 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5675 	      *vr0max = vr1max;
5676 	    }
5677 	  else
5678 	    goto give_up;
5679 	}
5680       else
5681 	gcc_unreachable ();
5682     }
5683   else if ((operand_less_p (*vr0min, vr1max) == 1
5684 	    || operand_equal_p (*vr0min, vr1max, 0))
5685 	   && operand_less_p (vr1min, *vr0min) == 1
5686 	   && operand_less_p (vr1max, *vr0max) == 1)
5687     {
5688       /* (  [  )  ] or (   )[   ] */
5689       if (*vr0type == VR_RANGE
5690 	  && vr1type == VR_RANGE)
5691 	*vr0min = vr1min;
5692       else if (*vr0type == VR_ANTI_RANGE
5693 	       && vr1type == VR_ANTI_RANGE)
5694 	*vr0max = vr1max;
5695       else if (*vr0type == VR_ANTI_RANGE
5696 	       && vr1type == VR_RANGE)
5697 	{
5698 	  if (TREE_CODE (vr1max) == INTEGER_CST)
5699 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5700 				       build_int_cst (TREE_TYPE (vr1max), 1));
5701 	  else
5702 	    goto give_up;
5703 	}
5704       else if (*vr0type == VR_RANGE
5705 	       && vr1type == VR_ANTI_RANGE)
5706 	{
5707 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
5708 	    {
5709 	      *vr0type = vr1type;
5710 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5711 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5712 	      *vr0min = vr1min;
5713 	    }
5714 	  else
5715 	    goto give_up;
5716 	}
5717       else
5718 	gcc_unreachable ();
5719     }
5720   else
5721     goto give_up;
5722 
5723   return;
5724 
5725 give_up:
5726   *vr0type = VR_VARYING;
5727   *vr0min = NULL_TREE;
5728   *vr0max = NULL_TREE;
5729 }
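
/* A few concrete cases of the classification above ([] is vr0, () is
   vr1, assuming a signed integer type):

     [0,5]   u [3,10]  -> [0,10]      overlap: extend vr0 to the hull
     [0,5]   u [8,10]  -> [0,10]      disjoint: plain hull here, but
     [MIN,5] u [8,MAX] -> ~[6,7]      when both type extremes are
                                      covered only the gap remains
     [3,10]  u ~[3,10] -> give_up     a range unioned with the same
                                      anti-range covers everything  */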
5730 
5731 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5732    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5733    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5734    possible such range.  The resulting range is not canonicalized.  */
5735 
5736 static void
5737 intersect_ranges (enum value_range_kind *vr0type,
5738 		  tree *vr0min, tree *vr0max,
5739 		  enum value_range_kind vr1type,
5740 		  tree vr1min, tree vr1max)
5741 {
5742   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5743   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5744 
5745   /* [] is vr0, () is vr1 in the following classification comments.  */
5746   if (mineq && maxeq)
5747     {
5748       /* [(  )] */
5749       if (*vr0type == vr1type)
5750 	/* Nothing to do for equal ranges.  */
5751 	;
5752       else if ((*vr0type == VR_RANGE
5753 		&& vr1type == VR_ANTI_RANGE)
5754 	       || (*vr0type == VR_ANTI_RANGE
5755 		   && vr1type == VR_RANGE))
5756 	{
5757 	  /* For anti-range with range intersection the result is empty.  */
5758 	  *vr0type = VR_UNDEFINED;
5759 	  *vr0min = NULL_TREE;
5760 	  *vr0max = NULL_TREE;
5761 	}
5762       else
5763 	gcc_unreachable ();
5764     }
5765   else if (operand_less_p (*vr0max, vr1min) == 1
5766 	   || operand_less_p (vr1max, *vr0min) == 1)
5767     {
5768       /* [ ] ( ) or ( ) [ ]
5769 	 If the ranges have an empty intersection, the result of the
5770 	 intersect operation is the range for intersecting an
5771 	 anti-range with a range or empty when intersecting two ranges.  */
5772       if (*vr0type == VR_RANGE
5773 	  && vr1type == VR_ANTI_RANGE)
5774 	;
5775       else if (*vr0type == VR_ANTI_RANGE
5776 	       && vr1type == VR_RANGE)
5777 	{
5778 	  *vr0type = vr1type;
5779 	  *vr0min = vr1min;
5780 	  *vr0max = vr1max;
5781 	}
5782       else if (*vr0type == VR_RANGE
5783 	       && vr1type == VR_RANGE)
5784 	{
5785 	  *vr0type = VR_UNDEFINED;
5786 	  *vr0min = NULL_TREE;
5787 	  *vr0max = NULL_TREE;
5788 	}
5789       else if (*vr0type == VR_ANTI_RANGE
5790 	       && vr1type == VR_ANTI_RANGE)
5791 	{
5792 	  /* If the anti-ranges are adjacent to each other merge them.  */
5793 	  if (TREE_CODE (*vr0max) == INTEGER_CST
5794 	      && TREE_CODE (vr1min) == INTEGER_CST
5795 	      && operand_less_p (*vr0max, vr1min) == 1
5796 	      && integer_onep (int_const_binop (MINUS_EXPR,
5797 						vr1min, *vr0max)))
5798 	    *vr0max = vr1max;
5799 	  else if (TREE_CODE (vr1max) == INTEGER_CST
5800 		   && TREE_CODE (*vr0min) == INTEGER_CST
5801 		   && operand_less_p (vr1max, *vr0min) == 1
5802 		   && integer_onep (int_const_binop (MINUS_EXPR,
5803 						     *vr0min, vr1max)))
5804 	    *vr0min = vr1min;
5805 	  /* Else arbitrarily take VR0.  */
5806 	}
5807     }
5808   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5809 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5810     {
5811       /* [ (  ) ] or [(  ) ] or [ (  )] */
5812       if (*vr0type == VR_RANGE
5813 	  && vr1type == VR_RANGE)
5814 	{
5815 	  /* If both are ranges the result is the inner one.  */
5816 	  *vr0type = vr1type;
5817 	  *vr0min = vr1min;
5818 	  *vr0max = vr1max;
5819 	}
5820       else if (*vr0type == VR_RANGE
5821 	       && vr1type == VR_ANTI_RANGE)
5822 	{
5823 	  /* Choose the right gap if the left one is empty.  */
5824 	  if (mineq)
5825 	    {
5826 	      if (TREE_CODE (vr1max) != INTEGER_CST)
5827 		*vr0min = vr1max;
5828 	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5829 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5830 		*vr0min
5831 		  = int_const_binop (MINUS_EXPR, vr1max,
5832 				     build_int_cst (TREE_TYPE (vr1max), -1));
5833 	      else
5834 		*vr0min
5835 		  = int_const_binop (PLUS_EXPR, vr1max,
5836 				     build_int_cst (TREE_TYPE (vr1max), 1));
5837 	    }
5838 	  /* Choose the left gap if the right one is empty.  */
5839 	  else if (maxeq)
5840 	    {
5841 	      if (TREE_CODE (vr1min) != INTEGER_CST)
5842 		*vr0max = vr1min;
5843 	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5844 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5845 		*vr0max
5846 		  = int_const_binop (PLUS_EXPR, vr1min,
5847 				     build_int_cst (TREE_TYPE (vr1min), -1));
5848 	      else
5849 		*vr0max
5850 		  = int_const_binop (MINUS_EXPR, vr1min,
5851 				     build_int_cst (TREE_TYPE (vr1min), 1));
5852 	    }
5853 	  /* Choose the anti-range if the range is effectively varying.  */
5854 	  else if (vrp_val_is_min (*vr0min)
5855 		   && vrp_val_is_max (*vr0max))
5856 	    {
5857 	      *vr0type = vr1type;
5858 	      *vr0min = vr1min;
5859 	      *vr0max = vr1max;
5860 	    }
5861 	  /* Else choose the range.  */
5862 	}
5863       else if (*vr0type == VR_ANTI_RANGE
5864 	       && vr1type == VR_ANTI_RANGE)
5865 	/* If both are anti-ranges the result is the outer one.  */
5866 	;
5867       else if (*vr0type == VR_ANTI_RANGE
5868 	       && vr1type == VR_RANGE)
5869 	{
5870 	  /* The intersection is empty.  */
5871 	  *vr0type = VR_UNDEFINED;
5872 	  *vr0min = NULL_TREE;
5873 	  *vr0max = NULL_TREE;
5874 	}
5875       else
5876 	gcc_unreachable ();
5877     }
5878   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5879 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5880     {
5881       /* ( [  ] ) or ([  ] ) or ( [  ]) */
5882       if (*vr0type == VR_RANGE
5883 	  && vr1type == VR_RANGE)
5884 	/* Choose the inner range.  */
5885 	;
5886       else if (*vr0type == VR_ANTI_RANGE
5887 	       && vr1type == VR_RANGE)
5888 	{
5889 	  /* Choose the right gap if the left is empty.  */
5890 	  if (mineq)
5891 	    {
5892 	      *vr0type = VR_RANGE;
5893 	      if (TREE_CODE (*vr0max) != INTEGER_CST)
5894 		*vr0min = *vr0max;
5895 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5896 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5897 		*vr0min
5898 		  = int_const_binop (MINUS_EXPR, *vr0max,
5899 				     build_int_cst (TREE_TYPE (*vr0max), -1));
5900 	      else
5901 		*vr0min
5902 		  = int_const_binop (PLUS_EXPR, *vr0max,
5903 				     build_int_cst (TREE_TYPE (*vr0max), 1));
5904 	      *vr0max = vr1max;
5905 	    }
5906 	  /* Choose the left gap if the right is empty.  */
5907 	  else if (maxeq)
5908 	    {
5909 	      *vr0type = VR_RANGE;
5910 	      if (TREE_CODE (*vr0min) != INTEGER_CST)
5911 		*vr0max = *vr0min;
5912 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5913 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5914 		*vr0max
5915 		  = int_const_binop (PLUS_EXPR, *vr0min,
5916 				     build_int_cst (TREE_TYPE (*vr0min), -1));
5917 	      else
5918 		*vr0max
5919 		  = int_const_binop (MINUS_EXPR, *vr0min,
5920 				     build_int_cst (TREE_TYPE (*vr0min), 1));
5921 	      *vr0min = vr1min;
5922 	    }
5923 	  /* Choose the anti-range if the range is effectively varying.  */
5924 	  else if (vrp_val_is_min (vr1min)
5925 		   && vrp_val_is_max (vr1max))
5926 	    ;
5927 	  /* Choose the anti-range if it is ~[0,0]; that range is special
5928 	     enough to deserve special-casing when vr1's range is relatively
5929 	     wide, at least for types no narrower than int.  This covers
5930 	     pointers and arguments to functions like ctz.  */
5931 	  else if (*vr0min == *vr0max
5932 		   && integer_zerop (*vr0min)
5933 		   && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5934 			>= TYPE_PRECISION (integer_type_node))
5935 		       || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5936 		   && TREE_CODE (vr1max) == INTEGER_CST
5937 		   && TREE_CODE (vr1min) == INTEGER_CST
5938 		   && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5939 		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5940 	    ;
5941 	  /* Else choose the range.  */
5942 	  else
5943 	    {
5944 	      *vr0type = vr1type;
5945 	      *vr0min = vr1min;
5946 	      *vr0max = vr1max;
5947 	    }
5948 	}
5949       else if (*vr0type == VR_ANTI_RANGE
5950 	       && vr1type == VR_ANTI_RANGE)
5951 	{
5952 	  /* If both are anti-ranges the result is the outer one.  */
5953 	  *vr0type = vr1type;
5954 	  *vr0min = vr1min;
5955 	  *vr0max = vr1max;
5956 	}
5957       else if (vr1type == VR_ANTI_RANGE
5958 	       && *vr0type == VR_RANGE)
5959 	{
5960 	  /* The intersection is empty.  */
5961 	  *vr0type = VR_UNDEFINED;
5962 	  *vr0min = NULL_TREE;
5963 	  *vr0max = NULL_TREE;
5964 	}
5965       else
5966 	gcc_unreachable ();
5967     }
5968   else if ((operand_less_p (vr1min, *vr0max) == 1
5969 	    || operand_equal_p (vr1min, *vr0max, 0))
5970 	   && operand_less_p (*vr0min, vr1min) == 1)
5971     {
5972       /* [  (  ]  ) or [  ](  ) */
5973       if (*vr0type == VR_ANTI_RANGE
5974 	  && vr1type == VR_ANTI_RANGE)
5975 	*vr0max = vr1max;
5976       else if (*vr0type == VR_RANGE
5977 	       && vr1type == VR_RANGE)
5978 	*vr0min = vr1min;
5979       else if (*vr0type == VR_RANGE
5980 	       && vr1type == VR_ANTI_RANGE)
5981 	{
5982 	  if (TREE_CODE (vr1min) == INTEGER_CST)
5983 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5984 				       build_int_cst (TREE_TYPE (vr1min), 1));
5985 	  else
5986 	    *vr0max = vr1min;
5987 	}
5988       else if (*vr0type == VR_ANTI_RANGE
5989 	       && vr1type == VR_RANGE)
5990 	{
5991 	  *vr0type = VR_RANGE;
5992 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
5993 	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5994 				       build_int_cst (TREE_TYPE (*vr0max), 1));
5995 	  else
5996 	    *vr0min = *vr0max;
5997 	  *vr0max = vr1max;
5998 	}
5999       else
6000 	gcc_unreachable ();
6001     }
6002   else if ((operand_less_p (*vr0min, vr1max) == 1
6003 	    || operand_equal_p (*vr0min, vr1max, 0))
6004 	   && operand_less_p (vr1min, *vr0min) == 1)
6005     {
6006       /* (  [  )  ] or (  )[  ] */
6007       if (*vr0type == VR_ANTI_RANGE
6008 	  && vr1type == VR_ANTI_RANGE)
6009 	*vr0min = vr1min;
6010       else if (*vr0type == VR_RANGE
6011 	       && vr1type == VR_RANGE)
6012 	*vr0max = vr1max;
6013       else if (*vr0type == VR_RANGE
6014 	       && vr1type == VR_ANTI_RANGE)
6015 	{
6016 	  if (TREE_CODE (vr1max) == INTEGER_CST)
6017 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6018 				       build_int_cst (TREE_TYPE (vr1max), 1));
6019 	  else
6020 	    *vr0min = vr1max;
6021 	}
6022       else if (*vr0type == VR_ANTI_RANGE
6023 	       && vr1type == VR_RANGE)
6024 	{
6025 	  *vr0type = VR_RANGE;
6026 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
6027 	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6028 				       build_int_cst (TREE_TYPE (*vr0min), 1));
6029 	  else
6030 	    *vr0max = *vr0min;
6031 	  *vr0min = vr1min;
6032 	}
6033       else
6034 	gcc_unreachable ();
6035     }
6036 
6037   /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
6038      the result for the intersection.  That's always a conservatively
6039      correct estimate, unless VR1 is a constant singleton range, in
6040      which case we choose that.  */
6041   if (vr1type == VR_RANGE
6042       && is_gimple_min_invariant (vr1min)
6043       && vrp_operand_equal_p (vr1min, vr1max))
6044     {
6045       *vr0type = vr1type;
6046       *vr0min = vr1min;
6047       *vr0max = vr1max;
6048     }
6049 }
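
/* A few concrete cases of the classification above ([] is vr0, () is
   vr1, assuming a signed integer type):

     [0,10] n [3,5]   -> [3,5]          the inner range wins
     [0,10] n ~[0,5]  -> [6,10]         left gap empty: take the right gap
     [0,10] n ~[3,5]  -> [0,10]         both gaps nonempty: keep the range,
                                        a conservative superset of
                                        [0,2] u [6,10]
     [0,5]  n [8,10]  -> VR_UNDEFINED   empty intersection  */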
6050 
6051 
6052 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
6053    in *VR0.  This may not be the smallest possible such range.  */
6054 
6055 void
6056 value_range::intersect_helper (value_range *vr0, const value_range *vr1)
6057 {
6058   /* If either range is VR_VARYING the other one wins.  */
6059   if (vr1->varying_p ())
6060     return;
6061   if (vr0->varying_p ())
6062     {
6063       vr0->deep_copy (vr1);
6064       return;
6065     }
6066 
6067   /* When either range is VR_UNDEFINED the resulting range is
6068      VR_UNDEFINED, too.  */
6069   if (vr0->undefined_p ())
6070     return;
6071   if (vr1->undefined_p ())
6072     {
6073       vr0->set_undefined ();
6074       return;
6075     }
6076 
6077   value_range_kind vr0type = vr0->kind ();
6078   tree vr0min = vr0->min ();
6079   tree vr0max = vr0->max ();
6080   intersect_ranges (&vr0type, &vr0min, &vr0max,
6081 		    vr1->kind (), vr1->min (), vr1->max ());
6082   /* Make sure to canonicalize the result though as the inversion of a
6083      VR_RANGE can still be a VR_RANGE.  Work on a temporary so we can
6084      fall back to vr0 when this turns things to varying.  */
6085   value_range tem;
6086   tem.set_and_canonicalize (vr0type, vr0min, vr0max);
6087   /* If that failed, use the saved original VR0.  */
6088   if (tem.varying_p ())
6089     return;
6090   vr0->update (tem.kind (), tem.min (), tem.max ());
6091 
6092   /* If the result is VR_UNDEFINED there is no need to mess with
6093      the equivalencies.  */
6094   if (vr0->undefined_p ())
6095     return;
6096 
6097   /* The resulting set of equivalences for range intersection is the union of
6098      the two sets.  */
6099   if (vr0->m_equiv && vr1->m_equiv && vr0->m_equiv != vr1->m_equiv)
6100     bitmap_ior_into (vr0->m_equiv, vr1->m_equiv);
6101   else if (vr1->m_equiv && !vr0->m_equiv)
6102     {
6103       /* All equivalence bitmaps are allocated from the same obstack.  So
6104 	 we can use the obstack associated with VR1 to allocate vr0->equiv.  */
6105       vr0->m_equiv = BITMAP_ALLOC (vr1->m_equiv->obstack);
6106       bitmap_copy (vr0->m_equiv, vr1->m_equiv);
6107     }
6108 }
6109 
6110 void
6111 value_range::intersect (const value_range *other)
6112 {
6113   if (dump_file && (dump_flags & TDF_DETAILS))
6114     {
6115       fprintf (dump_file, "Intersecting\n  ");
6116       dump_value_range (dump_file, this);
6117       fprintf (dump_file, "\nand\n  ");
6118       dump_value_range (dump_file, other);
6119       fprintf (dump_file, "\n");
6120     }
6121   intersect_helper (this, other);
6122   if (dump_file && (dump_flags & TDF_DETAILS))
6123     {
6124       fprintf (dump_file, "to\n  ");
6125       dump_value_range (dump_file, this);
6126       fprintf (dump_file, "\n");
6127     }
6128 }
6129 
6130 /* Helper for meet operation for value ranges.  Given two value ranges VR0 and
6131    VR1, return a range that contains both VR0 and VR1.  This may not be the
6132    smallest possible such range.  */
6133 
6134 value_range_base
6135 value_range_base::union_helper (const value_range_base *vr0,
6136 				const value_range_base *vr1)
6137 {
6138   /* VR0 has the resulting range if VR1 is undefined or VR0 is varying.  */
6139   if (vr1->undefined_p ()
6140       || vr0->varying_p ())
6141     return *vr0;
6142 
6143   /* VR1 has the resulting range if VR0 is undefined or VR1 is varying.  */
6144   if (vr0->undefined_p ()
6145       || vr1->varying_p ())
6146     return *vr1;
6147 
6148   value_range_kind vr0type = vr0->kind ();
6149   tree vr0min = vr0->min ();
6150   tree vr0max = vr0->max ();
6151   union_ranges (&vr0type, &vr0min, &vr0max,
6152 		vr1->kind (), vr1->min (), vr1->max ());
6153 
6154   /* Work on a temporary so we can still use vr0 when union returns varying.  */
6155   value_range tem;
6156   tem.set_and_canonicalize (vr0type, vr0min, vr0max);
6157 
6158   /* Failed to find an efficient meet.  Before giving up and setting
6159      the result to VARYING, see if we can at least derive a useful
6160      anti-range.  */
6161   if (tem.varying_p ()
6162       && range_includes_zero_p (vr0) == 0
6163       && range_includes_zero_p (vr1) == 0)
6164     {
6165       tem.set_nonnull (vr0->type ());
6166       return tem;
6167     }
6168 
6169   return tem;
6170 }
6171 
6172 
6173 /* Meet operation for value ranges.  Given two value ranges VR0 and
6174    VR1, store in VR0 a range that contains both VR0 and VR1.  This
6175    may not be the smallest possible such range.  */
6176 
6177 void
6178 value_range_base::union_ (const value_range_base *other)
6179 {
6180   if (dump_file && (dump_flags & TDF_DETAILS))
6181     {
6182       fprintf (dump_file, "Meeting\n  ");
6183       dump_value_range (dump_file, this);
6184       fprintf (dump_file, "\nand\n  ");
6185       dump_value_range (dump_file, other);
6186       fprintf (dump_file, "\n");
6187     }
6188 
6189   *this = union_helper (this, other);
6190 
6191   if (dump_file && (dump_flags & TDF_DETAILS))
6192     {
6193       fprintf (dump_file, "to\n  ");
6194       dump_value_range (dump_file, this);
6195       fprintf (dump_file, "\n");
6196     }
6197 }
6198 
6199 void
6200 value_range::union_ (const value_range *other)
6201 {
6202   if (dump_file && (dump_flags & TDF_DETAILS))
6203     {
6204       fprintf (dump_file, "Meeting\n  ");
6205       dump_value_range (dump_file, this);
6206       fprintf (dump_file, "\nand\n  ");
6207       dump_value_range (dump_file, other);
6208       fprintf (dump_file, "\n");
6209     }
6210 
6211   /* If THIS is undefined we want to pick up equivalences from OTHER.
6212      Just special-case this here rather than trying to fixup after the fact.  */
6213   if (this->undefined_p ())
6214     this->deep_copy (other);
6215   else
6216     {
6217       value_range_base tem = union_helper (this, other);
6218       this->update (tem.kind (), tem.min (), tem.max ());
6219 
6220       /* The resulting set of equivalences is always the intersection of
6221 	 the two sets.  */
6222       if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
6223 	bitmap_and_into (this->m_equiv, other->m_equiv);
6224       else if (this->m_equiv && !other->m_equiv)
6225 	bitmap_clear (this->m_equiv);
6226     }
6227 
6228   if (dump_file && (dump_flags & TDF_DETAILS))
6229     {
6230       fprintf (dump_file, "to\n  ");
6231       dump_value_range (dump_file, this);
6232       fprintf (dump_file, "\n");
6233     }
6234 }
6235 
6236 /* Visit all arguments for PHI node PHI that flow through executable
6237    edges.  If a valid value range can be derived from all the incoming
6238    value ranges, set a new range for the LHS of PHI.  */
6239 
6240 enum ssa_prop_result
6241 vrp_prop::visit_phi (gphi *phi)
6242 {
6243   tree lhs = PHI_RESULT (phi);
6244   value_range vr_result;
6245   extract_range_from_phi_node (phi, &vr_result);
6246   if (update_value_range (lhs, &vr_result))
6247     {
6248       if (dump_file && (dump_flags & TDF_DETAILS))
6249 	{
6250 	  fprintf (dump_file, "Found new range for ");
6251 	  print_generic_expr (dump_file, lhs);
6252 	  fprintf (dump_file, ": ");
6253 	  dump_value_range (dump_file, &vr_result);
6254 	  fprintf (dump_file, "\n");
6255 	}
6256 
6257       if (vr_result.varying_p ())
6258 	return SSA_PROP_VARYING;
6259 
6260       return SSA_PROP_INTERESTING;
6261     }
6262 
6263   /* Nothing changed, don't add outgoing edges.  */
6264   return SSA_PROP_NOT_INTERESTING;
6265 }
6266 
6267 class vrp_folder : public substitute_and_fold_engine
6268 {
6269  public:
6270   tree get_value (tree) FINAL OVERRIDE;
6271   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6272   bool fold_predicate_in (gimple_stmt_iterator *);
6273 
6274   class vr_values *vr_values;
6275 
6276   /* Delegators.  */
6277   tree vrp_evaluate_conditional (tree_code code, tree op0,
6278 				 tree op1, gimple *stmt)
6279     { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6280   bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6281     { return vr_values->simplify_stmt_using_ranges (gsi); }
6282   tree op_with_constant_singleton_value_range (tree op)
6283     { return vr_values->op_with_constant_singleton_value_range (op); }
6284 };
6285 
6286 /* If the statement pointed by SI has a predicate whose value can be
6287    computed using the value range information computed by VRP, compute
6288    its value and return true.  Otherwise, return false.  */
6289 
6290 bool
6291 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6292 {
6293   bool assignment_p = false;
6294   tree val;
6295   gimple *stmt = gsi_stmt (*si);
6296 
6297   if (is_gimple_assign (stmt)
6298       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6299     {
6300       assignment_p = true;
6301       val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6302 				      gimple_assign_rhs1 (stmt),
6303 				      gimple_assign_rhs2 (stmt),
6304 				      stmt);
6305     }
6306   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6307     val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6308 				    gimple_cond_lhs (cond_stmt),
6309 				    gimple_cond_rhs (cond_stmt),
6310 				    stmt);
6311   else
6312     return false;
6313 
6314   if (val)
6315     {
6316       if (assignment_p)
6317         val = fold_convert (gimple_expr_type (stmt), val);
6318 
6319       if (dump_file)
6320 	{
6321 	  fprintf (dump_file, "Folding predicate ");
6322 	  print_gimple_expr (dump_file, stmt, 0);
6323 	  fprintf (dump_file, " to ");
6324 	  print_generic_expr (dump_file, val);
6325 	  fprintf (dump_file, "\n");
6326 	}
6327 
6328       if (is_gimple_assign (stmt))
6329 	gimple_assign_set_rhs_from_tree (si, val);
6330       else
6331 	{
6332 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6333 	  gcond *cond_stmt = as_a <gcond *> (stmt);
6334 	  if (integer_zerop (val))
6335 	    gimple_cond_make_false (cond_stmt);
6336 	  else if (integer_onep (val))
6337 	    gimple_cond_make_true (cond_stmt);
6338 	  else
6339 	    gcc_unreachable ();
6340 	}
6341 
6342       return true;
6343     }
6344 
6345   return false;
6346 }
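
/* For instance, if VRP has computed the range [3, 10] for x_5, the
   predicate in

     if (x_5 > 0)

   evaluates to true here and the GIMPLE_COND is rewritten via
   gimple_cond_make_true, letting CFG cleanup remove the dead arm.  A
   comparison on the RHS of an assignment is folded to its constant
   value in the same way.  */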
6347 
6348 /* Callback for substitute_and_fold folding the stmt at *SI.  */
6349 
6350 bool
6351 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6352 {
6353   if (fold_predicate_in (si))
6354     return true;
6355 
6356   return simplify_stmt_using_ranges (si);
6357 }
6358 
6359 /* If OP has a value range with a single constant value return that,
6360    otherwise return NULL_TREE.  This returns OP itself if OP is a
6361    constant.
6362 
6363    Implemented as a pure wrapper right now, but this will change.  */
6364 
6365 tree
6366 vrp_folder::get_value (tree op)
6367 {
6368   return op_with_constant_singleton_value_range (op);
6369 }
6370 
6371 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6372    argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6373    BB.  If no such ASSERT_EXPR is found, return OP.  */
6374 
6375 static tree
6376 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6377 {
6378   imm_use_iterator imm_iter;
6379   gimple *use_stmt;
6380   use_operand_p use_p;
6381 
6382   if (TREE_CODE (op) == SSA_NAME)
6383     {
6384       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6385 	{
6386 	  use_stmt = USE_STMT (use_p);
6387 	  if (use_stmt != stmt
6388 	      && gimple_assign_single_p (use_stmt)
6389 	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6390 	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6391 	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6392 	    return gimple_assign_lhs (use_stmt);
6393 	}
6394     }
6395   return op;
6396 }
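
/* For example, given

     <bb A>: x_2 = ASSERT_EXPR <x_1, x_1 > 0>;
     ...
     <bb B>: if (x_1 < 10)   // B dominated by A

   a query on x_1 in B is redirected to x_2, whose value range also
   reflects the x_1 > 0 assertion.  */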
6397 
6398 /* A hack: hands the current vr_values to the jump threading code.  */
6399 static class vr_values *x_vr_values;
6400 
6401 /* A trivial wrapper so that we can present the generic jump threading
6402    code with a simple API for simplifying statements.  STMT is the
6403    statement we want to simplify, WITHIN_STMT provides the location
6404    for any overflow warnings.  */
6405 
6406 static tree
6407 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6408     class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
6409     basic_block bb)
6410 {
6411   /* First see if the conditional is in the hash table.  */
6412   tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6413   if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6414     return cached_lhs;
6415 
6416   vr_values *vr_values = x_vr_values;
6417   if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6418     {
6419       tree op0 = gimple_cond_lhs (cond_stmt);
6420       op0 = lhs_of_dominating_assert (op0, bb, stmt);
6421 
6422       tree op1 = gimple_cond_rhs (cond_stmt);
6423       op1 = lhs_of_dominating_assert (op1, bb, stmt);
6424 
6425       return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6426 						  op0, op1, within_stmt);
6427     }
6428 
6429   /* We simplify a switch statement by trying to determine which case label
6430      will be taken.  If we are successful then we return the corresponding
6431      CASE_LABEL_EXPR.  */
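
  /* For instance (hypothetical): if the index has the range [3, 5]
     and the single label "case 1 ... 9:" covers that entire range,
     that label's CASE_LABEL_EXPR is returned; if no label intersects
     [3, 5], the default label is returned instead.  */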
6432   if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6433     {
6434       tree op = gimple_switch_index (switch_stmt);
6435       if (TREE_CODE (op) != SSA_NAME)
6436 	return NULL_TREE;
6437 
6438       op = lhs_of_dominating_assert (op, bb, stmt);
6439 
6440       const value_range *vr = vr_values->get_value_range (op);
6441       if (vr->undefined_p ()
6442 	  || vr->varying_p ()
6443 	  || vr->symbolic_p ())
6444 	return NULL_TREE;
6445 
6446       if (vr->kind () == VR_RANGE)
6447 	{
6448 	  size_t i, j;
6449 	  /* Get the range of labels that contain a part of the operand's
6450 	     value range.  */
6451 	  find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
6452 
6453 	  /* Is there only one such label?  */
6454 	  if (i == j)
6455 	    {
6456 	      tree label = gimple_switch_label (switch_stmt, i);
6457 
6458 	      /* The i'th label will be taken only if the value range of the
6459 		 operand is entirely within the bounds of this label.  */
6460 	      if (CASE_HIGH (label) != NULL_TREE
6461 		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
6462 		     && tree_int_cst_compare (CASE_HIGH (label),
6463 					      vr->max ()) >= 0)
6464 		  : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
6465 		     && tree_int_cst_equal (vr->min (), vr->max ())))
6466 		return label;
6467 	    }
6468 
6469 	  /* If there are no such labels then the default label will be
6470 	     taken.  */
6471 	  if (i > j)
6472 	    return gimple_switch_label (switch_stmt, 0);
6473 	}
6474 
6475       if (vr->kind () == VR_ANTI_RANGE)
6476 	{
6477 	  unsigned n = gimple_switch_num_labels (switch_stmt);
6478 	  tree min_label = gimple_switch_label (switch_stmt, 1);
6479 	  tree max_label = gimple_switch_label (switch_stmt, n - 1);
6480 
6481 	  /* The default label will be taken only if the anti-range of the
6482 	     operand is entirely outside the bounds of all the (non-default)
6483 	     case labels.  */
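	  /* E.g. (hypothetical): for OP with anti-range ~[0, 255] and
	     labels "case 10:" through "case 20:", every label value
	     lies inside the excluded interval, so only the default
	     label can be reached.  */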
6484 	  if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
6485 	      && (CASE_HIGH (max_label) != NULL_TREE
6486 		  ? tree_int_cst_compare (vr->max (),
6487 					  CASE_HIGH (max_label)) >= 0
6488 		  : tree_int_cst_compare (vr->max (),
6489 					  CASE_LOW (max_label)) >= 0))
6490 	    return gimple_switch_label (switch_stmt, 0);
6491 	}
6492 
6493       return NULL_TREE;
6494     }
6495 
6496   if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6497     {
6498       tree lhs = gimple_assign_lhs (assign_stmt);
6499       if (TREE_CODE (lhs) == SSA_NAME
6500 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6501 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
6502 	  && stmt_interesting_for_vrp (stmt))
6503 	{
6504 	  edge dummy_e;
6505 	  tree dummy_tree;
6506 	  value_range new_vr;
6507 	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
6508 					      &dummy_tree, &new_vr);
6509 	  tree singleton;
6510 	  if (new_vr.singleton_p (&singleton))
6511 	    return singleton;
6512 	}
6513     }
6514 
6515   return NULL_TREE;
6516 }
6517 
6518 class vrp_dom_walker : public dom_walker
6519 {
6520 public:
6521   vrp_dom_walker (cdi_direction direction,
6522 		  class const_and_copies *const_and_copies,
6523 		  class avail_exprs_stack *avail_exprs_stack)
6524     : dom_walker (direction, REACHABLE_BLOCKS),
6525       m_const_and_copies (const_and_copies),
6526       m_avail_exprs_stack (avail_exprs_stack),
6527       m_dummy_cond (NULL) {}
6528 
6529   virtual edge before_dom_children (basic_block);
6530   virtual void after_dom_children (basic_block);
6531 
6532   class vr_values *vr_values;
6533 
6534 private:
6535   class const_and_copies *m_const_and_copies;
6536   class avail_exprs_stack *m_avail_exprs_stack;
6537 
6538   gcond *m_dummy_cond;
6539 
6540 };
6541 
6542 /* Called before processing dominator children of BB.  We want to look
6543    at ASSERT_EXPRs and record information from them in the appropriate
6544    tables.
6545 
6546    We could look at other statements here, but doing so is unlikely
6547    to significantly increase the number of jump threads we discover.  */
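
/* A sketch of the recording done below: for the hypothetical
   statement x_2 = ASSERT_EXPR <x_1, x_1 > 10>, we enter "x_1 > 10 is
   true" and its inversion "x_1 <= 10 is false" into the expression
   table, and record the copy x_2 = x_1, so that dominated blocks can
   reuse these facts while threading.  */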
6548 
6549 edge
6550 vrp_dom_walker::before_dom_children (basic_block bb)
6551 {
6552   gimple_stmt_iterator gsi;
6553 
6554   m_avail_exprs_stack->push_marker ();
6555   m_const_and_copies->push_marker ();
6556   for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6557     {
6558       gimple *stmt = gsi_stmt (gsi);
6559       if (gimple_assign_single_p (stmt)
6560          && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6561 	{
6562 	  tree rhs1 = gimple_assign_rhs1 (stmt);
6563 	  tree cond = TREE_OPERAND (rhs1, 1);
6564 	  tree inverted = invert_truthvalue (cond);
6565 	  auto_vec<cond_equivalence, 3> p;
6567 	  record_conditions (&p, cond, inverted);
6568 	  for (unsigned int i = 0; i < p.length (); i++)
6569 	    m_avail_exprs_stack->record_cond (&p[i]);
6570 
6571 	  tree lhs = gimple_assign_lhs (stmt);
6572 	  m_const_and_copies->record_const_or_copy (lhs,
6573 						    TREE_OPERAND (rhs1, 0));
6575 	  continue;
6576 	}
6577       break;
6578     }
6579   return NULL;
6580 }
6581 
6582 /* Called after processing dominator children of BB.  This is where we
6583    actually call into the threader.  */
6584 void
6585 vrp_dom_walker::after_dom_children (basic_block bb)
6586 {
6587   if (!m_dummy_cond)
6588     m_dummy_cond = gimple_build_cond (NE_EXPR,
6589 				      integer_zero_node, integer_zero_node,
6590 				      NULL, NULL);
6591 
6592   x_vr_values = vr_values;
6593   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6594 			 m_avail_exprs_stack, NULL,
6595 			 simplify_stmt_for_jump_threading);
6596   x_vr_values = NULL;
6597 
6598   m_avail_exprs_stack->pop_to_marker ();
6599   m_const_and_copies->pop_to_marker ();
6600 }
6601 
6602 /* Blocks which have more than one predecessor and more than
6603    one successor present jump threading opportunities, i.e.,
6604    when the block is reached from a specific predecessor, we
6605    may be able to determine which of the outgoing edges will
6606    be traversed.  When this optimization applies, we are able
6607    to avoid conditionals at runtime and we may expose secondary
6608    optimization opportunities.
6609 
6610    This routine is effectively a driver for the generic jump
6611    threading code.  It basically just presents the generic code
6612    with edges that may be suitable for jump threading.
6613 
6614    Unlike DOM, we do not iterate VRP if jump threading was successful.
6615    While iterating may expose new opportunities for VRP, those
6616    opportunities are expected to be very limited and not worth the
6617    significant compile-time cost of another iteration.
6618 
6619    As jump threading opportunities are discovered, they are registered
6620    for later realization.  */
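
/* A hypothetical shape of the opportunity being sought:

     bb2 ---\
             bb4:  if (x_1 > 10)   true -> bb5, false -> bb6
     bb3 ---/

   If reaching bb4 from bb2 implies x_1 > 10 (say, via a dominating
   ASSERT_EXPR on that path), the bb2->bb4 edge can be threaded
   directly to bb5, bypassing the runtime test.  */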
6621 
6622 static void
6623 identify_jump_threads (class vr_values *vr_values)
6624 {
6625   /* Ugh.  When substituting values earlier in this pass we can
6626      wipe the dominance information.  So rebuild the dominator
6627      information as we need it within the jump threading code.  */
6628   calculate_dominance_info (CDI_DOMINATORS);
6629 
6630   /* We do not allow VRP information to be used for jump threading
6631      across a back edge in the CFG.  Otherwise it becomes too
6632      difficult to avoid eliminating loop exit tests.  Of course
6633      EDGE_DFS_BACK is not accurate at this time so we have to
6634      recompute it.  */
6635   mark_dfs_back_edges ();
6636 
6637   /* Allocate our unwinder stack to unwind any temporary equivalences
6638      that might be recorded.  */
6639   const_and_copies *equiv_stack = new const_and_copies ();
6640 
6641   hash_table<expr_elt_hasher> *avail_exprs
6642     = new hash_table<expr_elt_hasher> (1024);
6643   avail_exprs_stack *avail_exprs_stack
6644     = new class avail_exprs_stack (avail_exprs);
6645 
6646   vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6647   walker.vr_values = vr_values;
6648   walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
6649 
6650   /* We do not actually update the CFG or SSA graphs at this point as
6651      ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6652      handle ASSERT_EXPRs gracefully.  */
6653   delete equiv_stack;
6654   delete avail_exprs;
6655   delete avail_exprs_stack;
6656 }
6657 
6658 /* Traverse all the blocks folding conditionals with known ranges.  */
6659 
6660 void
6661 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6662 {
6663   size_t i;
6664 
6665   /* We have completed propagating through the lattice.  */
6666   vr_values.set_lattice_propagation_complete ();
6667 
6668   if (dump_file)
6669     {
6670       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6671       vr_values.dump_all_value_ranges (dump_file);
6672       fprintf (dump_file, "\n");
6673     }
6674 
6675   /* Set range info on non-pointer SSA_NAMEs and non-null info on pointers.  */
6676   for (i = 0; i < num_ssa_names; i++)
6677     {
6678       tree name = ssa_name (i);
6679       if (!name)
6680 	continue;
6681 
6682       const value_range *vr = get_value_range (name);
6683       if (!vr->constant_p ())
6684 	continue;
6685 
6686       if (POINTER_TYPE_P (TREE_TYPE (name))
6687 	  && range_includes_zero_p (vr) == 0)
6688 	set_ptr_nonnull (name);
6689       else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6690 	set_range_info (name, *vr);
6691     }
6692 
6693   /* If we're checking array refs, we want to merge information on
6694      the executability of each edge between vrp_folder and the
6695      check_array_bounds_dom_walker: each can clear the
6696      EDGE_EXECUTABLE flag on edges, in different ways.
6697 
6698      Hence, if we're going to call check_all_array_refs, set
6699      the flag on every edge now, rather than in
6700      check_array_bounds_dom_walker's ctor; vrp_folder may clear
6701      it from some edges.  */
6702   if (warn_array_bounds && warn_array_bounds_p)
6703     set_all_edges_as_executable (cfun);
6704 
6705   class vrp_folder vrp_folder;
6706   vrp_folder.vr_values = &vr_values;
6707   vrp_folder.substitute_and_fold ();
6708 
6709   if (warn_array_bounds && warn_array_bounds_p)
6710     check_all_array_refs ();
6711 }
6712 
6713 /* Main entry point to VRP (Value Range Propagation).  This pass is
6714    loosely based on J. R. C. Patterson, ``Accurate Static Branch
6715    Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6716    Programming Language Design and Implementation, pp. 67-78, 1995.
6717    Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6718 
6719    This is essentially an SSA-CCP pass modified to deal with ranges
6720    instead of constants.
6721 
6722    While propagating ranges, we may find that two or more SSA names
6723    have equivalent, though distinct ranges.  For instance,
6724 
6725      1	x_9 = p_3->a;
6726      2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6727      3	if (p_4 == q_2)
6728      4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6729      5	endif
6730      6	if (q_2)
6731 
6732    In the code above, pointer p_5 has range [q_2, q_2], but from the
6733    code we can also determine that p_5 cannot be NULL and, if q_2 had
6734    a non-varying range, p_5's range should also be compatible with it.
6735 
6736    These equivalences are created by two expressions: ASSERT_EXPR and
6737    copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
6738    result of another assertion, then we can use the fact that p_5 and
6739    p_4 are equivalent when evaluating p_5's range.
6740 
6741    Together with value ranges, we also propagate these equivalences
6742    between names so that we can take advantage of information from
6743    multiple ranges when doing final replacement.  Note that this
6744    equivalency relation is transitive but not symmetric.
6745 
6746    In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6747    cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6748    in contexts where that assertion does not hold (e.g., in line 6).
6749 
6750    TODO: the main difference between this pass and Patterson's is that
6751    we do not propagate edge probabilities.  We only compute whether
6752    edges can be taken or not.  That is, instead of having a spectrum
6753    of jump probabilities between 0 and 1, we only deal with 0, 1 and
6754    DON'T KNOW.  In the future, it may be worthwhile to propagate
6755    probabilities to aid branch prediction.  */
6756 
6757 static unsigned int
6758 execute_vrp (bool warn_array_bounds_p)
6759 {
6761   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6762   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6763   scev_initialize ();
6764 
6765   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
6766      Inserting assertions may split edges which will invalidate
6767      EDGE_DFS_BACK.  */
6768   insert_range_assertions ();
6769 
6770   threadedge_initialize_values ();
6771 
6772   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
6773   mark_dfs_back_edges ();
6774 
6775   class vrp_prop vrp_prop;
6776   vrp_prop.vrp_initialize ();
6777   vrp_prop.ssa_propagate ();
6778   vrp_prop.vrp_finalize (warn_array_bounds_p);
6779 
6780   /* We must identify jump threading opportunities before we release
6781      the datastructures built by VRP.  */
6782   identify_jump_threads (&vrp_prop.vr_values);
6783 
6784   /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6785      was set by a type conversion can often be rewritten to use the
6786      RHS of the type conversion.
6787 
6788      However, doing so inhibits jump threading through the comparison.
6789      So that transformation is not performed until after jump threading
6790      is complete.  */
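  /* For instance (hypothetical GIMPLE):

       _1 = (int) c_2;
       if (_1 == 5)          -->   if (c_2 == 5)

     when c_2's recorded range shows the conversion cannot change the
     outcome of the comparison.  */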
6791   basic_block bb;
6792   FOR_EACH_BB_FN (bb, cfun)
6793     {
6794       gimple *last = last_stmt (bb);
6795       if (last && gimple_code (last) == GIMPLE_COND)
6796 	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6797     }
6798 
6799   free_numbers_of_iterations_estimates (cfun);
6800 
6801   /* ASSERT_EXPRs must be removed before finalizing jump threads
6802      as finalizing jump threads calls the CFG cleanup code which
6803      does not properly handle ASSERT_EXPRs.  */
6804   remove_range_assertions ();
6805 
6806   /* If we exposed any new variables, go ahead and put them into
6807      SSA form now, before we handle jump threading.  This simplifies
6808      interactions between rewriting of _DECL nodes into SSA form
6809      and rewriting SSA_NAME nodes into SSA form after block
6810      duplication and CFG manipulation.  */
6811   update_ssa (TODO_update_ssa);
6812 
6813   /* We identified all the jump threading opportunities earlier, but could
6814      not transform the CFG at that time.  This routine transforms the
6815      CFG and arranges for the dominator tree to be rebuilt if necessary.
6816 
6817      Note the SSA graph update will occur during the normal TODO
6818      processing by the pass manager.  */
6819   thread_through_all_blocks (false);
6820 
6821   vrp_prop.vr_values.cleanup_edges_and_switches ();
6822   threadedge_finalize_values ();
6823 
6824   scev_finalize ();
6825   loop_optimizer_finalize ();
6826   return 0;
6827 }
6828 
6829 namespace {
6830 
6831 const pass_data pass_data_vrp =
6832 {
6833   GIMPLE_PASS, /* type */
6834   "vrp", /* name */
6835   OPTGROUP_NONE, /* optinfo_flags */
6836   TV_TREE_VRP, /* tv_id */
6837   PROP_ssa, /* properties_required */
6838   0, /* properties_provided */
6839   0, /* properties_destroyed */
6840   0, /* todo_flags_start */
6841   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6842 };
6843 
6844 class pass_vrp : public gimple_opt_pass
6845 {
6846 public:
6847   pass_vrp (gcc::context *ctxt)
6848     : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6849   {}
6850 
6851   /* opt_pass methods: */
6852   opt_pass * clone () { return new pass_vrp (m_ctxt); }
6853   void set_pass_param (unsigned int n, bool param)
6854     {
6855       gcc_assert (n == 0);
6856       warn_array_bounds_p = param;
6857     }
6858   virtual bool gate (function *) { return flag_tree_vrp != 0; }
6859   virtual unsigned int execute (function *)
6860     { return execute_vrp (warn_array_bounds_p); }
6861 
6862  private:
6863   bool warn_array_bounds_p;
6864 }; // class pass_vrp
6865 
6866 } // anon namespace
6867 
6868 gimple_opt_pass *
6869 make_pass_vrp (gcc::context *ctxt)
6870 {
6871   return new pass_vrp (ctxt);
6872 }
6873 
6874 
6875 /* Worker for determine_value_range.  */
6876 
6877 static void
6878 determine_value_range_1 (value_range_base *vr, tree expr)
6879 {
6880   if (BINARY_CLASS_P (expr))
6881     {
6882       value_range_base vr0, vr1;
6883       determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6884       determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
6885       extract_range_from_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6886 				      &vr0, &vr1);
6887     }
6888   else if (UNARY_CLASS_P (expr))
6889     {
6890       value_range_base vr0;
6891       determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6892       extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6893 				     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
6894     }
6895   else if (TREE_CODE (expr) == INTEGER_CST)
6896     vr->set (expr);
6897   else
6898     {
6899       value_range_kind kind;
6900       wide_int min, max;
6901       /* For SSA names try to extract range info computed by VRP.  Otherwise
6902 	 fall back to varying.  */
6903       if (TREE_CODE (expr) == SSA_NAME
6904 	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
6905 	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
6906 	vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
6907 		 wide_int_to_tree (TREE_TYPE (expr), max));
6908       else
6909 	vr->set_varying ();
6910     }
6911 }
6912 
6913 /* Compute a value-range for EXPR and set it in *MIN and *MAX.  Return
6914    the determined range kind.  */
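
/* A usage sketch (hypothetical values): for EXPR "i_1 + 4" where
   range info recorded i_1 as [0, 10], this returns VR_RANGE with
   *MIN = 4 and *MAX = 14; when nothing useful is known, it returns
   VR_VARYING and leaves *MIN and *MAX untouched.  */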
6915 
6916 value_range_kind
6917 determine_value_range (tree expr, wide_int *min, wide_int *max)
6918 {
6919   value_range_base vr;
6920   determine_value_range_1 (&vr, expr);
6921   if (vr.constant_p ())
6922     {
6923       *min = wi::to_wide (vr.min ());
6924       *max = wi::to_wide (vr.max ());
6925       return vr.kind ();
6926     }
6927 
6928   return VR_VARYING;
6929 }
6930