/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "calls.h"
#include "intl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"


/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

struct bounds
{
  mpz_t below, up;
};

/* Splits expression EXPR into a variable part VAR and a constant OFFSET.  */
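/* For example, "a_5 + 16" splits to VAR = a_5, OFFSET = 16, and
   "a_5 - 16" to VAR = a_5, OFFSET = -16; an expression matching none of
   the cases below is returned whole in VAR, with OFFSET = 0.  */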

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
	break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (op1, offset, SIGNED);
      if (negate)
	mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (expr, offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}

/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored to BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
				tree c0, enum tree_code cmp, tree c1,
				mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  wide_int minv, maxv;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
	 such a guard is unlikely to appear, so we do not bother with
	 handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
	 except for cases of comparing with bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same
	 type as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
	{
	  mpz_t valc1;

	  /* Case of comparing VAR with its below/up bounds.  */
	  mpz_init (valc1);
	  wi::to_mpz (c1, valc1, TYPE_SIGN (type));
	  if (mpz_cmp (valc1, below) == 0)
	    cmp = GT_EXPR;
	  if (mpz_cmp (valc1, up) == 0)
	    cmp = LT_EXPR;

	  mpz_clear (valc1);
	}
      else
	{
	  /* Case of comparing with the bounds of the type.  */
	  wide_int min = wi::min_value (type);
	  wide_int max = wi::max_value (type);

	  if (wi::eq_p (c1, min))
	    cmp = GT_EXPR;
	  if (wi::eq_p (c1, max))
	    cmp = LT_EXPR;
	}

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
	return;

      break;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
      wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
	   && INTEGRAL_TYPE_P (type)
	   && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
    {
      gcc_assert (wi::le_p (minv, maxv, sgn));
      wi::to_mpz (minv, minc1, sgn);
      wi::to_mpz (maxv, maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
	   || mpz_sgn (offc1) == 0
	   || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
	   || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }

  /* Compute range information for varc0.  If there is no overflow,
     the condition implies that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the lower bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check the
     following four cases:
       1) cmp == LE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

	  In this case, (varc0 + offc0) will never underflow if we can
	  prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

	  In this case, (varc0 + offc0) will never overflow if we can
	  prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle cases 2 and 4 in the code below.  */
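
  /* For illustration, in an 8-bit unsigned type a guard VAR - 1 <= C
     is case 2 with offc0 == -1: it implies VAR <= C + 1 provided
     C + 1 does not wrap, which the check on maxc1 below verifies
     before the upper bound UP is tightened.  */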

  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
	   || mpz_sgn (offc0) == 0
	   || (cmp == LE_EXPR
	       && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
	   || (cmp == GE_EXPR
	       && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
	mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
	mpz_set (below, minc1);
    }

end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
		       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
	 PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  wide_int minc, maxc;
	  if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
	      && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
		  == VR_RANGE))
	    {
	      if (rtype != VR_RANGE)
		{
		  rtype = VR_RANGE;
		  minv = minc;
		  maxv = maxc;
		}
	      else
		{
		  minv = wi::max (minv, minc, sgn);
		  maxv = wi::min (maxv, maxc, sgn);
		  /* If the PHI result range is inconsistent with
		     the VAR range, give up on looking at the PHI
		     results.  This can happen if VR_UNDEFINED is
		     involved.  */
		  if (wi::gt_p (minv, maxv, sgn))
		    {
		      rtype = get_range_info (var, &minv, &maxv);
		      break;
		    }
		}
	    }
	}
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
	{
	  mpz_set (minm, min);
	  mpz_set (maxm, max);
	}
      else
	{
	  gcc_assert (wi::le_p (minv, maxv, sgn));
	  wi::to_mpz (minv, minm, sgn);
	  wi::to_mpz (maxv, maxm, sgn);
	}
      /* Now walk the dominators of the loop header and use the entry
	 guards to refine the estimates.  */
      for (bb = loop->header;
	   bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
	   bb = get_immediate_dominator (CDI_DOMINATORS, bb))
	{
	  edge e;
	  tree c0, c1;
	  gimple *cond;
	  enum tree_code cmp;

	  if (!single_pred_p (bb))
	    continue;
	  e = single_pred_edge (bb);

	  if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	    continue;

	  cond = last_stmt (e->src);
	  c0 = gimple_cond_lhs (cond);
	  cmp = gimple_cond_code (cond);
	  c1 = gimple_cond_rhs (cond);

	  if (e->flags & EDGE_FALSE_VALUE)
	    cmp = invert_tree_comparison (cmp, false);

	  refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
	  ++cnt;
	}

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or off is zero, then this
	 is always fine.  If off is negative and minv + off isn't
	 smaller than type's minimum, or off is positive and
	 maxv + off isn't bigger than type's maximum, use the more
	 precise range too.  */
      if (nowrap_type_p (type)
	  || mpz_sgn (off) == 0
	  || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
	  || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
	{
	  mpz_set (min, minm);
	  mpz_set (max, maxm);
	  mpz_clear (minm);
	  mpz_clear (maxm);
	  return;
	}
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
				    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
	  them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
	  expressions are var + X - M and var + Y, where M is the range of
	  the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
	  is X - Y - M.
       Therefore, if the arithmetic in TYPE does not overflow, the bounds
       are (X - Y, X - Y); otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */
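
  /* As a concrete illustration, take an 8-bit unsigned type (M = 256)
     with X = 200 and Y = 1: the difference is 199 if neither or both
     of the additions wrap, and 199 - 256 = -57 if only var + X wraps,
     so when the type may wrap the bounds computed below are
     (-57, 199).  */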

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
	mpz_sub (bnds->below, bnds->below, m);
      else
	mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
			   tree vary, mpz_t offy,
			   tree c0, enum tree_code cmp, tree c1,
			   bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
	 a guard is unlikely to appear, so we do not bother with handling
	 it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except
	 for the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same type
	 as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
	  && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
	{
	  cmp = GT_EXPR;
	  break;
	}
      if (TYPE_MAX_VALUE (type)
	  && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
	{
	  cmp = LT_EXPR;
	  break;
	}

      return;
    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
	VARX + OFFC0 overflows, but VARX + OFFX does not.
	This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
	VARY + OFFC1 underflows and VARY + OFFY does not.
	This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
	      || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
	      || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
	mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
	{
	  mpz_neg (bnd, bnd);
	  if (mpz_cmp (bnds->below, bnd) < 0)
	    mpz_set (bnds->below, bnd);
	}
      else
	{
	  if (mpz_cmp (bnd, bnds->up) < 0)
	    mpz_set (bnds->up, bnd);
	}
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
	 offsets.  Matters are a bit more complicated in case the
	 addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
	 estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
	cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
				 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */
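/* For example, inverse (3, 15) squares and multiplies
   tree_floor_log2 (15) = 3 times, computing 3^7 = 2187 == 11 (mod 16);
   indeed 3 * 11 == 33 == 1 (mod 16).  This works because every odd X
   satisfies X^(2^(s-1)) == 1 (mod 2^s), so X^(2^(s-1) - 1) is the
   inverse of X.  */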

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
	{
	  irslt *= ix;
	  ix *= ix;
	}
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
	{
	  rslt = int_const_binop (MULT_EXPR, rslt, x);
	  x = int_const_binop (MULT_EXPR, x, x);
	}
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable
   eventually reaches the value C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
			     bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
		       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
	  && TREE_CODE (s) == INTEGER_CST
	  && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
	  && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
	 the induction variable overflows (unless the loop is exited in some
	 other way before).  Note that the actual induction variable in the
	 loop (which ranges from base to final instead of from 0 to C) may
	 overflow, in which case BNDS.up will not be giving a correct upper
	 bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
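
  /* E.g., in an 8-bit type with S = 4, the iv S * i takes at most
     2^(8 - ctz (4)) = 64 distinct values before it starts repeating,
     so the mask computed below bounds the number of iterations
     by 63.  */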
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
	 the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
	wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
	mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and possibly
   set NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
			 tree final, struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;
  tree e;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
			fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv->base),
		       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, final),
		       fold_convert (niter_type, iv->base));
    }
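
  /* E.g., for "i = 10; i != 2; i -= 2" this yields S = 2 and C = 8;
     the code below then computes 8 / 2 = 4 iterations.  */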

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
			       exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* Compute no-overflow information for the control iv.  Note we are
     handling NE_EXPR; if the iv base equals the final value, the loop
     exits immediately, and the iv does not overflow.  */
  if (tree_int_cst_sign_bit (iv->step))
    e = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
  else
    e = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
  e = simplify_using_initial_conditions (loop, e);
  if (integer_onep (e)
      && (integer_onep (s)
	  || (TREE_CODE (c) == INTEGER_CST
	      && TREE_CODE (s) == INTEGER_CST
	      && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)))
    {
      niter->control.no_overflow = true;
    }

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
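
  /* For illustration, in an 8-bit type with s = 6 and c = 10, d = 2 and
     the loop below computes (10 / 2) * inverse (3) == 5 * 43 == 215
     == 87 (mod 128) iterations; indeed 6 * 87 == 522 == 10 (mod 256).  */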
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
			       (TYPE_PRECISION (niter_type)
				- tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
			       build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
	 assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
				assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */
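/* For example, "i = 0; i < 7; i += 3" has DELTA = 7 and STEP = 3, so
   MOD = 3 - (7 mod 3) = 2; moving the final value up to 7 + 2 = 9 turns
   the exit condition into i != 9, and the loop rolls 9 / 3 = 3 times.  */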

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
			       struct tree_niter_desc *niter,
			       tree *delta, tree step,
			       bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (mod, mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
	    (iv0->no_overflow && integer_nonzerop (iv0->step))
	    || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (MINUS_EXPR, type1,
			       TYPE_MAX_VALUE (type1), tmod);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build_pointer_plus (iv1->base, tmod));
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build2 (PLUS_EXPR, type1,
					   iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base - MOD <= iv1->base. */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type1), tmod);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build_pointer_plus (iv0->base,
						       fold_build1 (NEGATE_EXPR,
								    type1, tmod)),
			      iv1->base);
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build2 (MINUS_EXPR, type1,
					   iv0->base, tmod),
			      iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions,
				      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				      niter->may_be_zero,
				      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
	return true;

      /* If iv0->base is a constant, we can determine the last value before
	 overflow precisely; otherwise we conservatively assume
	 MAX - STEP + 1.  */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, TYPE_MAX_VALUE (type)),
			   fold_convert (niter_type, iv0->base));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
			   TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
				iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
	return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, iv1->base),
			   fold_convert (niter_type, TYPE_MIN_VALUE (type)));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
			   TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
				iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */
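
  /* For instance, in an 8-bit unsigned type with step 4 the formula is
     only valid for -3 <= iv1->base - iv0->base <= 252: with a difference
     of 254 the iv steps from 252 back to 0 without ever reaching a value
     >= 254, so the loop does not terminate.  */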

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
		   /* For pointers, only values lying inside a single object
		      can be compared or manipulated by pointer arithmetic.
		      GCC in general does not allow or handle objects larger
		      than half of the address space, hence the upper bound
		      is satisfied for pointers.  */
		   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
			  iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
	 0 address never belongs to any object, we can assume this for
	 pointers.  */
      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type), diff);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	}

      /* And then we can compute iv0->base - diff, and compare it with
	 iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
			  iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MAX_VALUE (type), diff);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	}

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
					niter->may_be_zero, mbz);
    }
}

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv1->base),
		       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

	 or

	 for (i = iv1->base; i > iv0->base; i--).

	 In both cases # of iterations is iv1->base - iv0->base, assuming that
	 iv1->base >= iv0->base.

	 First try to derive a lower bound on the value of
	 iv1->base - iv0->base, computed in full precision.  If the difference
	 is nonnegative, we are done, otherwise we must record the
	 condition.  */

      if (mpz_sgn (bnds->below) < 0)
	niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
					  iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
				     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
			 fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
				     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
	 zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
				      delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
		   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (step, mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */
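
  /* E.g., "for (i = 0; i <= 10; i++)" is handled below as
     "for (i = 0; i < 11; i++)".  The assumption recorded first (when
     needed) says that the bound 10 is not the maximal value of the
     type, so that adding 1 to it cannot wrap.  */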

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv1->base, TYPE_MAX_VALUE (type));
      else
	assumption = fold_build2 (NE_EXPR, boolean_type_node,
				  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
	return false;
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
	iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
	iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
				 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
			     iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
				  bnds);
}

/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (dump_file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on the left-hand side of the
   comparison is IV0, the one on the right-hand side is IV1.  Both induction
   variables must have type TYPE, which must be an integer or pointer type.
   The steps of the ivs must be constants (or NULL_TREE, which is interpreted
   as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
			   tree type, affine_iv *iv0, enum tree_code code,
			   affine_iv *iv1, struct tree_niter_desc *niter,
			   bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

1597   /* If the test is not executed every iteration, wrapping may make the test
1598      pass again.
1599      TODO: the overflow case could still be used as an unreliable estimate of
1600      the upper bound.  But we have no API to pass it down to the number of
1601      iterations code and, at present, it would not use it anyway.  */
1602   if (!every_iteration
1603       && (!iv0->no_overflow || !iv1->no_overflow
1604 	  || code == NE_EXPR || code == EQ_EXPR))
1605     return false;
1606 
1607   /* The meaning of these assumptions is this:
1608      if !assumptions
1609        then the rest of information does not have to be valid
1610      if may_be_zero then the loop does not roll, even if
1611        niter != 0.  */
1612   niter->assumptions = boolean_true_node;
1613   niter->may_be_zero = boolean_false_node;
1614   niter->niter = NULL_TREE;
1615   niter->max = 0;
1616   niter->bound = NULL_TREE;
1617   niter->cmp = ERROR_MARK;
1618 
1619   /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1620      the control variable is on lhs.  */
1621   if (code == GE_EXPR || code == GT_EXPR
1622       || (code == NE_EXPR && integer_zerop (iv0->step)))
1623     {
1624       std::swap (iv0, iv1);
1625       code = swap_tree_comparison (code);
1626     }
1627 
1628   if (POINTER_TYPE_P (type))
1629     {
1630       /* Comparison of pointers is undefined unless both iv0 and iv1 point
1631 	 to the same object.  If they do, the control variable cannot wrap
1632 	 (as wrapping around the bounds of memory will never return a pointer
1633 	 that would be guaranteed to point to the same object, even if we
1634 	 avoid undefined behavior by casting to size_t and back).  */
1635       iv0->no_overflow = true;
1636       iv1->no_overflow = true;
1637     }
1638 
1639   /* If the control induction variable does not overflow and the only exit
1640      from the loop is the one that we analyze, we know it must be taken
1641      eventually.  */
1642   if (only_exit)
1643     {
1644       if (!integer_zerop (iv0->step) && iv0->no_overflow)
1645 	exit_must_be_taken = true;
1646       else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1647 	exit_must_be_taken = true;
1648     }
1649 
1650   /* We can handle the case when neither of the sides of the comparison is
1651      invariant, provided that the test is NE_EXPR.  This rarely occurs in
1652      practice, but it is simple enough to manage.  */
1653   if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1654     {
1655       tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1656       if (code != NE_EXPR)
1657 	return false;
1658 
1659       iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
1660 					   iv0->step, iv1->step);
1661       iv0->no_overflow = false;
1662       iv1->step = build_int_cst (step_type, 0);
1663       iv1->no_overflow = true;
1664     }
1665 
1666   /* If the result of the comparison is a constant, the loop is weird.  More
1667      precise handling would be possible, but the situation is not common enough
1668      to waste time on it.  */
1669   if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1670     return false;
1671 
1672   /* Ignore loops of while (i-- < 10) type.  */
1673   if (code != NE_EXPR)
1674     {
1675       if (iv0->step && tree_int_cst_sign_bit (iv0->step))
1676 	return false;
1677 
1678       if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
1679 	return false;
1680     }
1681 
1682   /* If the loop exits immediately, there is nothing to do.  */
1683   tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1684   if (tem && integer_zerop (tem))
1685     {
1686       niter->niter = build_int_cst (unsigned_type_for (type), 0);
1687       niter->max = 0;
1688       return true;
1689     }
1690 
1691   /* OK, now we know we have a sensible loop.  Handle several cases, depending
1692      on what comparison operator is used.  */
1693   bound_difference (loop, iv1->base, iv0->base, &bnds);
1694 
1695   if (dump_file && (dump_flags & TDF_DETAILS))
1696     {
1697       fprintf (dump_file,
1698 	       "Analyzing # of iterations of loop %d\n", loop->num);
1699 
1700       fprintf (dump_file, "  exit condition ");
1701       dump_affine_iv (dump_file, iv0);
1702       fprintf (dump_file, " %s ",
1703 	       code == NE_EXPR ? "!="
1704 	       : code == LT_EXPR ? "<"
1705 	       : "<=");
1706       dump_affine_iv (dump_file, iv1);
1707       fprintf (dump_file, "\n");
1708 
1709       fprintf (dump_file, "  bounds on difference of bases: ");
1710       mpz_out_str (dump_file, 10, bnds.below);
1711       fprintf (dump_file, " ... ");
1712       mpz_out_str (dump_file, 10, bnds.up);
1713       fprintf (dump_file, "\n");
1714     }
1715 
1716   switch (code)
1717     {
1718     case NE_EXPR:
1719       gcc_assert (integer_zerop (iv1->step));
1720       ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1721 				     exit_must_be_taken, &bnds);
1722       break;
1723 
1724     case LT_EXPR:
1725       ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1726 				     exit_must_be_taken, &bnds);
1727       break;
1728 
1729     case LE_EXPR:
1730       ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1731 				     exit_must_be_taken, &bnds);
1732       break;
1733 
1734     default:
1735       gcc_unreachable ();
1736     }
1737 
1738   mpz_clear (bnds.up);
1739   mpz_clear (bnds.below);
1740 
1741   if (dump_file && (dump_flags & TDF_DETAILS))
1742     {
1743       if (ret)
1744 	{
1745 	  fprintf (dump_file, "  result:\n");
1746 	  if (!integer_nonzerop (niter->assumptions))
1747 	    {
1748 	      fprintf (dump_file, "    under assumptions ");
1749 	      print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1750 	      fprintf (dump_file, "\n");
1751 	    }
1752 
1753 	  if (!integer_zerop (niter->may_be_zero))
1754 	    {
1755 	      fprintf (dump_file, "    zero if ");
1756 	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1757 	      fprintf (dump_file, "\n");
1758 	    }
1759 
1760 	  fprintf (dump_file, "    # of iterations ");
1761 	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1762 	  fprintf (dump_file, ", bounded by ");
1763 	  print_decu (niter->max, dump_file);
1764 	  fprintf (dump_file, "\n");
1765 	}
1766       else
1767 	fprintf (dump_file, "  failed\n\n");
1768     }
1769   return ret;
1770 }
1771 
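/* Worked example (editorial addition): for the canonical loop

     for (i = 0; i < n; i++)
       body ();

   the exit condition compares iv0 = [0, + , 1] with the invariant iv1 = n
   using LT_EXPR, so number_of_iterations_lt sets, roughly,

     niter->niter       = (unsigned) n - 0
     niter->may_be_zero = n <= 0

   (assuming a signed n); the assumptions remain true because i cannot
   overflow before reaching n.  */
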
1772 /* Substitute NEW_TREE for OLD in EXPR and fold the result.  */
1773 
1774 static tree
1775 simplify_replace_tree (tree expr, tree old, tree new_tree)
1776 {
1777   unsigned i, n;
1778   tree ret = NULL_TREE, e, se;
1779 
1780   if (!expr)
1781     return NULL_TREE;
1782 
1783   /* Do not bother to replace constants.  */
1784   if (CONSTANT_CLASS_P (old))
1785     return expr;
1786 
1787   if (expr == old
1788       || operand_equal_p (expr, old, 0))
1789     return unshare_expr (new_tree);
1790 
1791   if (!EXPR_P (expr))
1792     return expr;
1793 
1794   n = TREE_OPERAND_LENGTH (expr);
1795   for (i = 0; i < n; i++)
1796     {
1797       e = TREE_OPERAND (expr, i);
1798       se = simplify_replace_tree (e, old, new_tree);
1799       if (e == se)
1800 	continue;
1801 
1802       if (!ret)
1803 	ret = copy_node (expr);
1804 
1805       TREE_OPERAND (ret, i) = se;
1806     }
1807 
1808   return (ret ? fold (ret) : expr);
1809 }
1810 
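/* Usage sketch (editorial): with EXPR = a + b * 2, OLD = b and NEW_TREE = 4,
   the recursion rebuilds only the subtrees that contain OLD and folds the
   result, yielding a + 8.  Subtrees without an occurrence of OLD are shared
   rather than copied.  */
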
1811 /* Expand definitions of ssa names in EXPR as long as they are simple
1812    enough, and return the new expression.  If STOP is specified, stop
1813    expanding if EXPR equals it.  */
1814 
1815 tree
1816 expand_simple_operations (tree expr, tree stop)
1817 {
1818   unsigned i, n;
1819   tree ret = NULL_TREE, e, ee, e1;
1820   enum tree_code code;
1821   gimple *stmt;
1822 
1823   if (expr == NULL_TREE)
1824     return expr;
1825 
1826   if (is_gimple_min_invariant (expr))
1827     return expr;
1828 
1829   code = TREE_CODE (expr);
1830   if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1831     {
1832       n = TREE_OPERAND_LENGTH (expr);
1833       for (i = 0; i < n; i++)
1834 	{
1835 	  e = TREE_OPERAND (expr, i);
1836 	  ee = expand_simple_operations (e, stop);
1837 	  if (e == ee)
1838 	    continue;
1839 
1840 	  if (!ret)
1841 	    ret = copy_node (expr);
1842 
1843 	  TREE_OPERAND (ret, i) = ee;
1844 	}
1845 
1846       if (!ret)
1847 	return expr;
1848 
1849       fold_defer_overflow_warnings ();
1850       ret = fold (ret);
1851       fold_undefer_and_ignore_overflow_warnings ();
1852       return ret;
1853     }
1854 
1855   /* Stop if EXPR is not an SSA name or is the one we do not want to expand.  */
1856   if (TREE_CODE (expr) != SSA_NAME || expr == stop)
1857     return expr;
1858 
1859   stmt = SSA_NAME_DEF_STMT (expr);
1860   if (gimple_code (stmt) == GIMPLE_PHI)
1861     {
1862       basic_block src, dest;
1863 
1864       if (gimple_phi_num_args (stmt) != 1)
1865 	return expr;
1866       e = PHI_ARG_DEF (stmt, 0);
1867 
1868       /* Avoid propagating through loop exit phi nodes, which
1869 	 could break loop-closed SSA form restrictions.  */
1870       dest = gimple_bb (stmt);
1871       src = single_pred (dest);
1872       if (TREE_CODE (e) == SSA_NAME
1873 	  && src->loop_father != dest->loop_father)
1874 	return expr;
1875 
1876       return expand_simple_operations (e, stop);
1877     }
1878   if (gimple_code (stmt) != GIMPLE_ASSIGN)
1879     return expr;
1880 
1881   /* Avoid expanding to expressions that contain SSA names that need
1882      to take part in abnormal coalescing.  */
1883   ssa_op_iter iter;
1884   FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
1885     if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
1886       return expr;
1887 
1888   e = gimple_assign_rhs1 (stmt);
1889   code = gimple_assign_rhs_code (stmt);
1890   if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1891     {
1892       if (is_gimple_min_invariant (e))
1893 	return e;
1894 
1895       if (code == SSA_NAME)
1896 	return expand_simple_operations (e, stop);
1897 
1898       return expr;
1899     }
1900 
1901   switch (code)
1902     {
1903     CASE_CONVERT:
1904       /* Casts are simple.  */
1905       ee = expand_simple_operations (e, stop);
1906       return fold_build1 (code, TREE_TYPE (expr), ee);
1907 
1908     case PLUS_EXPR:
1909     case MINUS_EXPR:
1910       if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
1911 	  && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
1912 	return expr;
1913       /* Fallthru.  */
1914     case POINTER_PLUS_EXPR:
1915       /* And increments and decrements by a constant are simple.  */
1916       e1 = gimple_assign_rhs2 (stmt);
1917       if (!is_gimple_min_invariant (e1))
1918 	return expr;
1919 
1920       ee = expand_simple_operations (e, stop);
1921       return fold_build2 (code, TREE_TYPE (expr), ee, e1);
1922 
1923     default:
1924       return expr;
1925     }
1926 }
1927 
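/* Usage sketch (editorial): given the GIMPLE sequence

     b_2 = a_1 + 1;
     c_3 = (long) b_2;

   expand_simple_operations (c_3, NULL_TREE) follows the defining statements
   and returns the folded tree (long) (a_1 + 1); expansion stops at a_1,
   whose definition is not a copy, cast, or invariant increment, and at STOP
   if one is given.  */
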
1928 /* Tries to simplify EXPR using the condition COND.  Returns the simplified
1929    expression (or EXPR unchanged, if no simplification was possible).  */
1930 
1931 static tree
1932 tree_simplify_using_condition_1 (tree cond, tree expr, tree stop)
1933 {
1934   bool changed;
1935   tree e, te, e0, e1, e2, notcond;
1936   enum tree_code code = TREE_CODE (expr);
1937 
1938   if (code == INTEGER_CST)
1939     return expr;
1940 
1941   if (code == TRUTH_OR_EXPR
1942       || code == TRUTH_AND_EXPR
1943       || code == COND_EXPR)
1944     {
1945       changed = false;
1946 
1947       e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0), stop);
1948       if (TREE_OPERAND (expr, 0) != e0)
1949 	changed = true;
1950 
1951       e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1), stop);
1952       if (TREE_OPERAND (expr, 1) != e1)
1953 	changed = true;
1954 
1955       if (code == COND_EXPR)
1956 	{
1957 	  e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2), stop);
1958 	  if (TREE_OPERAND (expr, 2) != e2)
1959 	    changed = true;
1960 	}
1961       else
1962 	e2 = NULL_TREE;
1963 
1964       if (changed)
1965 	{
1966 	  if (code == COND_EXPR)
1967 	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1968 	  else
1969 	    expr = fold_build2 (code, boolean_type_node, e0, e1);
1970 	}
1971 
1972       return expr;
1973     }
1974 
1975   /* In case COND is an equality, we may be able to simplify EXPR by copy/constant
1976      propagation, and vice versa.  Fold does not handle this, since it is
1977      considered too expensive.  */
1978   if (TREE_CODE (cond) == EQ_EXPR)
1979     {
1980       e0 = TREE_OPERAND (cond, 0);
1981       e1 = TREE_OPERAND (cond, 1);
1982 
1983       /* We know that e0 == e1.  Check whether we can simplify EXPR
1984 	 using this fact.  */
1985       e = simplify_replace_tree (expr, e0, e1);
1986       if (integer_zerop (e) || integer_nonzerop (e))
1987 	return e;
1988 
1989       e = simplify_replace_tree (expr, e1, e0);
1990       if (integer_zerop (e) || integer_nonzerop (e))
1991 	return e;
1992     }
1993   if (TREE_CODE (expr) == EQ_EXPR)
1994     {
1995       e0 = TREE_OPERAND (expr, 0);
1996       e1 = TREE_OPERAND (expr, 1);
1997 
1998       /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
1999       e = simplify_replace_tree (cond, e0, e1);
2000       if (integer_zerop (e))
2001 	return e;
2002       e = simplify_replace_tree (cond, e1, e0);
2003       if (integer_zerop (e))
2004 	return e;
2005     }
2006   if (TREE_CODE (expr) == NE_EXPR)
2007     {
2008       e0 = TREE_OPERAND (expr, 0);
2009       e1 = TREE_OPERAND (expr, 1);
2010 
2011       /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
2012       e = simplify_replace_tree (cond, e0, e1);
2013       if (integer_zerop (e))
2014 	return boolean_true_node;
2015       e = simplify_replace_tree (cond, e1, e0);
2016       if (integer_zerop (e))
2017 	return boolean_true_node;
2018     }
2019 
2020   te = expand_simple_operations (expr, stop);
2021 
2022   /* Check whether COND ==> EXPR.  */
2023   notcond = invert_truthvalue (cond);
2024   e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
2025   if (e && integer_nonzerop (e))
2026     return e;
2027 
2028   /* Check whether COND ==> not EXPR.  */
2029   e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
2030   if (e && integer_zerop (e))
2031     return e;
2032 
2033   return expr;
2034 }
2035 
2036 /* Tries to simplify EXPR using the condition COND.  Returns the simplified
2037    expression (or EXPR unchanged, if no simplification was possible).
2038    Wrapper around tree_simplify_using_condition_1 that ensures that chains
2039    of simple operations in definitions of ssa names in COND are expanded,
2040    so that things like casts or incrementing the value of the bound before
2041    the loop do not cause us to fail.  */
2042 
2043 static tree
2044 tree_simplify_using_condition (tree cond, tree expr, tree stop)
2045 {
2046   cond = expand_simple_operations (cond, stop);
2047 
2048   return tree_simplify_using_condition_1 (cond, expr, stop);
2049 }
2050 
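/* Example (editorial): with COND = n_1 > 10 known to hold on entry and
   EXPR = n_1 > 0, the implication check in tree_simplify_using_condition_1
   folds !(n_1 > 10) || (n_1 > 0) to true, so the call returns
   boolean_true_node and the assumption can be discharged.  */
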
2051 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2052    Returns the simplified expression (or EXPR unchanged, if no
2053    simplification was possible).  */
2054 
2055 tree
2056 simplify_using_initial_conditions (struct loop *loop, tree expr, tree stop)
2057 {
2058   edge e;
2059   basic_block bb;
2060   gimple *stmt;
2061   tree cond;
2062   int cnt = 0;
2063 
2064   if (TREE_CODE (expr) == INTEGER_CST)
2065     return expr;
2066 
2067   /* Limit walking the dominators to avoid quadratic behavior in
2068      the number of BBs times the number of loops in degenerate
2069      cases.  */
2070   for (bb = loop->header;
2071        bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2072        bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2073     {
2074       if (!single_pred_p (bb))
2075 	continue;
2076       e = single_pred_edge (bb);
2077 
2078       if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2079 	continue;
2080 
2081       stmt = last_stmt (e->src);
2082       cond = fold_build2 (gimple_cond_code (stmt),
2083 			  boolean_type_node,
2084 			  gimple_cond_lhs (stmt),
2085 			  gimple_cond_rhs (stmt));
2086       if (e->flags & EDGE_FALSE_VALUE)
2087 	cond = invert_truthvalue (cond);
2088       expr = tree_simplify_using_condition (cond, expr, stop);
2089       /* Stop if EXPR has been simplified to a constant value.  */
2090       if (expr && (integer_zerop (expr) || integer_nonzerop (expr)))
2091 	break;
2092 
2093       ++cnt;
2094     }
2095 
2096   return expr;
2097 }
2098 
2099 /* Tries to simplify EXPR using the evolutions of the loop invariants
2100    in the superloops of LOOP.  Returns the simplified expression
2101    (or EXPR unchanged, if no simplification was possible).  */
2102 
2103 static tree
2104 simplify_using_outer_evolutions (struct loop *loop, tree expr)
2105 {
2106   enum tree_code code = TREE_CODE (expr);
2107   bool changed;
2108   tree e, e0, e1, e2;
2109 
2110   if (is_gimple_min_invariant (expr))
2111     return expr;
2112 
2113   if (code == TRUTH_OR_EXPR
2114       || code == TRUTH_AND_EXPR
2115       || code == COND_EXPR)
2116     {
2117       changed = false;
2118 
2119       e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2120       if (TREE_OPERAND (expr, 0) != e0)
2121 	changed = true;
2122 
2123       e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2124       if (TREE_OPERAND (expr, 1) != e1)
2125 	changed = true;
2126 
2127       if (code == COND_EXPR)
2128 	{
2129 	  e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2130 	  if (TREE_OPERAND (expr, 2) != e2)
2131 	    changed = true;
2132 	}
2133       else
2134 	e2 = NULL_TREE;
2135 
2136       if (changed)
2137 	{
2138 	  if (code == COND_EXPR)
2139 	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2140 	  else
2141 	    expr = fold_build2 (code, boolean_type_node, e0, e1);
2142 	}
2143 
2144       return expr;
2145     }
2146 
2147   e = instantiate_parameters (loop, expr);
2148   if (is_gimple_min_invariant (e))
2149     return e;
2150 
2151   return expr;
2152 }
2153 
2154 /* Returns true if EXIT is the only possible exit from LOOP.  */
2155 
2156 bool
2157 loop_only_exit_p (const struct loop *loop, const_edge exit)
2158 {
2159   basic_block *body;
2160   gimple_stmt_iterator bsi;
2161   unsigned i;
2162   gimple *call;
2163 
2164   if (exit != single_exit (loop))
2165     return false;
2166 
2167   body = get_loop_body (loop);
2168   for (i = 0; i < loop->num_nodes; i++)
2169     {
2170       for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2171 	{
2172 	  call = gsi_stmt (bsi);
2173 	  if (gimple_code (call) != GIMPLE_CALL)
2174 	    continue;
2175 
2176 	  if (gimple_has_side_effects (call))
2177 	    {
2178 	      free (body);
2179 	      return false;
2180 	    }
2181 	}
2182     }
2183 
2184   free (body);
2185   return true;
2186 }
2187 
2188 /* Stores description of number of iterations of LOOP derived from
2189    EXIT (an exit edge of the LOOP) in NITER.  Returns true if some
2190    useful information could be derived (and the fields of NITER have the
2191    meaning described in comments at struct tree_niter_desc
2192    declaration), false otherwise.  If WARN is true and
2193    -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
2194    potentially unsafe assumptions.
2195    When EVERY_ITERATION is true, only tests that are known to be executed
2196    every iteration are considered (i.e. only a test that alone bounds the loop).
2197  */
2198 
2199 bool
2200 number_of_iterations_exit (struct loop *loop,
2201 			   struct tree_niter_desc *niter,
2202 			   bool warn, bool every_iteration)
2203 {
2204   gimple *last;
2205   gcond *stmt;
2206   tree type;
2207   tree op0, op1;
2208   enum tree_code code;
2209   affine_iv iv0, iv1;
2210   bool safe;
2211 
2212   safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2213 
2214   if (every_iteration && !safe)
2215     return false;
2216 
2217   niter->assumptions = boolean_false_node;
2218   niter->control.base = NULL_TREE;
2219   niter->control.step = NULL_TREE;
2220   niter->control.no_overflow = false;
2221   last = last_stmt (exit->src);
2222   if (!last)
2223     return false;
2224   stmt = dyn_cast <gcond *> (last);
2225   if (!stmt)
2226     return false;
2227 
2228   /* We want the condition for staying inside the loop.  */
2229   code = gimple_cond_code (stmt);
2230   if (exit->flags & EDGE_TRUE_VALUE)
2231     code = invert_tree_comparison (code, false);
2232 
2233   switch (code)
2234     {
2235     case GT_EXPR:
2236     case GE_EXPR:
2237     case LT_EXPR:
2238     case LE_EXPR:
2239     case NE_EXPR:
2240       break;
2241 
2242     default:
2243       return false;
2244     }
2245 
2246   op0 = gimple_cond_lhs (stmt);
2247   op1 = gimple_cond_rhs (stmt);
2248   type = TREE_TYPE (op0);
2249 
2250   if (TREE_CODE (type) != INTEGER_TYPE
2251       && !POINTER_TYPE_P (type))
2252     return false;
2253 
2254   if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
2255     return false;
2256   if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
2257     return false;
2258 
2259   /* We don't want to see undefined signed overflow warnings while
2260      computing the number of iterations.  */
2261   fold_defer_overflow_warnings ();
2262 
2263   iv0.base = expand_simple_operations (iv0.base);
2264   iv1.base = expand_simple_operations (iv1.base);
2265   if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2266 				  loop_only_exit_p (loop, exit), safe))
2267     {
2268       fold_undefer_and_ignore_overflow_warnings ();
2269       return false;
2270     }
2271 
2272   if (optimize >= 3)
2273     {
2274       niter->assumptions = simplify_using_outer_evolutions (loop,
2275 							    niter->assumptions);
2276       niter->may_be_zero = simplify_using_outer_evolutions (loop,
2277 							    niter->may_be_zero);
2278       niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2279     }
2280 
2281   niter->assumptions
2282 	  = simplify_using_initial_conditions (loop,
2283 					       niter->assumptions);
2284   niter->may_be_zero
2285 	  = simplify_using_initial_conditions (loop,
2286 					       niter->may_be_zero);
2287 
2288   fold_undefer_and_ignore_overflow_warnings ();
2289 
2290   /* If NITER has simplified into a constant, update MAX.  */
2291   if (TREE_CODE (niter->niter) == INTEGER_CST)
2292     niter->max = wi::to_widest (niter->niter);
2293 
2294   if (integer_onep (niter->assumptions))
2295     return true;
2296 
2297   /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
2298      But if we can prove that there is overflow or some other source of weird
2299      behavior, ignore the loop even with -funsafe-loop-optimizations.  */
2300   if (integer_zerop (niter->assumptions) || !single_exit (loop))
2301     return false;
2302 
2303   if (flag_unsafe_loop_optimizations)
2304     niter->assumptions = boolean_true_node;
2305 
2306   if (warn)
2307     {
2308       const char *wording;
2309       location_t loc = gimple_location (stmt);
2310 
2311       /* We can provide a more specific warning if one of the operands is
2312 	 constant and the other advances by +1 or -1.  */
2313       if (!integer_zerop (iv1.step)
2314 	  ? (integer_zerop (iv0.step)
2315 	     && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
2316 	  : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
2317         wording =
2318           flag_unsafe_loop_optimizations
2319           ? N_("assuming that the loop is not infinite")
2320           : N_("cannot optimize possibly infinite loops");
2321       else
2322 	wording =
2323 	  flag_unsafe_loop_optimizations
2324 	  ? N_("assuming that the loop counter does not overflow")
2325 	  : N_("cannot optimize loop, the loop counter may overflow");
2326 
2327       warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
2328 		  OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
2329     }
2330 
2331   return flag_unsafe_loop_optimizations;
2332 }
2333 
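/* Typical use (editorial sketch, modeled on callers elsewhere in this file):

     struct tree_niter_desc desc;
     if (number_of_iterations_exit (loop, exit, &desc, false))
       ...

   where desc.niter then gives the iteration count, valid under
   desc.assumptions and only when desc.may_be_zero is false.  */
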
2334 /* Try to determine the number of iterations of LOOP.  If we succeed,
2335    expression giving number of iterations is returned and *EXIT is
2336    set to the edge from that the information is obtained.  Otherwise
2337    chrec_dont_know is returned.  */
2338 
2339 tree
2340 find_loop_niter (struct loop *loop, edge *exit)
2341 {
2342   unsigned i;
2343   vec<edge> exits = get_loop_exit_edges (loop);
2344   edge ex;
2345   tree niter = NULL_TREE, aniter;
2346   struct tree_niter_desc desc;
2347 
2348   *exit = NULL;
2349   FOR_EACH_VEC_ELT (exits, i, ex)
2350     {
2351       if (!number_of_iterations_exit (loop, ex, &desc, false))
2352 	continue;
2353 
2354       if (integer_nonzerop (desc.may_be_zero))
2355 	{
2356 	  /* We exit in the first iteration through this exit.
2357 	     We won't find anything better.  */
2358 	  niter = build_int_cst (unsigned_type_node, 0);
2359 	  *exit = ex;
2360 	  break;
2361 	}
2362 
2363       if (!integer_zerop (desc.may_be_zero))
2364 	continue;
2365 
2366       aniter = desc.niter;
2367 
2368       if (!niter)
2369 	{
2370 	  /* Nothing recorded yet.  */
2371 	  niter = aniter;
2372 	  *exit = ex;
2373 	  continue;
2374 	}
2375 
2376       /* Prefer constants, the lower the better.  */
2377       if (TREE_CODE (aniter) != INTEGER_CST)
2378 	continue;
2379 
2380       if (TREE_CODE (niter) != INTEGER_CST)
2381 	{
2382 	  niter = aniter;
2383 	  *exit = ex;
2384 	  continue;
2385 	}
2386 
2387       if (tree_int_cst_lt (aniter, niter))
2388 	{
2389 	  niter = aniter;
2390 	  *exit = ex;
2391 	  continue;
2392 	}
2393     }
2394   exits.release ();
2395 
2396   return niter ? niter : chrec_dont_know;
2397 }
2398 
2399 /* Return true if the loop is known to have a bounded number of iterations.  */
2400 
2401 bool
2402 finite_loop_p (struct loop *loop)
2403 {
2404   widest_int nit;
2405   int flags;
2406 
2407   if (flag_unsafe_loop_optimizations)
2408     return true;
2409   flags = flags_from_decl_or_type (current_function_decl);
2410   if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2411     {
2412       if (dump_file && (dump_flags & TDF_DETAILS))
2413 	fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2414 		 loop->num);
2415       return true;
2416     }
2417 
2418   if (loop->any_upper_bound
2419       || max_loop_iterations (loop, &nit))
2420     {
2421       if (dump_file && (dump_flags & TDF_DETAILS))
2422 	fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2423 		 loop->num);
2424       return true;
2425     }
2426   return false;
2427 }
2428 
2429 /*
2430 
2431    Analysis of a number of iterations of a loop by a brute-force evaluation.
2432 
2433 */
2434 
2435 /* Bound on the number of iterations we try to evaluate.  */
2436 
2437 #define MAX_ITERATIONS_TO_TRACK \
2438   ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2439 
2440 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2441    result by a chain of operations in which all but exactly one of the
2442    operands are constants.  */
2443 
2444 static gphi *
2445 chain_of_csts_start (struct loop *loop, tree x)
2446 {
2447   gimple *stmt = SSA_NAME_DEF_STMT (x);
2448   tree use;
2449   basic_block bb = gimple_bb (stmt);
2450   enum tree_code code;
2451 
2452   if (!bb
2453       || !flow_bb_inside_loop_p (loop, bb))
2454     return NULL;
2455 
2456   if (gimple_code (stmt) == GIMPLE_PHI)
2457     {
2458       if (bb == loop->header)
2459 	return as_a <gphi *> (stmt);
2460 
2461       return NULL;
2462     }
2463 
2464   if (gimple_code (stmt) != GIMPLE_ASSIGN
2465       || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2466     return NULL;
2467 
2468   code = gimple_assign_rhs_code (stmt);
2469   if (gimple_references_memory_p (stmt)
2470       || TREE_CODE_CLASS (code) == tcc_reference
2471       || (code == ADDR_EXPR
2472 	  && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2473     return NULL;
2474 
2475   use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2476   if (use == NULL_TREE)
2477     return NULL;
2478 
2479   return chain_of_csts_start (loop, use);
2480 }
2481 
2482 /* Determines whether the expression X is derived from a result of a phi node
2483    in header of LOOP such that
2484 
2485    * the derivation of X consists only of operations with constants
2486    * the initial value of the phi node is constant
2487    * the value of the phi node in the next iteration can be derived from the
2488      value in the current iteration by a chain of operations with constants.
2489 
2490    If such phi node exists, it is returned, otherwise NULL is returned.  */
2491 
2492 static gphi *
2493 get_base_for (struct loop *loop, tree x)
2494 {
2495   gphi *phi;
2496   tree init, next;
2497 
2498   if (is_gimple_min_invariant (x))
2499     return NULL;
2500 
2501   phi = chain_of_csts_start (loop, x);
2502   if (!phi)
2503     return NULL;
2504 
2505   init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2506   next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2507 
2508   if (TREE_CODE (next) != SSA_NAME)
2509     return NULL;
2510 
2511   if (!is_gimple_min_invariant (init))
2512     return NULL;
2513 
2514   if (chain_of_csts_start (loop, next) != phi)
2515     return NULL;
2516 
2517   return phi;
2518 }
2519 
2520 /* Given an expression X, then
2521 
2522    * if X is NULL_TREE, we return the constant BASE.
2523    * otherwise X is an SSA name, whose value in the considered loop is derived
2524      by a chain of operations with constants from the result of a phi node in
2525      the header of the loop.  Then we return the value of X for the case when
2526      the result of this phi node is given by the constant BASE.  */
2527 
2528 static tree
2529 get_val_for (tree x, tree base)
2530 {
2531   gimple *stmt;
2532 
2533   gcc_checking_assert (is_gimple_min_invariant (base));
2534 
2535   if (!x)
2536     return base;
2537 
2538   stmt = SSA_NAME_DEF_STMT (x);
2539   if (gimple_code (stmt) == GIMPLE_PHI)
2540     return base;
2541 
2542   gcc_checking_assert (is_gimple_assign (stmt));
2543 
2544   /* STMT must be either an assignment of a single SSA name or an
2545      expression involving an SSA name and a constant.  Try to fold that
2546      expression using the value for the SSA name.  */
2547   if (gimple_assign_ssa_name_copy_p (stmt))
2548     return get_val_for (gimple_assign_rhs1 (stmt), base);
2549   else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2550 	   && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2551     {
2552       return fold_build1 (gimple_assign_rhs_code (stmt),
2553 			  gimple_expr_type (stmt),
2554 			  get_val_for (gimple_assign_rhs1 (stmt), base));
2555     }
2556   else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2557     {
2558       tree rhs1 = gimple_assign_rhs1 (stmt);
2559       tree rhs2 = gimple_assign_rhs2 (stmt);
2560       if (TREE_CODE (rhs1) == SSA_NAME)
2561 	rhs1 = get_val_for (rhs1, base);
2562       else if (TREE_CODE (rhs2) == SSA_NAME)
2563 	rhs2 = get_val_for (rhs2, base);
2564       else
2565 	gcc_unreachable ();
2566       return fold_build2 (gimple_assign_rhs_code (stmt),
2567 			  gimple_expr_type (stmt), rhs1, rhs2);
2568     }
2569   else
2570     gcc_unreachable ();
2571 }
2572 
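/* Example (editorial): for the chain

     i_2 = i_1 + 1;
     j_3 = i_2 * 2;

   where i_1 is the result of the loop header phi node, get_val_for
   substitutes BASE for i_1 and folds upward, so get_val_for (j_3, 5)
   returns 12.  */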
2573 
2574 /* Tries to count the number of iterations of LOOP till it exits by EXIT
2575    by brute force -- i.e. by determining the value of the operands of the
2576    condition at EXIT in the first few iterations of the loop (assuming that
2577    these values are constant) and finding the first one in which the
2578    condition is not satisfied.  Returns the constant giving the number
2579    of the iterations of LOOP if successful, chrec_dont_know otherwise.  */
2580 
2581 tree
2582 loop_niter_by_eval (struct loop *loop, edge exit)
2583 {
2584   tree acnd;
2585   tree op[2], val[2], next[2], aval[2];
2586   gphi *phi;
2587   gimple *cond;
2588   unsigned i, j;
2589   enum tree_code cmp;
2590 
2591   cond = last_stmt (exit->src);
2592   if (!cond || gimple_code (cond) != GIMPLE_COND)
2593     return chrec_dont_know;
2594 
2595   cmp = gimple_cond_code (cond);
2596   if (exit->flags & EDGE_TRUE_VALUE)
2597     cmp = invert_tree_comparison (cmp, false);
2598 
2599   switch (cmp)
2600     {
2601     case EQ_EXPR:
2602     case NE_EXPR:
2603     case GT_EXPR:
2604     case GE_EXPR:
2605     case LT_EXPR:
2606     case LE_EXPR:
2607       op[0] = gimple_cond_lhs (cond);
2608       op[1] = gimple_cond_rhs (cond);
2609       break;
2610 
2611     default:
2612       return chrec_dont_know;
2613     }
2614 
2615   for (j = 0; j < 2; j++)
2616     {
2617       if (is_gimple_min_invariant (op[j]))
2618 	{
2619 	  val[j] = op[j];
2620 	  next[j] = NULL_TREE;
2621 	  op[j] = NULL_TREE;
2622 	}
2623       else
2624 	{
2625 	  phi = get_base_for (loop, op[j]);
2626 	  if (!phi)
2627 	    return chrec_dont_know;
2628 	  val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2629 	  next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2630 	}
2631     }
2632 
2633   /* Don't issue signed overflow warnings.  */
2634   fold_defer_overflow_warnings ();
2635 
2636   for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2637     {
2638       for (j = 0; j < 2; j++)
2639 	aval[j] = get_val_for (op[j], val[j]);
2640 
2641       acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2642       if (acnd && integer_zerop (acnd))
2643 	{
2644 	  fold_undefer_and_ignore_overflow_warnings ();
2645 	  if (dump_file && (dump_flags & TDF_DETAILS))
2646 	    fprintf (dump_file,
2647 		     "Proved that loop %d iterates %d times using brute force.\n",
2648 		     loop->num, i);
2649 	  return build_int_cst (unsigned_type_node, i);
2650 	}
2651 
2652       for (j = 0; j < 2; j++)
2653 	{
2654 	  val[j] = get_val_for (next[j], val[j]);
2655 	  if (!is_gimple_min_invariant (val[j]))
2656 	    {
2657 	      fold_undefer_and_ignore_overflow_warnings ();
2658 	      return chrec_dont_know;
2659 	    }
2660 	}
2661     }
2662 
2663   fold_undefer_and_ignore_overflow_warnings ();
2664 
2665   return chrec_dont_know;
2666 }
2667 
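/* Example (editorial): for

     for (i = 0; i != 12; i += 3)
       body ();

   the evaluation tracks i through 0, 3, 6 and 9; the staying condition
   i != 12 first fails on the fifth evaluation, so the function returns
   the constant 4.  */
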
2668 /* Finds the exit of LOOP through which the loop exits after a constant
2669    number of iterations and stores the exit edge to *EXIT.  The constant
2670    giving the number of iterations of LOOP is returned.  The number of
2671    iterations is determined using loop_niter_by_eval (i.e. by brute force
2672    evaluation).  If we are unable to find an exit for which loop_niter_by_eval
2673    determines the number of iterations, chrec_dont_know is returned.  */
2674 
2675 tree
2676 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2677 {
2678   unsigned i;
2679   vec<edge> exits = get_loop_exit_edges (loop);
2680   edge ex;
2681   tree niter = NULL_TREE, aniter;
2682 
2683   *exit = NULL;
2684 
2685   /* Loops with multiple exits are expensive to handle and less important.  */
2686   if (!flag_expensive_optimizations
2687       && exits.length () > 1)
2688     {
2689       exits.release ();
2690       return chrec_dont_know;
2691     }
2692 
2693   FOR_EACH_VEC_ELT (exits, i, ex)
2694     {
2695       if (!just_once_each_iteration_p (loop, ex->src))
2696 	continue;
2697 
2698       aniter = loop_niter_by_eval (loop, ex);
2699       if (chrec_contains_undetermined (aniter))
2700 	continue;
2701 
2702       if (niter
2703 	  && !tree_int_cst_lt (aniter, niter))
2704 	continue;
2705 
2706       niter = aniter;
2707       *exit = ex;
2708     }
2709   exits.release ();
2710 
2711   return niter ? niter : chrec_dont_know;
2712 }
2713 
2714 /*
2715 
2716    Analysis of upper bounds on number of iterations of a loop.
2717 
2718 */
2719 
2720 static widest_int derive_constant_upper_bound_ops (tree, tree,
2721 						   enum tree_code, tree);
2722 
2723 /* Returns a constant upper bound on the value of the right-hand side of
2724    an assignment statement STMT.  */
2725 
2726 static widest_int
2727 derive_constant_upper_bound_assign (gimple *stmt)
2728 {
2729   enum tree_code code = gimple_assign_rhs_code (stmt);
2730   tree op0 = gimple_assign_rhs1 (stmt);
2731   tree op1 = gimple_assign_rhs2 (stmt);
2732 
2733   return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2734 					  op0, code, op1);
2735 }
2736 
2737 /* Returns a constant upper bound on the value of expression VAL.  VAL
2738    is considered to be unsigned.  If its type is signed, its value must
2739    be nonnegative.  */
2740 
2741 static widest_int
2742 derive_constant_upper_bound (tree val)
2743 {
2744   enum tree_code code;
2745   tree op0, op1, op2;
2746 
2747   extract_ops_from_tree (val, &code, &op0, &op1, &op2);
2748   return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2749 }
2750 
2751 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2752    whose type is TYPE.  The expression is considered to be unsigned.  If
2753    its type is signed, its value must be nonnegative.  */
2754 
2755 static widest_int
2756 derive_constant_upper_bound_ops (tree type, tree op0,
2757 				 enum tree_code code, tree op1)
2758 {
2759   tree subtype, maxt;
2760   widest_int bnd, max, cst;
2761   gimple *stmt;
2762 
2763   if (INTEGRAL_TYPE_P (type))
2764     maxt = TYPE_MAX_VALUE (type);
2765   else
2766     maxt = upper_bound_in_type (type, type);
2767 
2768   max = wi::to_widest (maxt);
2769 
2770   switch (code)
2771     {
2772     case INTEGER_CST:
2773       return wi::to_widest (op0);
2774 
2775     CASE_CONVERT:
2776       subtype = TREE_TYPE (op0);
2777       if (!TYPE_UNSIGNED (subtype)
2778 	  /* If TYPE is also signed, the fact that VAL is nonnegative implies
2779 	     that OP0 is nonnegative.  */
2780 	  && TYPE_UNSIGNED (type)
2781 	  && !tree_expr_nonnegative_p (op0))
2782 	{
2783 	  /* If we cannot prove that the casted expression is nonnegative,
2784 	     we cannot establish more useful upper bound than the precision
2785 	     of the type gives us.  */
2786 	  return max;
2787 	}
2788 
2789       /* We now know that op0 is a nonnegative value.  Try deriving an upper
2790 	 bound for it.  */
2791       bnd = derive_constant_upper_bound (op0);
2792 
2793       /* If the bound does not fit in TYPE, max. value of TYPE could be
2794 	 attained.  */
2795       if (wi::ltu_p (max, bnd))
2796 	return max;
2797 
2798       return bnd;
2799 
2800     case PLUS_EXPR:
2801     case POINTER_PLUS_EXPR:
2802     case MINUS_EXPR:
2803       if (TREE_CODE (op1) != INTEGER_CST
2804 	  || !tree_expr_nonnegative_p (op0))
2805 	return max;
2806 
2807       /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
2808 	 choose the most logical way to treat this constant regardless
2809 	 of the signedness of the type.  */
2810       cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
2811       if (code != MINUS_EXPR)
2812 	cst = -cst;
2813 
2814       bnd = derive_constant_upper_bound (op0);
2815 
2816       if (wi::neg_p (cst))
2817 	{
2818 	  cst = -cst;
2819 	  /* Avoid CST == 0x80000...  */
2820 	  if (wi::neg_p (cst))
2821 	    return max;
2822 
2823 	  /* OP0 + CST.  We need to check that
2824 	     BND <= MAX (type) - CST.  */
2825 
2826 	  widest_int mmax = max - cst;
2827 	  if (wi::leu_p (bnd, mmax))
2828 	    return max;
2829 
2830 	  return bnd + cst;
2831 	}
2832       else
2833 	{
2834 	  /* OP0 - CST, where CST >= 0.
2835 
2836 	     If TYPE is signed, we have already verified that OP0 >= 0, and we
2837 	     know that the result is nonnegative.  This implies that
2838 	     VAL <= BND - CST.
2839 
2840 	     If TYPE is unsigned, we must additionally know that OP0 >= CST,
2841 	     otherwise the operation underflows.
2842 	   */
2843 
2844 	  /* This should only happen if the type is unsigned; however, for
2845 	     buggy programs that use overflowing signed arithmetic even with
2846 	     -fno-wrapv, this condition may also be true for signed values.  */
2847 	  if (wi::ltu_p (bnd, cst))
2848 	    return max;
2849 
2850 	  if (TYPE_UNSIGNED (type))
2851 	    {
2852 	      tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2853 				      wide_int_to_tree (type, cst));
2854 	      if (!tem || !integer_nonzerop (tem))
2855 		return max;
2856 	    }
2857 
2858 	  bnd -= cst;
2859 	}
2860 
2861       return bnd;
2862 
2863     case FLOOR_DIV_EXPR:
2864     case EXACT_DIV_EXPR:
2865       if (TREE_CODE (op1) != INTEGER_CST
2866 	  || tree_int_cst_sign_bit (op1))
2867 	return max;
2868 
2869       bnd = derive_constant_upper_bound (op0);
2870       return wi::udiv_floor (bnd, wi::to_widest (op1));
2871 
2872     case BIT_AND_EXPR:
2873       if (TREE_CODE (op1) != INTEGER_CST
2874 	  || tree_int_cst_sign_bit (op1))
2875 	return max;
2876       return wi::to_widest (op1);
2877 
2878     case SSA_NAME:
2879       stmt = SSA_NAME_DEF_STMT (op0);
2880       if (gimple_code (stmt) != GIMPLE_ASSIGN
2881 	  || gimple_assign_lhs (stmt) != op0)
2882 	return max;
2883       return derive_constant_upper_bound_assign (stmt);
2884 
2885     default:
2886       return max;
2887     }
2888 }
2889 
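/* Examples (editorial): for VAL = x_1 & 0xff the BIT_AND_EXPR case yields
   the bound 255; for VAL = (x_1 & 0xff) / 4 the FLOOR_DIV_EXPR case first
   derives 255 for the dividend through the SSA_NAME case and then returns
   255 / 4 = 63.  */
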
2890 /* Emit a -Waggressive-loop-optimizations warning if needed.  */
2891 
2892 static void
2893 do_warn_aggressive_loop_optimizations (struct loop *loop,
2894 				       widest_int i_bound, gimple *stmt)
2895 {
2896   /* Don't warn if the loop doesn't have a known constant bound.  */
2897   if (!loop->nb_iterations
2898       || TREE_CODE (loop->nb_iterations) != INTEGER_CST
2899       || !warn_aggressive_loop_optimizations
2900       /* To avoid warning multiple times for the same loop,
2901 	 only start warning when we preserve loops.  */
2902       || (cfun->curr_properties & PROP_loops) == 0
2903       /* Only warn once per loop.  */
2904       || loop->warned_aggressive_loop_optimizations
2905       /* Only warn if undefined behavior gives us a lower estimate than the
2906 	 known constant bound.  */
2907       || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
2908       /* And undefined behavior happens unconditionally.  */
2909       || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
2910     return;
2911 
2912   edge e = single_exit (loop);
2913   if (e == NULL)
2914     return;
2915 
2916   gimple *estmt = last_stmt (e->src);
2917   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
2918   print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
2919 	     ? UNSIGNED : SIGNED);
2920   if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
2921 		  "iteration %s invokes undefined behavior", buf))
2922     inform (gimple_location (estmt), "within this loop");
2923   loop->warned_aggressive_loop_optimizations = true;
2924 }
2925 
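/* Example (editorial) of code diagnosed by the warning above:

     int a[4];
     for (int i = 0; i <= 4; i++)
       a[i] = i;

   The exit test implies a constant bound of 5 iterations, but the access
   a[i] invokes undefined behavior once i reaches 4, so GCC emits
   "iteration 4 invokes undefined behavior" at the store.  */
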
2926 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
2927    is true if the loop is exited immediately after STMT, and this exit
2928    is taken at the latest when STMT is executed BOUND + 1 times.
2929    REALISTIC is true if BOUND is expected to be close to the real number
2930    of iterations.  UPPER is true if we are sure the loop iterates at most
2931    BOUND times.  I_BOUND is a widest_int upper estimate on BOUND.  */
2932 
2933 static void
2934 record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
2935 		 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
2936 {
2937   widest_int delta;
2938 
2939   if (dump_file && (dump_flags & TDF_DETAILS))
2940     {
2941       fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2942       print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2943       fprintf (dump_file, " is %sexecuted at most ",
2944 	       upper ? "" : "probably ");
2945       print_generic_expr (dump_file, bound, TDF_SLIM);
2946       fprintf (dump_file, " (bounded by ");
2947       print_decu (i_bound, dump_file);
2948       fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2949     }
2950 
2951   /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
2952      real number of iterations.  */
2953   if (TREE_CODE (bound) != INTEGER_CST)
2954     realistic = false;
2955   else
2956     gcc_checking_assert (i_bound == wi::to_widest (bound));
2957   if (!upper && !realistic)
2958     return;
2959 
2960   /* If we have a guaranteed upper bound, record it in the appropriate
2961      list, unless this is an !is_exit bound (i.e. undefined behavior in
2962      at_stmt) in a loop with known constant number of iterations.  */
2963   if (upper
2964       && (is_exit
2965 	  || loop->nb_iterations == NULL_TREE
2966 	  || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
2967     {
2968       struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
2969 
2970       elt->bound = i_bound;
2971       elt->stmt = at_stmt;
2972       elt->is_exit = is_exit;
2973       elt->next = loop->bounds;
2974       loop->bounds = elt;
2975     }
2976 
2977   /* If statement is executed on every path to the loop latch, we can directly
2978      infer the upper bound on the # of iterations of the loop.  */
2979   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
2980     return;
2981 
2982   /* Update the number of iteration estimates according to the bound.
2983      If at_stmt is an exit then the loop latch is executed at most BOUND times,
2984      otherwise it can be executed BOUND + 1 times.  We will lower the estimate
2985      later if such a statement must be executed on the last iteration.  */
2986   if (is_exit)
2987     delta = 0;
2988   else
2989     delta = 1;
2990   widest_int new_i_bound = i_bound + delta;
2991 
2992   /* If an overflow occurred, ignore the result.  */
2993   if (wi::ltu_p (new_i_bound, delta))
2994     return;
2995 
2996   if (upper && !is_exit)
2997     do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
2998   record_niter_bound (loop, new_i_bound, realistic, upper);
2999 }
3000 
3001 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3002    and doesn't overflow.  */
3003 
3004 static void
3005 record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
3006 {
3007   struct control_iv *iv;
3008 
3009   if (!niter->control.base || !niter->control.step)
3010     return;
3011 
3012   if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3013     return;
3014 
3015   iv = ggc_alloc<control_iv> ();
3016   iv->base = niter->control.base;
3017   iv->step = niter->control.step;
3018   iv->next = loop->control_ivs;
3019   loop->control_ivs = iv;
3020 
3021   return;
3022 }
3023 
3024 /* Record the estimate on number of iterations of LOOP based on the fact that
3025    the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3026    its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
3027    estimated number of iterations is expected to be close to the real one.
3028    UPPER is true if we are sure the induction variable does not wrap.  */
3029 
3030 static void
3031 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
3032 		       tree low, tree high, bool realistic, bool upper)
3033 {
3034   tree niter_bound, extreme, delta;
3035   tree type = TREE_TYPE (base), unsigned_type;
3036   tree orig_base = base;
3037 
3038   if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3039     return;
3040 
3041   if (dump_file && (dump_flags & TDF_DETAILS))
3042     {
3043       fprintf (dump_file, "Induction variable (");
3044       print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3045       fprintf (dump_file, ") ");
3046       print_generic_expr (dump_file, base, TDF_SLIM);
3047       fprintf (dump_file, " + ");
3048       print_generic_expr (dump_file, step, TDF_SLIM);
3049       fprintf (dump_file, " * iteration does not wrap in statement ");
3050       print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3051       fprintf (dump_file, " in loop %d.\n", loop->num);
3052     }
3053 
3054   unsigned_type = unsigned_type_for (type);
3055   base = fold_convert (unsigned_type, base);
3056   step = fold_convert (unsigned_type, step);
3057 
3058   if (tree_int_cst_sign_bit (step))
3059     {
3060       wide_int min, max;
3061       extreme = fold_convert (unsigned_type, low);
3062       if (TREE_CODE (orig_base) == SSA_NAME
3063 	  && TREE_CODE (high) == INTEGER_CST
3064 	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3065 	  && get_range_info (orig_base, &min, &max) == VR_RANGE
3066 	  && wi::gts_p (high, max))
3067 	base = wide_int_to_tree (unsigned_type, max);
3068       else if (TREE_CODE (base) != INTEGER_CST
3069 	       && dominated_by_p (CDI_DOMINATORS,
3070 				  loop->latch, gimple_bb (stmt)))
3071 	base = fold_convert (unsigned_type, high);
3072       delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3073       step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3074     }
3075   else
3076     {
3077       wide_int min, max;
3078       extreme = fold_convert (unsigned_type, high);
3079       if (TREE_CODE (orig_base) == SSA_NAME
3080 	  && TREE_CODE (low) == INTEGER_CST
3081 	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3082 	  && get_range_info (orig_base, &min, &max) == VR_RANGE
3083 	  && wi::gts_p (min, low))
3084 	base = wide_int_to_tree (unsigned_type, min);
3085       else if (TREE_CODE (base) != INTEGER_CST
3086 	       && dominated_by_p (CDI_DOMINATORS,
3087 				  loop->latch, gimple_bb (stmt)))
3088 	base = fold_convert (unsigned_type, low);
3089       delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3090     }
3091 
3092   /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3093      would get out of the range.  */
3094   niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3095   widest_int max = derive_constant_upper_bound (niter_bound);
3096   record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3097 }
3098 
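/* Worked example (editorial): for an iv [0, + , 1] used in a statement
   whose values must stay in <LOW, HIGH> = <0, 9> (say it indexes
   int a[10]), the positive-step branch computes delta = 9 - 0 and
   niter_bound = 9 / 1 = 9, so the statement executes at most 10 times
   and the loop estimate is updated accordingly.  */
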
3099 /* Determine information about number of iterations of a LOOP from the index
3100    IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
3101    guaranteed to be executed in every iteration of LOOP.  Callback for
3102    for_each_index.  */
3103 
3104 struct ilb_data
3105 {
3106   struct loop *loop;
3107   gimple *stmt;
3108 };
3109 
3110 static bool
3111 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3112 {
3113   struct ilb_data *data = (struct ilb_data *) dta;
3114   tree ev, init, step;
3115   tree low, high, type, next;
3116   bool sign, upper = true, at_end = false;
3117   struct loop *loop = data->loop;
3118   bool reliable = true;
3119 
3120   if (TREE_CODE (base) != ARRAY_REF)
3121     return true;
3122 
3123   /* For arrays at the end of the structure, we are not guaranteed that they
3124      do not really extend over their declared size.  However, for arrays of
3125      size greater than one, this is unlikely to be intended.  */
3126   if (array_at_struct_end_p (base))
3127     {
3128       at_end = true;
3129       upper = false;
3130     }
3131 
3132   struct loop *dloop = loop_containing_stmt (data->stmt);
3133   if (!dloop)
3134     return true;
3135 
3136   ev = analyze_scalar_evolution (dloop, *idx);
3137   ev = instantiate_parameters (loop, ev);
3138   init = initial_condition (ev);
3139   step = evolution_part_in_loop_num (ev, loop->num);
3140 
3141   if (!init
3142       || !step
3143       || TREE_CODE (step) != INTEGER_CST
3144       || integer_zerop (step)
3145       || tree_contains_chrecs (init, NULL)
3146       || chrec_contains_symbols_defined_in_loop (init, loop->num))
3147     return true;
3148 
3149   low = array_ref_low_bound (base);
3150   high = array_ref_up_bound (base);
3151 
3152   /* The case of nonconstant bounds could be handled, but it would be
3153      complicated.  */
3154   if (TREE_CODE (low) != INTEGER_CST
3155       || !high
3156       || TREE_CODE (high) != INTEGER_CST)
3157     return true;
3158   sign = tree_int_cst_sign_bit (step);
3159   type = TREE_TYPE (step);
3160 
3161   /* The array of length 1 at the end of a structure most likely extends
3162      beyond its bounds.  */
3163   if (at_end
3164       && operand_equal_p (low, high, 0))
3165     return true;
3166 
3167   /* In case the relevant bound of the array does not fit in type, or
3168      it does, but bound + step (in type) still belongs into the range of the
3169      array, the index may wrap and still stay within the range of the array
3170      (consider e.g. if the array is indexed by the full range of
3171      unsigned char).
3172 
3173      To make things simpler, we require both bounds to fit into type, although
3174      there are cases where this would not be strictly necessary.  */
3175   if (!int_fits_type_p (high, type)
3176       || !int_fits_type_p (low, type))
3177     return true;
3178   low = fold_convert (type, low);
3179   high = fold_convert (type, high);
3180 
3181   if (sign)
3182     next = fold_binary (PLUS_EXPR, type, low, step);
3183   else
3184     next = fold_binary (PLUS_EXPR, type, high, step);
3185 
3186   if (tree_int_cst_compare (low, next) <= 0
3187       && tree_int_cst_compare (next, high) <= 0)
3188     return true;
3189 
3190   /* If the access is not executed on every iteration, we must ensure that
3191      overflow cannot make the access valid later.  */
3192   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3193       && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
3194 				step, data->stmt, loop, true))
3195     reliable = false;
3196 
3197   record_nonwrapping_iv (loop, init, step, data->stmt, low, high, reliable, upper);
3198   return true;
3199 }
3200 
3201 /* Determine information about number of iterations a LOOP from the bounds
3202    of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
3203    STMT is guaranteed to be executed in every iteration of LOOP.  */
3204 
3205 static void
3206 infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
3207 {
3208   struct ilb_data data;
3209 
3210   data.loop = loop;
3211   data.stmt = stmt;
3212   for_each_index (&ref, idx_infer_loop_bounds, &data);
3213 }
3214 
3215 /* Determine information about number of iterations of a LOOP from the way
3216    arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
3217    executed in every iteration of LOOP.  */
3218 
3219 static void
3220 infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
3221 {
3222   if (is_gimple_assign (stmt))
3223     {
3224       tree op0 = gimple_assign_lhs (stmt);
3225       tree op1 = gimple_assign_rhs1 (stmt);
3226 
3227       /* For each memory access, analyze its access function
3228 	 and record a bound on the loop iteration domain.  */
3229       if (REFERENCE_CLASS_P (op0))
3230 	infer_loop_bounds_from_ref (loop, stmt, op0);
3231 
3232       if (REFERENCE_CLASS_P (op1))
3233 	infer_loop_bounds_from_ref (loop, stmt, op1);
3234     }
3235   else if (is_gimple_call (stmt))
3236     {
3237       tree arg, lhs;
3238       unsigned i, n = gimple_call_num_args (stmt);
3239 
3240       lhs = gimple_call_lhs (stmt);
3241       if (lhs && REFERENCE_CLASS_P (lhs))
3242 	infer_loop_bounds_from_ref (loop, stmt, lhs);
3243 
3244       for (i = 0; i < n; i++)
3245 	{
3246 	  arg = gimple_call_arg (stmt, i);
3247 	  if (REFERENCE_CLASS_P (arg))
3248 	    infer_loop_bounds_from_ref (loop, stmt, arg);
3249 	}
3250     }
3251 }
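
/* E.g. in a loop body containing

     a[i] = b[2 * i];
     foo (c[i]);

   (foo being an arbitrary function), the store a[i], the load b[2 * i] and
   the call argument c[i] are all REFERENCE_CLASS_P operands, and each one
   is analyzed above for the iteration bound it implies.  */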
3252 
3253 /* Determine information about the number of iterations of a LOOP from the
3254    fact that the pointer arithmetic in STMT does not overflow.  */
3255 
3256 static void
3257 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
3258 {
3259   tree def, base, step, scev, type, low, high;
3260   tree var, ptr;
3261 
3262   if (!is_gimple_assign (stmt)
3263       || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3264     return;
3265 
3266   def = gimple_assign_lhs (stmt);
3267   if (TREE_CODE (def) != SSA_NAME)
3268     return;
3269 
3270   type = TREE_TYPE (def);
3271   if (!nowrap_type_p (type))
3272     return;
3273 
3274   ptr = gimple_assign_rhs1 (stmt);
3275   if (!expr_invariant_in_loop_p (loop, ptr))
3276     return;
3277 
3278   var = gimple_assign_rhs2 (stmt);
3279   if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3280     return;
3281 
3282   scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3283   if (chrec_contains_undetermined (scev))
3284     return;
3285 
3286   base = initial_condition_in_loop_num (scev, loop->num);
3287   step = evolution_part_in_loop_num (scev, loop->num);
3288 
3289   if (!base || !step
3290       || TREE_CODE (step) != INTEGER_CST
3291       || tree_contains_chrecs (base, NULL)
3292       || chrec_contains_symbols_defined_in_loop (base, loop->num))
3293     return;
3294 
3295   low = lower_bound_in_type (type, type);
3296   high = upper_bound_in_type (type, type);
3297 
3298   /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3299      produce a NULL pointer.  The contrary would mean NULL points to an object,
3300      while NULL is supposed to compare unequal with the address of all objects.
3301      Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3302      NULL pointer since that would mean wrapping, which we assume here not to
3303      happen.  So, we can exclude NULL from the valid range of pointer
3304      arithmetic.  */
3305   if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3306     low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3307 
3308   record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3309 }
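
/* An informal sketch of the case handled above: if P is loop-invariant
   and the body computes

     q_1 = P + i_2;

   where the offset i_2 advances by 4 each iteration, q_1 has the evolution
   {P + i_0, +, 4}.  Since pointer arithmetic is assumed not to wrap, and
   with -fdelete-null-pointer-checks may not produce a null pointer either,
   this evolution is confined to the [low, high] range computed above,
   which in turn bounds the number of iterations of the loop.  */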
3310 
3311 /* Determine information about the number of iterations of a LOOP from the
3312    fact that the signed arithmetic in STMT does not overflow.  */
3313 
3314 static void
3315 infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
3316 {
3317   tree def, base, step, scev, type, low, high;
3318 
3319   if (gimple_code (stmt) != GIMPLE_ASSIGN)
3320     return;
3321 
3322   def = gimple_assign_lhs (stmt);
3323 
3324   if (TREE_CODE (def) != SSA_NAME)
3325     return;
3326 
3327   type = TREE_TYPE (def);
3328   if (!INTEGRAL_TYPE_P (type)
3329       || !TYPE_OVERFLOW_UNDEFINED (type))
3330     return;
3331 
3332   scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3333   if (chrec_contains_undetermined (scev))
3334     return;
3335 
3336   base = initial_condition_in_loop_num (scev, loop->num);
3337   step = evolution_part_in_loop_num (scev, loop->num);
3338 
3339   if (!base || !step
3340       || TREE_CODE (step) != INTEGER_CST
3341       || tree_contains_chrecs (base, NULL)
3342       || chrec_contains_symbols_defined_in_loop (base, loop->num))
3343     return;
3344 
3345   low = lower_bound_in_type (type, type);
3346   high = upper_bound_in_type (type, type);
3347 
3348   record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3349 }
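
/* E.g. for a signed 32-bit IV incremented as

     i_3 = i_2 + 4;

   signed overflow is undefined, so the evolution {i_0, +, 4} must stay
   within [INT_MIN, INT_MAX]; record_nonwrapping_iv above turns this into
   an upper bound of roughly (INT_MAX - INT_MIN) / 4 iterations.  */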
3350 
3351 /* The following analyzers extract information on the bounds of LOOP
3352    from the following undefined behaviors:
3353 
3354    - data references should not access elements beyond the statically
3355      allocated size,
3356 
3357    - signed variables should not overflow when flag_wrapv is not set.
3358 */
3359 
3360 static void
3361 infer_loop_bounds_from_undefined (struct loop *loop)
3362 {
3363   unsigned i;
3364   basic_block *bbs;
3365   gimple_stmt_iterator bsi;
3366   basic_block bb;
3367   bool reliable;
3368 
3369   bbs = get_loop_body (loop);
3370 
3371   for (i = 0; i < loop->num_nodes; i++)
3372     {
3373       bb = bbs[i];
3374 
3375       /* If BB is not executed in each iteration of the loop, we cannot
3376 	 use the operations in it to infer a reliable upper bound on the
3377 	 # of iterations of the loop.  However, we can use it as a guess;
3378 	 reliable guesses come only from array bounds.  */
3379       reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3380 
3381       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3382 	{
3383 	  gimple *stmt = gsi_stmt (bsi);
3384 
3385 	  infer_loop_bounds_from_array (loop, stmt);
3386 
3387 	  if (reliable)
3388             {
3389               infer_loop_bounds_from_signedness (loop, stmt);
3390               infer_loop_bounds_from_pointer_arith (loop, stmt);
3391             }
3392   	}
3393 
3394     }
3395 
3396   free (bbs);
3397 }
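
/* To illustrate the reliability distinction above (a sketch):

     for (i = 0; i != n; i++)
       {
	 a[i] = ...;		   access executed on every iteration
	 if (cond)
	   b[i] = ...;		   guarded access
       }

   The bound implied by a[i] dominates the latch and may serve as a
   reliable upper bound, while the one implied by b[i] is recorded only
   as a guess.  */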
3398 
3399 /* Compare wide ints, callback for qsort.  */
3400 
3401 static int
3402 wide_int_cmp (const void *p1, const void *p2)
3403 {
3404   const widest_int *d1 = (const widest_int *) p1;
3405   const widest_int *d2 = (const widest_int *) p2;
3406   return wi::cmpu (*d1, *d2);
3407 }
3408 
3409 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3410    Lookup by binary search.  */
3411 
3412 static int
3413 bound_index (vec<widest_int> bounds, const widest_int &bound)
3414 {
3415   unsigned int end = bounds.length ();
3416   unsigned int begin = 0;
3417 
3418   /* Find a matching index by means of a binary search.  */
3419   while (begin != end)
3420     {
3421       unsigned int middle = (begin + end) / 2;
3422       widest_int index = bounds[middle];
3423 
3424       if (index == bound)
3425 	return middle;
3426       else if (wi::ltu_p (index, bound))
3427 	begin = middle + 1;
3428       else
3429 	end = middle;
3430     }
3431   gcc_unreachable ();
3432 }
3433 
3434 /* We recorded loop bounds only for statements dominating the loop latch (and
3435    thus executed on each loop iteration).  If there are any bounds on statements
3436    not dominating the loop latch, we can improve the estimate by walking the
3437    loop body and seeing whether every path from the loop header to the loop
3438    latch contains some bounded statement.  */
3439 
3440 static void
3441 discover_iteration_bound_by_body_walk (struct loop *loop)
3442 {
3443   struct nb_iter_bound *elt;
3444   vec<widest_int> bounds = vNULL;
3445   vec<vec<basic_block> > queues = vNULL;
3446   vec<basic_block> queue = vNULL;
3447   ptrdiff_t queue_index;
3448   ptrdiff_t latch_index = 0;
3449 
3450   /* Discover what bounds may interest us.  */
3451   for (elt = loop->bounds; elt; elt = elt->next)
3452     {
3453       widest_int bound = elt->bound;
3454 
3455       /* An exit terminates the loop at a given iteration, while a non-exit
3456 	 statement produces an undefined effect on the next iteration.  */
3457       if (!elt->is_exit)
3458 	{
3459 	  bound += 1;
3460 	  /* If an overflow occurred, ignore the result.  */
3461 	  if (bound == 0)
3462 	    continue;
3463 	}
3464 
3465       if (!loop->any_upper_bound
3466 	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3467         bounds.safe_push (bound);
3468     }
3469 
3470   /* Exit early if there is nothing to do.  */
3471   if (!bounds.exists ())
3472     return;
3473 
3474   if (dump_file && (dump_flags & TDF_DETAILS))
3475     fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3476 
3477   /* Sort the bounds in decreasing order.  */
3478   bounds.qsort (wide_int_cmp);
3479 
3480   /* For every basic block record the lowest bound that is guaranteed to
3481      terminate the loop.  */
3482 
3483   hash_map<basic_block, ptrdiff_t> bb_bounds;
3484   for (elt = loop->bounds; elt; elt = elt->next)
3485     {
3486       widest_int bound = elt->bound;
3487       if (!elt->is_exit)
3488 	{
3489 	  bound += 1;
3490 	  /* If an overflow occurred, ignore the result.  */
3491 	  if (bound == 0)
3492 	    continue;
3493 	}
3494 
3495       if (!loop->any_upper_bound
3496 	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3497 	{
3498 	  ptrdiff_t index = bound_index (bounds, bound);
3499 	  ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3500 	  if (!entry)
3501 	    bb_bounds.put (gimple_bb (elt->stmt), index);
3502 	  else if ((ptrdiff_t)*entry > index)
3503 	    *entry = index;
3504 	}
3505     }
3506 
3507   hash_map<basic_block, ptrdiff_t> block_priority;
3508 
3509   /* Perform shortest path discovery loop->header ... loop->latch.
3510 
3511      The "distance" is given by the smallest loop bound of a basic block
3512      present in the path, and we look for the path whose smallest bound is
3513      largest.
3514 
3515      To avoid the need for a Fibonacci heap on widest_ints, we simply compress
3516      the bounds into indexes into the BOUNDS array and then represent the queue
3517      as an array of queues, one for every index.
3518      Index BOUNDS.length () means that the execution of the given BB has
3519      no bound determined.
3520 
3521      BLOCK_PRIORITY is a pointer map translating a basic block into the
3522      smallest index it was inserted into the priority queue with.  */
3523   latch_index = -1;
3524 
3525   /* Start walk in loop header with index set to infinite bound.  */
3526   queue_index = bounds.length ();
3527   queues.safe_grow_cleared (queue_index + 1);
3528   queue.safe_push (loop->header);
3529   queues[queue_index] = queue;
3530   block_priority.put (loop->header, queue_index);
3531 
3532   for (; queue_index >= 0; queue_index--)
3533     {
3534       if (latch_index < queue_index)
3535 	{
3536 	  while (queues[queue_index].length ())
3537 	    {
3538 	      basic_block bb;
3539 	      ptrdiff_t bound_index = queue_index;
3540               edge e;
3541               edge_iterator ei;
3542 
3543 	      queue = queues[queue_index];
3544 	      bb = queue.pop ();
3545 
3546 	      /* OK, we later inserted the BB with lower priority, skip it.  */
3547 	      if (*block_priority.get (bb) > queue_index)
3548 		continue;
3549 
3550 	      /* See if we can improve the bound.  */
3551 	      ptrdiff_t *entry = bb_bounds.get (bb);
3552 	      if (entry && *entry < bound_index)
3553 		bound_index = *entry;
3554 
3555 	      /* Insert successors into the queue, watch for the latch edge
3556 		 and record the greatest index we saw.  */
3557 	      FOR_EACH_EDGE (e, ei, bb->succs)
3558 		{
3559 		  bool insert = false;
3560 
3561 		  if (loop_exit_edge_p (loop, e))
3562 		    continue;
3563 
3564 		  if (e == loop_latch_edge (loop)
3565 		      && latch_index < bound_index)
3566 		    latch_index = bound_index;
3567 		  else if (!(entry = block_priority.get (e->dest)))
3568 		    {
3569 		      insert = true;
3570 		      block_priority.put (e->dest, bound_index);
3571 		    }
3572 		  else if (*entry < bound_index)
3573 		    {
3574 		      insert = true;
3575 		      *entry = bound_index;
3576 		    }
3577 
3578 		  if (insert)
3579 		    queues[bound_index].safe_push (e->dest);
3580 		}
3581 	    }
3582 	}
3583       queues[queue_index].release ();
3584     }
3585 
3586   gcc_assert (latch_index >= 0);
3587   if ((unsigned)latch_index < bounds.length ())
3588     {
3589       if (dump_file && (dump_flags & TDF_DETAILS))
3590 	{
3591 	  fprintf (dump_file, "Found better loop bound ");
3592 	  print_decu (bounds[latch_index], dump_file);
3593 	  fprintf (dump_file, "\n");
3594 	}
3595       record_niter_bound (loop, bounds[latch_index], false, true);
3596     }
3597 
3598   queues.release ();
3599   bounds.release ();
3600 }
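
/* A minimal example of what the walk above buys us (sketch): if the loop
   body is a diamond whose two arms contain bounded statements with bounds
   B1 and B2 respectively, neither statement dominates the latch, yet every
   header-to-latch path meets one of them.  The walk therefore derives
   max (B1, B2) as a valid upper bound for the whole loop, which the
   dominance-based recording alone could not.  */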
3601 
3602 /* See if every path across the loop goes through a statement that is known
3603    not to execute on the last iteration.  In that case we can decrease the
3604    iteration count by 1.  */
3605 
3606 static void
3607 maybe_lower_iteration_bound (struct loop *loop)
3608 {
3609   hash_set<gimple *> *not_executed_last_iteration = NULL;
3610   struct nb_iter_bound *elt;
3611   bool found_exit = false;
3612   vec<basic_block> queue = vNULL;
3613   bitmap visited;
3614 
3615   /* Collect all statements with an interesting (i.e. lower than
3616      nb_iterations_upper_bound) bound on them.
3617 
3618      TODO: Due to the way record_estimate chooses estimates to store, the bounds
3619      will always be nb_iterations_upper_bound-1.  We can change this to also
3620      record statements not dominating the loop latch and update the walk below
3621      to the shortest path algorithm.  */
3622   for (elt = loop->bounds; elt; elt = elt->next)
3623     {
3624       if (!elt->is_exit
3625 	  && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
3626 	{
3627 	  if (!not_executed_last_iteration)
3628 	    not_executed_last_iteration = new hash_set<gimple *>;
3629 	  not_executed_last_iteration->add (elt->stmt);
3630 	}
3631     }
3632   if (!not_executed_last_iteration)
3633     return;
3634 
3635   /* Start DFS walk in the loop header and see if we can reach the
3636      loop latch or any of the exits (including statements with side
3637      effects that may terminate the loop otherwise) without visiting
3638      any of the statements known to have undefined effect on the last
3639      iteration.  */
3640   queue.safe_push (loop->header);
3641   visited = BITMAP_ALLOC (NULL);
3642   bitmap_set_bit (visited, loop->header->index);
3643   found_exit = false;
3644 
3645   do
3646     {
3647       basic_block bb = queue.pop ();
3648       gimple_stmt_iterator gsi;
3649       bool stmt_found = false;
3650 
3651       /* Look for possible exits and statements bounding the execution.  */
3652       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3653 	{
3654 	  gimple *stmt = gsi_stmt (gsi);
3655 	  if (not_executed_last_iteration->contains (stmt))
3656 	    {
3657 	      stmt_found = true;
3658 	      break;
3659 	    }
3660 	  if (gimple_has_side_effects (stmt))
3661 	    {
3662 	      found_exit = true;
3663 	      break;
3664 	    }
3665 	}
3666       if (found_exit)
3667 	break;
3668 
3669       /* If no bounding statement is found, continue the walk.  */
3670       if (!stmt_found)
3671 	{
3672           edge e;
3673           edge_iterator ei;
3674 
3675           FOR_EACH_EDGE (e, ei, bb->succs)
3676 	    {
3677 	      if (loop_exit_edge_p (loop, e)
3678 		  || e == loop_latch_edge (loop))
3679 		{
3680 		  found_exit = true;
3681 		  break;
3682 		}
3683 	      if (bitmap_set_bit (visited, e->dest->index))
3684 		queue.safe_push (e->dest);
3685 	    }
3686 	}
3687     }
3688   while (queue.length () && !found_exit);
3689 
3690   /* If every path through the loop reaches a bounding statement before an
3691      exit, then we know the last iteration of the loop will have undefined
3692      effect and we can decrease the number of iterations.  */
3693 
3694   if (!found_exit)
3695     {
3696       if (dump_file && (dump_flags & TDF_DETAILS))
3697 	fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3698 		 "undefined statement must be executed at the last iteration.\n");
3699       record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
3700 			  false, true);
3701     }
3702 
3703   BITMAP_FREE (visited);
3704   queue.release ();
3705   delete not_executed_last_iteration;
3706 }
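
/* Sketch of the situation above: in

     int a[100];
     for (i = 0; i <= 100; i++)
       a[i] = ...;

   the exit test alone allows one more iteration than the access a[i]
   does, and the access lies on every path through the body.  The last
   iteration would thus necessarily execute a statement with undefined
   effect, so the recorded upper bound may be decreased by 1.  */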
3707 
3708 /* Records estimates on the number of iterations of LOOP.  With
3709    -faggressive-loop-optimizations, also use estimates from undefined behavior.  */
3710 
3711 static void
3712 estimate_numbers_of_iterations_loop (struct loop *loop)
3713 {
3714   vec<edge> exits;
3715   tree niter, type;
3716   unsigned i;
3717   struct tree_niter_desc niter_desc;
3718   edge ex;
3719   widest_int bound;
3720   edge likely_exit;
3721 
3722   /* Give up if we have already tried to compute an estimate.  */
3723   if (loop->estimate_state != EST_NOT_COMPUTED)
3724     return;
3725 
3726   loop->estimate_state = EST_AVAILABLE;
3727   /* Force estimate computation but leave any existing upper bound in place.  */
3728   loop->any_estimate = false;
3729 
3730   /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
3731      to be constant, we avoid bounds implied by undefined behavior and instead
3732      diagnose those loops with -Waggressive-loop-optimizations.  */
3733   number_of_latch_executions (loop);
3734 
3735   exits = get_loop_exit_edges (loop);
3736   likely_exit = single_likely_exit (loop);
3737   FOR_EACH_VEC_ELT (exits, i, ex)
3738     {
3739       if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
3740 	continue;
3741 
3742       niter = niter_desc.niter;
3743       type = TREE_TYPE (niter);
3744       if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3745 	niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3746 			build_int_cst (type, 0),
3747 			niter);
3748       record_estimate (loop, niter, niter_desc.max,
3749 		       last_stmt (ex->src),
3750 		       true, ex == likely_exit, true);
3751       record_control_iv (loop, &niter_desc);
3752     }
3753   exits.release ();
3754 
3755   if (flag_aggressive_loop_optimizations)
3756     infer_loop_bounds_from_undefined (loop);
3757 
3758   discover_iteration_bound_by_body_walk (loop);
3759 
3760   maybe_lower_iteration_bound (loop);
3761 
3762   /* If we have a measured profile, use it to estimate the number of
3763      iterations.  */
3764   if (loop->header->count != 0)
3765     {
3766       gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
3767       bound = gcov_type_to_wide_int (nit);
3768       record_niter_bound (loop, bound, true, false);
3769     }
3770 
3771   /* If we know the exact number of iterations of this loop, try not to
3772      break code with undefined behavior by not recording a smaller maximum
3773      number of iterations.  */
3774   if (loop->nb_iterations
3775       && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
3776     {
3777       loop->any_upper_bound = true;
3778       loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
3779     }
3780 }
3781 
3782 /* Sets NIT to the estimated number of executions of the latch of the
3783    LOOP.  The value is only an estimate; it is not guaranteed to be at
3784    least as large as the actual number of iterations.  If we have no
3785    reliable estimate, the function returns false, otherwise returns true.  */
3786 
3787 bool
3788 estimated_loop_iterations (struct loop *loop, widest_int *nit)
3789 {
3790   /* When SCEV information is available, try to update loop iterations
3791      estimate.  Otherwise just return whatever we recorded earlier.  */
3792   if (scev_initialized_p ())
3793     estimate_numbers_of_iterations_loop (loop);
3794 
3795   return (get_estimated_loop_iterations (loop, nit));
3796 }
3797 
3798 /* Similar to estimated_loop_iterations, but returns the estimate only
3799    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
3800    on the number of iterations of LOOP could not be derived, returns -1.  */
3801 
3802 HOST_WIDE_INT
3803 estimated_loop_iterations_int (struct loop *loop)
3804 {
3805   widest_int nit;
3806   HOST_WIDE_INT hwi_nit;
3807 
3808   if (!estimated_loop_iterations (loop, &nit))
3809     return -1;
3810 
3811   if (!wi::fits_shwi_p (nit))
3812     return -1;
3813   hwi_nit = nit.to_shwi ();
3814 
3815   return hwi_nit < 0 ? -1 : hwi_nit;
3816 }
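
/* A usage sketch (THRESHOLD and apply_transform are hypothetical): a pass
   that needs only a rough trip count can do

     HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
     if (nit >= 0 && nit <= THRESHOLD)
       apply_transform (loop);

   treating the -1 return value as "no estimate, stay conservative".  */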
3817 
3818 
3819 /* Sets NIT to an upper bound for the maximum number of executions of the
3820    latch of the LOOP.  If we have no reliable estimate, the function returns
3821    false, otherwise returns true.  */
3822 
3823 bool
3824 max_loop_iterations (struct loop *loop, widest_int *nit)
3825 {
3826   /* When SCEV information is available, try to update loop iterations
3827      estimate.  Otherwise just return whatever we recorded earlier.  */
3828   if (scev_initialized_p ())
3829     estimate_numbers_of_iterations_loop (loop);
3830 
3831   return get_max_loop_iterations (loop, nit);
3832 }
3833 
3834 /* Similar to max_loop_iterations, but returns the estimate only
3835    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
3836    on the number of iterations of LOOP could not be derived, returns -1.  */
3837 
3838 HOST_WIDE_INT
3839 max_loop_iterations_int (struct loop *loop)
3840 {
3841   widest_int nit;
3842   HOST_WIDE_INT hwi_nit;
3843 
3844   if (!max_loop_iterations (loop, &nit))
3845     return -1;
3846 
3847   if (!wi::fits_shwi_p (nit))
3848     return -1;
3849   hwi_nit = nit.to_shwi ();
3850 
3851   return hwi_nit < 0 ? -1 : hwi_nit;
3852 }
3853 
3854 /* Returns an estimate for the number of executions of statements
3855    in the LOOP.  For statements before the loop exit, this exceeds
3856    the number of executions of the latch by one.  */
3857 
3858 HOST_WIDE_INT
3859 estimated_stmt_executions_int (struct loop *loop)
3860 {
3861   HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3862   HOST_WIDE_INT snit;
3863 
3864   if (nit == -1)
3865     return -1;
3866 
3867   snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3868 
3869   /* If the computation overflows, return -1.  */
3870   return snit < 0 ? -1 : snit;
3871 }
3872 
3873 /* Sets NIT to the estimated maximum number of executions of the latch of the
3874    LOOP, plus one.  If we have no reliable estimate, the function returns
3875    false, otherwise returns true.  */
3876 
3877 bool
3878 max_stmt_executions (struct loop *loop, widest_int *nit)
3879 {
3880   widest_int nit_minus_one;
3881 
3882   if (!max_loop_iterations (loop, nit))
3883     return false;
3884 
3885   nit_minus_one = *nit;
3886 
3887   *nit += 1;
3888 
3889   return wi::gtu_p (*nit, nit_minus_one);
3890 }
3891 
3892 /* Sets NIT to the estimated number of executions of the latch of the
3893    LOOP, plus one.  If we have no reliable estimate, the function returns
3894    false, otherwise returns true.  */
3895 
3896 bool
3897 estimated_stmt_executions (struct loop *loop, widest_int *nit)
3898 {
3899   widest_int nit_minus_one;
3900 
3901   if (!estimated_loop_iterations (loop, nit))
3902     return false;
3903 
3904   nit_minus_one = *nit;
3905 
3906   *nit += 1;
3907 
3908   return wi::gtu_p (*nit, nit_minus_one);
3909 }
3910 
3911 /* Records estimates on numbers of iterations of loops.  */
3912 
3913 void
3914 estimate_numbers_of_iterations (void)
3915 {
3916   struct loop *loop;
3917 
3918   /* We don't want to issue signed overflow warnings while getting
3919      loop iteration estimates.  */
3920   fold_defer_overflow_warnings ();
3921 
3922   FOR_EACH_LOOP (loop, 0)
3923     {
3924       estimate_numbers_of_iterations_loop (loop);
3925     }
3926 
3927   fold_undefer_and_ignore_overflow_warnings ();
3928 }
3929 
3930 /* Returns true if statement S1 dominates statement S2.  */
3931 
3932 bool
3933 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
3934 {
3935   basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
3936 
3937   if (!bb1
3938       || s1 == s2)
3939     return true;
3940 
3941   if (bb1 == bb2)
3942     {
3943       gimple_stmt_iterator bsi;
3944 
3945       if (gimple_code (s2) == GIMPLE_PHI)
3946 	return false;
3947 
3948       if (gimple_code (s1) == GIMPLE_PHI)
3949 	return true;
3950 
3951       for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
3952 	if (gsi_stmt (bsi) == s1)
3953 	  return true;
3954 
3955       return false;
3956     }
3957 
3958   return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
3959 }
3960 
3961 /* Returns true when we can prove that the number of executions of
3962    STMT in the loop is at most NITER, according to the bound on
3963    the number of executions of the statement NITER_BOUND->stmt recorded in
3964    NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
3965 
3966    ??? This code can become quite a CPU hog - we can have many bounds,
3967    and large basic blocks force stmt_dominates_stmt_p to be queried
3968    many times, so the whole thing is O(n^2) for each
3969    scev_probably_wraps_p invocation (which can happen n times).
3970 
3971    It would make more sense (and give better answers) to remember BB
3972    bounds computed by discover_iteration_bound_by_body_walk.  */
3973 
3974 static bool
3975 n_of_executions_at_most (gimple *stmt,
3976 			 struct nb_iter_bound *niter_bound,
3977 			 tree niter)
3978 {
3979   widest_int bound = niter_bound->bound;
3980   tree nit_type = TREE_TYPE (niter), e;
3981   enum tree_code cmp;
3982 
3983   gcc_assert (TYPE_UNSIGNED (nit_type));
3984 
3985   /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
3986      the number of iterations is small.  */
3987   if (!wi::fits_to_tree_p (bound, nit_type))
3988     return false;
3989 
3990   /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
3991      times.  This means that:
3992 
3993      -- if NITER_BOUND->is_exit is true, then everything after
3994 	it is executed at most NITER_BOUND->bound times.
3995 
3996      -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
3997 	is executed, then NITER_BOUND->stmt is executed as well in the same
3998 	iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
3999 
4000 	If we can determine that NITER_BOUND->stmt is always executed
4001 	after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4002 	We conclude that this holds if both statements belong to the same
4003 	basic block and STMT is before NITER_BOUND->stmt and there are no
4004 	statements with side effects in between.  */
4005 
4006   if (niter_bound->is_exit)
4007     {
4008       if (stmt == niter_bound->stmt
4009 	  || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4010 	return false;
4011       cmp = GE_EXPR;
4012     }
4013   else
4014     {
4015       if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4016 	{
4017           gimple_stmt_iterator bsi;
4018 	  if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4019 	      || gimple_code (stmt) == GIMPLE_PHI
4020 	      || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4021 	    return false;
4022 
4023 	  /* By stmt_dominates_stmt_p we already know that STMT appears
4024 	     before NITER_BOUND->STMT.  We still need to test that the loop
4025 	     cannot be terminated by a side effect in between.  */
4026 	  for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4027 	       gsi_next (&bsi))
4028 	    if (gimple_has_side_effects (gsi_stmt (bsi)))
4029 	       return false;
4030 	  bound += 1;
4031 	  if (bound == 0
4032 	      || !wi::fits_to_tree_p (bound, nit_type))
4033 	    return false;
4034 	}
4035       cmp = GT_EXPR;
4036     }
4037 
4038   e = fold_binary (cmp, boolean_type_node,
4039 		   niter, wide_int_to_tree (nit_type, bound));
4040   return e && integer_nonzerop (e);
4041 }
4042 
4043 /* Returns true if the arithmetic in TYPE can be assumed not to wrap.  */
4044 
4045 bool
4046 nowrap_type_p (tree type)
4047 {
4048   if (INTEGRAL_TYPE_P (type)
4049       && TYPE_OVERFLOW_UNDEFINED (type))
4050     return true;
4051 
4052   if (POINTER_TYPE_P (type))
4053     return true;
4054 
4055   return false;
4056 }
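
/* For instance, under the default C semantics (without -fwrapv), signed
   integer types have undefined overflow and pointer types may not wrap, so
   both answer true above, whereas unsigned types wrap modulo 2^precision
   by definition and answer false.  */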
4057 
4058 /* Return true if we can prove LOOP is exited before the evolution of the
4059    induction variable {BASE, STEP} overflows with respect to its type bound.  */
4060 
4061 static bool
4062 loop_exits_before_overflow (tree base, tree step,
4063 			    gimple *at_stmt, struct loop *loop)
4064 {
4065   widest_int niter;
4066   struct control_iv *civ;
4067   struct nb_iter_bound *bound;
4068   tree e, delta, step_abs, unsigned_base;
4069   tree type = TREE_TYPE (step);
4070   tree unsigned_type, valid_niter;
4071 
4072   /* Don't issue signed overflow warnings.  */
4073   fold_defer_overflow_warnings ();
4074 
4075   /* Compute the number of iterations before we reach the bound of the
4076      type, and verify that the loop is exited before this occurs.  */
4077   unsigned_type = unsigned_type_for (type);
4078   unsigned_base = fold_convert (unsigned_type, base);
4079 
4080   if (tree_int_cst_sign_bit (step))
4081     {
4082       tree extreme = fold_convert (unsigned_type,
4083 				   lower_bound_in_type (type, type));
4084       delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4085       step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4086 			      fold_convert (unsigned_type, step));
4087     }
4088   else
4089     {
4090       tree extreme = fold_convert (unsigned_type,
4091 				   upper_bound_in_type (type, type));
4092       delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4093       step_abs = fold_convert (unsigned_type, step);
4094     }
4095 
4096   valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4097 
4098   estimate_numbers_of_iterations_loop (loop);
4099 
4100   if (max_loop_iterations (loop, &niter)
4101       && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4102       && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4103 			   wide_int_to_tree (TREE_TYPE (valid_niter),
4104 					     niter))) != NULL
4105       && integer_nonzerop (e))
4106     {
4107       fold_undefer_and_ignore_overflow_warnings ();
4108       return true;
4109     }
4110   if (at_stmt)
4111     for (bound = loop->bounds; bound; bound = bound->next)
4112       {
4113 	if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4114 	  {
4115 	    fold_undefer_and_ignore_overflow_warnings ();
4116 	    return true;
4117 	  }
4118       }
4119   fold_undefer_and_ignore_overflow_warnings ();
4120 
4121   /* Try to prove the loop is exited before {base, step} overflows, with the
4122      help of the analyzed loop control IVs.  This is done only for IVs with a
4123      constant step because otherwise we don't have the information.  */
4124   if (TREE_CODE (step) == INTEGER_CST)
4125     {
4126       tree stop = (TREE_CODE (base) == SSA_NAME) ? base : NULL;
4127 
4128       for (civ = loop->control_ivs; civ; civ = civ->next)
4129 	{
4130 	  enum tree_code code;
4131 	  tree stepped, extreme, civ_type = TREE_TYPE (civ->step);
4132 
4133 	  /* Have to consider type difference because operand_equal_p ignores
4134 	     that for constants.  */
4135 	  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4136 	      || element_precision (type) != element_precision (civ_type))
4137 	    continue;
4138 
4139 	  /* Only consider control IVs with the same step.  */
4140 	  if (!operand_equal_p (step, civ->step, 0))
4141 	    continue;
4142 
4143 	  /* Done proving if this is a no-overflow control IV.  */
4144 	  if (operand_equal_p (base, civ->base, 0))
4145 	    return true;
4146 
4147 	  /* If this is a before stepping control IV, in other words, we have
4148 
4149 	       {civ_base, step} = {base + step, step}
4150 
4151 	     Because civ {base + step, step} doesn't overflow during loop
4152 	     iterations, {base, step} will not overflow if we can prove the
4153 	     operation "base + step" does not overflow.  Specifically, we try
4154 	     to prove that the conditions below are satisfied:
4155 
4156 	       base <= UPPER_BOUND (type) - step  ;;step > 0
4157 	       base >= LOWER_BOUND (type) - step  ;;step < 0
4158 
4159 	     by proving the reverse conditions are false using loop's initial
4160 	     condition.  */
4161 	  if (POINTER_TYPE_P (TREE_TYPE (base)))
4162 	    code = POINTER_PLUS_EXPR;
4163 	  else
4164 	    code = PLUS_EXPR;
4165 
4166 	  stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4167 	  if (operand_equal_p (stepped, civ->base, 0))
4168 	    {
4169 	      if (tree_int_cst_sign_bit (step))
4170 		{
4171 		  code = LT_EXPR;
4172 		  extreme = lower_bound_in_type (type, type);
4173 		}
4174 	      else
4175 		{
4176 		  code = GT_EXPR;
4177 		  extreme = upper_bound_in_type (type, type);
4178 		}
4179 	      extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4180 	      e = fold_build2 (code, boolean_type_node, base, extreme);
4181 	      e = simplify_using_initial_conditions (loop, e, stop);
4182 	      if (integer_zerop (e))
4183 		return true;
4184 	    }
4185         }
4186     }
4187 
4188   return false;
4189 }
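
/* E.g. (an informal sketch) for the do-while loop

     int i = n;
     do
       i++;		   the exit test uses i, giving control IV {n + 1, +, 1}
     while (i < m);

   {n, +, 1} cannot overflow provided the single addition n + 1 does not,
   i.e. provided n <= INT_MAX - 1; the code above forms the opposite
   comparison n > INT_MAX - 1 and asks simplify_using_initial_conditions
   to prove it false from the conditions guarding entry to the loop.  */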
4190 
4191 /* Return false only when the induction variable BASE + STEP * I is
4192    known to not overflow: i.e. when the number of iterations is small
4193    enough with respect to the step and initial condition in order to
4194    keep the evolution confined in TYPE's bounds.  Return true when the
4195    iv is known to overflow or when the property is not computable.
4196 
4197    USE_OVERFLOW_SEMANTICS is true if this function should assume that
4198    the rules for overflow of the given language apply (e.g., that signed
4199    arithmetic in C does not overflow).  */
4200 
4201 bool
4202 scev_probably_wraps_p (tree base, tree step,
4203 		       gimple *at_stmt, struct loop *loop,
4204 		       bool use_overflow_semantics)
4205 {
4206   /* FIXME: We really need something like
4207      http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4208 
4209      We used to test for the following situation that frequently appears
4210      during address arithmetic:
4211 
4212        D.1621_13 = (long unsigned intD.4) D.1620_12;
4213        D.1622_14 = D.1621_13 * 8;
4214        D.1623_15 = (doubleD.29 *) D.1622_14;
4215 
4216      And derived that the sequence corresponding to D_14
4217      can be proved to not wrap because it is used for computing a
4218      memory access; however, this is not really the case -- for example,
4219      if D_12 = (unsigned char) [254,+,1], then D_14 has values
4220      2032, 2040, 0, 8, ..., but the code is still legal.  */
4221 
4222   if (chrec_contains_undetermined (base)
4223       || chrec_contains_undetermined (step))
4224     return true;
4225 
4226   if (integer_zerop (step))
4227     return false;
4228 
4229   /* If we can use the fact that signed and pointer arithmetic does not
4230      wrap, we are done.  */
4231   if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4232     return false;
4233 
4234   /* To be able to use estimates on number of iterations of the loop,
4235      we must have an upper bound on the absolute value of the step.  */
4236   if (TREE_CODE (step) != INTEGER_CST)
4237     return true;
4238 
4239   if (loop_exits_before_overflow (base, step, at_stmt, loop))
4240     return false;
4241 
4242   /* At this point we still don't have a proof that the iv does not
4243      overflow: give up.  */
4244   return true;
4245 }
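
/* Usage sketch: callers interested in the positive guarantee invert the
   result, e.g.

     if (!scev_probably_wraps_p (base, step, stmt, loop, true))
       ... the IV {base, +, step} provably does not wrap ...

   Note the conservative polarity: true means "may wrap, or unknown";
   only false constitutes a proof.  */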
4246 
4247 /* Frees the information on upper bounds on numbers of iterations of LOOP.  */
4248 
4249 void
4250 free_numbers_of_iterations_estimates_loop (struct loop *loop)
4251 {
4252   struct control_iv *civ;
4253   struct nb_iter_bound *bound;
4254 
4255   loop->nb_iterations = NULL;
4256   loop->estimate_state = EST_NOT_COMPUTED;
4257   for (bound = loop->bounds; bound;)
4258     {
4259       struct nb_iter_bound *next = bound->next;
4260       ggc_free (bound);
4261       bound = next;
4262     }
4263   loop->bounds = NULL;
4264 
4265   for (civ = loop->control_ivs; civ;)
4266     {
4267       struct control_iv *next = civ->next;
4268       ggc_free (civ);
4269       civ = next;
4270     }
4271   loop->control_ivs = NULL;
4272 }
4273 
4274 /* Frees the information on upper bounds on numbers of iterations of loops.  */
4275 
4276 void
4277 free_numbers_of_iterations_estimates (function *fn)
4278 {
4279   struct loop *loop;
4280 
4281   FOR_EACH_LOOP_FN (fn, loop, 0)
4282     {
4283       free_numbers_of_iterations_estimates_loop (loop);
4284     }
4285 }
4286 
4287 /* Substitute value VAL for ssa name NAME inside expressions held
4288    at LOOP.  */
4289 
4290 void
4291 substitute_in_loop_info (struct loop *loop, tree name, tree val)
4292 {
4293   loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
4294 }
4295