/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "calls.h"
#include "intl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "tree-dfa.h"


/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

struct bounds
{
  mpz_t below, up;
};

static bool number_of_iterations_popcount (loop_p loop, edge exit,
					   enum tree_code code,
					   struct tree_niter_desc *niter);


/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */
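
/* For example, "a_1 + 5" yields *VAR = a_1 and OFFSET = 5, "a_1 - 5"
   yields *VAR = a_1 and OFFSET = -5, and a plain INTEGER_CST "7" yields
   *VAR = 0 and OFFSET = 7.  */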

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
	break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
      if (negate)
	mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored to BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
				tree c0, enum tree_code cmp, tree c1,
				mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  wide_int minv, maxv;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
	 such a guard is unlikely to appear, so we do not bother with
	 handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not convey much useful information,
	 except for cases of comparing with bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same
	 type as VAR.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
	{
	  mpz_t valc1;

	  /* Case of comparing VAR with its below/up bounds.  */
	  mpz_init (valc1);
	  wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
	  if (mpz_cmp (valc1, below) == 0)
	    cmp = GT_EXPR;
	  if (mpz_cmp (valc1, up) == 0)
	    cmp = LT_EXPR;

	  mpz_clear (valc1);
	}
      else
	{
	  /* Case of comparing with the bounds of the type.  */
	  wide_int min = wi::min_value (type);
	  wide_int max = wi::max_value (type);

	  if (wi::to_wide (c1) == min)
	    cmp = GT_EXPR;
	  if (wi::to_wide (c1) == max)
	    cmp = LT_EXPR;
	}

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
	return;

      break;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (0, minc1, TYPE_SIGN (type));
      wi::to_mpz (0, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
	   && INTEGRAL_TYPE_P (type)
	   && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
    {
      gcc_assert (wi::le_p (minv, maxv, sgn));
      wi::to_mpz (minv, minc1, sgn);
      wi::to_mpz (maxv, maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
	   || mpz_sgn (offc1) == 0
	   || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
	   || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }

  /* Compute range information for varc0.  If there is no overflow,
     the condition implies that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check the
     four cases below:
       1) cmp == LE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

	  In this case, (varc0 + offc0) will never underflow if we can
	  prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

	    (varc0 + offc0) doesn't underflow
	    && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

	    (varc0 + offc0) doesn't overflow
	    && (varc1 + offc1 - offc0) doesn't underflow

	  In this case, (varc0 + offc0) will never overflow if we can
	  prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle cases 2 and 4 in the code below.  */

  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
	   || mpz_sgn (offc0) == 0
	   || (cmp == LE_EXPR
	       && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
	   || (cmp == GE_EXPR
	       && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
	mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
	mpz_set (below, minc1);
    }

end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}
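
/* As an illustration of the refinement above: if TYPE is unsigned int,
   VAR is some n_5 with an initial range of [0, UINT_MAX], and the loop
   header is dominated by a guard n_5 != 0, the NE_EXPR comparison with
   the type minimum is turned into n_5 > 0, so BELOW is raised to 1.  */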

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
		       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_kind rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
	 PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  wide_int minc, maxc;
	  if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
	      && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
		  == VR_RANGE))
	    {
	      if (rtype != VR_RANGE)
		{
		  rtype = VR_RANGE;
		  minv = minc;
		  maxv = maxc;
		}
	      else
		{
		  minv = wi::max (minv, minc, sgn);
		  maxv = wi::min (maxv, maxc, sgn);
		  /* If the PHI result range is inconsistent with
		     the VAR range, give up on looking at the PHI
		     results.  This can happen if VR_UNDEFINED is
		     involved.  */
		  if (wi::gt_p (minv, maxv, sgn))
		    {
		      rtype = get_range_info (var, &minv, &maxv);
		      break;
		    }
		}
	    }
	}
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
	{
	  mpz_set (minm, min);
	  mpz_set (maxm, max);
	}
      else
	{
	  gcc_assert (wi::le_p (minv, maxv, sgn));
	  wi::to_mpz (minv, minm, sgn);
	  wi::to_mpz (maxv, maxm, sgn);
	}
      /* Now walk the dominators of the loop header and use the entry
	 guards to refine the estimates.  */
      for (bb = loop->header;
	   bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
	   bb = get_immediate_dominator (CDI_DOMINATORS, bb))
	{
	  edge e;
	  tree c0, c1;
	  gimple *cond;
	  enum tree_code cmp;

	  if (!single_pred_p (bb))
	    continue;
	  e = single_pred_edge (bb);

	  if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	    continue;

	  cond = last_stmt (e->src);
	  c0 = gimple_cond_lhs (cond);
	  cmp = gimple_cond_code (cond);
	  c1 = gimple_cond_rhs (cond);

	  if (e->flags & EDGE_FALSE_VALUE)
	    cmp = invert_tree_comparison (cmp, false);

	  refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
	  ++cnt;
	}

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation cannot wrap or OFF is zero, then this
	 is always fine.  If OFF is negative and MINM (which already
	 includes OFF) isn't smaller than the type's minimum, or OFF
	 is positive and MAXM isn't bigger than the type's maximum,
	 use the more precise range too.  */
      if (nowrap_type_p (type)
	  || mpz_sgn (off) == 0
	  || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
	  || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
	{
	  mpz_set (min, minm);
	  mpz_set (max, maxm);
	  mpz_clear (minm);
	  mpz_clear (maxm);
	  return;
	}
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
				    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
	  them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
	  expressions are var + X - M and var + Y, where M is the range of
	  the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
	  is X - Y - M.
       Therefore, if the arithmetic in TYPE does not overflow, then the
       bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
	mpz_sub (bnds->below, bnds->below, m);
      else
	mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
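
/* For example, in an 8-bit wrapping type (M = 256), X = 10 and Y = 3
   yield the bounds (-249, 7): the difference is 7 unless var + Y
   underflows while var + X does not, in which case it is 7 - 256.  */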

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
			   tree vary, mpz_t offy,
			   tree c0, enum tree_code cmp, tree c1,
			   bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
	return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
	 a guard is unlikely to appear, so we do not bother with handling
	 it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not convey much useful information, except
	 for the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (type))
	return;

      /* Ensure that the condition speaks about an expression in the same type
	 as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
	return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
	  && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
	{
	  cmp = GT_EXPR;
	  break;
	}
      if (TYPE_MAX_VALUE (type)
	  && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
	{
	  cmp = LT_EXPR;
	  break;
	}

      return;
    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
	VARX + OFFC0 overflows, but VARX + OFFX does not.
	This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
	VARY + OFFC1 underflows and VARY + OFFY does not.
	This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
	      || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
	      || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
	mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
	{
	  mpz_neg (bnd, bnd);
	  if (mpz_cmp (bnds->below, bnd) < 0)
	    mpz_set (bnds->below, bnd);
	}
      else
	{
	  if (mpz_cmp (bnd, bnds->up) < 0)
	    mpz_set (bnds->up, bnd);
	}
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or SSA names offset by an
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */
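
/* For instance, for X = n_1 + 1 and Y = n_1 computed in a type whose
   arithmetic does not wrap, bound_difference stores [1, 1] to BNDS.  */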

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
	 offsets.  Matters are a bit more complicated when the
	 addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
	 estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
	cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
				 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */
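
/* The loops below compute X**(2^(s-1) - 1), using the fact that for odd X
   we have X**(2^(s-1)) == 1 (mod 2^s), so X**(2^(s-1) - 1) is the modular
   inverse.  For example, inverse (3, 15) is 11, since 3 * 11 = 33 == 1
   (mod 16).  */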

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
	{
	  irslt *= ix;
	  ix *= ix;
	}
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
	{
	  rslt = int_const_binop (MULT_EXPR, rslt, x);
	  x = int_const_binop (MULT_EXPR, x, x);
	}
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable
   eventually reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */
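
/* For instance, if the iv may wrap, the number of iterations is bounded by
   the period of S * i: with an 8-bit type and S = 4 the products S * i take
   at most 2^(8 - 2) = 64 distinct values, so BND is set to 63.  */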

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
			     bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
		       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
	  && TREE_CODE (s) == INTEGER_CST
	  && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
			    TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
	  && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
	 the induction variable overflows (unless the loop is exited in some
	 other way before).  Note that the actual induction variable in the
	 loop (which ranges from base to final instead of from 0 to C) may
	 overflow, in which case BNDS.up will not be giving a correct upper
	 bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type)
				   - wi::ctz (wi::to_wide (s)), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
	 the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
	wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
      else if (bnds_u_valid)
	mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and
   possibly set NITER->assumptions to make sure this is the case).
   BNDS contains the bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
			 tree final, struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
			fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv->base),
		       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, final),
		       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
			       exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* Compute no-overflow information for the control iv.  This can be
     proven when the two conditions below are satisfied:

       1) IV evaluates toward FINAL at beginning, i.e.:
	    base <= FINAL ; step > 0
	    base >= FINAL ; step < 0

       2) |FINAL - base| is an exact multiple of step.

     Unfortunately, it's hard to prove the above conditions after the loop-ch
     pass, because a loop with exit condition (IV != FINAL) will usually be
     guarded by the initial condition (IV.base - IV.step != FINAL).  In this
     case, we can alternatively try to prove the conditions below:

       1') IV evaluates toward FINAL at beginning, i.e.:
	    new_base = base - step < FINAL ; step > 0
					     && base - step doesn't underflow
	    new_base = base - step > FINAL ; step < 0
					     && base - step doesn't overflow

       2') |FINAL - new_base| is an exact multiple of step.

     Please refer to PR34114 as an example of loop-ch's impact, and also
     refer to PR72817 as an example of why condition 2') is necessary.

     Note, for NE_EXPR, base equal to FINAL is a special case, in
     which the loop exits immediately, and the iv does not overflow.  */
  if (!niter->control.no_overflow
      && (integer_onep (s) || multiple_of_p (type, c, s)))
    {
      tree t, cond, new_c, relaxed_cond = boolean_false_node;

      if (tree_int_cst_sign_bit (iv->step))
	{
	  cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
	  if (TREE_CODE (type) == INTEGER_TYPE)
	    {
	      /* Only when base - step doesn't overflow.  */
	      t = TYPE_MAX_VALUE (type);
	      t = fold_build2 (PLUS_EXPR, type, t, iv->step);
	      t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
	      if (integer_nonzerop (t))
		{
		  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
		  new_c = fold_build2 (MINUS_EXPR, niter_type,
				       fold_convert (niter_type, t),
				       fold_convert (niter_type, final));
		  if (multiple_of_p (type, new_c, s))
		    relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
						t, final);
		}
	    }
	}
      else
	{
	  cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
	  if (TREE_CODE (type) == INTEGER_TYPE)
	    {
	      /* Only when base - step doesn't underflow.  */
	      t = TYPE_MIN_VALUE (type);
	      t = fold_build2 (PLUS_EXPR, type, t, iv->step);
	      t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
	      if (integer_nonzerop (t))
		{
		  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
		  new_c = fold_build2 (MINUS_EXPR, niter_type,
				       fold_convert (niter_type, final),
				       fold_convert (niter_type, t));
		  if (multiple_of_p (type, new_c, s))
		    relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
						t, final);
		}
	    }
	}

      t = simplify_using_initial_conditions (loop, cond);
      if (!t || !integer_onep (t))
	t = simplify_using_initial_conditions (loop, relaxed_cond);

      if (t && integer_onep (t))
	niter->control.no_overflow = true;
    }

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }
  if (niter->control.no_overflow && multiple_of_p (type, c, s))
    {
      niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
      return true;
    }

  /* Let gcd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
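  /* For instance, with an 8-bit niter_type, s = 6 and c = 10 give d = 2,
     so we solve 3 * i == 5 (mod 128); since inverse(3) = 43 here
     (3 * 43 = 129 == 1 (mod 128)), the result is (5 * 43) & 127 = 87,
     and indeed 6 * 87 == 10 (mod 256).  */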
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
			       (TYPE_PRECISION (niter_type)
				- tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
			       build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
	 assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
				assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */
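
/* For example, for the loop "for (i = 0; i < 10; i += 3)" we have
   DELTA = 10 and STEP = 3; MOD = 10 mod 3 = 1 is replaced by
   STEP - MOD = 2, DELTA is adjusted to 12, and the exit test is treated
   as i != 12, which gives 12 / 3 = 4 iterations.  */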

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
			       struct tree_niter_desc *niter,
			       tree *delta, tree step,
			       bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points
     being placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
	    (iv0->no_overflow && integer_nonzerop (iv0->step))
	    || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (MINUS_EXPR, type1,
			       TYPE_MAX_VALUE (type1), tmod);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build_pointer_plus (iv1->base, tmod));
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      iv0->base,
			      fold_build2 (PLUS_EXPR, type1,
					   iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
	 computation does not overflow, and that
	 iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type1), tmod);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	  if (integer_zerop (assumption))
	    goto end;
	}
      if (mpz_cmp (mmod, bnds->below) < 0)
	noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build_pointer_plus (iv0->base,
						       fold_build1 (NEGATE_EXPR,
								    type1, tmod)),
			      iv1->base);
      else
	noloop = fold_build2 (GT_EXPR, boolean_type_node,
			      fold_build2 (MINUS_EXPR, type1,
					   iv0->base, tmod),
			      iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions,
				      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				      niter->may_be_zero,
				      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
	return true;

      /* If iv0->base is a constant, we can determine the last value before
	 overflow precisely; otherwise we conservatively assume
	 MAX - STEP + 1.  */

      if (TREE_CODE (iv0->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, TYPE_MAX_VALUE (type)),
			   fold_convert (niter_type, iv0->base));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
			   TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
				iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
	return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
	{
	  d = fold_build2 (MINUS_EXPR, niter_type,
			   fold_convert (niter_type, iv1->base),
			   fold_convert (niter_type, TYPE_MIN_VALUE (type)));
	  diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
	}
      else
	diff = fold_build2 (MINUS_EXPR, niter_type, step,
			    build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
			   TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
				iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
		      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
		   /* For pointers, only values lying inside a single object
		      can be compared or manipulated by pointer arithmetic.
		      GCC in general does not allow or handle objects larger
		      than half of the address space, hence the upper bound
		      is satisfied for pointers.  */
		   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
			  iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
	 the address 0 never belongs to any object, we can assume this for
	 pointers.  */
      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MIN_VALUE (type), diff);
	  assumption = fold_build2 (GE_EXPR, boolean_type_node,
				    iv0->base, bound);
	}

      /* And then we can compute iv0->base - diff, and compare it with
	 iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
			  iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
	{
	  bound = fold_build2 (PLUS_EXPR, type1,
			       TYPE_MAX_VALUE (type), diff);
	  assumption = fold_build2 (LE_EXPR, boolean_type_node,
				    iv1->base, bound);
	}

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
			  fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				      niter->assumptions, assumption);
  if (!rolls_p)
    {
      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
					niter->may_be_zero, mbz);
    }
}

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
		       fold_convert (niter_type, iv1->base),
		       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

	 or

	 for (i = iv1->base; i > iv0->base; i--).

	 In both cases # of iterations is iv1->base - iv0->base, assuming that
	 iv1->base >= iv0->base.

	 First try to derive a lower bound on the value of
	 iv1->base - iv0->base, computed in full precision.  If the difference
	 is nonnegative, we are done, otherwise we must record the
	 condition.  */

      if (mpz_sgn (bnds->below) < 0)
	niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
					  iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
				     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
			 fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
				     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
	 zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
				      delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
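  /* E.g. for "for (i = 0; i < n; i += 4)" with a symbolic n this computes
     (n + 3) / 4 in the unsigned variant of the type.  */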
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
		   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
				 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
			 affine_iv *iv1, struct tree_niter_desc *niter,
			 bool exit_must_be_taken, bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */
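
  /* For instance, "for (i = 0; i <= n; i++)" is handled as
     "for (i = 0; i < n + 1; i++)" under the assumption n != TYPE_MAX_VALUE,
     and falls through to the LT handling below.  */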
1589 
1590   if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1591     {
1592       if (integer_nonzerop (iv0->step))
1593 	assumption = fold_build2 (NE_EXPR, boolean_type_node,
1594 				  iv1->base, TYPE_MAX_VALUE (type));
1595       else
1596 	assumption = fold_build2 (NE_EXPR, boolean_type_node,
1597 				  iv0->base, TYPE_MIN_VALUE (type));
1598 
1599       if (integer_zerop (assumption))
1600 	return false;
1601       if (!integer_nonzerop (assumption))
1602 	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1603 					  niter->assumptions, assumption);
1604     }
1605 
1606   if (integer_nonzerop (iv0->step))
1607     {
1608       if (POINTER_TYPE_P (type))
1609 	iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1610       else
1611 	iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1612 				 build_int_cst (type1, 1));
1613     }
1614   else if (POINTER_TYPE_P (type))
1615     iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1616   else
1617     iv0->base = fold_build2 (MINUS_EXPR, type1,
1618 			     iv0->base, build_int_cst (type1, 1));
1619 
1620   bounds_add (bnds, 1, type1);
1621 
1622   return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
1623 				  bnds);
1624 }
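
/* Hedged example (not from the sources): for unsigned int n, a loop

     for (i = 0; i <= n; i++)
       ...

   is rewritten above into the i < n + 1 form and thus iterates n + 1
   times, under the assumption recorded above that n != UINT_MAX
   (otherwise the exit condition never becomes false).  */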
1625 
1626 /* Dumps description of affine induction variable IV to FILE.  */
1627 
1628 static void
1629 dump_affine_iv (FILE *file, affine_iv *iv)
1630 {
1631   if (!integer_zerop (iv->step))
1632     fprintf (file, "[");
1633 
1634   print_generic_expr (file, iv->base, TDF_SLIM);
1635 
1636   if (!integer_zerop (iv->step))
1637     {
1638       fprintf (file, ", + , ");
1639       print_generic_expr (file, iv->step, TDF_SLIM);
1640       fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1641     }
1642 }
1643 
1644 /* Given exit condition IV0 CODE IV1 in TYPE, this function adjusts
1645    the condition for loop-until-wrap cases.  For example:
1646      (unsigned){8, -1}_loop < 10        => {0, 1} != 9
1647      10 < (unsigned){0, max - 7}_loop   => {0, 1} != 8
1648    Return true if condition is successfully adjusted.  */
1649 
1650 static bool
1651 adjust_cond_for_loop_until_wrap (tree type, affine_iv *iv0, tree_code *code,
1652 				 affine_iv *iv1)
1653 {
1654   /* Only support simple cases for the moment.  */
1655   if (TREE_CODE (iv0->base) != INTEGER_CST
1656       || TREE_CODE (iv1->base) != INTEGER_CST)
1657     return false;
1658 
1659   tree niter_type = unsigned_type_for (type), high, low;
1660   /* Case: i-- < 10.  */
1661   if (integer_zerop (iv1->step))
1662     {
1663       /* TODO: Should handle case in which abs(step) != 1.  */
1664       if (!integer_minus_onep (iv0->step))
1665 	return false;
1666       /* Give up on infinite loop.  */
1667       if (*code == LE_EXPR
1668 	  && tree_int_cst_equal (iv1->base, TYPE_MAX_VALUE (type)))
1669 	return false;
1670       high = fold_build2 (PLUS_EXPR, niter_type,
1671 			  fold_convert (niter_type, iv0->base),
1672 			  build_int_cst (niter_type, 1));
1673       low = fold_convert (niter_type, TYPE_MIN_VALUE (type));
1674     }
1675   else if (integer_zerop (iv0->step))
1676     {
1677       /* TODO: Should handle case in which abs(step) != 1.  */
1678       if (!integer_onep (iv1->step))
1679 	return false;
1680       /* Give up on infinite loop.  */
1681       if (*code == LE_EXPR
1682 	  && tree_int_cst_equal (iv0->base, TYPE_MIN_VALUE (type)))
1683 	return false;
1684       high = fold_convert (niter_type, TYPE_MAX_VALUE (type));
1685       low = fold_build2 (MINUS_EXPR, niter_type,
1686 			 fold_convert (niter_type, iv1->base),
1687 			 build_int_cst (niter_type, 1));
1688     }
1689   else
1690     gcc_unreachable ();
1691 
1692   iv0->base = low;
1693   iv0->step = fold_convert (niter_type, integer_one_node);
1694   iv1->base = high;
1695   iv1->step = build_int_cst (niter_type, 0);
1696   *code = NE_EXPR;
1697   return true;
1698 }
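
/* Worked example (illustrative only): with 32-bit unsigned i,

     i = 8;
     while (i-- < 10)
       ...

   executes its body for i = 8, 7, ..., 0 and stops once i wraps to
   UINT_MAX, i.e. 9 times, matching the adjusted condition {0, 1} != 9
   shown in the comment above.  */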
1699 
1700 /* Determine the number of iterations according to condition (for staying
1701    inside loop) which compares two induction variables using comparison
1702    operator CODE.  The induction variable on left side of the comparison
1703    is IV0, the right-hand side is IV1.  Both induction variables must have
1704    type TYPE, which must be an integer or pointer type.  The steps of the
1705    ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1706 
1707    LOOP is the loop whose number of iterations we are determining.
1708 
1709    ONLY_EXIT is true if we are sure this is the only way the loop could be
1710    exited (including possibly non-returning function calls, exceptions, etc.)
1711    -- in this case we can make better use of the information about whether
1712    the control induction variables can overflow.
1713 
1714    If EVERY_ITERATION is true, we know the test is executed on every iteration.
1715 
1716    The results (number of iterations and assumptions as described in
1717    comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1718    Returns false if it fails to determine number of iterations, true if it
1719    was determined (possibly with some assumptions).  */
1720 
1721 static bool
1722 number_of_iterations_cond (struct loop *loop,
1723 			   tree type, affine_iv *iv0, enum tree_code code,
1724 			   affine_iv *iv1, struct tree_niter_desc *niter,
1725 			   bool only_exit, bool every_iteration)
1726 {
1727   bool exit_must_be_taken = false, ret;
1728   bounds bnds;
1729 
1730   /* If the test is not executed every iteration, wrapping may make the test
1731      pass again.
1732      TODO: the overflow case can be still used as unreliable estimate of upper
1733      bound.  But we have no API to pass it down to number of iterations code
1734      and, at present, it will not use it anyway.  */
1735   if (!every_iteration
1736       && (!iv0->no_overflow || !iv1->no_overflow
1737 	  || code == NE_EXPR || code == EQ_EXPR))
1738     return false;
1739 
1740   /* The meaning of these assumptions is this:
1741      if !assumptions
1742        then the rest of information does not have to be valid
1743      if may_be_zero then the loop does not roll, even if
1744        niter != 0.  */
1745   niter->assumptions = boolean_true_node;
1746   niter->may_be_zero = boolean_false_node;
1747   niter->niter = NULL_TREE;
1748   niter->max = 0;
1749   niter->bound = NULL_TREE;
1750   niter->cmp = ERROR_MARK;
1751 
1752   /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1753      the control variable is on lhs.  */
1754   if (code == GE_EXPR || code == GT_EXPR
1755       || (code == NE_EXPR && integer_zerop (iv0->step)))
1756     {
1757       std::swap (iv0, iv1);
1758       code = swap_tree_comparison (code);
1759     }
1760 
1761   if (POINTER_TYPE_P (type))
1762     {
1763       /* Comparison of pointers is undefined unless both iv0 and iv1 point
1764 	 to the same object.  If they do, the control variable cannot wrap
1765 	 (as wrap around the bounds of memory will never return a pointer
1766 	 that would be guaranteed to point to the same object, even if we
1767 	 avoid undefined behavior by casting to size_t and back).  */
1768       iv0->no_overflow = true;
1769       iv1->no_overflow = true;
1770     }
1771 
1772   /* If the control induction variable does not overflow and the only exit
1773      from the loop is the one that we analyze, we know it must be taken
1774      eventually.  */
1775   if (only_exit)
1776     {
1777       if (!integer_zerop (iv0->step) && iv0->no_overflow)
1778 	exit_must_be_taken = true;
1779       else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1780 	exit_must_be_taken = true;
1781     }
1782 
1783   /* We can handle cases in which neither side of the comparison is
1784      invariant:
1785 
1786        {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1787      as if:
1788        {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1789 
1790      provided that either of the conditions below is satisfied:
1791 
1792        a) the test is NE_EXPR;
1793        b) iv0.step - iv1.step is integer and iv0/iv1 don't overflow.
1794 
1795      This rarely occurs in practice, but it is simple enough to manage.  */
1796   if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1797     {
1798       tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1799       tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1800 					   iv0->step, iv1->step);
1801 
1802       /* No need to check the sign of the new step, since the code below
1803 	 handles it correctly.  */
1804       if (code != NE_EXPR
1805 	  && (TREE_CODE (step) != INTEGER_CST
1806 	      || !iv0->no_overflow || !iv1->no_overflow))
1807 	return false;
1808 
1809       iv0->step = step;
1810       if (!POINTER_TYPE_P (type))
1811 	iv0->no_overflow = false;
1812 
1813       iv1->step = build_int_cst (step_type, 0);
1814       iv1->no_overflow = true;
1815     }
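
      /* For illustration (an assumed example, not from the sources):

	   for (i = 0, j = n; i < j; i++, j--)

	 has iv0 = {0, 1} and iv1 = {n, -1}; the code above rewrites the
	 test as {0, 2} < {n, 0}, giving (n + 1) / 2 iterations, provided
	 the overflow conditions checked above hold.  */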
1816 
1817   /* If the result of the comparison is a constant, the loop is weird.  More
1818      precise handling would be possible, but the situation is not common enough
1819      to waste time on it.  */
1820   if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1821     return false;
1822 
1823   /* If the loop exits immediately, there is nothing to do.  */
1824   tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1825   if (tem && integer_zerop (tem))
1826     {
1827       if (!every_iteration)
1828 	return false;
1829       niter->niter = build_int_cst (unsigned_type_for (type), 0);
1830       niter->max = 0;
1831       return true;
1832     }
1833 
1834   /* Handle special case loops: while (i-- < 10) and while (10 < i++) by
1835      adjusting iv0, iv1 and code.  */
1836   if (code != NE_EXPR
1837       && (tree_int_cst_sign_bit (iv0->step)
1838 	  || (!integer_zerop (iv1->step)
1839 	      && !tree_int_cst_sign_bit (iv1->step)))
1840       && !adjust_cond_for_loop_until_wrap (type, iv0, &code, iv1))
1841     return false;
1842 
1843   /* OK, now we know we have a sensible loop.  Handle several cases, depending
1844      on what comparison operator is used.  */
1845   bound_difference (loop, iv1->base, iv0->base, &bnds);
1846 
1847   if (dump_file && (dump_flags & TDF_DETAILS))
1848     {
1849       fprintf (dump_file,
1850 	       "Analyzing # of iterations of loop %d\n", loop->num);
1851 
1852       fprintf (dump_file, "  exit condition ");
1853       dump_affine_iv (dump_file, iv0);
1854       fprintf (dump_file, " %s ",
1855 	       code == NE_EXPR ? "!="
1856 	       : code == LT_EXPR ? "<"
1857 	       : "<=");
1858       dump_affine_iv (dump_file, iv1);
1859       fprintf (dump_file, "\n");
1860 
1861       fprintf (dump_file, "  bounds on difference of bases: ");
1862       mpz_out_str (dump_file, 10, bnds.below);
1863       fprintf (dump_file, " ... ");
1864       mpz_out_str (dump_file, 10, bnds.up);
1865       fprintf (dump_file, "\n");
1866     }
1867 
1868   switch (code)
1869     {
1870     case NE_EXPR:
1871       gcc_assert (integer_zerop (iv1->step));
1872       ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1873 				     exit_must_be_taken, &bnds);
1874       break;
1875 
1876     case LT_EXPR:
1877       ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1878 				     exit_must_be_taken, &bnds);
1879       break;
1880 
1881     case LE_EXPR:
1882       ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1883 				     exit_must_be_taken, &bnds);
1884       break;
1885 
1886     default:
1887       gcc_unreachable ();
1888     }
1889 
1890   mpz_clear (bnds.up);
1891   mpz_clear (bnds.below);
1892 
1893   if (dump_file && (dump_flags & TDF_DETAILS))
1894     {
1895       if (ret)
1896 	{
1897 	  fprintf (dump_file, "  result:\n");
1898 	  if (!integer_nonzerop (niter->assumptions))
1899 	    {
1900 	      fprintf (dump_file, "    under assumptions ");
1901 	      print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1902 	      fprintf (dump_file, "\n");
1903 	    }
1904 
1905 	  if (!integer_zerop (niter->may_be_zero))
1906 	    {
1907 	      fprintf (dump_file, "    zero if ");
1908 	      print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1909 	      fprintf (dump_file, "\n");
1910 	    }
1911 
1912 	  fprintf (dump_file, "    # of iterations ");
1913 	  print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1914 	  fprintf (dump_file, ", bounded by ");
1915 	  print_decu (niter->max, dump_file);
1916 	  fprintf (dump_file, "\n");
1917 	}
1918       else
1919 	fprintf (dump_file, "  failed\n\n");
1920     }
1921   return ret;
1922 }
1923 
1924 /* Substitute NEW_TREE for OLD in EXPR and fold the result.
1925    If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
1926    all SSA names are replaced with the result of calling the VALUEIZE
1927    function with the SSA name as argument.  */
1928 
1929 tree
1930 simplify_replace_tree (tree expr, tree old, tree new_tree,
1931 		       tree (*valueize) (tree))
1932 {
1933   unsigned i, n;
1934   tree ret = NULL_TREE, e, se;
1935 
1936   if (!expr)
1937     return NULL_TREE;
1938 
1939   /* Do not bother to replace constants.  */
1940   if (CONSTANT_CLASS_P (expr))
1941     return expr;
1942 
1943   if (valueize)
1944     {
1945       if (TREE_CODE (expr) == SSA_NAME)
1946 	{
1947 	  new_tree = valueize (expr);
1948 	  if (new_tree != expr)
1949 	    return new_tree;
1950 	}
1951     }
1952   else if (expr == old
1953 	   || operand_equal_p (expr, old, 0))
1954     return unshare_expr (new_tree);
1955 
1956   if (!EXPR_P (expr))
1957     return expr;
1958 
1959   n = TREE_OPERAND_LENGTH (expr);
1960   for (i = 0; i < n; i++)
1961     {
1962       e = TREE_OPERAND (expr, i);
1963       se = simplify_replace_tree (e, old, new_tree, valueize);
1964       if (e == se)
1965 	continue;
1966 
1967       if (!ret)
1968 	ret = copy_node (expr);
1969 
1970       TREE_OPERAND (ret, i) = se;
1971     }
1972 
1973   return (ret ? fold (ret) : expr);
1974 }
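
/* Usage sketch (illustrative): with EXPR = n_1 + 4, OLD = n_1 and
   NEW_TREE = 16, the result is the folded constant 20; when VALUEIZE is
   given, it is consulted instead for every SSA name found in EXPR.  */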
1975 
1976 /* Expand definitions of ssa names in EXPR as long as they are simple
1977    enough, and return the new expression.  If STOP is specified, stop
1978    expanding if EXPR equals to it.  */
1979 
1980 tree
1981 expand_simple_operations (tree expr, tree stop)
1982 {
1983   unsigned i, n;
1984   tree ret = NULL_TREE, e, ee, e1;
1985   enum tree_code code;
1986   gimple *stmt;
1987 
1988   if (expr == NULL_TREE)
1989     return expr;
1990 
1991   if (is_gimple_min_invariant (expr))
1992     return expr;
1993 
1994   code = TREE_CODE (expr);
1995   if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1996     {
1997       n = TREE_OPERAND_LENGTH (expr);
1998       for (i = 0; i < n; i++)
1999 	{
2000 	  e = TREE_OPERAND (expr, i);
2001 	  ee = expand_simple_operations (e, stop);
2002 	  if (e == ee)
2003 	    continue;
2004 
2005 	  if (!ret)
2006 	    ret = copy_node (expr);
2007 
2008 	  TREE_OPERAND (ret, i) = ee;
2009 	}
2010 
2011       if (!ret)
2012 	return expr;
2013 
2014       fold_defer_overflow_warnings ();
2015       ret = fold (ret);
2016       fold_undefer_and_ignore_overflow_warnings ();
2017       return ret;
2018     }
2019 
2020   /* Stop if it's not an SSA name, or it's the one we don't want to expand.  */
2021   if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2022     return expr;
2023 
2024   stmt = SSA_NAME_DEF_STMT (expr);
2025   if (gimple_code (stmt) == GIMPLE_PHI)
2026     {
2027       basic_block src, dest;
2028 
2029       if (gimple_phi_num_args (stmt) != 1)
2030 	return expr;
2031       e = PHI_ARG_DEF (stmt, 0);
2032 
2033       /* Avoid propagating through loop exit phi nodes, which
2034 	 could break loop-closed SSA form restrictions.  */
2035       dest = gimple_bb (stmt);
2036       src = single_pred (dest);
2037       if (TREE_CODE (e) == SSA_NAME
2038 	  && src->loop_father != dest->loop_father)
2039 	return expr;
2040 
2041       return expand_simple_operations (e, stop);
2042     }
2043   if (gimple_code (stmt) != GIMPLE_ASSIGN)
2044     return expr;
2045 
2046   /* Avoid expanding to expressions that contain SSA names that need
2047      to take part in abnormal coalescing.  */
2048   ssa_op_iter iter;
2049   FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2050     if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2051       return expr;
2052 
2053   e = gimple_assign_rhs1 (stmt);
2054   code = gimple_assign_rhs_code (stmt);
2055   if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2056     {
2057       if (is_gimple_min_invariant (e))
2058 	return e;
2059 
2060       if (code == SSA_NAME)
2061 	return expand_simple_operations (e, stop);
2062       else if (code == ADDR_EXPR)
2063 	{
2064 	  poly_int64 offset;
2065 	  tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2066 						     &offset);
2067 	  if (base
2068 	      && TREE_CODE (base) == MEM_REF)
2069 	    {
2070 	      ee = expand_simple_operations (TREE_OPERAND (base, 0), stop);
2071 	      return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2072 				  wide_int_to_tree (sizetype,
2073 						    mem_ref_offset (base)
2074 						    + offset));
2075 	    }
2076 	}
2077 
2078       return expr;
2079     }
2080 
2081   switch (code)
2082     {
2083     CASE_CONVERT:
2084       /* Casts are simple.  */
2085       ee = expand_simple_operations (e, stop);
2086       return fold_build1 (code, TREE_TYPE (expr), ee);
2087 
2088     case PLUS_EXPR:
2089     case MINUS_EXPR:
2090       if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2091 	  && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2092 	return expr;
2093       /* Fallthru.  */
2094     case POINTER_PLUS_EXPR:
2095       /* And increments and decrements by a constant are simple.  */
2096       e1 = gimple_assign_rhs2 (stmt);
2097       if (!is_gimple_min_invariant (e1))
2098 	return expr;
2099 
2100       ee = expand_simple_operations (e, stop);
2101       return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2102 
2103     default:
2104       return expr;
2105     }
2106 }
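
/* Illustration on assumed GIMPLE (not from the sources): given

     a_2 = n_1 + 1;
     b_3 = a_2 + 2;

   expand_simple_operations (b_3, NULL_TREE) returns the folded tree
   n_1 + 3, whereas a definition such as a_2 = n_1 * m_4 stops the
   expansion, since only copies, casts and +/- an invariant are treated
   as simple above.  */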
2107 
2108 /* Tries to simplify EXPR using the condition COND.  Returns the simplified
2109    expression (or EXPR unchanged, if no simplification was possible).  */
2110 
2111 static tree
2112 tree_simplify_using_condition_1 (tree cond, tree expr)
2113 {
2114   bool changed;
2115   tree e, e0, e1, e2, notcond;
2116   enum tree_code code = TREE_CODE (expr);
2117 
2118   if (code == INTEGER_CST)
2119     return expr;
2120 
2121   if (code == TRUTH_OR_EXPR
2122       || code == TRUTH_AND_EXPR
2123       || code == COND_EXPR)
2124     {
2125       changed = false;
2126 
2127       e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2128       if (TREE_OPERAND (expr, 0) != e0)
2129 	changed = true;
2130 
2131       e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2132       if (TREE_OPERAND (expr, 1) != e1)
2133 	changed = true;
2134 
2135       if (code == COND_EXPR)
2136 	{
2137 	  e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2138 	  if (TREE_OPERAND (expr, 2) != e2)
2139 	    changed = true;
2140 	}
2141       else
2142 	e2 = NULL_TREE;
2143 
2144       if (changed)
2145 	{
2146 	  if (code == COND_EXPR)
2147 	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2148 	  else
2149 	    expr = fold_build2 (code, boolean_type_node, e0, e1);
2150 	}
2151 
2152       return expr;
2153     }
2154 
2155   /* In case COND is equality, we may be able to simplify EXPR by copy/constant
2156      propagation, and vice versa.  Fold does not handle this, since it is
2157      considered too expensive.  */
2158   if (TREE_CODE (cond) == EQ_EXPR)
2159     {
2160       e0 = TREE_OPERAND (cond, 0);
2161       e1 = TREE_OPERAND (cond, 1);
2162 
2163       /* We know that e0 == e1.  Check whether we can simplify expr
2164 	 using this fact.  */
2165       e = simplify_replace_tree (expr, e0, e1);
2166       if (integer_zerop (e) || integer_nonzerop (e))
2167 	return e;
2168 
2169       e = simplify_replace_tree (expr, e1, e0);
2170       if (integer_zerop (e) || integer_nonzerop (e))
2171 	return e;
2172     }
2173   if (TREE_CODE (expr) == EQ_EXPR)
2174     {
2175       e0 = TREE_OPERAND (expr, 0);
2176       e1 = TREE_OPERAND (expr, 1);
2177 
2178       /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
2179       e = simplify_replace_tree (cond, e0, e1);
2180       if (integer_zerop (e))
2181 	return e;
2182       e = simplify_replace_tree (cond, e1, e0);
2183       if (integer_zerop (e))
2184 	return e;
2185     }
2186   if (TREE_CODE (expr) == NE_EXPR)
2187     {
2188       e0 = TREE_OPERAND (expr, 0);
2189       e1 = TREE_OPERAND (expr, 1);
2190 
2191       /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
2192       e = simplify_replace_tree (cond, e0, e1);
2193       if (integer_zerop (e))
2194 	return boolean_true_node;
2195       e = simplify_replace_tree (cond, e1, e0);
2196       if (integer_zerop (e))
2197 	return boolean_true_node;
2198     }
2199 
2200   /* Check whether COND ==> EXPR.  */
2201   notcond = invert_truthvalue (cond);
2202   e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2203   if (e && integer_nonzerop (e))
2204     return e;
2205 
2206   /* Check whether COND ==> not EXPR.  */
2207   e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2208   if (e && integer_zerop (e))
2209     return e;
2210 
2211   return expr;
2212 }
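
/* Example (illustrative): with COND = (n_1 == 100), the equality
   handling above substitutes 100 for n_1, so EXPR = (n_1 > 50) folds
   to 100 > 50, i.e. boolean_true_node.  */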
2213 
2214 /* Tries to simplify EXPR using the condition COND.  Returns the simplified
2215    expression (or EXPR unchanged, if no simplification was possible).
2216    Wrapper around tree_simplify_using_condition_1 that ensures that chains
2217    of simple operations in definitions of ssa names in COND are expanded,
2218    so that things like casts or incrementing the value of the bound before
2219    the loop do not cause us to fail.  */
2220 
2221 static tree
2222 tree_simplify_using_condition (tree cond, tree expr)
2223 {
2224   cond = expand_simple_operations (cond);
2225 
2226   return tree_simplify_using_condition_1 (cond, expr);
2227 }
2228 
2229 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2230    Returns the simplified expression (or EXPR unchanged, if no
2231    simplification was possible).  */
2232 
2233 tree
2234 simplify_using_initial_conditions (struct loop *loop, tree expr)
2235 {
2236   edge e;
2237   basic_block bb;
2238   gimple *stmt;
2239   tree cond, expanded, backup;
2240   int cnt = 0;
2241 
2242   if (TREE_CODE (expr) == INTEGER_CST)
2243     return expr;
2244 
2245   backup = expanded = expand_simple_operations (expr);
2246 
2247   /* Limit walking the dominators to avoid quadratic behavior in
2248      the number of BBs times the number of loops in degenerate
2249      cases.  */
2250   for (bb = loop->header;
2251        bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2252        bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2253     {
2254       if (!single_pred_p (bb))
2255 	continue;
2256       e = single_pred_edge (bb);
2257 
2258       if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2259 	continue;
2260 
2261       stmt = last_stmt (e->src);
2262       cond = fold_build2 (gimple_cond_code (stmt),
2263 			  boolean_type_node,
2264 			  gimple_cond_lhs (stmt),
2265 			  gimple_cond_rhs (stmt));
2266       if (e->flags & EDGE_FALSE_VALUE)
2267 	cond = invert_truthvalue (cond);
2268       expanded = tree_simplify_using_condition (cond, expanded);
2269       /* Stop if EXPR has been simplified to a constant.  */
2270       if (expanded
2271 	  && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2272 	return expanded;
2273 
2274       ++cnt;
2275     }
2276 
2277   /* Return the original expression if no simplification is done.  */
2278   return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2279 }
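
/* For instance (an assumed scenario): if the loop is only reached via

     if (n_1 > 0)
       ...loop...

   the dominator walk above picks up the guard n_1 > 0, so an assumption
   such as n_1 >= 1 simplifies to true.  */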
2280 
2281 /* Tries to simplify EXPR using the evolutions of the loop invariants
2282    in the superloops of LOOP.  Returns the simplified expression
2283    (or EXPR unchanged, if no simplification was possible).  */
2284 
2285 static tree
2286 simplify_using_outer_evolutions (struct loop *loop, tree expr)
2287 {
2288   enum tree_code code = TREE_CODE (expr);
2289   bool changed;
2290   tree e, e0, e1, e2;
2291 
2292   if (is_gimple_min_invariant (expr))
2293     return expr;
2294 
2295   if (code == TRUTH_OR_EXPR
2296       || code == TRUTH_AND_EXPR
2297       || code == COND_EXPR)
2298     {
2299       changed = false;
2300 
2301       e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2302       if (TREE_OPERAND (expr, 0) != e0)
2303 	changed = true;
2304 
2305       e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2306       if (TREE_OPERAND (expr, 1) != e1)
2307 	changed = true;
2308 
2309       if (code == COND_EXPR)
2310 	{
2311 	  e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2312 	  if (TREE_OPERAND (expr, 2) != e2)
2313 	    changed = true;
2314 	}
2315       else
2316 	e2 = NULL_TREE;
2317 
2318       if (changed)
2319 	{
2320 	  if (code == COND_EXPR)
2321 	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2322 	  else
2323 	    expr = fold_build2 (code, boolean_type_node, e0, e1);
2324 	}
2325 
2326       return expr;
2327     }
2328 
2329   e = instantiate_parameters (loop, expr);
2330   if (is_gimple_min_invariant (e))
2331     return e;
2332 
2333   return expr;
2334 }
2335 
2336 /* Returns true if EXIT is the only possible exit from LOOP.  */
2337 
2338 bool
2339 loop_only_exit_p (const struct loop *loop, const_edge exit)
2340 {
2341   basic_block *body;
2342   gimple_stmt_iterator bsi;
2343   unsigned i;
2344 
2345   if (exit != single_exit (loop))
2346     return false;
2347 
2348   body = get_loop_body (loop);
2349   for (i = 0; i < loop->num_nodes; i++)
2350     {
2351       for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2352 	if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2353 	  {
2354 	    free (body);
2355 	    return false;
2356 	  }
2357     }
2358 
2359   free (body);
2360   return true;
2361 }
2362 
2363 /* Stores description of number of iterations of LOOP derived from
2364    EXIT (an exit edge of the LOOP) in NITER.  Returns true if some useful
2365    information could be derived (and fields of NITER have meaning described
2366    in comments at struct tree_niter_desc declaration), false otherwise.
2367    When EVERY_ITERATION is true, only tests that are known to be executed
2368    every iteration are considered (i.e. only a test that alone bounds the loop).
2369    If AT_STMT is not NULL, this function stores LOOP's condition statement in
2370    it when returning true.  */
2371 
2372 bool
2373 number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
2374 				       struct tree_niter_desc *niter,
2375 				       gcond **at_stmt, bool every_iteration)
2376 {
2377   gimple *last;
2378   gcond *stmt;
2379   tree type;
2380   tree op0, op1;
2381   enum tree_code code;
2382   affine_iv iv0, iv1;
2383   bool safe;
2384 
2385   /* Nothing to analyze if the loop is known to be infinite.  */
2386   if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2387     return false;
2388 
2389   safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2390 
2391   if (every_iteration && !safe)
2392     return false;
2393 
2394   niter->assumptions = boolean_false_node;
2395   niter->control.base = NULL_TREE;
2396   niter->control.step = NULL_TREE;
2397   niter->control.no_overflow = false;
2398   last = last_stmt (exit->src);
2399   if (!last)
2400     return false;
2401   stmt = dyn_cast <gcond *> (last);
2402   if (!stmt)
2403     return false;
2404 
2405   /* We want the condition for staying inside loop.  */
2406   code = gimple_cond_code (stmt);
2407   if (exit->flags & EDGE_TRUE_VALUE)
2408     code = invert_tree_comparison (code, false);
2409 
2410   switch (code)
2411     {
2412     case GT_EXPR:
2413     case GE_EXPR:
2414     case LT_EXPR:
2415     case LE_EXPR:
2416     case NE_EXPR:
2417       break;
2418 
2419     default:
2420       return false;
2421     }
2422 
2423   op0 = gimple_cond_lhs (stmt);
2424   op1 = gimple_cond_rhs (stmt);
2425   type = TREE_TYPE (op0);
2426 
2427   if (TREE_CODE (type) != INTEGER_TYPE
2428       && !POINTER_TYPE_P (type))
2429     return false;
2430 
2431   tree iv0_niters = NULL_TREE;
2432   if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2433 			      op0, &iv0, safe ? &iv0_niters : NULL, false))
2434     return number_of_iterations_popcount (loop, exit, code, niter);
2435   tree iv1_niters = NULL_TREE;
2436   if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2437 			      op1, &iv1, safe ? &iv1_niters : NULL, false))
2438     return false;
2439   /* Give up on the complicated case.  */
2440   if (iv0_niters && iv1_niters)
2441     return false;
2442 
2443   /* We don't want to see undefined signed overflow warnings while
2444      computing the number of iterations.  */
2445   fold_defer_overflow_warnings ();
2446 
2447   iv0.base = expand_simple_operations (iv0.base);
2448   iv1.base = expand_simple_operations (iv1.base);
2449   if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2450 				  loop_only_exit_p (loop, exit), safe))
2451     {
2452       fold_undefer_and_ignore_overflow_warnings ();
2453       return false;
2454     }
2455 
2456   /* Incorporate additional assumption implied by control iv.  */
2457   tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2458   if (iv_niters)
2459     {
2460       tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2461 				     fold_convert (TREE_TYPE (niter->niter),
2462 						   iv_niters));
2463 
2464       if (!integer_nonzerop (assumption))
2465 	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2466 					  niter->assumptions, assumption);
2467 
2468       /* Refine upper bound if possible.  */
2469       if (TREE_CODE (iv_niters) == INTEGER_CST
2470 	  && niter->max > wi::to_widest (iv_niters))
2471 	niter->max = wi::to_widest (iv_niters);
2472     }
2473 
2474   /* There are no assumptions if the loop is known to be finite.  */
2475   if (!integer_zerop (niter->assumptions)
2476       && loop_constraint_set_p (loop, LOOP_C_FINITE))
2477     niter->assumptions = boolean_true_node;
2478 
2479   if (optimize >= 3)
2480     {
2481       niter->assumptions = simplify_using_outer_evolutions (loop,
2482 							    niter->assumptions);
2483       niter->may_be_zero = simplify_using_outer_evolutions (loop,
2484 							    niter->may_be_zero);
2485       niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2486     }
2487 
2488   niter->assumptions
2489 	  = simplify_using_initial_conditions (loop,
2490 					       niter->assumptions);
2491   niter->may_be_zero
2492 	  = simplify_using_initial_conditions (loop,
2493 					       niter->may_be_zero);
2494 
2495   fold_undefer_and_ignore_overflow_warnings ();
2496 
2497   /* If NITER has simplified into a constant, update MAX.  */
2498   if (TREE_CODE (niter->niter) == INTEGER_CST)
2499     niter->max = wi::to_widest (niter->niter);
2500 
2501   if (at_stmt)
2502     *at_stmt = stmt;
2503 
2504   return (!integer_zerop (niter->assumptions));
2505 }
2506 
2507 
2508 /* Utility function to check if OP is defined by a stmt
2509    computing VAL - 1.  */
2510 
2511 static bool
2512 ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2513 {
2514   gimple *stmt;
2515   return (TREE_CODE (op) == SSA_NAME
2516 	  && (stmt = SSA_NAME_DEF_STMT (op))
2517 	  && is_gimple_assign (stmt)
2518 	  && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2519 	  && val == gimple_assign_rhs1 (stmt)
2520 	  && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2521 }
2522 
2523 
2524 /* See if LOOP is a popcount implementation; if so, determine NITER for the loop.
2525 
2526    We match:
2527    <bb 2>
2528    goto <bb 4>
2529 
2530    <bb 3>
2531    _1 = b_11 + -1
2532    b_6 = _1 & b_11
2533 
2534    <bb 4>
2535    b_11 = PHI <b_5(D)(2), b_6(3)>
2536 
2537    exit block
2538    if (b_11 != 0)
2539 	goto <bb 3>
2540    else
2541 	goto <bb 5>
2542 
2543    OR we match copy-header version:
2544    if (b_5 != 0)
2545 	goto <bb 3>
2546    else
2547 	goto <bb 4>
2548 
2549    <bb 3>
2550    b_11 = PHI <b_5(2), b_6(3)>
2551    _1 = b_11 + -1
2552    b_6 = _1 & b_11
2553 
2554    exit block
2555    if (b_6 != 0)
2556 	goto <bb 3>
2557    else
2558 	goto <bb 4>
2559 
2560    If the popcount pattern matches, update NITER accordingly,
2561    i.e., set NITER to __builtin_popcount (b).
2562    Return true if we did, false otherwise.
2563 
2564  */
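
/* At the source level, the matched idiom is (illustrative only):

     int c = 0;
     while (b)
       {
	 b &= b - 1;
	 c++;
       }

   where each iteration clears the lowest set bit of b, so the loop
   runs exactly __builtin_popcount (b) times (zero times if b == 0).  */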
2565 
2566 static bool
2567 number_of_iterations_popcount (loop_p loop, edge exit,
2568 			       enum tree_code code,
2569 			       struct tree_niter_desc *niter)
2570 {
2571   bool adjust = true;
2572   tree iter;
2573   HOST_WIDE_INT max;
2575   tree fn = NULL_TREE;
2576 
2577   /* Check that the loop terminating branch is of the form
2578      if (b != 0).  */
2579   gimple *stmt = last_stmt (exit->src);
2580   if (!stmt
2581       || gimple_code (stmt) != GIMPLE_COND
2582       || code != NE_EXPR
2583       || !integer_zerop (gimple_cond_rhs (stmt))
2584       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME)
2585     return false;
2586 
2587   gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2588 
2589   /* Depending on whether copy-header was performed, the feeding PHI stmt
2590      might be in the loop header or the loop latch; handle both cases.  */
2591   if (gimple_code (and_stmt) == GIMPLE_PHI
2592       && gimple_bb (and_stmt) == loop->header
2593       && gimple_phi_num_args (and_stmt) == 2
2594       && (TREE_CODE (gimple_phi_arg_def (and_stmt,
2595 					 loop_latch_edge (loop)->dest_idx))
2596 	  == SSA_NAME))
2597     {
2598       /* The SSA name used in the exit condition is defined by the PHI stmt
2599 	 b_11 = PHI <b_5(D)(2), b_6(3)>;
2600 	 from the PHI stmt, get the and_stmt
2601 	 b_6 = _1 & b_11.  */
2602       tree t = gimple_phi_arg_def (and_stmt, loop_latch_edge (loop)->dest_idx);
2603       and_stmt = SSA_NAME_DEF_STMT (t);
2604       adjust = false;
2605     }
2606 
2607   /* Make sure it is indeed an and stmt (b_6 = _1 & b_11).  */
2608   if (!is_gimple_assign (and_stmt)
2609       || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR)
2610     return false;
2611 
2612   tree b_11 = gimple_assign_rhs1 (and_stmt);
2613   tree _1 = gimple_assign_rhs2 (and_stmt);
2614 
2615   /* Check that _1 is defined by b_11 + -1 (_1 = b_11 + -1).
2616      Also make sure that b_11 is the same in and_stmt and the _1 defining stmt.
2617      Also canonicalize if _1 and b_11 are reversed.  */
2618   if (ssa_defined_by_minus_one_stmt_p (b_11, _1))
2619     std::swap (b_11, _1);
2620   else if (ssa_defined_by_minus_one_stmt_p (_1, b_11))
2621     ;
2622   else
2623     return false;
2624   /* Check the recurrence:
2625    ... = PHI <b_5(2), b_6(3)>.  */
2626   gimple *phi = SSA_NAME_DEF_STMT (b_11);
2627   if (gimple_code (phi) != GIMPLE_PHI
2628       || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2629       || (gimple_assign_lhs (and_stmt)
2630 	  != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2631     return false;
2632 
2633   /* We found a match. Get the corresponding popcount builtin.  */
2634   tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2635   if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION (integer_type_node))
2636     fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2637   else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2638 	   (long_integer_type_node))
2639     fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2640   else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2641 	   (long_long_integer_type_node))
2642     fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2643 
2644   /* ??? Support promoting char/short to int.  */
2645   if (!fn)
2646     return false;
2647 
2648   /* Update NITER params accordingly.  */
2649   tree utype = unsigned_type_for (TREE_TYPE (src));
2650   src = fold_convert (utype, src);
2651   tree call = fold_convert (utype, build_call_expr (fn, 1, src));
2652   if (adjust)
2653     iter = fold_build2 (MINUS_EXPR, utype,
2654 			call,
2655 			build_int_cst (utype, 1));
2656   else
2657     iter = call;
2658 
2659   if (TREE_CODE (call) == INTEGER_CST)
2660     max = tree_to_uhwi (call);
2661   else
2662     max = TYPE_PRECISION (TREE_TYPE (src));
2663   if (adjust)
2664     max = max - 1;
2665 
2666   niter->niter = iter;
2667   niter->assumptions = boolean_true_node;
2668 
2669   if (adjust)
2670     {
2671       tree may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2672 				      build_zero_cst
2673 				      (TREE_TYPE (src)));
2674       niter->may_be_zero =
2675 	simplify_using_initial_conditions (loop, may_be_zero);
2676     }
2677   else
2678     niter->may_be_zero = boolean_false_node;
2679 
2680   niter->max = max;
2681   niter->bound = NULL_TREE;
2682   niter->cmp = ERROR_MARK;
2683   return true;
2684 }
2685 
2686 
2687 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2688    the niter information holds unconditionally.  */
2689 
2690 bool
2691 number_of_iterations_exit (struct loop *loop, edge exit,
2692 			   struct tree_niter_desc *niter,
2693 			   bool warn, bool every_iteration)
2694 {
2695   gcond *stmt;
2696   if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2697 					      &stmt, every_iteration))
2698     return false;
2699 
2700   if (integer_nonzerop (niter->assumptions))
2701     return true;
2702 
2703   if (warn && dump_enabled_p ())
2704     dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
2705 		     "missed loop optimization: niters analysis ends up "
2706 		     "with assumptions.\n");
2707 
2708   return false;
2709 }
2710 
2711 /* Try to determine the number of iterations of LOOP.  If we succeed,
2712    an expression giving the number of iterations is returned and *EXIT is
2713    set to the edge from which the information was obtained.  Otherwise
2714    chrec_dont_know is returned.  */
2715 
2716 tree
2717 find_loop_niter (struct loop *loop, edge *exit)
2718 {
2719   unsigned i;
2720   vec<edge> exits = get_loop_exit_edges (loop);
2721   edge ex;
2722   tree niter = NULL_TREE, aniter;
2723   struct tree_niter_desc desc;
2724 
2725   *exit = NULL;
2726   FOR_EACH_VEC_ELT (exits, i, ex)
2727     {
2728       if (!number_of_iterations_exit (loop, ex, &desc, false))
2729 	continue;
2730 
2731       if (integer_nonzerop (desc.may_be_zero))
2732 	{
2733 	  /* We exit in the first iteration through this exit.
2734 	     We won't find anything better.  */
2735 	  niter = build_int_cst (unsigned_type_node, 0);
2736 	  *exit = ex;
2737 	  break;
2738 	}
2739 
2740       if (!integer_zerop (desc.may_be_zero))
2741 	continue;
2742 
2743       aniter = desc.niter;
2744 
2745       if (!niter)
2746 	{
2747 	  /* Nothing recorded yet.  */
2748 	  niter = aniter;
2749 	  *exit = ex;
2750 	  continue;
2751 	}
2752 
2753       /* Prefer constants, the lower the better.  */
2754       if (TREE_CODE (aniter) != INTEGER_CST)
2755 	continue;
2756 
2757       if (TREE_CODE (niter) != INTEGER_CST)
2758 	{
2759 	  niter = aniter;
2760 	  *exit = ex;
2761 	  continue;
2762 	}
2763 
2764       if (tree_int_cst_lt (aniter, niter))
2765 	{
2766 	  niter = aniter;
2767 	  *exit = ex;
2768 	  continue;
2769 	}
2770     }
2771   exits.release ();
2772 
2773   return niter ? niter : chrec_dont_know;
2774 }
2775 
2776 /* Return true if loop is known to have a bounded number of iterations.  */
2777 
2778 bool
2779 finite_loop_p (struct loop *loop)
2780 {
2781   widest_int nit;
2782   int flags;
2783 
2784   flags = flags_from_decl_or_type (current_function_decl);
2785   if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2786     {
2787       if (dump_file && (dump_flags & TDF_DETAILS))
2788 	fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2789 		 loop->num);
2790       return true;
2791     }
2792 
2793   if (loop->any_upper_bound
2794       || max_loop_iterations (loop, &nit))
2795     {
2796       if (dump_file && (dump_flags & TDF_DETAILS))
2797 	fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2798 		 loop->num);
2799       return true;
2800     }
2801   return false;
2802 }
2803 
2804 /*
2805 
2806    Analysis of a number of iterations of a loop by a brute-force evaluation.
2807 
2808 */
2809 
2810 /* Bound on the number of iterations we try to evaluate.  */
2811 
2812 #define MAX_ITERATIONS_TO_TRACK \
2813   ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2814 
2815 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2816    result by a chain of operations in which all but exactly one of the
2817    operands are constants.  */
2818 
2819 static gphi *
2820 chain_of_csts_start (struct loop *loop, tree x)
2821 {
2822   gimple *stmt = SSA_NAME_DEF_STMT (x);
2823   tree use;
2824   basic_block bb = gimple_bb (stmt);
2825   enum tree_code code;
2826 
2827   if (!bb
2828       || !flow_bb_inside_loop_p (loop, bb))
2829     return NULL;
2830 
2831   if (gimple_code (stmt) == GIMPLE_PHI)
2832     {
2833       if (bb == loop->header)
2834 	return as_a <gphi *> (stmt);
2835 
2836       return NULL;
2837     }
2838 
2839   if (gimple_code (stmt) != GIMPLE_ASSIGN
2840       || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2841     return NULL;
2842 
2843   code = gimple_assign_rhs_code (stmt);
2844   if (gimple_references_memory_p (stmt)
2845       || TREE_CODE_CLASS (code) == tcc_reference
2846       || (code == ADDR_EXPR
2847 	  && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2848     return NULL;
2849 
2850   use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2851   if (use == NULL_TREE)
2852     return NULL;
2853 
2854   return chain_of_csts_start (loop, use);
2855 }
2856 
2857 /* Determines whether the expression X is derived from a result of a phi node
2858    in header of LOOP such that
2859 
2860    * the derivation of X consists only of operations with constants
2861    * the initial value of the phi node is constant
2862    * the value of the phi node in the next iteration can be derived from the
2863      value in the current iteration by a chain of operations with constants,
2864      or is also a constant
2865 
2866    If such phi node exists, it is returned, otherwise NULL is returned.  */
2867 
2868 static gphi *
2869 get_base_for (struct loop *loop, tree x)
2870 {
2871   gphi *phi;
2872   tree init, next;
2873 
2874   if (is_gimple_min_invariant (x))
2875     return NULL;
2876 
2877   phi = chain_of_csts_start (loop, x);
2878   if (!phi)
2879     return NULL;
2880 
2881   init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2882   next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2883 
2884   if (!is_gimple_min_invariant (init))
2885     return NULL;
2886 
2887   if (TREE_CODE (next) == SSA_NAME
2888       && chain_of_csts_start (loop, next) != phi)
2889     return NULL;
2890 
2891   return phi;
2892 }
2893 
2894 /* Given an expression X, then
2895 
2896    * if X is NULL_TREE, we return the constant BASE.
2897    * if X is a constant, we return the constant X.
2898    * otherwise X is an SSA name, whose value in the considered loop is derived
2899      by a chain of operations with constants from a result of a phi node in
2900      the header of the loop.  Then we return value of X when the value of the
2901      result of this phi node is given by the constant BASE.  */
2902 
2903 static tree
2904 get_val_for (tree x, tree base)
2905 {
2906   gimple *stmt;
2907 
2908   gcc_checking_assert (is_gimple_min_invariant (base));
2909 
2910   if (!x)
2911     return base;
2912   else if (is_gimple_min_invariant (x))
2913     return x;
2914 
2915   stmt = SSA_NAME_DEF_STMT (x);
2916   if (gimple_code (stmt) == GIMPLE_PHI)
2917     return base;
2918 
2919   gcc_checking_assert (is_gimple_assign (stmt));
2920 
2921   /* STMT must be either an assignment of a single SSA name or an
2922      expression involving an SSA name and a constant.  Try to fold that
2923      expression using the value for the SSA name.  */
2924   if (gimple_assign_ssa_name_copy_p (stmt))
2925     return get_val_for (gimple_assign_rhs1 (stmt), base);
2926   else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2927 	   && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2928     return fold_build1 (gimple_assign_rhs_code (stmt),
2929 			gimple_expr_type (stmt),
2930 			get_val_for (gimple_assign_rhs1 (stmt), base));
2931   else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2932     {
2933       tree rhs1 = gimple_assign_rhs1 (stmt);
2934       tree rhs2 = gimple_assign_rhs2 (stmt);
2935       if (TREE_CODE (rhs1) == SSA_NAME)
2936 	rhs1 = get_val_for (rhs1, base);
2937       else if (TREE_CODE (rhs2) == SSA_NAME)
2938 	rhs2 = get_val_for (rhs2, base);
2939       else
2940 	gcc_unreachable ();
2941       return fold_build2 (gimple_assign_rhs_code (stmt),
2942 			  gimple_expr_type (stmt), rhs1, rhs2);
2943     }
2944   else
2945     gcc_unreachable ();
2946 }
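
/* Illustration (an assumed chain, not from the sources): with

     _1 = x_3 + 2;
     _2 = _1 * 3;

   where x_3 is the result of a loop header phi, get_val_for (_2, 5)
   substitutes 5 for x_3 and folds, returning 21.  */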
2947 
2948 
2949 /* Tries to count the number of iterations of LOOP till it exits by EXIT
2950    by brute force -- i.e. by determining the value of the operands of the
2951    condition at EXIT in the first few iterations of the loop (assuming that
2952    these values are constant) and determining the first one in which the
2953    condition is not satisfied.  Returns the constant giving the number
2954    of the iterations of LOOP if successful, chrec_dont_know otherwise.  */
2955 
2956 tree
2957 loop_niter_by_eval (struct loop *loop, edge exit)
2958 {
2959   tree acnd;
2960   tree op[2], val[2], next[2], aval[2];
2961   gphi *phi;
2962   gimple *cond;
2963   unsigned i, j;
2964   enum tree_code cmp;
2965 
2966   cond = last_stmt (exit->src);
2967   if (!cond || gimple_code (cond) != GIMPLE_COND)
2968     return chrec_dont_know;
2969 
2970   cmp = gimple_cond_code (cond);
2971   if (exit->flags & EDGE_TRUE_VALUE)
2972     cmp = invert_tree_comparison (cmp, false);
2973 
2974   switch (cmp)
2975     {
2976     case EQ_EXPR:
2977     case NE_EXPR:
2978     case GT_EXPR:
2979     case GE_EXPR:
2980     case LT_EXPR:
2981     case LE_EXPR:
2982       op[0] = gimple_cond_lhs (cond);
2983       op[1] = gimple_cond_rhs (cond);
2984       break;
2985 
2986     default:
2987       return chrec_dont_know;
2988     }
2989 
2990   for (j = 0; j < 2; j++)
2991     {
2992       if (is_gimple_min_invariant (op[j]))
2993 	{
2994 	  val[j] = op[j];
2995 	  next[j] = NULL_TREE;
2996 	  op[j] = NULL_TREE;
2997 	}
2998       else
2999 	{
3000 	  phi = get_base_for (loop, op[j]);
3001 	  if (!phi)
3002 	    return chrec_dont_know;
3003 	  val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3004 	  next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3005 	}
3006     }
3007 
3008   /* Don't issue signed overflow warnings.  */
3009   fold_defer_overflow_warnings ();
3010 
3011   for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3012     {
3013       for (j = 0; j < 2; j++)
3014 	aval[j] = get_val_for (op[j], val[j]);
3015 
3016       acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3017       if (acnd && integer_zerop (acnd))
3018 	{
3019 	  fold_undefer_and_ignore_overflow_warnings ();
3020 	  if (dump_file && (dump_flags & TDF_DETAILS))
3021 	    fprintf (dump_file,
3022 		     "Proved that loop %d iterates %d times using brute force.\n",
3023 		     loop->num, i);
3024 	  return build_int_cst (unsigned_type_node, i);
3025 	}
3026 
3027       for (j = 0; j < 2; j++)
3028 	{
3029 	  aval[j] = val[j];
3030 	  val[j] = get_val_for (next[j], val[j]);
3031 	  if (!is_gimple_min_invariant (val[j]))
3032 	    {
3033 	      fold_undefer_and_ignore_overflow_warnings ();
3034 	      return chrec_dont_know;
3035 	    }
3036 	}
3037 
3038       /* If the next iteration would use the same base values
3039 	 as the current one, there is no point looping further,
3040 	 all following iterations will be the same as this one.  */
3041       if (val[0] == aval[0] && val[1] == aval[1])
3042 	break;
3043     }
3044 
3045   fold_undefer_and_ignore_overflow_warnings ();
3046 
3047   return chrec_dont_know;
3048 }
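
/* Sample run (illustrative): for

     for (i = 7; i != 0; i >>= 1)
       ...

   the values of i at the exit test are 7, 3, 1, 0, so the evaluation
   above proves the loop iterates exactly 3 times, well within
   MAX_ITERATIONS_TO_TRACK.  */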
3049 
3050 /* Finds the exit of LOOP through which the loop exits after a constant
3051    number of iterations and stores the exit edge to *EXIT.  The constant
3052    giving the number of iterations of LOOP is returned.  The number of
3053    iterations is determined using loop_niter_by_eval (i.e. by brute force
3054    evaluation).  If we are unable to find an exit for which loop_niter_by_eval
3055    determines the number of iterations, chrec_dont_know is returned.  */
3056 
3057 tree
3058 find_loop_niter_by_eval (struct loop *loop, edge *exit)
3059 {
3060   unsigned i;
3061   vec<edge> exits = get_loop_exit_edges (loop);
3062   edge ex;
3063   tree niter = NULL_TREE, aniter;
3064 
3065   *exit = NULL;
3066 
3067   /* Loops with multiple exits are expensive to handle and less important.  */
3068   if (!flag_expensive_optimizations
3069       && exits.length () > 1)
3070     {
3071       exits.release ();
3072       return chrec_dont_know;
3073     }
3074 
3075   FOR_EACH_VEC_ELT (exits, i, ex)
3076     {
3077       if (!just_once_each_iteration_p (loop, ex->src))
3078 	continue;
3079 
3080       aniter = loop_niter_by_eval (loop, ex);
3081       if (chrec_contains_undetermined (aniter))
3082 	continue;
3083 
3084       if (niter
3085 	  && !tree_int_cst_lt (aniter, niter))
3086 	continue;
3087 
3088       niter = aniter;
3089       *exit = ex;
3090     }
3091   exits.release ();
3092 
3093   return niter ? niter : chrec_dont_know;
3094 }
3095 
3096 /*
3097 
3098    Analysis of upper bounds on number of iterations of a loop.
3099 
3100 */
3101 
3102 static widest_int derive_constant_upper_bound_ops (tree, tree,
3103 						   enum tree_code, tree);
3104 
3105 /* Returns a constant upper bound on the value of the right-hand side of
3106    an assignment statement STMT.  */
3107 
3108 static widest_int
3109 derive_constant_upper_bound_assign (gimple *stmt)
3110 {
3111   enum tree_code code = gimple_assign_rhs_code (stmt);
3112   tree op0 = gimple_assign_rhs1 (stmt);
3113   tree op1 = gimple_assign_rhs2 (stmt);
3114 
3115   return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3116 					  op0, code, op1);
3117 }
3118 
3119 /* Returns a constant upper bound on the value of expression VAL.  VAL
3120    is considered to be unsigned.  If its type is signed, its value must
3121    be nonnegative.  */
3122 
3123 static widest_int
3124 derive_constant_upper_bound (tree val)
3125 {
3126   enum tree_code code;
3127   tree op0, op1, op2;
3128 
3129   extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3130   return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3131 }
3132 
3133 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3134    whose type is TYPE.  The expression is considered to be unsigned.  If
3135    its type is signed, its value must be nonnegative.  */
3136 
3137 static widest_int
3138 derive_constant_upper_bound_ops (tree type, tree op0,
3139 				 enum tree_code code, tree op1)
3140 {
3141   tree subtype, maxt;
3142   widest_int bnd, max, cst;
3143   gimple *stmt;
3144 
3145   if (INTEGRAL_TYPE_P (type))
3146     maxt = TYPE_MAX_VALUE (type);
3147   else
3148     maxt = upper_bound_in_type (type, type);
3149 
3150   max = wi::to_widest (maxt);
3151 
3152   switch (code)
3153     {
3154     case INTEGER_CST:
3155       return wi::to_widest (op0);
3156 
3157     CASE_CONVERT:
3158       subtype = TREE_TYPE (op0);
3159       if (!TYPE_UNSIGNED (subtype)
3160 	  /* If TYPE is also signed, the fact that VAL is nonnegative implies
3161 	     that OP0 is nonnegative.  */
3162 	  && TYPE_UNSIGNED (type)
3163 	  && !tree_expr_nonnegative_p (op0))
3164 	{
3165 	  /* If we cannot prove that the casted expression is nonnegative,
3166 	     we cannot establish more useful upper bound than the precision
3167 	     of the type gives us.  */
3168 	  return max;
3169 	}
3170 
3171       /* We now know that op0 is a nonnegative value.  Try deriving an upper
3172 	 bound for it.  */
3173       bnd = derive_constant_upper_bound (op0);
3174 
3175       /* If the bound does not fit in TYPE, the max. value of TYPE could be
3176 	 attained.  */
3177       if (wi::ltu_p (max, bnd))
3178 	return max;
3179 
3180       return bnd;
3181 
3182     case PLUS_EXPR:
3183     case POINTER_PLUS_EXPR:
3184     case MINUS_EXPR:
3185       if (TREE_CODE (op1) != INTEGER_CST
3186 	  || !tree_expr_nonnegative_p (op0))
3187 	return max;
3188 
3189       /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
3190 	 choose the most logical way to treat this constant regardless
3191 	 of the signedness of the type.  */
3192       cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3193       if (code != MINUS_EXPR)
3194 	cst = -cst;
3195 
3196       bnd = derive_constant_upper_bound (op0);
3197 
3198       if (wi::neg_p (cst))
3199 	{
3200 	  cst = -cst;
3201 	  /* Avoid CST == 0x80000...  */
3202 	  if (wi::neg_p (cst))
3203 	    return max;
3204 
3205 	  /* OP0 + CST.  We need to check that
3206 	     BND <= MAX (type) - CST.  */
3207 
3208 	  widest_int mmax = max - cst;
3209 	  if (wi::ltu_p (mmax, bnd))
3210 	    return max;
3211 
3212 	  return bnd + cst;
3213 	}
3214       else
3215 	{
3216 	  /* OP0 - CST, where CST >= 0.
3217 
3218 	     If TYPE is signed, we have already verified that OP0 >= 0, and we
3219 	     know that the result is nonnegative.  This implies that
3220 	     VAL <= BND - CST.
3221 
3222 	     If TYPE is unsigned, we must additionally know that OP0 >= CST,
3223 	     otherwise the operation underflows.
3224 	   */
3225 
3226 	  /* This should only happen if the type is unsigned; however, for
3227 	     buggy programs that use overflowing signed arithmetic even with
3228 	     -fno-wrapv, this condition may also be true for signed values.  */
3229 	  if (wi::ltu_p (bnd, cst))
3230 	    return max;
3231 
3232 	  if (TYPE_UNSIGNED (type))
3233 	    {
3234 	      tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3235 				      wide_int_to_tree (type, cst));
3236 	      if (!tem || !integer_nonzerop (tem))
3237 		return max;
3238 	    }
3239 
3240 	  bnd -= cst;
3241 	}
3242 
3243       return bnd;
3244 
3245     case FLOOR_DIV_EXPR:
3246     case EXACT_DIV_EXPR:
3247       if (TREE_CODE (op1) != INTEGER_CST
3248 	  || tree_int_cst_sign_bit (op1))
3249 	return max;
3250 
3251       bnd = derive_constant_upper_bound (op0);
3252       return wi::udiv_floor (bnd, wi::to_widest (op1));
3253 
3254     case BIT_AND_EXPR:
3255       if (TREE_CODE (op1) != INTEGER_CST
3256 	  || tree_int_cst_sign_bit (op1))
3257 	return max;
3258       return wi::to_widest (op1);
3259 
3260     case SSA_NAME:
3261       stmt = SSA_NAME_DEF_STMT (op0);
3262       if (gimple_code (stmt) != GIMPLE_ASSIGN
3263 	  || gimple_assign_lhs (stmt) != op0)
3264 	return max;
3265       return derive_constant_upper_bound_assign (stmt);
3266 
3267     default:
3268       return max;
3269     }
3270 }
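
/* Worked example (illustrative): for VAL = (x & 0xff) + 3 in an
   unsigned type, the BIT_AND_EXPR case bounds x & 0xff by 255 and the
   PLUS_EXPR case then yields the constant upper bound 258.  */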
3271 
3272 /* Emit a -Waggressive-loop-optimizations warning if needed.  */
3273 
3274 static void
3275 do_warn_aggressive_loop_optimizations (struct loop *loop,
3276 				       widest_int i_bound, gimple *stmt)
3277 {
3278   /* Don't warn if the loop doesn't have a known constant bound.  */
3279   if (!loop->nb_iterations
3280       || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3281       || !warn_aggressive_loop_optimizations
3282       /* To avoid warning multiple times for the same loop,
3283 	 only start warning when we preserve loops.  */
3284       || (cfun->curr_properties & PROP_loops) == 0
3285       /* Only warn once per loop.  */
3286       || loop->warned_aggressive_loop_optimizations
3287       /* Only warn if undefined behavior gives us a lower estimate than the
3288 	 known constant bound.  */
3289       || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3290       /* And undefined behavior happens unconditionally.  */
3291       || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3292     return;
3293 
3294   edge e = single_exit (loop);
3295   if (e == NULL)
3296     return;
3297 
3298   gimple *estmt = last_stmt (e->src);
3299   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3300   print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3301 	     ? UNSIGNED : SIGNED);
3302   auto_diagnostic_group d;
3303   if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3304 		  "iteration %s invokes undefined behavior", buf))
3305     inform (gimple_location (estmt), "within this loop");
3306   loop->warned_aggressive_loop_optimizations = true;
3307 }
3308 
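/* An illustrative example (not from this file) of code the warning above
   diagnoses:

     int a[4];
     for (int i = 0; i < 8; i++)
       a[i] = 0;

   The exit test gives a known constant bound, but the access a[i] invokes
   undefined behavior once i reaches 4, which yields a strictly lower
   estimate; GCC then warns "iteration 4 invokes undefined behavior" at
   the store and notes the loop's exit statement with "within this
   loop".  */
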
3309 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
3310    is true if the loop is exited immediately after STMT, and this exit
3311    is taken at the latest when STMT has been executed BOUND + 1 times.
3312    REALISTIC is true if BOUND is expected to be close to the real number
3313    of iterations.  UPPER is true if we are sure the loop iterates at most
3314    BOUND times.  I_BOUND is a widest_int upper estimate on BOUND.  */
3315 
3316 static void
3317 record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
3318 		 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3319 {
3320   widest_int delta;
3321 
3322   if (dump_file && (dump_flags & TDF_DETAILS))
3323     {
3324       fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3325       print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3326       fprintf (dump_file, " is %sexecuted at most ",
3327 	       upper ? "" : "probably ");
3328       print_generic_expr (dump_file, bound, TDF_SLIM);
3329       fprintf (dump_file, " (bounded by ");
3330       print_decu (i_bound, dump_file);
3331       fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3332     }
3333 
3334   /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
3335      real number of iterations.  */
3336   if (TREE_CODE (bound) != INTEGER_CST)
3337     realistic = false;
3338   else
3339     gcc_checking_assert (i_bound == wi::to_widest (bound));
3340 
3341   /* If we have a guaranteed upper bound, record it in the appropriate
3342      list, unless this is an !is_exit bound (i.e. undefined behavior in
3343      at_stmt) in a loop with known constant number of iterations.  */
3344   if (upper
3345       && (is_exit
3346 	  || loop->nb_iterations == NULL_TREE
3347 	  || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3348     {
3349       struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3350 
3351       elt->bound = i_bound;
3352       elt->stmt = at_stmt;
3353       elt->is_exit = is_exit;
3354       elt->next = loop->bounds;
3355       loop->bounds = elt;
3356     }
3357 
3358   /* If the statement is executed on every path to the loop latch, we can
3359      directly infer an upper bound on the number of iterations of the loop.  */
3360   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3361     upper = false;
3362 
3363   /* Update the number of iteration estimates according to the bound.
3364      If at_stmt is an exit then the loop latch is executed at most BOUND times,
3365      otherwise it can be executed BOUND + 1 times.  We will lower the estimate
3366      later if such a statement must be executed on the last iteration.  */
3367   if (is_exit)
3368     delta = 0;
3369   else
3370     delta = 1;
3371   widest_int new_i_bound = i_bound + delta;
3372 
3373   /* If an overflow occurred, ignore the result.  */
3374   if (wi::ltu_p (new_i_bound, delta))
3375     return;
3376 
3377   if (upper && !is_exit)
3378     do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3379   record_niter_bound (loop, new_i_bound, realistic, upper);
3380 }
3381 
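/* A small numeric illustration of the DELTA logic above: if BOUND is 9
   and AT_STMT is the exit test itself, the latch runs at most 9 times;
   if AT_STMT is instead a non-exit statement whose 11th execution would
   be undefined, the exit may still be taken after its 10th execution, so
   the latch may run 9 + 1 times and DELTA accounts for that extra latch
   execution.  */
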
3382 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3383    and doesn't overflow.  */
3384 
3385 static void
3386 record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
3387 {
3388   struct control_iv *iv;
3389 
3390   if (!niter->control.base || !niter->control.step)
3391     return;
3392 
3393   if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3394     return;
3395 
3396   iv = ggc_alloc<control_iv> ();
3397   iv->base = niter->control.base;
3398   iv->step = niter->control.step;
3399   iv->next = loop->control_ivs;
3400   loop->control_ivs = iv;
3401 
3402   return;
3403 }
3404 
3405 /* This function returns TRUE if the following conditions are satisfied:
3406      1) VAR is an SSA variable.
3407      2) VAR is an IV:{base, step} in its defining loop.
3408      3) The IV doesn't overflow.
3409      4) Both base and step are integer constants.
3410      5) Base is the MIN/MAX value of the IV, depending on IS_MIN.
3411    On success, store the value of base to INIT.  */
3412 
3413 static bool
3414 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3415 {
3416   if (TREE_CODE (var) != SSA_NAME)
3417     return false;
3418 
3419   gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3420   struct loop *loop = loop_containing_stmt (def_stmt);
3421 
3422   if (loop == NULL)
3423     return false;
3424 
3425   affine_iv iv;
3426   if (!simple_iv (loop, loop, var, &iv, false))
3427     return false;
3428 
3429   if (!iv.no_overflow)
3430     return false;
3431 
3432   if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3433     return false;
3434 
3435   if (is_min == tree_int_cst_sign_bit (iv.step))
3436     return false;
3437 
3438   *init = wi::to_wide (iv.base);
3439   return true;
3440 }
3441 
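/* For instance (an illustrative sketch), for a no-overflow IV
   i_1 = {0, +, 1} the base 0 is the smallest value the IV ever takes, so
   get_cst_init_from_scev (i_1, &init, true) succeeds and stores 0 to
   INIT, while a call with IS_MIN false fails: with a positive step the
   base is the minimum of the IV, not its maximum.  */
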
3442 /* Record the estimate on number of iterations of LOOP based on the fact that
3443    the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3444    its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
3445    estimated number of iterations is expected to be close to the real one.
3446    UPPER is true if we are sure the induction variable does not wrap.  */
3447 
3448 static void
3449 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
3450 		       tree low, tree high, bool realistic, bool upper)
3451 {
3452   tree niter_bound, extreme, delta;
3453   tree type = TREE_TYPE (base), unsigned_type;
3454   tree orig_base = base;
3455 
3456   if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3457     return;
3458 
3459   if (dump_file && (dump_flags & TDF_DETAILS))
3460     {
3461       fprintf (dump_file, "Induction variable (");
3462       print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3463       fprintf (dump_file, ") ");
3464       print_generic_expr (dump_file, base, TDF_SLIM);
3465       fprintf (dump_file, " + ");
3466       print_generic_expr (dump_file, step, TDF_SLIM);
3467       fprintf (dump_file, " * iteration does not wrap in statement ");
3468       print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3469       fprintf (dump_file, " in loop %d.\n", loop->num);
3470     }
3471 
3472   unsigned_type = unsigned_type_for (type);
3473   base = fold_convert (unsigned_type, base);
3474   step = fold_convert (unsigned_type, step);
3475 
3476   if (tree_int_cst_sign_bit (step))
3477     {
3478       wide_int min, max;
3479       extreme = fold_convert (unsigned_type, low);
3480       if (TREE_CODE (orig_base) == SSA_NAME
3481 	  && TREE_CODE (high) == INTEGER_CST
3482 	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3483 	  && (get_range_info (orig_base, &min, &max) == VR_RANGE
3484 	      || get_cst_init_from_scev (orig_base, &max, false))
3485 	  && wi::gts_p (wi::to_wide (high), max))
3486 	base = wide_int_to_tree (unsigned_type, max);
3487       else if (TREE_CODE (base) != INTEGER_CST
3488 	       && dominated_by_p (CDI_DOMINATORS,
3489 				  loop->latch, gimple_bb (stmt)))
3490 	base = fold_convert (unsigned_type, high);
3491       delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3492       step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3493     }
3494   else
3495     {
3496       wide_int min, max;
3497       extreme = fold_convert (unsigned_type, high);
3498       if (TREE_CODE (orig_base) == SSA_NAME
3499 	  && TREE_CODE (low) == INTEGER_CST
3500 	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3501 	  && (get_range_info (orig_base, &min, &max) == VR_RANGE
3502 	      || get_cst_init_from_scev (orig_base, &min, true))
3503 	  && wi::gts_p (min, wi::to_wide (low)))
3504 	base = wide_int_to_tree (unsigned_type, min);
3505       else if (TREE_CODE (base) != INTEGER_CST
3506 	       && dominated_by_p (CDI_DOMINATORS,
3507 				  loop->latch, gimple_bb (stmt)))
3508 	base = fold_convert (unsigned_type, low);
3509       delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3510     }
3511 
3512   /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3513      would get out of the range.  */
3514   niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3515   widest_int max = derive_constant_upper_bound (niter_bound);
3516   record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3517 }
3518 
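/* A worked example of the computation above (illustrative only): for a
   nonwrapping IV with BASE 0 and STEP 4 whose values must stay within
   <0, 1000>, DELTA is 1000 - 0 and NITER_BOUND is 1000 / 4 == 250, so
   STMT can be executed at most 250 + 1 times before the IV would leave
   the range.  */
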
3519 /* Determine information about the number of iterations of a LOOP from the
3520    index IDX of a data reference accessed in STMT.  Callback for
3521    for_each_index.  */
3523 
3524 struct ilb_data
3525 {
3526   struct loop *loop;
3527   gimple *stmt;
3528 };
3529 
3530 static bool
3531 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3532 {
3533   struct ilb_data *data = (struct ilb_data *) dta;
3534   tree ev, init, step;
3535   tree low, high, type, next;
3536   bool sign, upper = true, at_end = false;
3537   struct loop *loop = data->loop;
3538 
3539   if (TREE_CODE (base) != ARRAY_REF)
3540     return true;
3541 
3542   /* For arrays at the end of a structure, we are not guaranteed that they
3543      do not really extend beyond their declared size.  However, for arrays of
3544      size greater than one, this is unlikely to be intended.  */
3545   if (array_at_struct_end_p (base))
3546     {
3547       at_end = true;
3548       upper = false;
3549     }
3550 
3551   struct loop *dloop = loop_containing_stmt (data->stmt);
3552   if (!dloop)
3553     return true;
3554 
3555   ev = analyze_scalar_evolution (dloop, *idx);
3556   ev = instantiate_parameters (loop, ev);
3557   init = initial_condition (ev);
3558   step = evolution_part_in_loop_num (ev, loop->num);
3559 
3560   if (!init
3561       || !step
3562       || TREE_CODE (step) != INTEGER_CST
3563       || integer_zerop (step)
3564       || tree_contains_chrecs (init, NULL)
3565       || chrec_contains_symbols_defined_in_loop (init, loop->num))
3566     return true;
3567 
3568   low = array_ref_low_bound (base);
3569   high = array_ref_up_bound (base);
3570 
3571   /* The case of nonconstant bounds could be handled, but it would be
3572      complicated.  */
3573   if (TREE_CODE (low) != INTEGER_CST
3574       || !high
3575       || TREE_CODE (high) != INTEGER_CST)
3576     return true;
3577   sign = tree_int_cst_sign_bit (step);
3578   type = TREE_TYPE (step);
3579 
3580   /* An array of length 1 at the end of a structure most likely extends
3581      beyond its bounds.  */
3582   if (at_end
3583       && operand_equal_p (low, high, 0))
3584     return true;
3585 
3586   /* In case the relevant bound of the array does not fit in type, or
3587      it does, but bound + step (in type) still belongs to the range of the
3588      array, the index may wrap and still stay within the range of the array
3589      (consider e.g. if the array is indexed by the full range of
3590      unsigned char).
3591 
3592      To make things simpler, we require both bounds to fit into type, although
3593      there are cases where this would not be strictly necessary.  */
3594   if (!int_fits_type_p (high, type)
3595       || !int_fits_type_p (low, type))
3596     return true;
3597   low = fold_convert (type, low);
3598   high = fold_convert (type, high);
3599 
3600   if (sign)
3601     next = fold_binary (PLUS_EXPR, type, low, step);
3602   else
3603     next = fold_binary (PLUS_EXPR, type, high, step);
3604 
3605   if (tree_int_cst_compare (low, next) <= 0
3606       && tree_int_cst_compare (next, high) <= 0)
3607     return true;
3608 
3609   /* If the access is not executed on every iteration, we must ensure that
3610      overflow cannot make the access valid later.  */
3611   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3612       && scev_probably_wraps_p (NULL_TREE,
3613 				initial_condition_in_loop_num (ev, loop->num),
3614 				step, data->stmt, loop, true))
3615     upper = false;
3616 
3617   record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3618   return true;
3619 }
3620 
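/* An illustrative example (not from this file) of a bound derived by the
   callback above:

     int a[100];
     for (i = 0; ; i++)
       a[i] = i;

   The index has evolution {0, +, 1} and the array bounds are <0, 99>, so
   the access is valid during at most 100 executions; since the store
   dominates the latch, this becomes an upper bound on the number of
   iterations even though the loop has no exit test at all.  */
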
3621 /* Determine information about the number of iterations of a LOOP from the
3622    bounds of arrays in the data reference REF accessed in STMT.  */
3624 
3625 static void
3626 infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
3627 {
3628   struct ilb_data data;
3629 
3630   data.loop = loop;
3631   data.stmt = stmt;
3632   for_each_index (&ref, idx_infer_loop_bounds, &data);
3633 }
3634 
3635 /* Determine information about the number of iterations of a LOOP from the
3636    way arrays are used in STMT.  */
3638 
3639 static void
3640 infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
3641 {
3642   if (is_gimple_assign (stmt))
3643     {
3644       tree op0 = gimple_assign_lhs (stmt);
3645       tree op1 = gimple_assign_rhs1 (stmt);
3646 
3647       /* For each memory access, analyze its access function
3648 	 and record a bound on the loop iteration domain.  */
3649       if (REFERENCE_CLASS_P (op0))
3650 	infer_loop_bounds_from_ref (loop, stmt, op0);
3651 
3652       if (REFERENCE_CLASS_P (op1))
3653 	infer_loop_bounds_from_ref (loop, stmt, op1);
3654     }
3655   else if (is_gimple_call (stmt))
3656     {
3657       tree arg, lhs;
3658       unsigned i, n = gimple_call_num_args (stmt);
3659 
3660       lhs = gimple_call_lhs (stmt);
3661       if (lhs && REFERENCE_CLASS_P (lhs))
3662 	infer_loop_bounds_from_ref (loop, stmt, lhs);
3663 
3664       for (i = 0; i < n; i++)
3665 	{
3666 	  arg = gimple_call_arg (stmt, i);
3667 	  if (REFERENCE_CLASS_P (arg))
3668 	    infer_loop_bounds_from_ref (loop, stmt, arg);
3669 	}
3670     }
3671 }
3672 
3673 /* Determine information about the number of iterations of a LOOP from the
3674    fact that pointer arithmetic in STMT does not overflow.  */
3675 
3676 static void
3677 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
3678 {
3679   tree def, base, step, scev, type, low, high;
3680   tree var, ptr;
3681 
3682   if (!is_gimple_assign (stmt)
3683       || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3684     return;
3685 
3686   def = gimple_assign_lhs (stmt);
3687   if (TREE_CODE (def) != SSA_NAME)
3688     return;
3689 
3690   type = TREE_TYPE (def);
3691   if (!nowrap_type_p (type))
3692     return;
3693 
3694   ptr = gimple_assign_rhs1 (stmt);
3695   if (!expr_invariant_in_loop_p (loop, ptr))
3696     return;
3697 
3698   var = gimple_assign_rhs2 (stmt);
3699   if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3700     return;
3701 
3702   struct loop *uloop = loop_containing_stmt (stmt);
3703   scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
3704   if (chrec_contains_undetermined (scev))
3705     return;
3706 
3707   base = initial_condition_in_loop_num (scev, loop->num);
3708   step = evolution_part_in_loop_num (scev, loop->num);
3709 
3710   if (!base || !step
3711       || TREE_CODE (step) != INTEGER_CST
3712       || tree_contains_chrecs (base, NULL)
3713       || chrec_contains_symbols_defined_in_loop (base, loop->num))
3714     return;
3715 
3716   low = lower_bound_in_type (type, type);
3717   high = upper_bound_in_type (type, type);
3718 
3719   /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3720      produce a NULL pointer.  The contrary would mean NULL points to an object,
3721      while NULL is supposed to compare unequal with the address of all objects.
3722      Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3723      NULL pointer since that would mean wrapping, which we assume here not to
3724      happen.  So, we can exclude NULL from the valid range of pointer
3725      arithmetic.  */
3726   if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3727     low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3728 
3729   record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3730 }
3731 
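/* An illustrative sketch of the inference above, with hypothetical SSA
   names: for

     q_5 = p_2 + sz_6;   <-- POINTER_PLUS_EXPR, p_2 loop-invariant

   where q_5 has the evolution {p_2, +, 4} in LOOP, pointer arithmetic
   may not wrap, so the values of q_5 are confined to [LOW, TYPE_MAX]
   with LOW excluding NULL under -fdelete-null-pointer-checks, and the
   number of iterations in which the addition executes is bounded
   accordingly.  */
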
3732 /* Determine information about the number of iterations of a LOOP from the
3733    fact that signed arithmetic in STMT does not overflow.  */
3734 
3735 static void
3736 infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
3737 {
3738   tree def, base, step, scev, type, low, high;
3739 
3740   if (gimple_code (stmt) != GIMPLE_ASSIGN)
3741     return;
3742 
3743   def = gimple_assign_lhs (stmt);
3744 
3745   if (TREE_CODE (def) != SSA_NAME)
3746     return;
3747 
3748   type = TREE_TYPE (def);
3749   if (!INTEGRAL_TYPE_P (type)
3750       || !TYPE_OVERFLOW_UNDEFINED (type))
3751     return;
3752 
3753   scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3754   if (chrec_contains_undetermined (scev))
3755     return;
3756 
3757   base = initial_condition_in_loop_num (scev, loop->num);
3758   step = evolution_part_in_loop_num (scev, loop->num);
3759 
3760   if (!base || !step
3761       || TREE_CODE (step) != INTEGER_CST
3762       || tree_contains_chrecs (base, NULL)
3763       || chrec_contains_symbols_defined_in_loop (base, loop->num))
3764     return;
3765 
3766   low = lower_bound_in_type (type, type);
3767   high = upper_bound_in_type (type, type);
3768   wide_int minv, maxv;
3769   if (get_range_info (def, &minv, &maxv) == VR_RANGE)
3770     {
3771       low = wide_int_to_tree (type, minv);
3772       high = wide_int_to_tree (type, maxv);
3773     }
3774 
3775   record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3776 }
3777 
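/* An illustrative example (not from this file) of the inference above:
   if a loop increments a signed char variable c by 1 in every iteration
   and no narrower value range is recorded for it, then, because signed
   overflow is undefined, the values of c must stay within [-128, 127]
   and the increment can be executed at most 255 + 1 times before c would
   have to leave that range.  */
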
3778 /* The following analyzers extract information on the bounds
3779    of LOOP from the following undefined behaviors:
3780 
3781    - data references should not access elements beyond the statically
3782      allocated size,
3783 
3784    - signed variables should not overflow when flag_wrapv is not set.
3785 */
3786 
3787 static void
3788 infer_loop_bounds_from_undefined (struct loop *loop)
3789 {
3790   unsigned i;
3791   basic_block *bbs;
3792   gimple_stmt_iterator bsi;
3793   basic_block bb;
3794   bool reliable;
3795 
3796   bbs = get_loop_body (loop);
3797 
3798   for (i = 0; i < loop->num_nodes; i++)
3799     {
3800       bb = bbs[i];
3801 
3802       /* If BB is not executed in each iteration of the loop, we cannot
3803 	 use the operations in it to infer a reliable upper bound on the
3804 	 # of iterations of the loop.  However, we can use it as a guess.
3805 	 Reliable guesses come only from array bounds.  */
3806       reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3807 
3808       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3809 	{
3810 	  gimple *stmt = gsi_stmt (bsi);
3811 
3812 	  infer_loop_bounds_from_array (loop, stmt);
3813 
3814 	  if (reliable)
3815             {
3816               infer_loop_bounds_from_signedness (loop, stmt);
3817               infer_loop_bounds_from_pointer_arith (loop, stmt);
3818             }
3819   	}
3820 
3821     }
3822 
3823   free (bbs);
3824 }
3825 
3826 /* Compare wide ints, callback for qsort.  */
3827 
3828 static int
3829 wide_int_cmp (const void *p1, const void *p2)
3830 {
3831   const widest_int *d1 = (const widest_int *) p1;
3832   const widest_int *d2 = (const widest_int *) p2;
3833   return wi::cmpu (*d1, *d2);
3834 }
3835 
3836 /* Return the index of BOUND in the BOUNDS array, which is sorted in
3837    increasing order.  Lookup is done by binary search.  */
3838 
3839 static int
3840 bound_index (vec<widest_int> bounds, const widest_int &bound)
3841 {
3842   unsigned int end = bounds.length ();
3843   unsigned int begin = 0;
3844 
3845   /* Find a matching index by means of a binary search.  */
3846   while (begin != end)
3847     {
3848       unsigned int middle = (begin + end) / 2;
3849       widest_int index = bounds[middle];
3850 
3851       if (index == bound)
3852 	return middle;
3853       else if (wi::ltu_p (index, bound))
3854 	begin = middle + 1;
3855       else
3856 	end = middle;
3857     }
3858   gcc_unreachable ();
3859 }
3860 
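/* For example (illustrative), with BOUNDS == {3, 7, 12} the call
   bound_index (bounds, 7) returns 1.  The caller guarantees that BOUND
   is present in the array, which is why a failed search ends in
   gcc_unreachable.  */
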
3861 /* We recorded loop bounds only for statements dominating the loop latch (and thus
3862    executed each loop iteration).  If there are any bounds on statements not
3863    dominating the loop latch we can improve the estimate by walking the loop
3864    body and seeing if every path from loop header to loop latch contains
3865    some bounded statement.  */
3866 
3867 static void
3868 discover_iteration_bound_by_body_walk (struct loop *loop)
3869 {
3870   struct nb_iter_bound *elt;
3871   auto_vec<widest_int> bounds;
3872   vec<vec<basic_block> > queues = vNULL;
3873   vec<basic_block> queue = vNULL;
3874   ptrdiff_t queue_index;
3875   ptrdiff_t latch_index = 0;
3876 
3877   /* Discover what bounds may interest us.  */
3878   for (elt = loop->bounds; elt; elt = elt->next)
3879     {
3880       widest_int bound = elt->bound;
3881 
3882       /* An exit terminates the loop at the given iteration, while a non-exit
3883 	 produces an undefined effect on the next iteration.  */
3884       if (!elt->is_exit)
3885 	{
3886 	  bound += 1;
3887 	  /* If an overflow occurred, ignore the result.  */
3888 	  if (bound == 0)
3889 	    continue;
3890 	}
3891 
3892       if (!loop->any_upper_bound
3893 	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3894         bounds.safe_push (bound);
3895     }
3896 
3897   /* Exit early if there is nothing to do.  */
3898   if (!bounds.exists ())
3899     return;
3900 
3901   if (dump_file && (dump_flags & TDF_DETAILS))
3902     fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3903 
3904   /* Sort the bounds in increasing order.  */
3905   bounds.qsort (wide_int_cmp);
3906 
3907   /* For every basic block record the lowest bound that is guaranteed to
3908      terminate the loop.  */
3909 
3910   hash_map<basic_block, ptrdiff_t> bb_bounds;
3911   for (elt = loop->bounds; elt; elt = elt->next)
3912     {
3913       widest_int bound = elt->bound;
3914       if (!elt->is_exit)
3915 	{
3916 	  bound += 1;
3917 	  /* If an overflow occurred, ignore the result.  */
3918 	  if (bound == 0)
3919 	    continue;
3920 	}
3921 
3922       if (!loop->any_upper_bound
3923 	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3924 	{
3925 	  ptrdiff_t index = bound_index (bounds, bound);
3926 	  ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3927 	  if (!entry)
3928 	    bb_bounds.put (gimple_bb (elt->stmt), index);
3929 	  else if ((ptrdiff_t)*entry > index)
3930 	    *entry = index;
3931 	}
3932     }
3933 
3934   hash_map<basic_block, ptrdiff_t> block_priority;
3935 
3936   /* Perform shortest path discovery loop->header ... loop->latch.
3937 
3938      The "distance" is given by the smallest loop bound of a basic block
3939      present in the path, and we look for the path whose smallest bound
3940      is largest.
3941 
3942      To avoid the need for a Fibonacci heap on widest_ints we simply
3943      compress the bounds into indexes into the BOUNDS array and then
3944      represent the queue as an array of queues, one for every index.
3945      An index of BOUNDS.length () means that the execution of the given
3946      BB has no bounds determined.
3947 
3948      BLOCK_PRIORITY is a map translating a basic block into the highest
3949      index it was inserted into the priority queue with.  */
3950   latch_index = -1;
3951 
3952   /* Start walk in loop header with index set to infinite bound.  */
3953   queue_index = bounds.length ();
3954   queues.safe_grow_cleared (queue_index + 1);
3955   queue.safe_push (loop->header);
3956   queues[queue_index] = queue;
3957   block_priority.put (loop->header, queue_index);
3958 
3959   for (; queue_index >= 0; queue_index--)
3960     {
3961       if (latch_index < queue_index)
3962 	{
3963 	  while (queues[queue_index].length ())
3964 	    {
3965 	      basic_block bb;
3966 	      ptrdiff_t bound_index = queue_index;
3967               edge e;
3968               edge_iterator ei;
3969 
3970 	      queue = queues[queue_index];
3971 	      bb = queue.pop ();
3972 
3973 	      /* The BB was later re-inserted with a higher priority; skip this stale entry.  */
3974 	      if (*block_priority.get (bb) > queue_index)
3975 		continue;
3976 
3977 	      /* See if we can improve the bound.  */
3978 	      ptrdiff_t *entry = bb_bounds.get (bb);
3979 	      if (entry && *entry < bound_index)
3980 		bound_index = *entry;
3981 
3982 	      /* Insert successors into the queue, watch for the latch edge
3983 		 and record the greatest index we saw.  */
3984 	      FOR_EACH_EDGE (e, ei, bb->succs)
3985 		{
3986 		  bool insert = false;
3987 
3988 		  if (loop_exit_edge_p (loop, e))
3989 		    continue;
3990 
3991 		  if (e == loop_latch_edge (loop)
3992 		      && latch_index < bound_index)
3993 		    latch_index = bound_index;
3994 		  else if (!(entry = block_priority.get (e->dest)))
3995 		    {
3996 		      insert = true;
3997 		      block_priority.put (e->dest, bound_index);
3998 		    }
3999 		  else if (*entry < bound_index)
4000 		    {
4001 		      insert = true;
4002 		      *entry = bound_index;
4003 		    }
4004 
4005 		  if (insert)
4006 		    queues[bound_index].safe_push (e->dest);
4007 		}
4008 	    }
4009 	}
4010       queues[queue_index].release ();
4011     }
4012 
4013   gcc_assert (latch_index >= 0);
4014   if ((unsigned)latch_index < bounds.length ())
4015     {
4016       if (dump_file && (dump_flags & TDF_DETAILS))
4017 	{
4018 	  fprintf (dump_file, "Found better loop bound ");
4019 	  print_decu (bounds[latch_index], dump_file);
4020 	  fprintf (dump_file, "\n");
4021 	}
4022       record_niter_bound (loop, bounds[latch_index], false, true);
4023     }
4024 
4025   queues.release ();
4026 }
4027 
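/* An illustrative sketch of the walk above on a diamond-shaped loop body:

     header:  if (c) goto A; else goto B;
     A:       a[i] = 0;   <-- bounds this statement to 100 executions
     B:       b[i] = 0;   <-- bounds this statement to 10 executions
     latch:   goto header;

   Neither store dominates the latch, so neither bound was recorded for
   the loop directly; but every header-to-latch path crosses a bounded
   statement, and the best such path (through A) still allows only 100
   iterations, so 100 is recorded as an upper bound for the whole
   loop.  */
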
4028 /* See if every path across the loop goes through a statement that is known
4029    not to execute on the last iteration.  In that case we can decrease the
4030    iteration count by 1.  */
4031 
4032 static void
4033 maybe_lower_iteration_bound (struct loop *loop)
4034 {
4035   hash_set<gimple *> *not_executed_last_iteration = NULL;
4036   struct nb_iter_bound *elt;
4037   bool found_exit = false;
4038   auto_vec<basic_block> queue;
4039   bitmap visited;
4040 
4041   /* Collect all statements with an interesting (i.e. lower than
4042      nb_iterations_upper_bound) bound on them.
4043 
4044      TODO: Due to the way record_estimate chooses estimates to store, the
4045      bounds will always be nb_iterations_upper_bound-1.  We can change this
4046      to also record statements not dominating the loop latch and update the
4047      walk below to the shortest path algorithm.  */
4048   for (elt = loop->bounds; elt; elt = elt->next)
4049     {
4050       if (!elt->is_exit
4051 	  && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4052 	{
4053 	  if (!not_executed_last_iteration)
4054 	    not_executed_last_iteration = new hash_set<gimple *>;
4055 	  not_executed_last_iteration->add (elt->stmt);
4056 	}
4057     }
4058   if (!not_executed_last_iteration)
4059     return;
4060 
4061   /* Start DFS walk in the loop header and see if we can reach the
4062      loop latch or any of the exits (including statements with side
4063      effects that may terminate the loop otherwise) without visiting
4064      any of the statements known to have undefined effect on the last
4065      iteration.  */
4066   queue.safe_push (loop->header);
4067   visited = BITMAP_ALLOC (NULL);
4068   bitmap_set_bit (visited, loop->header->index);
4069   found_exit = false;
4070 
4071   do
4072     {
4073       basic_block bb = queue.pop ();
4074       gimple_stmt_iterator gsi;
4075       bool stmt_found = false;
4076 
4077       /* Look for possible exits and statements bounding the execution.  */
4078       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4079 	{
4080 	  gimple *stmt = gsi_stmt (gsi);
4081 	  if (not_executed_last_iteration->contains (stmt))
4082 	    {
4083 	      stmt_found = true;
4084 	      break;
4085 	    }
4086 	  if (gimple_has_side_effects (stmt))
4087 	    {
4088 	      found_exit = true;
4089 	      break;
4090 	    }
4091 	}
4092       if (found_exit)
4093 	break;
4094 
4095       /* If no bounding statement is found, continue the walk.  */
4096       if (!stmt_found)
4097 	{
4098           edge e;
4099           edge_iterator ei;
4100 
4101           FOR_EACH_EDGE (e, ei, bb->succs)
4102 	    {
4103 	      if (loop_exit_edge_p (loop, e)
4104 		  || e == loop_latch_edge (loop))
4105 		{
4106 		  found_exit = true;
4107 		  break;
4108 		}
4109 	      if (bitmap_set_bit (visited, e->dest->index))
4110 		queue.safe_push (e->dest);
4111 	    }
4112 	}
4113     }
4114   while (queue.length () && !found_exit);
4115 
4116   /* If every path through the loop reaches a bounding statement before
4117      exiting, then we know the last iteration of the loop will have an
4118      undefined effect and we can decrease the number of iterations.  */
4119 
4120   if (!found_exit)
4121     {
4122       if (dump_file && (dump_flags & TDF_DETAILS))
4123 	fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4124 		 "undefined statement must be executed at the last iteration.\n");
4125       record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4126 			  false, true);
4127     }
4128 
4129   BITMAP_FREE (visited);
4130   delete not_executed_last_iteration;
4131 }
4132 
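/* An illustrative sketch of the reduction above: in a loop of the form

     do {
       if (c)
	 a[i] = 0;   <-- a has 3 elements: undefined once i reaches 3
       else
	 b[i] = 0;   <-- b has 3 elements: undefined once i reaches 3
       i++;
     } while (i < 4);

   neither store dominates the latch, yet every path from the header to
   the latch crosses one of them, so the last iteration would invoke
   undefined behavior in either case and the recorded upper bound can be
   decreased by 1.  */
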
4133 /* Records estimates on numbers of iterations of LOOP.  Estimates derived
4134    from undefined behavior are used only with -faggressive-loop-optimizations.  */
4135 
4136 void
4137 estimate_numbers_of_iterations (struct loop *loop)
4138 {
4139   vec<edge> exits;
4140   tree niter, type;
4141   unsigned i;
4142   struct tree_niter_desc niter_desc;
4143   edge ex;
4144   widest_int bound;
4145   edge likely_exit;
4146 
4147   /* Give up if we have already tried to compute an estimate.  */
4148   if (loop->estimate_state != EST_NOT_COMPUTED)
4149     return;
4150 
4151   loop->estimate_state = EST_AVAILABLE;
4152 
4153   /* If we have a measured profile, use it to estimate the number of
4154      iterations.  Normally this is recorded by branch_prob right after
4155      reading the profile.  However, in case we found a new loop, record the
4156      information here.
4157 
4158      Explicitly check for profile status so we do not report
4159      wrong prediction hitrates for guessed loop iterations heuristics.
4160      Do not recompute already recorded bounds - we ought to be better at
4161      updating iteration bounds than at updating the profile in general, and
4162      thus recomputing iteration bounds later in the compilation process will
4163      just introduce random roundoff errors.  */
4164   if (!loop->any_estimate
4165       && loop->header->count.reliable_p ())
4166     {
4167       gcov_type nit = expected_loop_iterations_unbounded (loop);
4168       bound = gcov_type_to_wide_int (nit);
4169       record_niter_bound (loop, bound, true, false);
4170     }
4171 
4172   /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
4173      to be constant, we avoid undefined behavior implied bounds and instead
4174      diagnose those loops with -Waggressive-loop-optimizations.  */
4175   number_of_latch_executions (loop);
4176 
4177   exits = get_loop_exit_edges (loop);
4178   likely_exit = single_likely_exit (loop);
4179   FOR_EACH_VEC_ELT (exits, i, ex)
4180     {
4181       if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
4182 	continue;
4183 
4184       niter = niter_desc.niter;
4185       type = TREE_TYPE (niter);
4186       if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4187 	niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4188 			build_int_cst (type, 0),
4189 			niter);
4190       record_estimate (loop, niter, niter_desc.max,
4191 		       last_stmt (ex->src),
4192 		       true, ex == likely_exit, true);
4193       record_control_iv (loop, &niter_desc);
4194     }
4195   exits.release ();
4196 
4197   if (flag_aggressive_loop_optimizations)
4198     infer_loop_bounds_from_undefined (loop);
4199 
4200   discover_iteration_bound_by_body_walk (loop);
4201 
4202   maybe_lower_iteration_bound (loop);
4203 
4204   /* If we know the exact number of iterations of this loop, try not to
4205      break code with undefined behavior by not recording a smaller
4206      maximum number of iterations.  */
4207   if (loop->nb_iterations
4208       && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4209     {
4210       loop->any_upper_bound = true;
4211       loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
4212     }
4213 }
4214 
4215 /* Sets NIT to the estimated number of executions of the latch of the
4216    LOOP.  If we have no reliable estimate, the function returns false,
4217    otherwise returns true.  */
4219 
4220 bool
4221 estimated_loop_iterations (struct loop *loop, widest_int *nit)
4222 {
4223   /* When SCEV information is available, try to update loop iterations
4224      estimate.  Otherwise just return whatever we recorded earlier.  */
4225   if (scev_initialized_p ())
4226     estimate_numbers_of_iterations (loop);
4227 
4228   return (get_estimated_loop_iterations (loop, nit));
4229 }
4230 
4231 /* Similar to estimated_loop_iterations, but returns the estimate only
4232    if it fits into HOST_WIDE_INT.  If this is not the case, or the estimate
4233    on the number of iterations of LOOP could not be derived, returns -1.  */
4234 
4235 HOST_WIDE_INT
4236 estimated_loop_iterations_int (struct loop *loop)
4237 {
4238   widest_int nit;
4239   HOST_WIDE_INT hwi_nit;
4240 
4241   if (!estimated_loop_iterations (loop, &nit))
4242     return -1;
4243 
4244   if (!wi::fits_shwi_p (nit))
4245     return -1;
4246   hwi_nit = nit.to_shwi ();
4247 
4248   return hwi_nit < 0 ? -1 : hwi_nit;
4249 }
4250 
4251 
4252 /* Sets NIT to an upper bound for the maximum number of executions of the
4253    latch of the LOOP.  If we have no reliable estimate, the function returns
4254    false, otherwise returns true.  */
4255 
4256 bool
4257 max_loop_iterations (struct loop *loop, widest_int *nit)
4258 {
4259   /* When SCEV information is available, try to update loop iterations
4260      estimate.  Otherwise just return whatever we recorded earlier.  */
4261   if (scev_initialized_p ())
4262     estimate_numbers_of_iterations (loop);
4263 
4264   return get_max_loop_iterations (loop, nit);
4265 }
4266 
4267 /* Similar to max_loop_iterations, but returns the estimate only
4268    if it fits into HOST_WIDE_INT.  If this is not the case, or the estimate
4269    on the number of iterations of LOOP could not be derived, returns -1.  */
4270 
4271 HOST_WIDE_INT
4272 max_loop_iterations_int (struct loop *loop)
4273 {
4274   widest_int nit;
4275   HOST_WIDE_INT hwi_nit;
4276 
4277   if (!max_loop_iterations (loop, &nit))
4278     return -1;
4279 
4280   if (!wi::fits_shwi_p (nit))
4281     return -1;
4282   hwi_nit = nit.to_shwi ();
4283 
4284   return hwi_nit < 0 ? -1 : hwi_nit;
4285 }
4286 
4287 /* Sets NIT to a likely upper bound for the maximum number of executions of the
4288    latch of the LOOP.  If we have no reliable estimate, the function returns
4289    false, otherwise returns true.  */
4290 
4291 bool
4292 likely_max_loop_iterations (struct loop *loop, widest_int *nit)
4293 {
4294   /* When SCEV information is available, try to update loop iterations
4295      estimate.  Otherwise just return whatever we recorded earlier.  */
4296   if (scev_initialized_p ())
4297     estimate_numbers_of_iterations (loop);
4298 
4299   return get_likely_max_loop_iterations (loop, nit);
4300 }
4301 
4302 /* Similar to likely_max_loop_iterations, but returns the estimate only
4303    if it fits into HOST_WIDE_INT.  If this is not the case, or the estimate
4304    on the number of iterations of LOOP could not be derived, returns -1.  */
4305 
4306 HOST_WIDE_INT
4307 likely_max_loop_iterations_int (struct loop *loop)
4308 {
4309   widest_int nit;
4310   HOST_WIDE_INT hwi_nit;
4311 
4312   if (!likely_max_loop_iterations (loop, &nit))
4313     return -1;
4314 
4315   if (!wi::fits_shwi_p (nit))
4316     return -1;
4317   hwi_nit = nit.to_shwi ();
4318 
4319   return hwi_nit < 0 ? -1 : hwi_nit;
4320 }
4321 
4322 /* Returns an estimate for the number of executions of statements
4323    in the LOOP.  For statements before the loop exit, this exceeds
4324    the number of executions of the latch by one.  */
4325 
4326 HOST_WIDE_INT
4327 estimated_stmt_executions_int (struct loop *loop)
4328 {
4329   HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4330   HOST_WIDE_INT snit;
4331 
4332   if (nit == -1)
4333     return -1;
4334 
4335   snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4336 
4337   /* If the computation overflows, return -1.  */
4338   return snit < 0 ? -1 : snit;
4339 }
4340 
4341 /* Sets NIT to the maximum number of executions of the latch of the
4342    LOOP, plus one.  If we have no reliable estimate, the function returns
4343    false, otherwise returns true.  */
4344 
4345 bool
4346 max_stmt_executions (struct loop *loop, widest_int *nit)
4347 {
4348   widest_int nit_minus_one;
4349 
4350   if (!max_loop_iterations (loop, nit))
4351     return false;
4352 
4353   nit_minus_one = *nit;
4354 
4355   *nit += 1;
4356 
4357   return wi::gtu_p (*nit, nit_minus_one);
4358 }
4359 
4360 /* Sets NIT to the estimated maximum number of executions of the latch of the
4361    LOOP, plus one.  If we have no likely estimate, the function returns
4362    false, otherwise returns true.  */
4363 
4364 bool
4365 likely_max_stmt_executions (struct loop *loop, widest_int *nit)
4366 {
4367   widest_int nit_minus_one;
4368 
4369   if (!likely_max_loop_iterations (loop, nit))
4370     return false;
4371 
4372   nit_minus_one = *nit;
4373 
4374   *nit += 1;
4375 
4376   return wi::gtu_p (*nit, nit_minus_one);
4377 }
4378 
4379 /* Sets NIT to the estimated number of executions of the latch of the
4380    LOOP, plus one.  If we have no reliable estimate, the function returns
4381    false, otherwise returns true.  */
4382 
4383 bool
4384 estimated_stmt_executions (struct loop *loop, widest_int *nit)
4385 {
4386   widest_int nit_minus_one;
4387 
4388   if (!estimated_loop_iterations (loop, nit))
4389     return false;
4390 
4391   nit_minus_one = *nit;
4392 
4393   *nit += 1;
4394 
4395   return wi::gtu_p (*nit, nit_minus_one);
4396 }
4397 
4398 /* Records estimates on numbers of iterations of loops.  */
4399 
4400 void
4401 estimate_numbers_of_iterations (function *fn)
4402 {
4403   struct loop *loop;
4404 
4405   /* We don't want to issue signed overflow warnings while getting
4406      loop iteration estimates.  */
4407   fold_defer_overflow_warnings ();
4408 
4409   FOR_EACH_LOOP_FN (fn, loop, 0)
4410     estimate_numbers_of_iterations (loop);
4411 
4412   fold_undefer_and_ignore_overflow_warnings ();
4413 }
4414 
4415 /* Returns true if statement S1 dominates statement S2.  */
4416 
4417 bool
4418 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4419 {
4420   basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4421 
4422   if (!bb1
4423       || s1 == s2)
4424     return true;
4425 
4426   if (bb1 == bb2)
4427     {
4428       gimple_stmt_iterator bsi;
4429 
4430       if (gimple_code (s2) == GIMPLE_PHI)
4431 	return false;
4432 
4433       if (gimple_code (s1) == GIMPLE_PHI)
4434 	return true;
4435 
4436       for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4437 	if (gsi_stmt (bsi) == s1)
4438 	  return true;
4439 
4440       return false;
4441     }
4442 
4443   return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4444 }
4445 
4446 /* Returns true when we can prove that the number of executions of
4447    STMT in the loop is at most NITER, according to the bound on
4448    the number of executions of the statement NITER_BOUND->stmt recorded in
4449    NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4450 
4451    ??? This code can become quite a CPU hog - we can have many bounds,
4452    and large basic blocks force stmt_dominates_stmt_p to be queried
4453    many times, so the whole thing is O(n^2) for each
4454    scev_probably_wraps_p invocation (which can be done n times).
4455 
4456    It would make more sense (and give better answers) to remember BB
4457    bounds computed by discover_iteration_bound_by_body_walk.  */
4458 
4459 static bool
4460 n_of_executions_at_most (gimple *stmt,
4461 			 struct nb_iter_bound *niter_bound,
4462 			 tree niter)
4463 {
4464   widest_int bound = niter_bound->bound;
4465   tree nit_type = TREE_TYPE (niter), e;
4466   enum tree_code cmp;
4467 
4468   gcc_assert (TYPE_UNSIGNED (nit_type));
4469 
4470   /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4471      the number of iterations is small.  */
4472   if (!wi::fits_to_tree_p (bound, nit_type))
4473     return false;
4474 
4475   /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4476      times.  This means that:
4477 
4478      -- if NITER_BOUND->is_exit is true, then everything after
4479 	it is executed at most NITER_BOUND->bound times.
4480 
4481      -- If NITER_BOUND->is_exit is false, and we can prove that whenever STMT
4482 	is executed, NITER_BOUND->stmt is executed as well in the same
4483 	iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
4484 
4485 	If we can determine that NITER_BOUND->stmt is always executed
4486 	after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4487 	We conclude this holds if both statements belong to the same
4488 	basic block, STMT comes before NITER_BOUND->stmt, and there are no
4489 	statements with side effects in between.  */
4490 
4491   if (niter_bound->is_exit)
4492     {
4493       if (stmt == niter_bound->stmt
4494 	  || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4495 	return false;
4496       cmp = GE_EXPR;
4497     }
4498   else
4499     {
4500       if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4501 	{
4502           gimple_stmt_iterator bsi;
4503 	  if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4504 	      || gimple_code (stmt) == GIMPLE_PHI
4505 	      || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4506 	    return false;
4507 
4508 	  /* By stmt_dominates_stmt_p we already know that STMT appears
4509 	     before NITER_BOUND->STMT.  Still need to test that the loop
4510 	     cannot be terminated by a side effect in between.  */
4511 	  for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4512 	       gsi_next (&bsi))
4513 	    if (gimple_has_side_effects (gsi_stmt (bsi)))
4514 	       return false;
4515 	  bound += 1;
4516 	  if (bound == 0
4517 	      || !wi::fits_to_tree_p (bound, nit_type))
4518 	    return false;
4519 	}
4520       cmp = GT_EXPR;
4521     }
4522 
4523   e = fold_binary (cmp, boolean_type_node,
4524 		   niter, wide_int_to_tree (nit_type, bound));
4525   return e && integer_nonzerop (e);
4526 }
4527 
4528 /* Returns true if the arithmetic in TYPE can be assumed not to wrap.  */
4529 
4530 bool
4531 nowrap_type_p (tree type)
4532 {
4533   if (ANY_INTEGRAL_TYPE_P (type)
4534       && TYPE_OVERFLOW_UNDEFINED (type))
4535     return true;
4536 
4537   if (POINTER_TYPE_P (type))
4538     return true;
4539 
4540   return false;
4541 }
4542 
4543 /* Return true if we can prove LOOP is exited before the evolution of the
4544    induction variable {BASE, STEP} overflows with respect to its type bound.  */
4545 
4546 static bool
4547 loop_exits_before_overflow (tree base, tree step,
4548 			    gimple *at_stmt, struct loop *loop)
4549 {
4550   widest_int niter;
4551   struct control_iv *civ;
4552   struct nb_iter_bound *bound;
4553   tree e, delta, step_abs, unsigned_base;
4554   tree type = TREE_TYPE (step);
4555   tree unsigned_type, valid_niter;
4556 
4557   /* Don't issue signed overflow warnings.  */
4558   fold_defer_overflow_warnings ();
4559 
4560   /* Compute the number of iterations before we reach the bound of the
4561      type, and verify that the loop is exited before this occurs.  */
4562   unsigned_type = unsigned_type_for (type);
4563   unsigned_base = fold_convert (unsigned_type, base);
4564 
4565   if (tree_int_cst_sign_bit (step))
4566     {
4567       tree extreme = fold_convert (unsigned_type,
4568 				   lower_bound_in_type (type, type));
4569       delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4570       step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4571 			      fold_convert (unsigned_type, step));
4572     }
4573   else
4574     {
4575       tree extreme = fold_convert (unsigned_type,
4576 				   upper_bound_in_type (type, type));
4577       delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4578       step_abs = fold_convert (unsigned_type, step);
4579     }
4580 
4581   valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4582 
4583   estimate_numbers_of_iterations (loop);
4584 
4585   if (max_loop_iterations (loop, &niter)
4586       && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4587       && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4588 			   wide_int_to_tree (TREE_TYPE (valid_niter),
4589 					     niter))) != NULL
4590       && integer_nonzerop (e))
4591     {
4592       fold_undefer_and_ignore_overflow_warnings ();
4593       return true;
4594     }
4595   if (at_stmt)
4596     for (bound = loop->bounds; bound; bound = bound->next)
4597       {
4598 	if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4599 	  {
4600 	    fold_undefer_and_ignore_overflow_warnings ();
4601 	    return true;
4602 	  }
4603       }
4604   fold_undefer_and_ignore_overflow_warnings ();
4605 
4606   /* Try to prove the loop is exited before {base, step} overflows with the
4607      help of the analyzed loop control IVs.  This is done only for IVs with
4608      constant step because otherwise we don't have the information.  */
4609   if (TREE_CODE (step) == INTEGER_CST)
4610     {
4611       for (civ = loop->control_ivs; civ; civ = civ->next)
4612 	{
4613 	  enum tree_code code;
4614 	  tree civ_type = TREE_TYPE (civ->step);
4615 
4616 	  /* We have to consider the type difference because operand_equal_p
4617 	     ignores it for constants.  */
4618 	  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4619 	      || element_precision (type) != element_precision (civ_type))
4620 	    continue;
4621 
4622 	  /* Only consider control IVs with the same step.  */
4623 	  if (!operand_equal_p (step, civ->step, 0))
4624 	    continue;
4625 
4626 	  /* Done proving if this is a no-overflow control IV.  */
4627 	  if (operand_equal_p (base, civ->base, 0))
4628 	    return true;
4629 
4630 	  /* The control IV is recorded after expanding simple operations.
4631 	     Here we expand base and compare it too.  */
4632 	  tree expanded_base = expand_simple_operations (base);
4633 	  if (operand_equal_p (expanded_base, civ->base, 0))
4634 	    return true;
4635 
4636 	  /* If this is a before-stepping control IV, in other words, if we have
4637 
4638 	       {civ_base, step} = {base + step, step}
4639 
4640 	     then, because civ {base + step, step} doesn't overflow during the
4641 	     loop iterations, {base, step} will not overflow if we can prove
4642 	     that the operation "base + step" does not overflow.  Specifically,
4643 	     we try to prove that the conditions below are satisfied:
4644 
4645 	       base <= UPPER_BOUND (type) - step  ;;step > 0
4646 	       base >= LOWER_BOUND (type) - step  ;;step < 0
4647 
4648 	     by proving the reverse conditions are false using the loop's
4649 	     initial condition.  */
4650 	  if (POINTER_TYPE_P (TREE_TYPE (base)))
4651 	    code = POINTER_PLUS_EXPR;
4652 	  else
4653 	    code = PLUS_EXPR;
4654 
4655 	  tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4656 	  tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4657 					       expanded_base, step);
4658 	  if (operand_equal_p (stepped, civ->base, 0)
4659 	      || operand_equal_p (expanded_stepped, civ->base, 0))
4660 	    {
4661 	      tree extreme;
4662 
4663 	      if (tree_int_cst_sign_bit (step))
4664 		{
4665 		  code = LT_EXPR;
4666 		  extreme = lower_bound_in_type (type, type);
4667 		}
4668 	      else
4669 		{
4670 		  code = GT_EXPR;
4671 		  extreme = upper_bound_in_type (type, type);
4672 		}
4673 	      extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4674 	      e = fold_build2 (code, boolean_type_node, base, extreme);
4675 	      e = simplify_using_initial_conditions (loop, e);
4676 	      if (integer_zerop (e))
4677 		return true;
4678 	    }
4679         }
4680     }
4681 
4682   return false;
4683 }
4684 
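/* An illustrative example (not from this file) of the control-IV
   reasoning above: for

     p = base;
     do { p += 4; ... } while (p != end);

   the control IV recorded from the exit test is {base + 4, 4}, i.e. the
   IV after the first step.  The evolution {base, 4} then cannot overflow
   either, provided the operation "base + 4" itself does not; that is
   established by refuting base > UPPER_BOUND (type) - 4 with the loop's
   initial condition.  */
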
4685 /* VAR is a scev variable whose evolution part is the constant STEP; this
4686    function proves that VAR can't overflow by using value range info.  If
4687    VAR's value range is [MIN, MAX], this can be proven by:
4688      MAX + step doesn't overflow    ; if step > 0
4689    or
4690      MIN + step doesn't underflow   ; if step < 0.
4691 
4692    We can only do this if var is computed in every loop iteration, i.e., var's
4693    definition has to dominate the loop latch.  Consider the example below:
4694 
4695      {
4696        unsigned int i;
4697 
4698        <bb 3>:
4699 
4700        <bb 4>:
4701        # RANGE [0, 4294967294] NONZERO 65535
4702        # i_21 = PHI <0(3), i_18(9)>
4703        if (i_21 != 0)
4704 	 goto <bb 6>;
4705        else
4706 	 goto <bb 8>;
4707 
4708        <bb 6>:
4709        # RANGE [0, 65533] NONZERO 65535
4710        _6 = i_21 + 4294967295;
4711        # RANGE [0, 65533] NONZERO 65535
4712        _7 = (long unsigned int) _6;
4713        # RANGE [0, 524264] NONZERO 524280
4714        _8 = _7 * 8;
4715        # PT = nonlocal escaped
4716        _9 = a_14 + _8;
4717        *_9 = 0;
4718 
4719        <bb 8>:
4720        # RANGE [1, 65535] NONZERO 65535
4721        i_18 = i_21 + 1;
4722        if (i_18 >= 65535)
4723 	 goto <bb 10>;
4724        else
4725 	 goto <bb 9>;
4726 
4727        <bb 9>:
4728        goto <bb 4>;
4729 
4730        <bb 10>:
4731        return;
4732      }
4733 
4734    VAR _6 doesn't overflow, but only under the pre-condition (i_21 != 0), so
4735    here we can't use _6 to prove no-overflow for _7.  In fact, var _7 takes
4736    the value sequence (4294967295, 0, 1, ..., 65533) during the loop's
4737    lifetime, rather than (4294967295, 4294967296, ...).  */
4738 
4739 static bool
4740 scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
4741 {
4742   tree type;
4743   wide_int minv, maxv, diff, step_wi;
4744   enum value_range_kind rtype;
4745 
4746   if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4747     return false;
4748 
4749   /* Check if VAR is evaluated in every loop iteration.  That's not the case
4750      if VAR is a default definition or does not dominate the loop's latch.  */
4751   basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4752   if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4753     return false;
4754 
4755   rtype = get_range_info (var, &minv, &maxv);
4756   if (rtype != VR_RANGE)
4757     return false;
4758 
4759   /* VAR is a scev whose evolution part is STEP and whose value range info
4760      is [MIN, MAX]; we can prove that it does not overflow under the conditions:
4761 
4762        type_MAX - MAX >= step   ; if step > 0
4763        MIN - type_MIN >= |step| ; if step < 0.
4764 
4765      Otherwise VAR would have to take a value outside of its value range.  */
4766   step_wi = wi::to_wide (step);
4767   type = TREE_TYPE (var);
4768   if (tree_int_cst_sign_bit (step))
4769     {
4770       diff = minv - wi::to_wide (lower_bound_in_type (type, type));
4771       step_wi = - step_wi;
4772     }
4773   else
4774     diff = wi::to_wide (upper_bound_in_type (type, type)) - maxv;
4775 
4776   return (wi::geu_p (diff, step_wi));
4777 }
4778 
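/* A numeric illustration of the check above: for an unsigned int VAR
   with value range [0, 65533] and STEP 1, type_MAX - MAX is
   4294967295 - 65533 >= 1, so VAR + 1 stays representable and the scev
   provably cannot overflow.  With STEP -1 the test would instead require
   MIN - type_MIN >= 1, which fails here because MIN is 0.  */
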
4779 /* Return false only when the induction variable BASE + STEP * I is
4780    known not to overflow: i.e. when the number of iterations is small
4781    enough with respect to the step and initial condition in order to
4782    keep the evolution confined in TYPEs bounds.  Return true when the
4783    iv is known to overflow or when the property is not computable.
4784 
4785    USE_OVERFLOW_SEMANTICS is true if this function should assume that
4786    the rules for overflow of the given language apply (e.g., that signed
4787    arithmetic in C does not overflow).
4788 
4789    If VAR is an SSA variable, this function also returns false if VAR can
4790    be proven not to overflow using value range info.  */
4791 
4792 bool
4793 scev_probably_wraps_p (tree var, tree base, tree step,
4794 		       gimple *at_stmt, struct loop *loop,
4795 		       bool use_overflow_semantics)
4796 {
4797   /* FIXME: We really need something like
4798      http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4799 
4800      We used to test for the following situation that frequently appears
4801      during address arithmetic:
4802 
4803        D.1621_13 = (long unsigned intD.4) D.1620_12;
4804        D.1622_14 = D.1621_13 * 8;
4805        D.1623_15 = (doubleD.29 *) D.1622_14;
4806 
4807      And derived that the sequence corresponding to D_14
4808      can be proved to not wrap because it is used for computing a
4809      memory access; however, this is not really the case -- for example,
4810      if D_12 = (unsigned char) [254,+,1], then D_14 has values
4811      2032, 2040, 0, 8, ..., but the code is still legal.  */
4812 
4813   if (chrec_contains_undetermined (base)
4814       || chrec_contains_undetermined (step))
4815     return true;
4816 
4817   if (integer_zerop (step))
4818     return false;
4819 
4820   /* If we can use the fact that signed and pointer arithmetic does not
4821      wrap, we are done.  */
4822   if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4823     return false;
4824 
4825   /* To be able to use estimates on number of iterations of the loop,
4826      we must have an upper bound on the absolute value of the step.  */
4827   if (TREE_CODE (step) != INTEGER_CST)
4828     return true;
4829 
4830   /* Check if var can be proven not overflow with value range info.  */
4831   if (var && TREE_CODE (var) == SSA_NAME
4832       && scev_var_range_cant_overflow (var, step, loop))
4833     return false;
4834 
4835   if (loop_exits_before_overflow (base, step, at_stmt, loop))
4836     return false;
4837 
4838   /* At this point we still don't have a proof that the iv does not
4839      overflow: give up.  */
4840   return true;
4841 }
4842 
4843 /* Frees the information on upper bounds on numbers of iterations of LOOP.  */
4844 
4845 void
4846 free_numbers_of_iterations_estimates (struct loop *loop)
4847 {
4848   struct control_iv *civ;
4849   struct nb_iter_bound *bound;
4850 
4851   loop->nb_iterations = NULL;
4852   loop->estimate_state = EST_NOT_COMPUTED;
4853   for (bound = loop->bounds; bound;)
4854     {
4855       struct nb_iter_bound *next = bound->next;
4856       ggc_free (bound);
4857       bound = next;
4858     }
4859   loop->bounds = NULL;
4860 
4861   for (civ = loop->control_ivs; civ;)
4862     {
4863       struct control_iv *next = civ->next;
4864       ggc_free (civ);
4865       civ = next;
4866     }
4867   loop->control_ivs = NULL;
4868 }
4869 
4870 /* Frees the information on upper bounds on numbers of iterations of loops.  */
4871 
4872 void
4873 free_numbers_of_iterations_estimates (function *fn)
4874 {
4875   struct loop *loop;
4876 
4877   FOR_EACH_LOOP_FN (fn, loop, 0)
4878     free_numbers_of_iterations_estimates (loop);
4879 }
4880 
4881 /* Substitute value VAL for ssa name NAME inside expressions held
4882    at LOOP.  */
4883 
4884 void
4885 substitute_in_loop_info (struct loop *loop, tree name, tree val)
4886 {
4887   loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
4888 }
4889